diff --git a/.github/workflows/update-config-docs.yml b/.github/workflows/update-config-docs.yml new file mode 100644 index 0000000000..477615dd8b --- /dev/null +++ b/.github/workflows/update-config-docs.yml @@ -0,0 +1,62 @@ +# This file is part of Dependency-Track. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) OWASP Foundation. All Rights Reserved. +name: Update Config Documentation + +on: + push: + branches: + - master + paths: + - ".github/workflows/update-config-docs.yml" + - "dev/scripts/config-docs.md.peb" + - "src/main/resources/application.properties" + workflow_dispatch: { } + +permissions: { } + +jobs: + generate-docs: + name: Generate Documentation + runs-on: ubuntu-latest + timeout-minutes: 5 + permissions: + contents: write + pull-requests: write + if: "${{ github.repository_owner == 'DependencyTrack' }}" + steps: + - name: Checkout Repository + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6 + - name: Generate Documentation + uses: jbangdev/jbang-action@74844c9631cf1f35650427323e9bb3ffa41dfbd9 # tag=v0.115.0 + with: + trust: https://github.com/DependencyTrack/jbang-catalog + script: gen-config-docs@DependencyTrack + scriptargs: >- + --template ./dev/scripts/config-docs.md.peb + --output docs/_docs/getting-started/configuration-apiserver.md + ./src/main/resources/application.properties + - name: Create Pull Request + uses: peter-evans/create-pull-request@6d6857d36972b65feb161a90e484f2984215f83e # tag=v6.0.5 + with: + add-paths: "docs/_docs/getting-started/configuration-apiserver.md" + branch: update-config-docs + body: "Updates configuration documentation." + commit-message: Update config docs + delete-branch: true + labels: documentation + signoff: true + title: Update config docs \ No newline at end of file diff --git a/dev/scripts/config-docs.md.peb b/dev/scripts/config-docs.md.peb new file mode 100644 index 0000000000..35e4d6b7c5 --- /dev/null +++ b/dev/scripts/config-docs.md.peb @@ -0,0 +1,163 @@ +--- +title: Configuration - API Server +category: Getting Started +chapter: 1 +order: 7 +--- + + +The central configuration file `application.properties` resides in the classpath of the WAR by default. +This configuration file controls many performance tuning parameters but is most useful for defining +optional external database sources, directory services (LDAP), and proxy settings. + +For containerized deployments, the properties defined in the configuration file can also be specified +as environment variables. All environment variables are upper case with periods (.) replaced with underscores (_). +Refer to the [Docker instructions]({{ "{{ site.baseurl }}{% link _docs/getting-started/deploy-docker.md %}" }}) for +configuration examples using environment variables. + +Dependency-Track administrators are highly encouraged to create a copy of this file in the +Dependency-Track data directory and customize it prior to deploying to production. 
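+
+As a minimal illustration of the environment variable naming convention described above, the property
+`alpine.database.url` maps to the variable `ALPINE_DATABASE_URL` (both appear in the reference below).
+The JDBC URL is the example value from the reference, and the container image name is shown for illustration
+only; refer to the Docker instructions linked above for complete deployment examples:
+
+```bash
+# application.properties:    alpine.database.url=jdbc:postgresql://localhost:5432/dtrack
+# Environment variable:      ALPINE_DATABASE_URL
+docker run -e ALPINE_DATABASE_URL="jdbc:postgresql://localhost:5432/dtrack" dependencytrack/apiserver
+```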
+ + +> The default embedded H2 database is designed to quickly evaluate and experiment with Dependency-Track. +> Do not use the embedded H2 database in production environments. +> +> See: [Database Support]({{ "{{ site.baseurl }}{% link _docs/getting-started/database-support.md %}" }}). + + +To start Dependency-Track using custom configuration, add the system property +`alpine.application.properties` when executing. For example: + +```bash +-Dalpine.application.properties=~/.dependency-track/application.properties +``` + +### Proxy Configuration + +Proxy support can be configured in one of two ways: using the proxy settings defined +in `application.properties`, or through environment variables. By default, the system +will attempt to read the `https_proxy`, `http_proxy` and `no_proxy` environment variables. If any +of these are set, Dependency-Track will use them automatically. + +`no_proxy` specifies URLs that should be excluded from proxying. +This can be a comma-separated list of hostnames, domain names, or a mixture of both. +If a port number is specified for a URL, only the requests with that port number to that URL will be excluded from proxying. +`no_proxy` can also be set to a single asterisk ('*') to match all hosts. + +Dependency-Track supports proxies that require BASIC, DIGEST, and NTLM authentication. + +### Logging Levels + +Logging levels (INFO, WARN, ERROR, DEBUG, TRACE) can be specified by passing the level +to the `dependencyTrack.logging.level` system property on startup. For example, the +following command will start Dependency-Track (embedded) with DEBUG logging: + +```bash +java -Xmx4G -DdependencyTrack.logging.level=DEBUG -jar dependency-track-embedded.war +``` + +For Docker deployments, simply set the `LOGGING_LEVEL` environment variable to one of +INFO, WARN, ERROR, DEBUG, or TRACE. + +### Secret Key + +Dependency-Track will encrypt certain confidential data (e.g. access tokens for external service providers) with AES256 +prior to storing it in the database. The secret key used for encrypting and decrypting will be automatically generated +when Dependency-Track starts for the first time, and is placed in `<alpine.data.directory>/keys/secret.key` +(`/data/.dependency-track/keys/secret.key` for containerized deployments). + +Starting with Dependency-Track 4.7, it is possible to change the location of the secret key via the `alpine.secret.key.path` +property. This makes it possible, for example, to mount Kubernetes secrets into a custom location. + +Secret keys may be generated manually upfront instead of relying on Dependency-Track to do it. This can be achieved +with OpenSSL like this: + +```shell +openssl rand 32 > secret.key +``` + +> Note that the default key format has changed in version 4.7. While existing keys using the old format will continue +> to work, keys for new instances will be generated in the new format. 
Old keys may be converted using the following +> [JShell](https://docs.oracle.com/en/java/javase/17/jshell/introduction-jshell.html) script: +> ```java +> import java.io.ObjectInputStream; +> import java.nio.file.Files; +> import java.nio.file.Paths; +> import javax.crypto.SecretKey; +> String inputFilePath = System.getProperty("secret.key.input") +> String outputFilePath = System.getProperty("secret.key.output"); +> SecretKey secretKey = null; +> System.out.println("Reading old key from " + inputFilePath); +> try (var fis = Files.newInputStream(Paths.get(inputFilePath)); +> var ois = new ObjectInputStream(fis)) { +> secretKey = (SecretKey) ois.readObject(); +> } +> System.out.println("Writing new key to " + outputFilePath); +> try (var fos = Files.newOutputStream(Paths.get(outputFilePath))) { +> fos.write(secretKey.getEncoded()); +> } +> /exit +> ``` +> Example execution: +> ```shell +> jshell -R"-Dsecret.key.input=$HOME/.dependency-track/keys/secret.key" -R"-Dsecret.key.output=secret.key.new" convert-key.jsh +> ``` + +--- + +## Reference + + + +{% for entry in propertiesByCategory %} +### {{ entry.key }} + +{% for property in entry.value -%} +#### {{ property.name }} + +{{ property.description | replace({' ': ' +'}) }} + + + + + + + + + + + + {% if property.validValues -%} + + + + + {% endif -%} + + + + + {% if property.example -%} + + + + + {% endif -%} + + + + + +
Required{{ property.required ? "Yes" : "No" }}
Type{{ property.type }}
Valid Values{{ property.validValues }}
Default{{ property.defaultValue }}
Example{{ property.example }}
ENV{{ property.env }}
+ +{% endfor %} +{% endfor %} \ No newline at end of file diff --git a/docs/Gemfile b/docs/Gemfile index 1eff7bb71a..b71946fa5d 100644 --- a/docs/Gemfile +++ b/docs/Gemfile @@ -11,4 +11,6 @@ group :jekyll_plugins do gem 'jekyll-sitemap', '1.4.0' gem 'jekyll-redirect-from', '0.16.0' gem 'jekyll-mermaid', '1.0.0' + # https://github.com/ffi/ffi/issues/1103#issuecomment-2186974923 + gem 'ffi', '< 1.17.0' end diff --git a/docs/_docs/datasources/snyk.md b/docs/_docs/datasources/snyk.md index 81be386869..cdf6e6513d 100644 --- a/docs/_docs/datasources/snyk.md +++ b/docs/_docs/datasources/snyk.md @@ -93,7 +93,7 @@ When evaluating the severity of a vulnerability, it's important to note that the **NOTE:** For Beta version, user can select either from NVD or SNYK to prioritize the cvss vectors. [Authentication for API]: https://docs.snyk.io/snyk-api-info/authentication-for-api -[Configuration]: {{ site.baseurl }}{% link _docs/getting-started/configuration.md %} +[Configuration]: {{ site.baseurl }}{% link _docs/getting-started/configuration-apiserver.md %} [Finding the Snyk ID and internal name of an Organization]: https://docs.snyk.io/products/snyk-code/cli-for-snyk-code/before-you-start-set-the-organization-for-the-cli-tests/finding-the-snyk-id-and-internal-name-of-an-organization [Monitoring]: {{ site.baseurl }}{% link _docs/getting-started/monitoring.md %}#retries [REST API documentation]: https://apidocs.snyk.io diff --git a/docs/_docs/getting-started/configuration-apiserver.md b/docs/_docs/getting-started/configuration-apiserver.md new file mode 100644 index 0000000000..d64d4fce5f --- /dev/null +++ b/docs/_docs/getting-started/configuration-apiserver.md @@ -0,0 +1,2629 @@ +--- +title: Configuration - API Server +category: Getting Started +chapter: 1 +order: 7 +--- + + +The central configuration file `application.properties` resides in the classpath of the WAR by default. +This configuration file controls many performance tuning parameters but is most useful for defining +optional external database sources, directory services (LDAP), and proxy settings. + +For containerized deployments, the properties defined in the configuration file can also be specified +as environment variables. All environment variables are upper case with periods (.) replaced with underscores (_). +Refer to the [Docker instructions]({{ site.baseurl }}{% link _docs/getting-started/deploy-docker.md %}) for +configuration examples using environment variables. + +Dependency-Track administrators are highly encouraged to create a copy of this file in the +Dependency-Track data directory and customize it prior to deploying to production. + + +> The default embedded H2 database is designed to quickly evaluate and experiment with Dependency-Track. +> Do not use the embedded H2 database in production environments. +> +> See: [Database Support]({{ site.baseurl }}{% link _docs/getting-started/database-support.md %}). + + +To start Dependency-Track using custom configuration, add the system property +`alpine.application.properties` when executing. For example: + +```bash +-Dalpine.application.properties=~/.dependency-track/application.properties +``` + +### Proxy Configuration + +Proxy support can be configured in one of two ways, using the proxy settings defined +in `application.properties` or through environment variables. By default, the system +will attempt to read the `https_proxy`, `http_proxy` and `no_proxy` environment variables. If one +of these are set, Dependency-Track will use them automatically. 
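+
+As an illustrative sketch (the proxy host, port, and exclusion list are placeholder values mirroring the examples
+for `alpine.http.proxy.address`, `alpine.http.proxy.port`, and `alpine.no.proxy` in the reference below), the
+variables can be exported in the environment that launches the API server:
+
+```bash
+export http_proxy=http://proxy.example.com:8888
+export https_proxy=http://proxy.example.com:8888
+export no_proxy=localhost,127.0.0.1
+```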
+ +`no_proxy` specifies URLs that should be excluded from proxying. +This can be a comma-separated list of hostnames, domain names, or a mixture of both. +If a port number is specified for a URL, only the requests with that port number to that URL will be excluded from proxying. +`no_proxy` can also set to be a single asterisk ('*') to match all hosts. + +Dependency-Track supports proxies that require BASIC, DIGEST, and NTLM authentication. + +### Logging Levels + +Logging levels (INFO, WARN, ERROR, DEBUG, TRACE) can be specified by passing the level +to the `dependencyTrack.logging.level` system property on startup. For example, the +following command will start Dependency-Track (embedded) with DEBUG logging: + +```bash +java -Xmx4G -DdependencyTrack.logging.level=DEBUG -jar dependency-track-embedded.war +``` + +For Docker deployments, simply set the `LOGGING_LEVEL` environment variable to one of +INFO, WARN, ERROR, DEBUG, or TRACE. + +### Secret Key + +Dependency-Track will encrypt certain confidential data (e.g. access tokens for external service providers) with AES256 +prior to storing it in the database. The secret key used for encrypting and decrypting will be automatically generated +when Dependency-Track starts for the first time, and is placed in `/keys/secret.key` +(`/data/.dependency-track/keys/secret.key` for containerized deployments). + +Starting with Dependency-Track 4.7, it is possible to change the location of the secret key via the `alpine.secret.key.path` +property. This makes it possible to use Kubernetes secrets for example, to mount secrets into the custom location. + +Secret keys may be generated manually upfront instead of relying on Dependency-Track to do it. This can be achieved +with OpenSSL like this: + +```shell +openssl rand 32 > secret.key +``` + +> Note that the default key format has changed in version 4.7. While existing keys using the old format will continue +> to work, keys for new instances will be generated in the new format. Old keys may be converted using the following +> [JShell](https://docs.oracle.com/en/java/javase/17/jshell/introduction-jshell.html) script: +> ```java +> import java.io.ObjectInputStream; +> import java.nio.file.Files; +> import java.nio.file.Paths; +> import javax.crypto.SecretKey; +> String inputFilePath = System.getProperty("secret.key.input") +> String outputFilePath = System.getProperty("secret.key.output"); +> SecretKey secretKey = null; +> System.out.println("Reading old key from " + inputFilePath); +> try (var fis = Files.newInputStream(Paths.get(inputFilePath)); +> var ois = new ObjectInputStream(fis)) { +> secretKey = (SecretKey) ois.readObject(); +> } +> System.out.println("Writing new key to " + outputFilePath); +> try (var fos = Files.newOutputStream(Paths.get(outputFilePath))) { +> fos.write(secretKey.getEncoded()); +> } +> /exit +> ``` +> Example execution: +> ```shell +> jshell -R"-Dsecret.key.input=$HOME/.dependency-track/keys/secret.key" -R"-Dsecret.key.output=secret.key.new" convert-key.jsh +> ``` + +--- + +## Reference + + + + +### Analyzers + +#### ossindex.retry.backoff.max.duration.ms + +Defines the maximum duration in milliseconds to wait before attempting the next retry. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default600000
ENVOSSINDEX_RETRY_BACKOFF_MAX_DURATION_MS
+ +#### ossindex.retry.backoff.multiplier + +Defines the multiplier for the exponential backoff retry strategy. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default2
ENVOSSINDEX_RETRY_BACKOFF_MULTIPLIER
+ +#### ossindex.retry.max.attempts + +Defines the maximum amount of retries to perform for each request to the OSS Index API. +Retries are performed with increasing delays between attempts using an exponential backoff strategy. +The initial duration defined in ossindex.retry.backoff.initial.duration.ms will be +multiplied with the value defined in [`ossindex.retry.backoff.multiplier`](#ossindexretrybackoffmultiplier) after each retry attempt, +until the maximum duration defined in [`ossindex.retry.backoff.max.duration.ms`](#ossindexretrybackoffmaxdurationms) is reached. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default10
ENVOSSINDEX_RETRY_MAX_ATTEMPTS
+ +#### repo.meta.analyzer.cacheStampedeBlocker.enabled + +This flag activates the cache stampede blocker for the repository meta analyzer, allowing it to handle high concurrency workloads when there +is a high ratio of duplicate components, which can cause unnecessary external calls and index violations on PUBLIC.REPOSITORY_META_COMPONENT_COMPOUND_IDX during cache population. +The default value is true. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeboolean
Defaulttrue
ENVREPO_META_ANALYZER_CACHESTAMPEDEBLOCKER_ENABLED
+ +#### repo.meta.analyzer.cacheStampedeBlocker.lock.buckets + +The cache stampede blocker uses a striped (partitioned) lock to distribute locks across keys. +This parameter defines the number of buckets used by the striped lock. The lock used for a given key is derived from the key's hashcode and the number of buckets. +This value should be set according to your portfolio profile (i.e. number of projects and proportion of duplicates). +Too few buckets and an unbalanced portfolio (i.e. a high number of PURLs going to the same partition) can lead to forced serialization. +Too many buckets can lead to unnecessary memory usage. Note that the memory footprint of the Striped Lock is 32 * (nbOfBuckets * 1) Bytes. +A value between 1_000 (~32 KB) and 1_000_000 (~32 MB) seems reasonable. +The default value is 1000 (~32KB). + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default1000
ENVREPO_META_ANALYZER_CACHESTAMPEDEBLOCKER_LOCK_BUCKETS
+ +#### repo.meta.analyzer.cacheStampedeBlocker.max.attempts + +Defines the maximum number of attempts used by Resilience4J for exponential backoff retry regarding repo meta analyzer cache loading per key. +The default value is 10. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default10
ENVREPO_META_ANALYZER_CACHESTAMPEDEBLOCKER_MAX_ATTEMPTS
+ +#### snyk.retry.backoff.initial.duration.ms + +Defines the duration in milliseconds to wait before attempting the first retry. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default1000
ENVSNYK_RETRY_BACKOFF_INITIAL_DURATION_MS
+ +#### snyk.retry.backoff.max.duration.ms + +Defines the maximum duration in milliseconds to wait before attempting the next retry. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default60000
ENVSNYK_RETRY_BACKOFF_MAX_DURATION_MS
+ +#### snyk.retry.backoff.multiplier + +Defines the multiplier for the exponential backoff retry strategy. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default2
ENVSNYK_RETRY_BACKOFF_MULTIPLIER
+ +#### snyk.retry.max.attempts + +Defines the maximum amount of retries to perform for each request to the Snyk API. +Retries are performed with increasing delays between attempts using an exponential backoff strategy. +The initial duration defined in [`snyk.retry.backoff.initial.duration.ms`](#snykretrybackoffinitialdurationms) will be +multiplied with the value defined in [`snyk.retry.backoff.multiplier`](#snykretrybackoffmultiplier) after each retry attempt, +until the maximum duration defined in [`snyk.retry.backoff.max.duration.ms`](#snykretrybackoffmaxdurationms) is reached. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default10
ENVSNYK_RETRY_MAX_ATTEMPTS
+ +#### snyk.thread.pool.size + +Defines the size of the thread pool used to perform requests to the Snyk API in parallel. +The thread pool will only be used when Snyk integration is enabled. +A high number may result in quicker exceeding of API rate limits, +while a number that is too low may result in vulnerability analyses taking longer. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default10
ENVSNYK_THREAD_POOL_SIZE
+ +#### trivy.retry.backoff.initial.duration.ms + +Defines the duration in milliseconds to wait before attempting the first retry. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default1000
ENVTRIVY_RETRY_BACKOFF_INITIAL_DURATION_MS
+ +#### trivy.retry.backoff.max.duration.ms + +Defines the maximum duration in milliseconds to wait before attempting the next retry. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default60000
ENVTRIVY_RETRY_BACKOFF_MAX_DURATION_MS
+ +#### trivy.retry.backoff.multiplier + +Defines the multiplier for the exponential backoff retry strategy. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default2
ENVTRIVY_RETRY_BACKOFF_MULTIPLIER
+ +#### trivy.retry.max.attempts + +Defines the maximum amount of retries to perform for each request to the Trivy API. +Retries are performed with increasing delays between attempts using an exponential backoff strategy. +The initial duration defined in [`trivy.retry.backoff.initial.duration.ms`](#trivyretrybackoffinitialdurationms) will be +multiplied with the value defined in [`trivy.retry.backoff.multiplier`](#trivyretrybackoffmultiplier) after each retry attempt, +until the maximum duration defined in [`trivy.retry.backoff.max.duration.ms`](#trivyretrybackoffmaxdurationms) is reached. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default10
ENVTRIVY_RETRY_MAX_ATTEMPTS
+ + + +### CORS + +#### alpine.cors.allow.credentials + +Controls the content of the `Access-Control-Allow-Credentials` response header. +
+Has no effect when [`alpine.cors.enabled`](#alpinecorsenabled) is `false`. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeboolean
Defaulttrue
ENVALPINE_CORS_ALLOW_CREDENTIALS
+ +#### alpine.cors.allow.headers + +Controls the content of the `Access-Control-Allow-Headers` response header. +
+Has no effect when [`alpine.cors.enabled`](#alpinecorsenabled) is `false`. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
DefaultOrigin, Content-Type, Authorization, X-Requested-With, Content-Length, Accept, Origin, X-Api-Key, X-Total-Count, *
ENVALPINE_CORS_ALLOW_HEADERS
+ +#### alpine.cors.allow.methods + +Controls the content of the `Access-Control-Allow-Methods` response header. +
+Has no effect when [`alpine.cors.enabled`](#alpinecorsenabled) is `false`. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
DefaultGET POST PUT DELETE OPTIONS
ENVALPINE_CORS_ALLOW_METHODS
+ +#### alpine.cors.allow.origin + +Controls the content of the `Access-Control-Allow-Origin` response header. +
+Has no effect when [`alpine.cors.enabled`](#alpinecorsenabled) is `false`. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Default*
ENVALPINE_CORS_ALLOW_ORIGIN
+ +#### alpine.cors.enabled + +Defines whether [Cross Origin Resource Sharing](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) +(CORS) headers shall be included in REST API responses. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeboolean
Defaulttrue
ENVALPINE_CORS_ENABLED
+ +#### alpine.cors.expose.headers + +Controls the content of the `Access-Control-Expose-Headers` response header. +
+Has no effect when [`alpine.cors.enabled`](#alpinecorsenabled) is `false`. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
DefaultOrigin, Content-Type, Authorization, X-Requested-With, Content-Length, Accept, Origin, X-Api-Key, X-Total-Count
ENVALPINE_CORS_EXPOSE_HEADERS
+ +#### alpine.cors.max.age + +Controls the content of the `Access-Control-Max-Age` response header. +
+Has no effect when [`alpine.cors.enabled`](#alpinecorsenabled) is `false`. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default3600
ENVALPINE_CORS_MAX_AGE
+ + + +### Database + +#### alpine.database.mode + +Defines the database mode of operation. +In server mode, the database will listen for connections from remote +hosts. In embedded mode, the system will be more secure and slightly +faster. External mode should be used when utilizing an external +database server (i.e. mysql, postgresql, etc). + + + + + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeenum
Valid Values[server, embedded, external]
Defaultembedded
ENVALPINE_DATABASE_MODE
+ +#### alpine.database.password + +Specifies the password to use when authenticating to the database. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultnull
ENVALPINE_DATABASE_PASSWORD
+ +#### alpine.database.password.file + +Specifies a path to the file holding the database password. +To be used as alternative to [`alpine.database.password`](#alpinedatabasepassword). + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultnull
ENVALPINE_DATABASE_PASSWORD_FILE
+ +#### alpine.database.pool.enabled + +Specifies if the database connection pool is enabled. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeboolean
Defaulttrue
ENVALPINE_DATABASE_POOL_ENABLED
+ +#### alpine.database.pool.idle.timeout + +This property controls the maximum amount of time that a connection is +allowed to sit idle in the pool. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default300000
ENVALPINE_DATABASE_POOL_IDLE_TIMEOUT
+ +#### alpine.database.pool.max.lifetime + +This property controls the maximum lifetime of a connection in the pool. +An in-use connection will never be retired, only when it is closed will +it then be removed. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default600000
ENVALPINE_DATABASE_POOL_MAX_LIFETIME
+ +#### alpine.database.pool.max.size + +This property controls the maximum size that the pool is allowed to reach, +including both idle and in-use connections. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default20
ENVALPINE_DATABASE_POOL_MAX_SIZE
+ +#### alpine.database.pool.min.idle + +This property controls the minimum number of idle connections in the pool. +This value should be equal to or less than [`alpine.database.pool.max.size`](#alpinedatabasepoolmaxsize). +Warning: If the value is less than [`alpine.database.pool.max.size`](#alpinedatabasepoolmaxsize), +[`alpine.database.pool.idle.timeout`](#alpinedatabasepoolidletimeout) will have no effect. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default10
ENVALPINE_DATABASE_POOL_MIN_IDLE
+ +#### alpine.database.pool.nontx.idle.timeout + +Overwrite [`alpine.database.pool.idle.timeout`](#alpinedatabasepoolidletimeout) for the non-transactional connection pool. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Defaultnull
ENVALPINE_DATABASE_POOL_NONTX_IDLE_TIMEOUT
+ +#### alpine.database.pool.nontx.max.lifetime + +Overwrite [`alpine.database.pool.max.lifetime`](#alpinedatabasepoolmaxlifetime) for the non-transactional connection pool. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Defaultnull
ENVALPINE_DATABASE_POOL_NONTX_MAX_LIFETIME
+ +#### alpine.database.pool.nontx.max.size + +Overwrite [`alpine.database.pool.max.size`](#alpinedatabasepoolmaxsize) for the non-transactional connection pool. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Defaultnull
ENVALPINE_DATABASE_POOL_NONTX_MAX_SIZE
+ +#### alpine.database.pool.nontx.min.idle + +Overwrite [`alpine.database.pool.min.idle`](#alpinedatabasepoolminidle) for the non-transactional connection pool. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Defaultnull
ENVALPINE_DATABASE_POOL_NONTX_MIN_IDLE
+ +#### alpine.database.pool.tx.idle.timeout + +Overwrite [`alpine.database.pool.idle.timeout`](#alpinedatabasepoolidletimeout) for the transactional connection pool. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Defaultnull
ENVALPINE_DATABASE_POOL_TX_IDLE_TIMEOUT
+ +#### alpine.database.pool.tx.max.lifetime + +Overwrite [`alpine.database.pool.max.lifetime`](#alpinedatabasepoolmaxlifetime) for the transactional connection pool. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Defaultnull
ENVALPINE_DATABASE_POOL_TX_MAX_LIFETIME
+ +#### alpine.database.pool.tx.max.size + +Overwrite [`alpine.database.pool.max.size`](#alpinedatabasepoolmaxsize) for the transactional connection pool. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Defaultnull
ENVALPINE_DATABASE_POOL_TX_MAX_SIZE
+ +#### alpine.database.pool.tx.min.idle + +Overwrite [`alpine.database.pool.min.idle`](#alpinedatabasepoolminidle) for the transactional connection pool. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Defaultnull
ENVALPINE_DATABASE_POOL_TX_MIN_IDLE
+ +#### alpine.database.port + +Defines the TCP port to use when [`alpine.database.mode`](#alpinedatabasemode) is set to 'server'. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default9092
ENVALPINE_DATABASE_PORT
+ +#### alpine.database.url + +Specifies the JDBC URL to use when connecting to the database. + + + + + + + + + + + + + + + + + + + + + + + + + +
RequiredYes
Typestring
Defaultjdbc:h2:~/.dependency-track/db;DB_CLOSE_ON_EXIT=FALSE
Examplejdbc:postgresql://localhost:5432/dtrack
ENVALPINE_DATABASE_URL
+ +#### alpine.database.username + +Specifies the username to use when authenticating to the database. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultsa
ENVALPINE_DATABASE_USERNAME
+ +#### alpine.datanucleus.cache.level2.type + +Controls the 2nd level cache type used by DataNucleus, the Object Relational Mapper (ORM). +See . +Setting this property to "none" may help in reducing the memory footprint of Dependency-Track, +but has the potential to slow down database operations. +Size of the cache may be monitored through the "datanucleus_cache_second_level_entries" metric, +refer to for details. + + + + + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeenum
Valid Values[soft, weak, none]
Defaultsoft
ENVALPINE_DATANUCLEUS_CACHE_LEVEL2_TYPE
+ + + +### General + +#### alpine.api.key.prefix + +Defines the prefix to be used for API keys. A maximum prefix length of 251 +characters is supported. The prefix may also be left empty. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultodt_
ENVALPINE_API_KEY_PREFIX
+ +#### alpine.bcrypt.rounds + +Specifies the number of bcrypt rounds to use when hashing a user's password. +The higher the number the more secure the password, at the expense of +hardware resources and additional time to generate the hash. + + + + + + + + + + + + + + + + + + + + + +
RequiredYes
Typeinteger
Default14
ENVALPINE_BCRYPT_ROUNDS
+ +#### alpine.data.directory + +Defines the path to the data directory. This directory will hold logs, +keys, and any database or index files along with application-specific +files or directories. + + + + + + + + + + + + + + + + + + + + + +
RequiredYes
Typestring
Default~/.dependency-track
ENVALPINE_DATA_DIRECTORY
+ +#### alpine.private.key.path + +Defines the path to the private key of the public-private key pair. +The key will be generated upon first startup if it does not exist. +The key pair is currently not used by Dependency-Track. + + + + + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Default${alpine.data.directory}/keys/private.key
Example/var/run/secrets/private.key
ENVALPINE_PRIVATE_KEY_PATH
+ +#### alpine.public.key.path + +Defines the path to the public key of the public-private key pair. +The key will be generated upon first startup if it does not exist. +The key pair is currently not used by Dependency-Track. + + + + + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Default${alpine.data.directory}/keys/public.key
Example/var/run/secrets/public.key
ENVALPINE_PUBLIC_KEY_PATH
+ +#### alpine.secret.key.path + +Defines the path to the secret key to be used for data encryption and decryption. +The key will be generated upon first startup if it does not exist. + + + + + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Default${alpine.data.directory}/keys/secret.key
Example/var/run/secrets/secret.key
ENVALPINE_SECRET_KEY_PATH
+ +#### system.requirement.check.enabled + +Defines whether the system requirement check is enabled. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeboolean
Defaulttrue
ENVSYSTEM_REQUIREMENT_CHECK_ENABLED
+ + + +### HTTP + +#### alpine.http.proxy.address + +HTTP proxy address. If set, then [`alpine.http.proxy.port`](#alpinehttpproxyport) must be set too. + + + + + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultnull
Exampleproxy.example.com
ENVALPINE_HTTP_PROXY_ADDRESS
+ +#### alpine.http.proxy.password + + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultnull
ENVALPINE_HTTP_PROXY_PASSWORD
+ +#### alpine.http.proxy.port + + + + + + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Defaultnull
Example8888
ENVALPINE_HTTP_PROXY_PORT
+ +#### alpine.http.proxy.username + + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultnull
ENVALPINE_HTTP_PROXY_USERNAME
+ +#### alpine.http.timeout.connection + +Defines the connection timeout in seconds for outbound HTTP connections. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default30
ENVALPINE_HTTP_TIMEOUT_CONNECTION
+ +#### alpine.http.timeout.pool + +Defines the request timeout in seconds for outbound HTTP connections. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default60
ENVALPINE_HTTP_TIMEOUT_POOL
+ +#### alpine.http.timeout.socket + +Defines the socket / read timeout in seconds for outbound HTTP connections. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeinteger
Default30
ENVALPINE_HTTP_TIMEOUT_SOCKET
+ +#### alpine.no.proxy + + + + + + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultnull
Examplelocalhost,127.0.0.1
ENVALPINE_NO_PROXY
+ + + +### LDAP + +#### alpine.ldap.attribute.mail + +Specifies the LDAP attribute used to store a users email address + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultmail
ENVALPINE_LDAP_ATTRIBUTE_MAIL
+ +#### alpine.ldap.attribute.name + +Specifies the Attribute that identifies a users ID. +

+Example (Microsoft Active Directory): +
  • userPrincipalName
+Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): +
  • uid
+ + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
DefaultuserPrincipalName
ENVALPINE_LDAP_ATTRIBUTE_NAME
+ +#### alpine.ldap.auth.username.format + +Specifies if the username entered during login needs to be formatted prior +to asserting credentials against the directory. For Active Directory, the +userPrincipal attribute typically ends with the domain, whereas the +samAccountName attribute and other directory server implementations do not. +The %s variable will be substituted with the username asserted during login. +

+Example (Microsoft Active Directory): +
  • %s@example.com
+Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): +
  • %s
+ + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Default%s@example.com
ENVALPINE_LDAP_AUTH_USERNAME_FORMAT
+ +#### alpine.ldap.basedn + +Specifies the base DN that all queries should search from + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultdc=example,dc=com
ENVALPINE_LDAP_BASEDN
+ +#### alpine.ldap.bind.password + +If anonymous access is not permitted, specify a password for the username +used to bind. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultnull
ENVALPINE_LDAP_BIND_PASSWORD
+ +#### alpine.ldap.bind.username + +If anonymous access is not permitted, specify a username with limited access +to the directory, just enough to perform searches. This should be the fully +qualified DN of the user. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultnull
ENVALPINE_LDAP_BIND_USERNAME
+ +#### alpine.ldap.enabled + +Defines if LDAP will be used for user authentication. If enabled, +`alpine.ldap.*` properties should be set accordingly. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeboolean
Defaultfalse
ENVALPINE_LDAP_ENABLED
+ +#### alpine.ldap.groups.filter + +Specifies the LDAP search filter used to retrieve all groups from the directory. +

+Example (Microsoft Active Directory): +
  • (&(objectClass=group)(objectCategory=Group))
+Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): +
  • (&(objectClass=groupOfUniqueNames))
+ + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Default(&(objectClass=group)(objectCategory=Group))
ENVALPINE_LDAP_GROUPS_FILTER
+ +#### alpine.ldap.groups.search.filter + +Specifies the LDAP search filter used to search for groups by their name. +The `{SEARCH_TERM}` variable will be substituted at runtime. +

+Example (Microsoft Active Directory): +
  • (&(objectClass=group)(objectCategory=Group)(cn=*{SEARCH_TERM}*))
+Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): +
  • (&(objectClass=groupOfUniqueNames)(cn=*{SEARCH_TERM}*))
+ + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Default(&(objectClass=group)(objectCategory=Group)(cn=*{SEARCH_TERM}*))
ENVALPINE_LDAP_GROUPS_SEARCH_FILTER
+ +#### alpine.ldap.security.auth + +Specifies the LDAP security authentication level to use. +If this property is empty or unspecified, the behaviour is determined by the service provider. + + + + + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeenum
Valid Values[none, simple, strong]
Defaultsimple
ENVALPINE_LDAP_SECURITY_AUTH
+ +#### alpine.ldap.server.url + +Specifies the LDAP server URL. +

+Examples (Microsoft Active Directory): +
    +
  • ldap://ldap.example.com:3268
  • +
  • ldaps://ldap.example.com:3269
  • +
+Examples (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): +
    +
  • ldap://ldap.example.com:389
  • +
  • ldaps://ldap.example.com:636
  • +
+ + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultldap://ldap.example.com:389
ENVALPINE_LDAP_SERVER_URL
+ +#### alpine.ldap.team.synchronization + +This option will ensure that team memberships for LDAP users are dynamic and +synchronized with membership of LDAP groups. When a team is mapped to an LDAP +group, all local LDAP users will automatically be assigned to the team if +they are a member of the group the team is mapped to. If the user is later +removed from the LDAP group, they will also be removed from the team. This +option provides the ability to dynamically control user permissions via an +external directory. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeboolean
Defaultfalse
ENVALPINE_LDAP_TEAM_SYNCHRONIZATION
+ +#### alpine.ldap.user.groups.filter + +Specifies the LDAP search filter to use to query a user and retrieve a list +of groups the user is a member of. The `{USER_DN}` variable will be substituted +with the actual value of the users DN at runtime. +

+Example (Microsoft Active Directory): +
  • (&(objectClass=group)(objectCategory=Group)(member={USER_DN}))
+Example (Microsoft Active Directory - with nested group support): +
  • (member:1.2.840.113556.1.4.1941:={USER_DN})
+Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): +
  • (&(objectClass=groupOfUniqueNames)(uniqueMember={USER_DN}))
+ + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Default(member:1.2.840.113556.1.4.1941:={USER_DN})
ENVALPINE_LDAP_USER_GROUPS_FILTER
+ +#### alpine.ldap.user.provisioning + +Specifies if mapped LDAP accounts are automatically created upon successful +authentication. When a user logs in with valid credentials but an account has +not been previously provisioned, an authentication failure will be returned. +This allows admins to control specifically which ldap users can access the +system and which users cannot. When this value is set to true, a local ldap +user will be created and mapped to the ldap account automatically. This +automatic provisioning only affects authentication, not authorization. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeboolean
Defaultfalse
ENVALPINE_LDAP_USER_PROVISIONING
+ +#### alpine.ldap.users.search.filter + +Specifies the LDAP search filter used to search for users by their name. +The {SEARCH_TERM} variable will be substituted at runtime. +

+Example (Microsoft Active Directory): +
  • (&(objectClass=group)(objectCategory=Group)(cn=*{SEARCH_TERM}*))
+Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): +
  • (&(objectClass=inetOrgPerson)(cn=*{SEARCH_TERM}*))
+ + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Default(&(objectClass=user)(objectCategory=Person)(cn=*{SEARCH_TERM}*))
ENVALPINE_LDAP_USERS_SEARCH_FILTER
+ + + +### Observability + +#### alpine.metrics.auth.password + +Defines the password required to access metrics. +Has no effect when [`alpine.metrics.auth.username`](#alpinemetricsauthusername) is not set. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultnull
ENVALPINE_METRICS_AUTH_PASSWORD
+ +#### alpine.metrics.auth.username + +Defines the username required to access metrics. +Has no effect when [`alpine.metrics.auth.password`](#alpinemetricsauthpassword) is not set. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultnull
ENVALPINE_METRICS_AUTH_USERNAME
+ +#### alpine.metrics.enabled + +Defines whether Prometheus metrics will be exposed. +If enabled, metrics will be available via the /metrics endpoint. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeboolean
Defaultfalse
ENVALPINE_METRICS_ENABLED
+ + + +### OpenID Connect + +#### alpine.oidc.client.id + +Defines the client ID to be used for OpenID Connect. +The client ID should be the same as the one configured for the frontend, +and will only be used to validate ID tokens. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultnull
ENVALPINE_OIDC_CLIENT_ID
+ +#### alpine.oidc.enabled + +Defines if OpenID Connect will be used for user authentication. +If enabled, `alpine.oidc.*` properties should be set accordingly. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeboolean
Defaultfalse
ENVALPINE_OIDC_ENABLED
+ +#### alpine.oidc.issuer + +Defines the issuer URL to be used for OpenID Connect. +This issuer MUST support provider configuration via the `/.well-known/openid-configuration` endpoint. +See also: +
    +
  • https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
  • +
  • https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig
  • +
+ + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultnull
ENVALPINE_OIDC_ISSUER
+ +#### alpine.oidc.team.synchronization + +This option will ensure that team memberships for OpenID Connect users are dynamic and +synchronized with membership of OpenID Connect groups or assigned roles. When a team is +mapped to an OpenID Connect group, all local OpenID Connect users will automatically be +assigned to the team if they are a member of the group the team is mapped to. If the user +is later removed from the OpenID Connect group, they will also be removed from the team. This +option provides the ability to dynamically control user permissions via the identity provider. +Note that team synchronization is only performed during user provisioning and after successful +authentication. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeboolean
Defaultfalse
ENVALPINE_OIDC_TEAM_SYNCHRONIZATION
+ +#### alpine.oidc.teams.claim + +Defines the name of the claim that contains group memberships or role assignments in the provider's userinfo endpoint. +The claim must be an array of strings. Most public identity providers do not support group or role management. +When using a customizable / on-demand hosted identity provider, name, content, and inclusion in the userinfo endpoint +will most likely need to be configured. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultgroups
ENVALPINE_OIDC_TEAMS_CLAIM
+ +#### alpine.oidc.teams.default + +Defines one or more team names that auto-provisioned OIDC users shall be added to. +Multiple team names may be provided as comma-separated list. +Has no effect when [`alpine.oidc.user.provisioning`](#alpineoidcuserprovisioning) is false, or [`alpine.oidc.team.synchronization`](#alpineoidcteamsynchronization) is true. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultnull
ENVALPINE_OIDC_TEAMS_DEFAULT
+ +#### alpine.oidc.user.provisioning + +Specifies if mapped OpenID Connect accounts are automatically created upon successful +authentication. When a user logs in with a valid access token but an account has +not been previously provisioned, an authentication failure will be returned. +This allows admins to control specifically which OpenID Connect users can access the +system and which users cannot. When this value is set to true, a local OpenID Connect +user will be created and mapped to the OpenID Connect account automatically. This +automatic provisioning only affects authentication, not authorization. + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typeboolean
Defaultfalse
ENVALPINE_OIDC_USER_PROVISIONING
+ +#### alpine.oidc.username.claim + +Defines the name of the claim that contains the username in the provider's userinfo endpoint. +Common claims are `name`, `username`, `preferred_username` or `nickname`. +See also: +
    +
  • https://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse
  • +
+ + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Typestring
Defaultname
ENVALPINE_OIDC_USERNAME_CLAIM
+ + + +### Other + +#### alpine.worker.pool.drain.timeout.duration + +Defines the maximum duration for which Dependency-Track will wait for queued +events and notifications to be processed when shutting down. +During shutdown, newly dispatched events will not be accepted. +The duration must be specified in ISO 8601 notation (https://en.wikipedia.org/wiki/ISO_8601#Durations). + + + + + + + + + + + + + + + + + + + + + +
RequiredNo
Type
DefaultPT5S
ENVALPINE_WORKER_POOL_DRAIN_TIMEOUT_DURATION
+ + + +### Task Execution + +#### alpine.worker.thread.multiplier + +Defines a multiplier that is used to calculate the number of threads used +by the event subsystem. This property is only used when [`alpine.worker.threads`](#alpineworkerthreads) +is set to 0. A machine with 4 cores and a multiplier of 4, will use (at most) +16 worker threads. + + + + + + + + + + + + + + + + + + + + + +
RequiredYes
Typeinteger
Default4
ENVALPINE_WORKER_THREAD_MULTIPLIER
+ +#### alpine.worker.threads + +Defines the number of worker threads that the event subsystem will consume. +Events occur asynchronously and are processed by the Event subsystem. This +value should be large enough to handle most production situations without +introducing much delay, yet small enough not to pose additional load on an +already resource-constrained server. +A value of 0 will instruct Alpine to allocate 1 thread per CPU core. This +can further be tweaked using the [`alpine.worker.thread.multiplier`](#alpineworkerthreadmultiplier) property. + + + + + + + + + + + + + + + + + + + + + +
RequiredYes
Typeinteger
Default0
ENVALPINE_WORKER_THREADS
+ + diff --git a/docs/_docs/getting-started/configuration-frontend.md b/docs/_docs/getting-started/configuration-frontend.md new file mode 100644 index 0000000000..e025e22d4a --- /dev/null +++ b/docs/_docs/getting-started/configuration-frontend.md @@ -0,0 +1,61 @@ +--- +title: Configuration - Frontend +category: Getting Started +chapter: 1 +order: 7 +--- + +The frontend uses a static `config.json` file that is dynamically requested and evaluated via AJAX. +This file resides in `/static/config.json`. + +#### Default configuration + +```json +{ + // Required + // The base URL of the API server. + // NOTE: + // * This URL must be reachable by the browsers of your users. + // * The frontend container itself does NOT communicate with the API server directly, it just serves static files. + // * When deploying to dedicated servers, please use the external IP or domain of the API server. + "API_BASE_URL": "", + // Optional + // Defines the issuer URL to be used for OpenID Connect. + // See alpine.oidc.issuer property of the API server. + "OIDC_ISSUER": "", + // Optional + // Defines the client ID for OpenID Connect. + "OIDC_CLIENT_ID": "", + // Optional + // Defines the scopes to request for OpenID Connect. + // See also: https://openid.net/specs/openid-connect-basic-1_0.html#Scopes + "OIDC_SCOPE": "openid profile email", + // Optional + // Specifies the OpenID Connect flow to use. + // Values other than "implicit" will result in the Code+PKCE flow to be used. + // Usage of the implicit flow is strongly discouraged, but may be necessary when + // the IdP of choice does not support the Code+PKCE flow. + // See also: + // - https://oauth.net/2/grant-types/implicit/ + // - https://oauth.net/2/pkce/ + "OIDC_FLOW": "", + // Optional + // Defines the text of the OpenID Connect login button. + "OIDC_LOGIN_BUTTON_TEXT": "" +} +``` + +For containerized deployments, these settings can be overwritten by either: + +* mounting a customized `config.json` to `/app/static/config.json` inside the container +* providing them as environment variables + +The names of the environment variables are equivalent to their counterparts in `config.json`. + +#### Base path + +For containerized deployments the environment variable `BASE_PATH` is also available to set the base path of the frontend. +This configures nginx to serve the frontend from a subdirectory. For example, if the frontend is served from `https://example.com/dependency-track`, you would set `BASE_PATH` to `/dependency-track`. + +> A mounted `config.json` takes precedence over environment variables. +> If both are provided, environment variables will be ignored. diff --git a/docs/_docs/getting-started/configuration.md b/docs/_docs/getting-started/configuration.md deleted file mode 100644 index e0682203a6..0000000000 --- a/docs/_docs/getting-started/configuration.md +++ /dev/null @@ -1,607 +0,0 @@ ---- -title: Configuration -category: Getting Started -chapter: 1 -order: 6 ---- - -### API server - -The central configuration file `application.properties` resides in the classpath of the WAR by default. -This configuration file controls many performance tuning parameters but is most useful for defining -optional external database sources, directory services (LDAP), and proxy settings. - -For containerized deployments, the properties defined in the configuration file can also be specified -as environment variables. All environment variables are upper case with periods (.) replaced with underscores (_). 
-Refer to the [Docker instructions]({{ site.baseurl }}{% link _docs/getting-started/deploy-docker.md %}) for -configuration examples using environment variables. - -Dependency-Track administrators are highly encouraged to create a copy of this file in the -Dependency-Track data directory and customize it prior to deploying to production. - - -> The default embedded H2 database is designed to quickly evaluate and experiment with Dependency-Track. -> Do not use the embedded H2 database in production environments. -> -> See: [Database Support]({{ site.baseurl }}{% link _docs/getting-started/database-support.md %}). - - -To start Dependency-Track using custom configuration, add the system property -`alpine.application.properties` when executing. For example: - -```bash --Dalpine.application.properties=~/.dependency-track/application.properties -``` - -#### Default configuration - -```ini -############################ Alpine Configuration ########################### - -# Required -# Defines the number of worker threads that the event subsystem will consume. -# Events occur asynchronously and are processed by the Event subsystem. This -# value should be large enough to handle most production situations without -# introducing much delay, yet small enough not to pose additional load on an -# already resource-constrained server. -# A value of 0 will instruct Alpine to allocate 1 thread per CPU core. This -# can further be tweaked using the alpine.worker.thread.multiplier property. -# Default value is 0. -alpine.worker.threads=0 - -# Required -# Defines a multiplier that is used to calculate the number of threads used -# by the event subsystem. This property is only used when alpine.worker.threads -# is set to 0. A machine with 4 cores and a multiplier of 4, will use (at most) -# 16 worker threads. Default value is 4. -alpine.worker.thread.multiplier=4 - -# Required -# Defines the maximum duration for which Dependency-Track will wait for queued -# events and notifications to be processed when shutting down. -# During shutdown, newly dispatched events will not be accepted. -# The duration must be specified in ISO 8601 notation (https://en.wikipedia.org/wiki/ISO_8601#Durations). -alpine.worker.pool.drain.timeout.duration=PT5S - -# Required -# Defines the path to the data directory. This directory will hold logs, keys, -# and any database or index files along with application-specific files or -# directories. -alpine.data.directory=~/.dependency-track - -# Optional -# Defines the path to the secret key to be used for data encryption and decryption. -# The key will be generated upon first startup if it does not exist. -# Default is "/keys/secret.key". -# alpine.secret.key.path=/var/run/secrets/secret.key - -# Optional -# Defines the prefix to be used for API keys. A maximum prefix length of 251 -# characters is supported. -# The prefix may also be left empty. -alpine.api.key.prefix=odt_ - -# Required -# Defines the interval (in seconds) to log general heath information. If value -# equals 0, watchdog logging will be disabled. -alpine.watchdog.logging.interval=0 - -# Required -# Defines the database mode of operation. Valid choices are: -# 'server', 'embedded', and 'external'. -# In server mode, the database will listen for connections from remote hosts. -# In embedded mode, the system will be more secure and slightly faster. -# External mode should be used when utilizing an external database server -# (i.e. mysql, postgresql, etc). 
-alpine.database.mode=embedded - -# Optional -# Defines the TCP port to use when the database.mode is set to 'server'. -alpine.database.port=9092 - -# Required -# Specifies the JDBC URL to use when connecting to the database. -alpine.database.url=jdbc:h2:~/.dependency-track/db - -# Required -# Specifies the JDBC driver class to use. -alpine.database.driver=org.h2.Driver - -# Optional -# Specifies the username to use when authenticating to the database. -alpine.database.username=sa - -# Optional -# Specifies the password to use when authenticating to the database. -# alpine.database.password= - -# Optional -# Specifies a path to the file holding the database password. -# To be used as alternative to alpine.database.password. -# alpine.database.password.file= - -# Optional -# Specifies if the database connection pool is enabled. -alpine.database.pool.enabled=true - -# Optional -# This property controls the maximum size that the pool is allowed to reach, -# including both idle and in-use connections. -# The property can be set globally for both transactional and non-transactional -# connection pools, or for each pool type separately. When both global and pool-specific -# properties are set, the pool-specific properties take precedence. -alpine.database.pool.max.size=20 -# alpine.database.pool.tx.max.size= -# alpine.database.pool.nontx.max.size= - -# Optional -# This property controls the minimum number of idle connections in the pool. -# This value should be equal to or less than alpine.database.pool.max.size. -# Warning: If the value is less than alpine.database.pool.max.size, -# alpine.database.pool.idle.timeout will have no effect. -# The property can be set globally for both transactional and non-transactional -# connection pools, or for each pool type separately. When both global and pool-specific -# properties are set, the pool-specific properties take precedence. -alpine.database.pool.min.idle=10 -# alpine.database.pool.tx.min.idle= -# alpine.database.pool.nontx.min.idle= - -# Optional -# This property controls the maximum amount of time that a connection is -# allowed to sit idle in the pool. -# The property can be set globally for both transactional and non-transactional -# connection pools, or for each pool type separately. When both global and pool-specific -# properties are set, the pool-specific properties take precedence. -alpine.database.pool.idle.timeout=300000 -# alpine.database.pool.tx.idle.timeout= -# alpine.database.pool.nontx.idle.timeout= - -# Optional -# This property controls the maximum lifetime of a connection in the pool. -# An in-use connection will never be retired, only when it is closed will -# it then be removed. -# The property can be set globally for both transactional and non-transactional -# connection pools, or for each pool type separately. When both global and pool-specific -# properties are set, the pool-specific properties take precedence. -alpine.database.pool.max.lifetime=600000 -# alpine.database.pool.tx.max.lifetime= -# alpine.database.pool.nontx.max.lifetime= - -# Optional -# Controls the 2nd level cache type used by DataNucleus, the Object Relational Mapper (ORM). -# See https://www.datanucleus.org/products/accessplatform_6_0/jdo/persistence.html#cache_level2 -# Values supported by Dependency-Track are "soft" (default), "weak", and "none". -# -# Setting this property to "none" may help in reducing the memory footprint of Dependency-Track, -# but has the potential to slow down database operations. 
-# Size of the cache may be monitored through the "datanucleus_cache_second_level_entries" metric, -# refer to https://docs.dependencytrack.org/getting-started/monitoring/#metrics for details. -# -# DO NOT CHANGE UNLESS THERE IS A GOOD REASON TO. -# alpine.datanucleus.cache.level2.type= - -# Required -# Specifies the number of bcrypt rounds to use when hashing a users password. -# The higher the number the more secure the password, at the expense of -# hardware resources and additional time to generate the hash. -alpine.bcrypt.rounds=14 - -# Required -# Defines if LDAP will be used for user authentication. If enabled, -# alpine.ldap.* properties should be set accordingly. -alpine.ldap.enabled=false - -# Optional -# Specifies the LDAP server URL -# Example (Microsoft Active Directory): -# alpine.ldap.server.url=ldap://ldap.example.com:3268 -# alpine.ldap.server.url=ldaps://ldap.example.com:3269 -# Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): -# alpine.ldap.server.url=ldap://ldap.example.com:389 -# alpine.ldap.server.url=ldaps://ldap.example.com:636 -alpine.ldap.server.url=ldap://ldap.example.com:389 - -# Optional -# Specifies the base DN that all queries should search from -alpine.ldap.basedn=dc=example,dc=com - -# Optional -# Specifies the LDAP security authentication level to use. Its value is one of -# the following strings: "none", "simple", "strong". If this property is empty -# or unspecified, the behaviour is determined by the service provider. -alpine.ldap.security.auth=simple - -# Optional -# If anonymous access is not permitted, specify a username with limited access -# to the directory, just enough to perform searches. This should be the fully -# qualified DN of the user. -alpine.ldap.bind.username= - -# Optional -# If anonymous access is not permitted, specify a password for the username -# used to bind. -alpine.ldap.bind.password= - -# Optional -# Specifies if the username entered during login needs to be formatted prior -# to asserting credentials against the directory. For Active Directory, the -# userPrincipal attribute typically ends with the domain, whereas the -# samAccountName attribute and other directory server implementations do not. -# The %s variable will be substitued with the username asserted during login. -# Example (Microsoft Active Directory): -# alpine.ldap.auth.username.format=%s@example.com -# Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): -# alpine.ldap.auth.username.format=%s -alpine.ldap.auth.username.format=%s@example.com - -# Optional -# Specifies the Attribute that identifies a users ID -# Example (Microsoft Active Directory): -# alpine.ldap.attribute.name=userPrincipalName -# Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): -# alpine.ldap.attribute.name=uid -alpine.ldap.attribute.name=userPrincipalName - -# Optional -# Specifies the LDAP attribute used to store a users email address -alpine.ldap.attribute.mail=mail - -# Optional -# Specifies the LDAP search filter used to retrieve all groups from the -# directory. -# Example (Microsoft Active Directory): -# alpine.ldap.groups.filter=(&(objectClass=group)(objectCategory=Group)) -# Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): -# alpine.ldap.groups.filter=(&(objectClass=groupOfUniqueNames)) -alpine.ldap.groups.filter=(&(objectClass=group)(objectCategory=Group)) - -# Optional -# Specifies the LDAP search filter to use to query a user and retrieve a list -# of groups the user is a member of. 
The {USER_DN} variable will be substituted -# with the actual value of the users DN at runtime. -# Example (Microsoft Active Directory): -# alpine.ldap.user.groups.filter=(&(objectClass=group)(objectCategory=Group)(member={USER_DN})) -# Example (Microsoft Active Directory - with nested group support): -# alpine.ldap.user.groups.filter=(member:1.2.840.113556.1.4.1941:={USER_DN}) -# Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): -# alpine.ldap.user.groups.filter=(&(objectClass=groupOfUniqueNames)(uniqueMember={USER_DN})) -alpine.ldap.user.groups.filter=(member:1.2.840.113556.1.4.1941:={USER_DN}) - -# Optional -# Specifies the LDAP search filter used to search for groups by their name. -# The {SEARCH_TERM} variable will be substituted at runtime. -# Example (Microsoft Active Directory): -# alpine.ldap.groups.search.filter=(&(objectClass=group)(objectCategory=Group)(cn=*{SEARCH_TERM}*)) -# Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): -# alpine.ldap.groups.search.filter=(&(objectClass=groupOfUniqueNames)(cn=*{SEARCH_TERM}*)) -alpine.ldap.groups.search.filter=(&(objectClass=group)(objectCategory=Group)(cn=*{SEARCH_TERM}*)) - -# Optional -# Specifies the LDAP search filter used to search for users by their name. -# The {SEARCH_TERM} variable will be substituted at runtime. -# Example (Microsoft Active Directory): -# alpine.ldap.users.search.filter=(&(objectClass=group)(objectCategory=Group)(cn=*{SEARCH_TERM}*)) -# Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): -# alpine.ldap.users.search.filter=(&(objectClass=inetOrgPerson)(cn=*{SEARCH_TERM}*)) -alpine.ldap.users.search.filter=(&(objectClass=user)(objectCategory=Person)(cn=*{SEARCH_TERM}*)) - -# Optional -# Specifies if mapped LDAP accounts are automatically created upon successful -# authentication. When a user logs in with valid credentials but an account has -# not been previously provisioned, an authentication failure will be returned. -# This allows admins to control specifically which ldap users can access the -# system and which users cannot. When this value is set to true, a local ldap -# user will be created and mapped to the ldap account automatically. This -# automatic provisioning only affects authentication, not authorization. -alpine.ldap.user.provisioning=false - -# Optional -# This option will ensure that team memberships for LDAP users are dynamic and -# synchronized with membership of LDAP groups. When a team is mapped to an LDAP -# group, all local LDAP users will automatically be assigned to the team if -# they are a member of the group the team is mapped to. If the user is later -# removed from the LDAP group, they will also be removed from the team. This -# option provides the ability to dynamically control user permissions via an -# external directory. -alpine.ldap.team.synchronization=false - -# Optional -# HTTP proxy. If the address is set, then the port must be set too. -# alpine.http.proxy.address=proxy.example.com -# alpine.http.proxy.port=8888 -# alpine.http.proxy.username= -# alpine.http.proxy.password= -# alpine.no.proxy=localhost,127.0.0.1 - -# Optional -# HTTP Outbound Connection Timeout Settings. All values are in seconds. -# alpine.http.timeout.connection=30 -# alpine.http.timeout.socket=30 -# alpine.http.timeout.pool=60 - -# Optional -# Cross-Origin Resource Sharing (CORS) headers to include in REST responses. -# If 'alpine.cors.enabled' is true, CORS headers will be sent, if false, no -# CORS headers will be sent. 
-# See Also: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS -# The following are default values -#alpine.cors.enabled=true -#alpine.cors.allow.origin=* -#alpine.cors.allow.methods=GET, POST, PUT, DELETE, OPTIONS -#alpine.cors.allow.headers=Origin, Content-Type, Authorization, X-Requested-With, Content-Length, Accept, Origin, X-Api-Key, X-Total-Count, * -#alpine.cors.expose.headers=Origin, Content-Type, Authorization, X-Requested-With, Content-Length, Accept, Origin, X-Api-Key, X-Total-Count -#alpine.cors.allow.credentials=true -#alpine.cors.max.age=3600 - -# Optional -# Defines whether Prometheus metrics will be exposed. -# If enabled, metrics will be available via the /metrics endpoint. -alpine.metrics.enabled=false - -# Optional -# Defines the username required to access metrics. -# Has no effect when alpine.metrics.auth.password is not set. -alpine.metrics.auth.username= - -# Optional -# Defines the password required to access metrics. -# Has no effect when alpine.metrics.auth.username is not set. -alpine.metrics.auth.password= - -# Required -# Defines if OpenID Connect will be used for user authentication. -# If enabled, alpine.oidc.* properties should be set accordingly. -alpine.oidc.enabled=false - -# Optional -# Defines the client ID to be used for OpenID Connect. -# The client ID should be the same as the one configured for the frontend, -# and will only be used to validate ID tokens. -alpine.oidc.client.id= - -# Optional -# Defines the issuer URL to be used for OpenID Connect. -# This issuer MUST support provider configuration via the /.well-known/openid-configuration endpoint. -# See also: -# - https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata -# - https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig -alpine.oidc.issuer= - -# Optional -# Defines the name of the claim that contains the username in the provider's userinfo endpoint. -# Common claims are "name", "username", "preferred_username" or "nickname". -# See also: https://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse -alpine.oidc.username.claim=name - -# Optional -# Specifies if mapped OpenID Connect accounts are automatically created upon successful -# authentication. When a user logs in with a valid access token but an account has -# not been previously provisioned, an authentication failure will be returned. -# This allows admins to control specifically which OpenID Connect users can access the -# system and which users cannot. When this value is set to true, a local OpenID Connect -# user will be created and mapped to the OpenID Connect account automatically. This -# automatic provisioning only affects authentication, not authorization. -alpine.oidc.user.provisioning=false - -# Optional -# This option will ensure that team memberships for OpenID Connect users are dynamic and -# synchronized with membership of OpenID Connect groups or assigned roles. When a team is -# mapped to an OpenID Connect group, all local OpenID Connect users will automatically be -# assigned to the team if they are a member of the group the team is mapped to. If the user -# is later removed from the OpenID Connect group, they will also be removed from the team. This -# option provides the ability to dynamically control user permissions via the identity provider. -# Note that team synchronization is only performed during user provisioning and after successful -# authentication. 
-alpine.oidc.team.synchronization=false - -# Optional -# Defines the name of the claim that contains group memberships or role assignments in the provider's userinfo endpoint. -# The claim must be an array of strings. Most public identity providers do not support group or role management. -# When using a customizable / on-demand hosted identity provider, name, content, and inclusion in the userinfo endpoint -# will most likely need to be configured. -alpine.oidc.teams.claim=groups - -# Optional -# Defines one or more team names that auto-provisioned OIDC users shall be added to. -# Multiple team names may be provided as comma-separated list. -# Has no effect when alpine.oidc.user.provisioning=false, or alpine.oidc.team.synchronization=true. -alpine.oidc.teams.default= - -# Optional -# Define whether system requirement check is enable. -# The default value is true. -system.requirement.check.enabled=true - -# Optional -# Defines the size of the thread pool used to perform requests to the Snyk API in parallel. -# The thread pool will only be used when Snyk integration is enabled. -# A high number may result in quicker exceeding of API rate limits, -# while a number that is too low may result in vulnerability analyses taking longer. -snyk.thread.pool.size=10 - -# Optional -# Defines the maximum amount of retries to perform for each request to the Snyk API. -# Retries are performed with increasing delays between attempts using an exponential backoff strategy. -# The initial duration defined in snyk.retry.exponential.backoff.initial.duration.seconds will be -# multiplied with the value defined in snyk.retry.exponential.backoff.multiplier after each retry attempt, -# until the maximum duration defined in snyk.retry.exponential.backoff.max.duration.seconds is reached. -snyk.retry.max.attempts=6 - -# Optional -# Defines the multiplier for the exponential backoff retry strategy. -snyk.retry.exponential.backoff.multiplier=2 - -# Optional -# Defines the duration in seconds to wait before attempting the first retry. -snyk.retry.exponential.backoff.initial.duration.seconds=1 - -# Optional -# Defines the maximum duration in seconds to wait before attempting the next retry. -snyk.retry.exponential.backoff.max.duration.seconds=60 - -# Optional -#Defines the maximum number of purl sent in a single request to OSS Index. -# The default value is 128. -ossindex.request.max.purl=128 - -# Optional -#Defines the maximum number of attempts used by Resilience4J for exponential backoff retry regarding OSSIndex calls. -# The default value is 50. -ossindex.retry.backoff.max.attempts=50 - -# Optional -#Defines the multiplier used by Resilience4J for exponential backoff retry regarding OSSIndex calls. -# The default value is 2. -ossindex.retry.backoff.multiplier=2 - -# Optional -#Defines the maximum duration used by Resilience4J for exponential backoff retry regarding OSSIndex calls. This value is in milliseconds -# The default value is 10 minutes. -ossindex.retry.backoff.max.duration=600000 - -# Optional -#This flag activate the cache stampede blocker for the repository meta analyzer allowing to handle high concurrency workloads when there -#is a high ratio of duplicate components which can cause unnecessary external calls and index violation on PUBLIC.REPOSITORY_META_COMPONENT_COMPOUND_IDX during cache population. -# The default value is false as enabling the cache stampede blocker can create useless locking if the portfolio does not have a high ratio of duplicate components. 
-repo.meta.analyzer.cacheStampedeBlocker.enabled=false - -# Optional -#The cache stampede blocker uses a striped (partitioned) lock to distribute locks across keys. -#This parameter defines the number of buckets used by the striped lock. The lock used for a given key is derived from the key hashcode and number of buckets. -# The default value is 1000. -repo.meta.analyzer.cacheStampedeBlocker.lock.buckets=1000 - -# Optional -#Defines the maximum number of attempts used by Resilience4J for exponential backoff retry regarding repo meta analyzer cache loading per key. -# The default value is 10. -repo.meta.analyzer.cacheStampedeBlocker.max.attempts=10 -``` - -#### Proxy Configuration - -Proxy support can be configured in one of two ways, using the proxy settings defined -in `application.properties` or through environment variables. By default, the system -will attempt to read the `https_proxy`, `http_proxy` and `no_proxy` environment variables. If one -of these are set, Dependency-Track will use them automatically. - -`no_proxy` specifies URLs that should be excluded from proxying. -This can be a comma-separated list of hostnames, domain names, or a mixture of both. -If a port number is specified for a URL, only the requests with that port number to that URL will be excluded from proxying. -`no_proxy` can also set to be a single asterisk ('*') to match all hosts. - -Dependency-Track supports proxies that require BASIC, DIGEST, and NTLM authentication. - -#### Logging Levels - -Logging levels (INFO, WARN, ERROR, DEBUG, TRACE) can be specified by passing the level -to the `dependencyTrack.logging.level` system property on startup. For example, the -following command will start Dependency-Track (embedded) with DEBUG logging: - -```bash -java -Xmx4G -DdependencyTrack.logging.level=DEBUG -jar dependency-track-embedded.war -``` - -For Docker deployments, simply set the `LOGGING_LEVEL` environment variable to one of -INFO, WARN, ERROR, DEBUG, or TRACE. - -#### Secret Key - -Dependency-Track will encrypt certain confidential data (e.g. access tokens for external service providers) with AES256 -prior to storing it in the database. The secret key used for encrypting and decrypting will be automatically generated -when Dependency-Track starts for the first time, and is placed in `/keys/secret.key` -(`/data/.dependency-track/keys/secret.key` for containerized deployments). - -Starting with Dependency-Track 4.7, it is possible to change the location of the secret key via the `alpine.secret.key.path` -property. This makes it possible to use Kubernetes secrets for example, to mount secrets into the custom location. - -Secret keys may be generated manually upfront instead of relying on Dependency-Track to do it. This can be achieved -with OpenSSL like this: - -```shell -openssl rand 32 > secret.key -``` - -> Note that the default key format has changed in version 4.7. While existing keys using the old format will continue -> to work, keys for new instances will be generated in the new format. 
Old keys may be converted using the following -> [JShell](https://docs.oracle.com/en/java/javase/17/jshell/introduction-jshell.html) script: -> ```java -> import java.io.ObjectInputStream; -> import java.nio.file.Files; -> import java.nio.file.Paths; -> import javax.crypto.SecretKey; -> String inputFilePath = System.getProperty("secret.key.input") -> String outputFilePath = System.getProperty("secret.key.output"); -> SecretKey secretKey = null; -> System.out.println("Reading old key from " + inputFilePath); -> try (var fis = Files.newInputStream(Paths.get(inputFilePath)); -> var ois = new ObjectInputStream(fis)) { -> secretKey = (SecretKey) ois.readObject(); -> } -> System.out.println("Writing new key to " + outputFilePath); -> try (var fos = Files.newOutputStream(Paths.get(outputFilePath))) { -> fos.write(secretKey.getEncoded()); -> } -> /exit -> ``` -> Example execution: -> ```shell -> jshell -R"-Dsecret.key.input=$HOME/.dependency-track/keys/secret.key" -R"-Dsecret.key.output=secret.key.new" convert-key.jsh -> ``` - -### Frontend - -The frontend uses a static `config.json` file that is dynamically requested and evaluated via AJAX. -This file resides in `/static/config.json`. - -#### Default configuration - -```json -{ - // Required - // The base URL of the API server. - // NOTE: - // * This URL must be reachable by the browsers of your users. - // * The frontend container itself does NOT communicate with the API server directly, it just serves static files. - // * When deploying to dedicated servers, please use the external IP or domain of the API server. - "API_BASE_URL": "", - // Optional - // Defines the issuer URL to be used for OpenID Connect. - // See alpine.oidc.issuer property of the API server. - "OIDC_ISSUER": "", - // Optional - // Defines the client ID for OpenID Connect. - "OIDC_CLIENT_ID": "", - // Optional - // Defines the scopes to request for OpenID Connect. - // See also: https://openid.net/specs/openid-connect-basic-1_0.html#Scopes - "OIDC_SCOPE": "openid profile email", - // Optional - // Specifies the OpenID Connect flow to use. - // Values other than "implicit" will result in the Code+PKCE flow to be used. - // Usage of the implicit flow is strongly discouraged, but may be necessary when - // the IdP of choice does not support the Code+PKCE flow. - // See also: - // - https://oauth.net/2/grant-types/implicit/ - // - https://oauth.net/2/pkce/ - "OIDC_FLOW": "", - // Optional - // Defines the text of the OpenID Connect login button. - "OIDC_LOGIN_BUTTON_TEXT": "" -} -``` - -For containerized deployments, these settings can be overridden by either: - -* mounting a customized `config.json` to `/app/static/config.json` inside the container -* providing them as environment variables - -The names of the environment variables are equivalent to their counterparts in `config.json`. - -#### Base path - -For containerized deployments the environment variable `BASE_PATH` is also available to set the base path of the frontend. -This configures nginx to serve the frontend from a subdirectory. For example, if the frontend is served from `https://example.com/dependency-track`, you would set `BASE_PATH` to `/dependency-track`. - - -> A mounted `config.json` takes precedence over environment variables. -> If both are provided, environment variables will be ignored. 
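The secret key workflow above (generate the key with OpenSSL, then point `alpine.secret.key.path` at the file) can be combined with the environment-variable convention for containerized deployments. The sketch below only illustrates that combination; the image name, host port, and mount path are assumptions, not values taken from this change.

```bash
# Generate a 256-bit key up front, as described above.
openssl rand 32 > secret.key

# Illustrative only: mount the key into the API server container and point
# ALPINE_SECRET_KEY_PATH (the environment form of alpine.secret.key.path) at it.
docker run -d --name dtrack-apiserver \
  -p 8081:8080 \
  -v "$(pwd)/secret.key:/var/run/secrets/secret.key:ro" \
  -e "ALPINE_SECRET_KEY_PATH=/var/run/secrets/secret.key" \
  dependencytrack/apiserver
```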
diff --git a/docs/_docs/getting-started/data-directory.md b/docs/_docs/getting-started/data-directory.md index 3dcaed534b..88902d2b0e 100644 --- a/docs/_docs/getting-started/data-directory.md +++ b/docs/_docs/getting-started/data-directory.md @@ -2,7 +2,7 @@ title: Data Directory category: Getting Started chapter: 1 -order: 8 +order: 9 --- Dependency-Track uses `~/.dependency-track` on UNIX/Linux systems and `.dependency-track` in the current users diff --git a/docs/_docs/getting-started/database-support.md b/docs/_docs/getting-started/database-support.md index a2c8aedbbb..bdef136389 100644 --- a/docs/_docs/getting-started/database-support.md +++ b/docs/_docs/getting-started/database-support.md @@ -2,7 +2,7 @@ title: Database Support category: Getting Started chapter: 1 -order: 7 +order: 8 --- Dependency-Track includes an embedded H2 database enabled by default. The intended purpose of this @@ -193,7 +193,7 @@ java -cp h2-2.1.214.jar org.h2.tools.RunScript \ [About Pool Sizing]: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing -[Configuration]: {{ site.baseurl }}{% link _docs/getting-started/configuration.md %} +[Configuration]: {{ site.baseurl }}{% link _docs/getting-started/configuration-apiserver.md %} [HikariCP]: https://github.com/brettwooldridge/HikariCP [Monitoring]: {{ site.baseurl }}{% link _docs/getting-started/monitoring.md %} [SQL mode]: https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html \ No newline at end of file diff --git a/docs/_docs/getting-started/internal-ca.md b/docs/_docs/getting-started/internal-ca.md index 008a7435fe..b09dd1694e 100644 --- a/docs/_docs/getting-started/internal-ca.md +++ b/docs/_docs/getting-started/internal-ca.md @@ -2,7 +2,7 @@ title: Internal Certificate Authorities category: Getting Started chapter: 1 -order: 12 +order: 13 --- Many organizations use their own [certificate authority](https://en.wikipedia.org/wiki/Certificate_authority) (CA) to diff --git a/docs/_docs/getting-started/ldap-configuration.md b/docs/_docs/getting-started/ldap-configuration.md index 860644f8db..087fad49af 100644 --- a/docs/_docs/getting-started/ldap-configuration.md +++ b/docs/_docs/getting-started/ldap-configuration.md @@ -2,7 +2,7 @@ title: LDAP Configuration category: Getting Started chapter: 1 -order: 10 +order: 11 --- Dependency-Track has been tested with multiple LDAP servers. The following are diff --git a/docs/_docs/getting-started/monitoring.md b/docs/_docs/getting-started/monitoring.md index 2b729a852c..fadbeb2d6b 100644 --- a/docs/_docs/getting-started/monitoring.md +++ b/docs/_docs/getting-started/monitoring.md @@ -2,7 +2,7 @@ title: Monitoring category: Getting Started chapter: 1 -order: 13 +order: 14 --- @@ -357,7 +357,7 @@ An [example dashboard] is provided as a quickstart. 
Refer to the [Grafana docume ![Event Metrics in Grafana]({{ site.baseurl }}/images/screenshots/monitoring-metrics-events.png) [community integrations]: {{ site.baseurl }}{% link _docs/integrations/community-integrations.md %} -[Configuration]: {{ site.baseurl }}{% link _docs/getting-started/configuration.md %} +[Configuration]: {{ site.baseurl }}{% link _docs/getting-started/configuration-apiserver.md %} [Connection Pooling]: {{ site.baseurl }}{% link _docs/getting-started/database-support.md %}#connection-pooling [default Logback configuration]: https://github.com/DependencyTrack/dependency-track/blob/master/src/main/docker/logback.xml [dependency-track-exporter]: https://github.com/jetstack/dependency-track-exporter diff --git a/docs/_docs/getting-started/openidconnect-configuration.md b/docs/_docs/getting-started/openidconnect-configuration.md index d214ad8c1b..540b381d54 100644 --- a/docs/_docs/getting-started/openidconnect-configuration.md +++ b/docs/_docs/getting-started/openidconnect-configuration.md @@ -2,14 +2,14 @@ title: OpenID Connect Configuration category: Getting Started chapter: 1 -order: 11 +order: 12 --- > OpenID Connect is supported in Dependency-Track 4.0.0 and above In the context of OAuth2 / OIDC, Dependency-Track's frontend acts as _client_ while the API server acts as _resource server_ (see [OAuth2 roles](https://tools.ietf.org/html/rfc6749#section-1.1)). Due to this, the frontend requires additional configuration, which is currently only supported when deploying it separately from the API server. -Refer to the [Configuration]({{ site.baseurl }}{% link _docs/getting-started/configuration.md %}) and [Docker deployment]({{ site.baseurl }}{% link _docs/getting-started/deploy-docker.md %}) pages for instructions. The “bundled” Docker image and "Classic" Dependency-Track deployments using solely the [executable WAR]({{ site.baseurl }}{% link _docs/getting-started/deploy-exewar.md %}) are not supported! +Refer to the [Configuration]({{ site.baseurl }}{% link _docs/getting-started/configuration-apiserver.md %}) and [Docker deployment]({{ site.baseurl }}{% link _docs/getting-started/deploy-docker.md %}) pages for instructions. The “bundled” Docker image and "Classic" Dependency-Track deployments using solely the [executable WAR]({{ site.baseurl }}{% link _docs/getting-started/deploy-exewar.md %}) are not supported! If configured properly, users will be able to sign in by clicking the _OpenID_ button on the login page: @@ -37,7 +37,7 @@ Multiple identity providers have been tested, the following are some example con Note that some providers may not support specific features like team synchronization, or require further configuration to make them work. If you find that the provider of your choice does not work with Dependency-Track, please [file an issue](https://github.com/DependencyTrack/dependency-track/issues). -For a complete overview of available configuration options for both API server and frontend, please refer to the [Configuration page]({{ site.baseurl }}{% link _docs/getting-started/configuration.md %}). +For a complete overview of available configuration options for both API server and frontend, please refer to the [Configuration page]({{ site.baseurl }}{% link _docs/getting-started/configuration-apiserver.md %}#openid-connect). 
#### Auth0 diff --git a/docs/_docs/getting-started/recurring-tasks.md b/docs/_docs/getting-started/recurring-tasks.md index 50bdfb3e4c..d1956c1bd2 100644 --- a/docs/_docs/getting-started/recurring-tasks.md +++ b/docs/_docs/getting-started/recurring-tasks.md @@ -2,7 +2,7 @@ title: Recurring Tasks category: Getting Started chapter: 1 -order: 9 +order: 10 --- Dependency-Track heavily relies on asynchronous recurring tasks to perform various forms of analyses, calculations, diff --git a/docs/_posts/2018-10-25-v3.3.0.md b/docs/_posts/2018-10-25-v3.3.0.md index ff08901be6..36a8c557b8 100644 --- a/docs/_posts/2018-10-25-v3.3.0.md +++ b/docs/_posts/2018-10-25-v3.3.0.md @@ -53,7 +53,7 @@ alpine.ldap.team.synchronization ``` **See Also:** -* [Configuration]({{ site.baseurl }}{% link _docs/getting-started/configuration.md %}) (updated) +* [Configuration]({{ site.baseurl }}{% link _docs/getting-started/configuration-apiserver.md %}) (updated) * [LDAP Configuration]({{ site.baseurl }}{% link _docs/getting-started/ldap-configuration.md %}) (examples) ###### [dependency-track-embedded.war](https://github.com/DependencyTrack/dependency-track/releases/download/3.3.0/dependency-track-embedded.war) diff --git a/docs/_posts/2019-06-07-v3.5.0.md b/docs/_posts/2019-06-07-v3.5.0.md index 882e35b735..d335826980 100644 --- a/docs/_posts/2019-06-07-v3.5.0.md +++ b/docs/_posts/2019-06-07-v3.5.0.md @@ -21,7 +21,7 @@ Two new LDAP properties were introduced in v3.5.0 that affect LDAP configuration * alpine.ldap.groups.search.filter * alpine.ldap.users.search.filter -Refer to [Configuration]({{ site.baseurl }}{% link _docs/getting-started/configuration.md %}) +Refer to [Configuration]({{ site.baseurl }}{% link _docs/getting-started/configuration-apiserver.md %}) and [Deploying Docker Container]({{ site.baseurl }}{% link _docs/getting-started/deploy-docker.md %}) for details. 
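The OpenID Connect documentation referenced above splits responsibilities between the API server (resource server, configured via the `alpine.oidc.*` properties) and the frontend (client, configured via `config.json` or its environment-variable counterparts). A rough sketch of how the two halves line up in a containerized deployment follows; the issuer URL, client ID, and hostnames are placeholders, not values from this change.

```bash
# API server: enable OIDC via the environment forms of the alpine.oidc.* properties.
docker run -d --name dtrack-apiserver \
  -e "ALPINE_OIDC_ENABLED=true" \
  -e "ALPINE_OIDC_ISSUER=https://auth.example.com/realms/example" \
  -e "ALPINE_OIDC_CLIENT_ID=dependency-track" \
  dependencytrack/apiserver

# Frontend: the config.json counterparts, provided as environment variables.
docker run -d --name dtrack-frontend \
  -e "API_BASE_URL=https://dtrack-api.example.com" \
  -e "OIDC_ISSUER=https://auth.example.com/realms/example" \
  -e "OIDC_CLIENT_ID=dependency-track" \
  dependencytrack/frontend
```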
diff --git a/docs/_posts/2022-12-16-v4.7.0.md b/docs/_posts/2022-12-16-v4.7.0.md index d83cd48cfe..4321f294fd 100644 --- a/docs/_posts/2022-12-16-v4.7.0.md +++ b/docs/_posts/2022-12-16-v4.7.0.md @@ -178,7 +178,7 @@ Special thanks to everyone who contributed code to implement enhancements and fi [frontend/#356]: https://github.com/DependencyTrack/frontend/issues/356 [configure database connection pools]: {{ site.baseurl }}{% link _docs/getting-started/database-support.md %}#connection-pooling -[configure the secret key path]: {{ site.baseurl }}{% link _docs/getting-started/configuration.md %}#secret-key +[configure the secret key path]: {{ site.baseurl }}{% link _docs/getting-started/configuration-apiserver.md %}#secret-key [Snyk]: https://snyk.io/ [@AZenker]: https://github.com/AZenker diff --git a/docs/_posts/2023-10-16-v4.9.0.md b/docs/_posts/2023-10-16-v4.9.0.md index b7555a710e..05b686d51c 100644 --- a/docs/_posts/2023-10-16-v4.9.0.md +++ b/docs/_posts/2023-10-16-v4.9.0.md @@ -169,7 +169,7 @@ Special thanks to everyone who contributed code to implement enhancements and fi [frontend/#611]: https://github.com/DependencyTrack/frontend/pull/576 [frontend/#613]: https://github.com/DependencyTrack/frontend/pull/613 -[Configuration]: {{ site.baseurl }}{% link _docs/getting-started/configuration.md %} +[Configuration]: {{ site.baseurl }}{% link _docs/getting-started/configuration-apiserver.md %} [Policy Compliance]: {{ site.baseurl }}{% link _docs/usage/policy-compliance.md %}#license-violation [reporting a defect]: https://github.com/DependencyTrack/dependency-track/issues/new?assignees=&labels=defect%2Cin+triage&projects=&template=defect-report.yml diff --git a/src/main/resources/application.properties b/src/main/resources/application.properties index b271bd40d8..7e5a9c617d 100644 --- a/src/main/resources/application.properties +++ b/src/main/resources/application.properties @@ -1,6 +1,3 @@ -############################ Alpine Configuration ########################### - -# Required # Defines the number of worker threads that the event subsystem will consume. # Events occur asynchronously and are processed by the Event subsystem. This # value should be large enough to handle most production situations without @@ -8,14 +5,20 @@ # already resource-constrained server. # A value of 0 will instruct Alpine to allocate 1 thread per CPU core. This # can further be tweaked using the alpine.worker.thread.multiplier property. -# Default value is 0. +# +# @category: Task Execution +# @type: integer +# @required alpine.worker.threads=0 -# Required # Defines a multiplier that is used to calculate the number of threads used # by the event subsystem. This property is only used when alpine.worker.threads # is set to 0. A machine with 4 cores and a multiplier of 4, will use (at most) -# 16 worker threads. Default value is 4. +# 16 worker threads. +# +# @category: Task Execution +# @type: integer +# @required alpine.worker.thread.multiplier=4 # Required @@ -25,233 +28,353 @@ alpine.worker.thread.multiplier=4 # The duration must be specified in ISO 8601 notation (https://en.wikipedia.org/wiki/ISO_8601#Durations). alpine.worker.pool.drain.timeout.duration=PT5S -# Required # Defines the path to the data directory. This directory will hold logs, # keys, and any database or index files along with application-specific # files or directories. 
+# +# @category: General +# @type: string +# @required alpine.data.directory=~/.dependency-track -# Optional # Defines the path to the secret key to be used for data encryption and decryption. # The key will be generated upon first startup if it does not exist. -# Default is "/keys/secret.key". -# alpine.secret.key.path=/var/run/secrets/secret.key +# +# @category: General +# @default: ${alpine.data.directory}/keys/secret.key +# @example: /var/run/secrets/secret.key +# @type: string +# alpine.secret.key.path= + +# Defines the path to the private key of the public-private key pair. +# The key will be generated upon first startup if it does not exist. +# The key pair is currently not used by Dependency-Track. +# +# @category: General +# @default: ${alpine.data.directory}/keys/private.key +# @example: /var/run/secrets/private.key +# @type: string +# alpine.private.key.path= + +# Defines the path to the public key of the public-private key pair. +# The key will be generated upon first startup if it does not exist. +# The key pair is currently not used by Dependency-Track. +# +# @category: General +# @default: ${alpine.data.directory}/keys/public.key +# @example: /var/run/secrets/public.key +# @type: string +# alpine.public.key.path= -# Optional # Defines the prefix to be used for API keys. A maximum prefix length of 251 -# characters is supported. -# The prefix may also be left empty. +# characters is supported. The prefix may also be left empty. +# +# @category: General +# @type: string alpine.api.key.prefix=odt_ -# Required -# Defines the interval (in seconds) to log general heath information. +# Defines the interval (in seconds) to log general health information. # If value equals 0, watchdog logging will be disabled. +# +# @category: General +# @hidden alpine.watchdog.logging.interval=0 -# Required -# Defines the database mode of operation. Valid choices are: -# 'server', 'embedded', and 'external'. +# Defines the database mode of operation. # In server mode, the database will listen for connections from remote # hosts. In embedded mode, the system will be more secure and slightly # faster. External mode should be used when utilizing an external # database server (i.e. mysql, postgresql, etc). +# +# @category: Database +# @type: enum +# @valid-values: [server, embedded, external] alpine.database.mode=embedded -# Optional -# Defines the TCP port to use when the database.mode is set to 'server'. +# Defines the TCP port to use when alpine.database.mode is set to 'server'. +# +# @category: Database +# @type: integer alpine.database.port=9092 -# Required # Specifies the JDBC URL to use when connecting to the database. +# +# @category: Database +# @example: jdbc:postgresql://localhost:5432/dtrack +# @type: string +# @required alpine.database.url=jdbc:h2:~/.dependency-track/db;DB_CLOSE_ON_EXIT=FALSE -# Required # Specifies the JDBC driver class to use. +# +# @category: Database +# @type: enum +# @valid-values: [org.h2.Driver, com.microsoft.sqlserver.jdbc.SQLServerDriver, org.postgresql.Driver, com.mysql.cj.jdbc.Driver] +# @hidden alpine.database.driver=org.h2.Driver -# Optional -# Specifies the path (including filename) to where the JDBC driver is located. -# alpine.database.driver.path=/path/to/dbdriver.jar - -# Optional # Specifies the username to use when authenticating to the database. +# +# @category: Database +# @type: string alpine.database.username=sa -# Optional # Specifies the password to use when authenticating to the database. 
+# +# @category: Database +# @type: string # alpine.database.password= -# Optional # Specifies a path to the file holding the database password. # To be used as alternative to alpine.database.password. +# +# @category: Database +# @type: string # alpine.database.password.file= -# Optional # Specifies if the database connection pool is enabled. +# +# @category: Database +# @type: boolean alpine.database.pool.enabled=true -# Optional # This property controls the maximum size that the pool is allowed to reach, # including both idle and in-use connections. -# The property can be set globally for both transactional and non-transactional -# connection pools, or for each pool type separately. When both global and pool-specific -# properties are set, the pool-specific properties take precedence. +# +# @category: Database +# @type: integer alpine.database.pool.max.size=20 + +# Overwrite alpine.database.pool.max.size for the transactional connection pool. +# +# @category: Database +# @type: integer # alpine.database.pool.tx.max.size= + +# Overwrite alpine.database.pool.max.size for the non-transactional connection pool. +# +# @category: Database +# @type: integer # alpine.database.pool.nontx.max.size= -# Optional # This property controls the minimum number of idle connections in the pool. # This value should be equal to or less than alpine.database.pool.max.size. # Warning: If the value is less than alpine.database.pool.max.size, # alpine.database.pool.idle.timeout will have no effect. -# The property can be set globally for both transactional and non-transactional -# connection pools, or for each pool type separately. When both global and pool-specific -# properties are set, the pool-specific properties take precedence. +# +# @category: Database +# @type: integer alpine.database.pool.min.idle=10 + +# Overwrite alpine.database.pool.min.idle for the transactional connection pool. +# +# @category: Database +# @type: integer # alpine.database.pool.tx.min.idle= + +# Overwrite alpine.database.pool.min.idle for the non-transactional connection pool. +# +# @category: Database +# @type: integer # alpine.database.pool.nontx.min.idle= -# Optional # This property controls the maximum amount of time that a connection is # allowed to sit idle in the pool. -# The property can be set globally for both transactional and non-transactional -# connection pools, or for each pool type separately. When both global and pool-specific -# properties are set, the pool-specific properties take precedence. +# +# @category: Database +# @type: integer alpine.database.pool.idle.timeout=300000 + +# Overwrite alpine.database.pool.idle.timeout for the transactional connection pool. +# +# @category: Database +# @type: integer # alpine.database.pool.tx.idle.timeout= + +# Overwrite alpine.database.pool.idle.timeout for the non-transactional connection pool. +# +# @category: Database +# @type: integer # alpine.database.pool.nontx.idle.timeout= -# Optional # This property controls the maximum lifetime of a connection in the pool. # An in-use connection will never be retired, only when it is closed will # it then be removed. -# The property can be set globally for both transactional and non-transactional -# connection pools, or for each pool type separately. When both global and pool-specific -# properties are set, the pool-specific properties take precedence. +# +# @category: Database +# @type: integer alpine.database.pool.max.lifetime=600000 + +# Overwrite alpine.database.pool.max.lifetime for the transactional connection pool. 
+# +# @category: Database +# @type: integer # alpine.database.pool.tx.max.lifetime= + +# Overwrite alpine.database.pool.max.lifetime for the non-transactional connection pool. +# +# @category: Database +# @type: integer # alpine.database.pool.nontx.max.lifetime= -# Optional # Controls the 2nd level cache type used by DataNucleus, the Object Relational Mapper (ORM). -# See https://www.datanucleus.org/products/accessplatform_6_0/jdo/persistence.html#cache_level2 -# Values supported by Dependency-Track are "soft" (default), "weak", and "none". +# See . # # Setting this property to "none" may help in reducing the memory footprint of Dependency-Track, # but has the potential to slow down database operations. # Size of the cache may be monitored through the "datanucleus_cache_second_level_entries" metric, -# refer to https://docs.dependencytrack.org/getting-started/monitoring/#metrics for details. +# refer to for details. # -# DO NOT CHANGE UNLESS THERE IS A GOOD REASON TO. +# @category: Database +# @default: soft +# @type: enum +# @valid-values: [soft, weak, none] # alpine.datanucleus.cache.level2.type= -# Required -# Specifies the number of bcrypt rounds to use when hashing a users password. +# Specifies the number of bcrypt rounds to use when hashing a user's password. # The higher the number the more secure the password, at the expense of # hardware resources and additional time to generate the hash. +# +# @category: General +# @type: integer +# @required alpine.bcrypt.rounds=14 -# Required # Defines if LDAP will be used for user authentication. If enabled, -# alpine.ldap.* properties should be set accordingly. +# `alpine.ldap.*` properties should be set accordingly. +# +# @category: LDAP +# @type: boolean alpine.ldap.enabled=false -# Optional -# Specifies the LDAP server URL -# Example (Microsoft Active Directory): -# alpine.ldap.server.url=ldap://ldap.example.com:3268 -# alpine.ldap.server.url=ldaps://ldap.example.com:3269 -# Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): -# alpine.ldap.server.url=ldap://ldap.example.com:389 -# alpine.ldap.server.url=ldaps://ldap.example.com:636 +# Specifies the LDAP server URL. +#

+# Examples (Microsoft Active Directory): +#   - ldap://ldap.example.com:3268 +#   - ldaps://ldap.example.com:3269 +# Examples (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): +#   - ldap://ldap.example.com:389 +#   - ldaps://ldap.example.com:636
+# +# @category: LDAP +# @type: string alpine.ldap.server.url=ldap://ldap.example.com:389 -# Optional # Specifies the base DN that all queries should search from +# +# @category: LDAP +# @type: string alpine.ldap.basedn=dc=example,dc=com -# Optional -# Specifies the LDAP security authentication level to use. Its value is one of -# the following strings: "none", "simple", "strong". If this property is empty -# or unspecified, the behaviour is determined by the service provider. +# Specifies the LDAP security authentication level to use. +# If this property is empty or unspecified, the behaviour is determined by the service provider. +# +# @category: LDAP +# @type: enum +# @valid-values: [none, simple, strong] alpine.ldap.security.auth=simple -# Optional # If anonymous access is not permitted, specify a username with limited access # to the directory, just enough to perform searches. This should be the fully # qualified DN of the user. +# +# @category: LDAP +# @type: string alpine.ldap.bind.username= -# Optional # If anonymous access is not permitted, specify a password for the username # used to bind. +# +# @category: LDAP +# @type: string alpine.ldap.bind.password= -# Optional # Specifies if the username entered during login needs to be formatted prior # to asserting credentials against the directory. For Active Directory, the # userPrincipal attribute typically ends with the domain, whereas the # samAccountName attribute and other directory server implementations do not. -# The %s variable will be substitued with the username asserted during login. +# The %s variable will be substituted with the username asserted during login. +#

# Example (Microsoft Active Directory): -# alpine.ldap.auth.username.format=%s@example.com +#   - %s@example.com # Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): -# alpine.ldap.auth.username.format=%s +#   - %s
+# +# @category: LDAP +# @type: string alpine.ldap.auth.username.format=%s@example.com -# Optional -# Specifies the Attribute that identifies a users ID +# Specifies the Attribute that identifies a users ID. +#

# Example (Microsoft Active Directory): -# alpine.ldap.attribute.name=userPrincipalName +#   - userPrincipalName # Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): -# alpine.ldap.attribute.name=uid +#   - uid
+# +# @category: LDAP +# @type: string alpine.ldap.attribute.name=userPrincipalName -# Optional # Specifies the LDAP attribute used to store a users email address +# +# @category: LDAP +# @type: string alpine.ldap.attribute.mail=mail -# Optional -# Specifies the LDAP search filter used to retrieve all groups from the -# directory. +# Specifies the LDAP search filter used to retrieve all groups from the directory. +#

# Example (Microsoft Active Directory): -# alpine.ldap.groups.filter=(&(objectClass=group)(objectCategory=Group)) +#   - (&(objectClass=group)(objectCategory=Group)) # Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): -# alpine.ldap.groups.filter=(&(objectClass=groupOfUniqueNames)) +#   - (&(objectClass=groupOfUniqueNames))
+# +# @category: LDAP +# @type: string alpine.ldap.groups.filter=(&(objectClass=group)(objectCategory=Group)) -# Optional # Specifies the LDAP search filter to use to query a user and retrieve a list -# of groups the user is a member of. The {USER_DN} variable will be substituted +# of groups the user is a member of. The `{USER_DN}` variable will be substituted # with the actual value of the users DN at runtime. +#

# Example (Microsoft Active Directory): -# alpine.ldap.user.groups.filter=(&(objectClass=group)(objectCategory=Group)(member={USER_DN})) +#   - (&(objectClass=group)(objectCategory=Group)(member={USER_DN})) # Example (Microsoft Active Directory - with nested group support): -# alpine.ldap.user.groups.filter=(member:1.2.840.113556.1.4.1941:={USER_DN}) +#   - (member:1.2.840.113556.1.4.1941:={USER_DN}) # Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): -# alpine.ldap.user.groups.filter=(&(objectClass=groupOfUniqueNames)(uniqueMember={USER_DN})) +#   - (&(objectClass=groupOfUniqueNames)(uniqueMember={USER_DN}))
+# +# @category: LDAP +# @type: string alpine.ldap.user.groups.filter=(member:1.2.840.113556.1.4.1941:={USER_DN}) -# Optional # Specifies the LDAP search filter used to search for groups by their name. -# The {SEARCH_TERM} variable will be substituted at runtime. +# The `{SEARCH_TERM}` variable will be substituted at runtime. +#

# Example (Microsoft Active Directory): -# alpine.ldap.groups.search.filter=(&(objectClass=group)(objectCategory=Group)(cn=*{SEARCH_TERM}*)) +#   - (&(objectClass=group)(objectCategory=Group)(cn=*{SEARCH_TERM}*)) # Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): -# alpine.ldap.groups.search.filter=(&(objectClass=groupOfUniqueNames)(cn=*{SEARCH_TERM}*)) +#   - (&(objectClass=groupOfUniqueNames)(cn=*{SEARCH_TERM}*))
+# +# @category: LDAP +# @type: string alpine.ldap.groups.search.filter=(&(objectClass=group)(objectCategory=Group)(cn=*{SEARCH_TERM}*)) -# Optional # Specifies the LDAP search filter used to search for users by their name. -# The {SEARCH_TERM} variable will be substituted at runtime. +# The {SEARCH_TERM} variable will be substituted at runtime. +#

# Example (Microsoft Active Directory): -# alpine.ldap.users.search.filter=(&(objectClass=group)(objectCategory=Group)(cn=*{SEARCH_TERM}*)) +#   - (&(objectClass=group)(objectCategory=Group)(cn=*{SEARCH_TERM}*)) # Example (ApacheDS, Fedora 389 Directory, NetIQ/Novell eDirectory, etc): -# alpine.ldap.users.search.filter=(&(objectClass=inetOrgPerson)(cn=*{SEARCH_TERM}*)) +#   - (&(objectClass=inetOrgPerson)(cn=*{SEARCH_TERM}*))
+# +# @category: LDAP +# @type: string alpine.ldap.users.search.filter=(&(objectClass=user)(objectCategory=Person)(cn=*{SEARCH_TERM}*)) -# Optional # Specifies if mapped LDAP accounts are automatically created upon successful # authentication. When a user logs in with valid credentials but an account has # not been previously provisioned, an authentication failure will be returned. @@ -259,9 +382,11 @@ alpine.ldap.users.search.filter=(&(objectClass=user)(objectCategory=Person)(cn=* # system and which users cannot. When this value is set to true, a local ldap # user will be created and mapped to the ldap account automatically. This # automatic provisioning only affects authentication, not authorization. +# +# @category: LDAP +# @type: boolean alpine.ldap.user.provisioning=false -# Optional # This option will ensure that team memberships for LDAP users are dynamic and # synchronized with membership of LDAP groups. When a team is mapped to an LDAP # group, all local LDAP users will automatically be assigned to the team if @@ -269,77 +394,168 @@ alpine.ldap.user.provisioning=false # removed from the LDAP group, they will also be removed from the team. This # option provides the ability to dynamically control user permissions via an # external directory. +# +# @category: LDAP +# @type: boolean alpine.ldap.team.synchronization=false -# Optional -# HTTP proxy. If the address is set, then the port must be set too. -# alpine.http.proxy.address=proxy.example.com -# alpine.http.proxy.port=8888 +# HTTP proxy address. If set, then alpine.http.proxy.port must be set too. +# +# @category: HTTP +# @example: proxy.example.com +# @type: string +# alpine.http.proxy.address= + +# @category: HTTP +# @example: 8888 +# @type: integer +# alpine.http.proxy.port= + +# @category: HTTP +# @type: string # alpine.http.proxy.username= + +# @category: HTTP +# @type: string # alpine.http.proxy.password= -# alpine.no.proxy=localhost,127.0.0.1 -# Optional -# HTTP Outbound Connection Timeout Settings. All values are in seconds. +# @category: HTTP +# @example: localhost,127.0.0.1 +# @type: string +# alpine.no.proxy= + +# Defines the connection timeout in seconds for outbound HTTP connections. +# +# @category: HTTP +# @type: integer # alpine.http.timeout.connection=30 + +# Defines the socket / read timeout in seconds for outbound HTTP connections. +# +# @category: HTTP +# @type: integer # alpine.http.timeout.socket=30 + +# Defines the request timeout in seconds for outbound HTTP connections. +# +# @category: HTTP +# @type: integer # alpine.http.timeout.pool=60 -# Optional -# Cross-Origin Resource Sharing (CORS) headers to include in REST responses. -# If 'alpine.cors.enabled' is true, CORS headers will be sent, if false, no -# CORS headers will be sent. -# See Also: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS -# The following are default values +# Defines whether [Cross Origin Resource Sharing](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) +# (CORS) headers shall be included in REST API responses. +# +# @category: CORS +# @type: boolean # alpine.cors.enabled=true + +# Controls the content of the `Access-Control-Allow-Origin` response header. +#
+# Has no effect when alpine.cors.enabled is `false`. +# +# @category: CORS +# @type: string # alpine.cors.allow.origin=* + +# Controls the content of the `Access-Control-Allow-Methods` response header. +#
+# Has no effect when alpine.cors.enabled is `false`. +# +# @category: CORS +# @type: string # alpine.cors.allow.methods=GET POST PUT DELETE OPTIONS + +# Controls the content of the `Access-Control-Allow-Headers` response header. +#
+# Has no effect when alpine.cors.enabled is `false`. +# +# @category: CORS +# @type: string # alpine.cors.allow.headers=Origin, Content-Type, Authorization, X-Requested-With, Content-Length, Accept, Origin, X-Api-Key, X-Total-Count, * + +# Controls the content of the `Access-Control-Expose-Headers` response header. +#
+# Has no effect when alpine.cors.enabled is `false`. +# +# @category: CORS +# @type: string # alpine.cors.expose.headers=Origin, Content-Type, Authorization, X-Requested-With, Content-Length, Accept, Origin, X-Api-Key, X-Total-Count + +# Controls the content of the `Access-Control-Allow-Credentials` response header. +#
+# Has no effect when alpine.cors.enabled is `false`. +# +# @category: CORS +# @type: boolean # alpine.cors.allow.credentials=true + +# Controls the content of the `Access-Control-Max-Age` response header. +#
+# Has no effect when alpine.cors.enabled is `false`. +# +# @category: CORS +# @type: integer # alpine.cors.max.age=3600 -# Optional # Defines whether Prometheus metrics will be exposed. # If enabled, metrics will be available via the /metrics endpoint. +# +# @category: Observability +# @type: boolean alpine.metrics.enabled=false -# Optional # Defines the username required to access metrics. # Has no effect when alpine.metrics.auth.password is not set. +# +# @category: Observability +# @type: string alpine.metrics.auth.username= -# Optional # Defines the password required to access metrics. # Has no effect when alpine.metrics.auth.username is not set. +# +# @category: Observability +# @type: string alpine.metrics.auth.password= -# Required # Defines if OpenID Connect will be used for user authentication. -# If enabled, alpine.oidc.* properties should be set accordingly. +# If enabled, `alpine.oidc.*` properties should be set accordingly. +# +# @category: OpenID Connect +# @type: boolean alpine.oidc.enabled=false -# Optional # Defines the client ID to be used for OpenID Connect. # The client ID should be the same as the one configured for the frontend, # and will only be used to validate ID tokens. +# +# @category: OpenID Connect +# @type: string alpine.oidc.client.id= -# Optional # Defines the issuer URL to be used for OpenID Connect. -# This issuer MUST support provider configuration via the /.well-known/openid-configuration endpoint. +# This issuer MUST support provider configuration via the `/.well-known/openid-configuration` endpoint. # See also: -# - https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata -# - https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig +#
+#   - https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata +#   - https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig
+# +# @category: OpenID Connect +# @type: string alpine.oidc.issuer= -# Optional # Defines the name of the claim that contains the username in the provider's userinfo endpoint. -# Common claims are "name", "username", "preferred_username" or "nickname". -# See also: https://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse +# Common claims are `name`, `username`, `preferred_username` or `nickname`. +# See also: +#
+#   - https://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse
+# +# @category: OpenID Connect +# @type: string alpine.oidc.username.claim=name -# Optional # Specifies if mapped OpenID Connect accounts are automatically created upon successful # authentication. When a user logs in with a valid access token but an account has # not been previously provisioned, an authentication failure will be returned. @@ -347,9 +563,11 @@ alpine.oidc.username.claim=name # system and which users cannot. When this value is set to true, a local OpenID Connect # user will be created and mapped to the OpenID Connect account automatically. This # automatic provisioning only affects authentication, not authorization. +# +# @category: OpenID Connect +# @type: boolean alpine.oidc.user.provisioning=false -# Optional # This option will ensure that team memberships for OpenID Connect users are dynamic and # synchronized with membership of OpenID Connect groups or assigned roles. When a team is # mapped to an OpenID Connect group, all local OpenID Connect users will automatically be @@ -358,101 +576,137 @@ alpine.oidc.user.provisioning=false # option provides the ability to dynamically control user permissions via the identity provider. # Note that team synchronization is only performed during user provisioning and after successful # authentication. +# +# @category: OpenID Connect +# @type: boolean alpine.oidc.team.synchronization=false -# Optional # Defines the name of the claim that contains group memberships or role assignments in the provider's userinfo endpoint. # The claim must be an array of strings. Most public identity providers do not support group or role management. # When using a customizable / on-demand hosted identity provider, name, content, and inclusion in the userinfo endpoint # will most likely need to be configured. +# +# @category: OpenID Connect +# @type: string alpine.oidc.teams.claim=groups -# Optional # Defines one or more team names that auto-provisioned OIDC users shall be added to. # Multiple team names may be provided as comma-separated list. -# Has no effect when alpine.oidc.user.provisioning=false, or alpine.oidc.team.synchronization=true. +# Has no effect when alpine.oidc.user.provisioning is false, or alpine.oidc.team.synchronization is true. +# +# @category: OpenID Connect +# @type: string alpine.oidc.teams.default= -# Optional -# Define whether system requirement check is enable. -# The default value is true. +# Define whether system requirement check is enabled. +# +# @category: General +# @type: boolean system.requirement.check.enabled=true -# Optional # Defines the size of the thread pool used to perform requests to the Snyk API in parallel. # The thread pool will only be used when Snyk integration is enabled. # A high number may result in quicker exceeding of API rate limits, # while a number that is too low may result in vulnerability analyses taking longer. +# +# @category: Analyzers +# @type: integer snyk.thread.pool.size=10 -# Optional # Defines the maximum amount of retries to perform for each request to the Snyk API. # Retries are performed with increasing delays between attempts using an exponential backoff strategy. # The initial duration defined in snyk.retry.backoff.initial.duration.ms will be # multiplied with the value defined in snyk.retry.backoff.multiplier after each retry attempt, # until the maximum duration defined in snyk.retry.backoff.max.duration.ms is reached. +# +# @category: Analyzers +# @type: integer snyk.retry.max.attempts=10 -# Optional # Defines the multiplier for the exponential backoff retry strategy. 
-# Optional
-#Defines the maximum number of purl sent in a single request to OSS Index.
+# Defines the maximum number of PURLs sent in a single request to OSS Index.
 # The default value is 128.
+#
+# @category: Analyzers
+# @type: integer
+# @hidden
 ossindex.request.max.purl=128
 
-# Optional
 # Defines the maximum amount of retries to perform for each request to the OSS Index API.
 # Retries are performed with increasing delays between attempts using an exponential backoff strategy.
 # The initial duration defined in ossindex.retry.backoff.initial.duration.ms will be
 # multiplied with the value defined in ossindex.retry.backoff.multiplier after each retry attempt,
 # until the maximum duration defined in ossindex.retry.backoff.max.duration.ms is reached.
+#
+# @category: Analyzers
+# @type: integer
 ossindex.retry.max.attempts=10
 
-# Optional
 # Defines the multiplier for the exponential backoff retry strategy.
+#
+# @category: Analyzers
+# @type: integer
 ossindex.retry.backoff.multiplier=2
 
-# Optional
 # Defines the maximum duration in milliseconds to wait before attempting the next retry.
+#
+# @category: Analyzers
+# @type: integer
 ossindex.retry.backoff.max.duration.ms=600000
 
-# Optional
 # Defines the maximum amount of retries to perform for each request to the Trivy API.
 # Retries are performed with increasing delays between attempts using an exponential backoff strategy.
 # The initial duration defined in trivy.retry.backoff.initial.duration.ms will be
 # multiplied with the value defined in trivy.retry.backoff.multiplier after each retry attempt,
 # until the maximum duration defined in trivy.retry.backoff.max.duration.ms is reached.
+#
+# @category: Analyzers
+# @type: integer
 trivy.retry.max.attempts=10
 
-# Optional
 # Defines the multiplier for the exponential backoff retry strategy.
+#
+# @category: Analyzers
+# @type: integer
 trivy.retry.backoff.multiplier=2
 
-# Optional
 # Defines the duration in milliseconds to wait before attempting the first retry.
+#
+# @category: Analyzers
+# @type: integer
 trivy.retry.backoff.initial.duration.ms=1000
 
-# Optional
 # Defines the maximum duration in milliseconds to wait before attempting the next retry.
+#
+# @category: Analyzers
+# @type: integer
 trivy.retry.backoff.max.duration.ms=60000
 
-# Optional
 # This flag activate the cache stampede blocker for the repository meta analyzer allowing to handle high concurrency workloads when there
 # is a high ratio of duplicate components which can cause unnecessary external calls and index violation on PUBLIC.REPOSITORY_META_COMPONENT_COMPOUND_IDX during cache population.
 # The default value is true.
+#
+# @category: Analyzers
+# @type: boolean
 repo.meta.analyzer.cacheStampedeBlocker.enabled=true
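To picture what the cache stampede blocker does, here is a minimal, hypothetical sketch assuming Guava's `Striped` lock is on the classpath; the class, field, and method names are invented for illustration and do not reflect Dependency-Track's actual implementation. Concurrent lookups for the same key serialize on one of a fixed number of lock buckets, so only the first caller performs the expensive load (e.g. a remote repository call) while later callers find the value already cached.

```java
import com.google.common.util.concurrent.Striped;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.function.Function;

// Conceptual sketch of a cache stampede blocker, not Dependency-Track's code.
public class StampedeBlockerSketch<K, V> {

    private final Map<K, V> cache = new ConcurrentHashMap<>();
    private final Striped<Lock> locks;

    public StampedeBlockerSketch(int lockBuckets) {
        // Mirrors repo.meta.analyzer.cacheStampedeBlocker.lock.buckets: the lock
        // for a key is chosen from the key's hash code and the bucket count.
        this.locks = Striped.lock(lockBuckets);
    }

    public V get(K key, Function<K, V> loader) {
        V cached = cache.get(key);          // lock-free fast path for repeated keys
        if (cached != null) {
            return cached;
        }
        Lock lock = locks.get(key);
        lock.lock();
        try {
            cached = cache.get(key);        // re-check under the lock
            if (cached == null) {
                cached = loader.apply(key); // only one thread per key reaches this
                cache.put(key, cached);
            }
            return cached;
        } finally {
            lock.unlock();
        }
    }
}
```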
-# Optional
 # The cache stampede blocker use a striped (partitioned) lock to distribute locks across keys.
 # This parameter defines the number of bucket used by the striped lock. The lock used for a given key is derived from the key hashcode and number of buckets.
 # This value should be set according to your portfolio profile (i.e. number of projects and proportion of duplicates).
@@ -460,10 +714,15 @@ repo.meta.analyzer.cacheStampedeBlocker.enabled=true
 # Too much buckets can lead to unnecessary memory usage. Note that the memory footprint of Striped Lock is 32 * (nbOfBuckets * 1) Bytes.
 # A value between 1_000 (~32 KB) and 1_000_000 (~32 MB) seems reasonable.
 # The default value is 1000 (~32KB).
+#
+# @category: Analyzers
+# @type: integer
 repo.meta.analyzer.cacheStampedeBlocker.lock.buckets=1000
 
-# Optional
 # Defines the maximum number of attempts used by Resilience4J for exponential backoff retry regarding repo meta analyzer cache loading per key.
 # The default value is 10.
+#
+# @category: Analyzers
+# @type: integer
 repo.meta.analyzer.cacheStampedeBlocker.max.attempts=10
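For a quick sanity check of the sizing guidance above (roughly 32 bytes per lock bucket), the footprint for a few candidate values of `repo.meta.analyzer.cacheStampedeBlocker.lock.buckets` works out as follows. The snippet is illustrative arithmetic only and is not part of this change.

```java
// Illustrative arithmetic for the documented estimate of ~32 bytes per lock bucket,
// i.e. 1_000 buckets ~ 32 KB and 1_000_000 buckets ~ 32 MB as stated above.
public class StripedLockFootprint {
    public static void main(String[] args) {
        for (int buckets : new int[] {1_000, 100_000, 1_000_000}) {
            long bytes = 32L * buckets;
            System.out.printf("%,9d buckets -> ~%,d bytes%n", buckets, bytes);
        }
        // Output:
        //     1,000 buckets -> ~32,000 bytes
        //   100,000 buckets -> ~3,200,000 bytes
        // 1,000,000 buckets -> ~32,000,000 bytes
    }
}
```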