diff --git a/.env.example b/.env.example
index df8ad2c4b..2d466403d 100644
--- a/.env.example
+++ b/.env.example
@@ -1,4 +1,4 @@
NEO4J_URI=
NEO4J_USERNAME=neo4j
NEO4J_PASSWORD=
-DOCS_VERSION=5
\ No newline at end of file
+DOCS_VERSION=current
\ No newline at end of file
diff --git a/.github/workflows/docs-pr-checks.yml b/.github/workflows/docs-pr-checks.yml
index 68dafc033..41dae5694 100644
--- a/.github/workflows/docs-pr-checks.yml
+++ b/.github/workflows/docs-pr-checks.yml
@@ -5,13 +5,13 @@ on:
pull_request:
branches:
- main
- - dev
+ - console
jobs:
# Generate HTML
docs-build-pr:
- uses: neo4j/docs-tools/.github/workflows/reusable-docs-build.yml@v1.1.2
+ uses: neo4j/docs-tools/.github/workflows/reusable-docs-build.yml@v1.2.0
with:
deploy-id: ${{ github.event.number }}
retain-artifacts: 14
@@ -21,7 +21,7 @@ jobs:
# By default, the job fails if there are errors, passes if there are warnings only.
docs-verify-pr:
needs: docs-build-pr
- uses: neo4j/docs-tools/.github/workflows/reusable-docs-verify.yml@v1.1.2
+ uses: neo4j/docs-tools/.github/workflows/reusable-docs-verify.yml@v1.2.0
with:
failOnWarnings: true
@@ -38,7 +38,7 @@ jobs:
steps:
- name: Get file changes
id: get-file-changes
- uses: tj-actions/changed-files@cbda684547adc8c052d50711417fa61b428a9f88 # v41.1.2
+ uses: tj-actions/changed-files@2f7c5bfce28377bc069a65ba478de0a74aa0ca32 # v46.0.1
with:
separator: ','
files_yaml: |
@@ -53,7 +53,7 @@ jobs:
docs-updates-comment-pr:
if: needs.docs-build-pr.outputs.pages-listed == 'success'
needs: [docs-build-pr, docs-changes-pr]
- uses: neo4j/docs-tools/.github/workflows/reusable-docs-pr-changes.yml@v1.1.2
+ uses: neo4j/docs-tools/.github/workflows/reusable-docs-pr-changes.yml@v1.2.0
with:
pages-modified: ${{ needs.docs-changes-pr.outputs.pages-modified }}
pages-added: ${{ needs.docs-changes-pr.outputs.pages-added }}
diff --git a/.github/workflows/docs-teardown.yml b/.github/workflows/docs-teardown.yml
index 19ffe022f..526393e52 100644
--- a/.github/workflows/docs-teardown.yml
+++ b/.github/workflows/docs-teardown.yml
@@ -5,7 +5,7 @@ on:
pull_request_target:
branches:
- main
- - dev
+ - console
types:
- closed
diff --git a/README.adoc b/README.adoc
index fd6fff9d1..e9fc1ed27 100644
--- a/README.adoc
+++ b/README.adoc
@@ -30,7 +30,7 @@ NOTE: The script requires Python 3.
1. Activate the virtual environment (if not already active): `source venv/bin/activate` (`venv\Scripts\activate` on Windows).
2. Run `python scripts/manage_instances.py --create INSTANCE_NAME` to create an Aura instance with name _INSTANCE_NAME_, or `python scripts/manage_instances.py --destroy INSTANCE_NAME` to destroy it.
- You can use the optional parameter `--instance-type` together with `--create` to select the instance type to create (the default is `enterprise-ds`).
- - You can use different tenants using the optional parameter `--tenant` together with `--create` (the default is `gcp`). Every tenant is used with a default region.
+ - You can select a different project using the optional parameter `--tenant` together with `--create` (the default is `gcp`), as shown in the example below. Each project is used with a default region.
- Run `python scripts/manage_instances.py --help` for more details on the usage.
The script creates an _INSTANCE_NAME_.env file containing the Aura instance credentials.
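For illustration, a minimal invocation that combines the optional flags described in the list above. The instance name `my-instance` is a placeholder; the flag values shown are simply the documented defaults, so adjust them as needed.

[source,shell]
----
# Placeholder instance name; --instance-type and --tenant default to the values shown.
python scripts/manage_instances.py --create my-instance --instance-type enterprise-ds --tenant gcp
----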
@@ -39,7 +39,7 @@ The script creates an _INSTANCE_NAME_.env file containing the Aura instance cred
This script can be used to extract runnable code (Python or Cypher statements) from a doc HTML page. This means that the docset has to be built first, for example by running `npm run build`.
-NOTE: The script requires Python 3.
+NOTE: The script requires Python 3.
NOTE: AuraDS credentials must be available in an `aura.env` file.
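As a hedged sketch only: the `aura.env` file is assumed here to carry the same credential keys as `.env.example` in this repository. The values below are placeholders, not real credentials, and the exact keys the extraction script expects may differ.

[source,shell]
----
# Assumed keys, mirroring .env.example; replace the placeholder values with the
# credentials generated when the Aura instance was created.
NEO4J_URI=neo4j+s://xxxxxxxx.databases.neo4j.io
NEO4J_USERNAME=neo4j
NEO4J_PASSWORD=<generated-password>
----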
@@ -58,7 +58,7 @@ NOTE: AuraDS credentials must be available in an `aura.env` file.
== Replace partials in an AsciiDoc file
-Some applications cannot use Asciidoc files that use partials. The `replace_partials.py` script can be used to preprocess an Asciidoc file and replace the `include::partial` lines with the actual content of the referenced partials.
+Some applications cannot consume AsciiDoc files that include partials. The `replace_partials.py` script can be used to preprocess an AsciiDoc file and replace the `include::partial` lines with the actual content of the referenced partials.
NOTE: The script requires Python 3.
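For context, a minimal sketch of how the script might be driven. The `include::partial$...` line in the comment shows the standard AsciiDoc include syntax that gets replaced, and the invocation itself is hypothetical, since the script's actual arguments are not documented in this excerpt.

[source,shell]
----
# A line of this shape in the input file is what gets replaced with the partial's content:
#   include::partial$some-shared-snippet.adoc[]
# Hypothetical invocation; the real argument names may differ.
python scripts/replace_partials.py modules/ROOT/pages/example.adoc
----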
diff --git a/antora.yml b/antora.yml
index 5be3b1f5d..22cd89795 100644
--- a/antora.yml
+++ b/antora.yml
@@ -1,6 +1,7 @@
name: aura
title: Neo4j Aura
version: ~
+display_version: current
start_page: ROOT:index.adoc
nav:
- modules/ROOT/content-nav.adoc
diff --git a/modules/ROOT/content-nav.adoc b/modules/ROOT/content-nav.adoc
index ab47594ae..14d104557 100644
--- a/modules/ROOT/content-nav.adoc
+++ b/modules/ROOT/content-nav.adoc
@@ -1,111 +1,167 @@
////
Generic Start
////
-* *Neo4j Aura*
+* Neo4j Aura
+
+* Introduction
+** xref:index.adoc[About Aura]
+** xref:new-console.adoc[New Neo4j Aura console]
+** xref:microsoft-fabric.adoc[Workload for Microsoft Fabric]
+** xref:visual-tour/index.adoc[Visual tour]
+
+* xref:graph-analytics/index.adoc[]
+
+* Quick start
+** xref:getting-started/create-account.adoc[Create an account]
+** xref:getting-started/create-instance.adoc[Create an instance]
+** xref:getting-started/connect-instance.adoc[Connect to an instance]
+** xref:getting-started/migrate-metadata.adoc[Migrate metadata from Workspace]
+
+* xref:cloud-providers.adoc[Cloud provider marketplaces]
+
+* Manage instances
+** xref:managing-instances/instance-actions.adoc[Instance actions]
+** xref:managing-instances/instance-details.adoc[Instance details]
+** xref:managing-instances/secondaries.adoc[Secondaries]
+** xref:managing-instances/instance-resources.adoc[Resources]
+** xref:managing-instances/custom-endpoints.adoc[Custom endpoints]
+** xref:managing-instances/migration-readiness.adoc[Migration Readiness Report]
+** xref:managing-instances/develop.adoc[Develop]
+** xref:managing-instances/regions.adoc[Regions]
+** xref:managing-instances/backup-restore-export.adoc[Backup, export, and restore]
+** xref:managing-instances/vector-optimization.adoc[Vector optimization]
+** xref:managing-instances/cypher-version.adoc[Cypher version]
+
+
+* Import data
+** xref:import/introduction.adoc[What is Import?]
+** xref:import/quick-start.adoc[Quick start]
+** xref:import/visual-tour.adoc[Visual tour]
+** xref:import/file-provision.adoc[Data provision]
+** xref:import/modeling.adoc[Model data]
+** xref:import/mapping.adoc[Map data]
+** xref:import/indexes-and-constraints.adoc[Indexes and constraints]
+** xref:import/import.adoc[Run the import]
+
+
+* Explore data
+** xref:explore/introduction.adoc[What is Explore?]
+** xref:explore/explore-quick-start.adoc[Quick start]
+
+** Visual tour
+*** xref:explore/explore-visual-tour/explore-overview.adoc[Explore overview]
+*** xref:explore/explore-visual-tour/perspective-drawer.adoc[Perspective drawer]
+//*** xref:auradb/explore/explore-visual-tour/settings-drawer.adoc[Settings drawer]
+*** xref:explore/explore-visual-tour/legend-panel.adoc[Legend panel]
+*** xref:explore/explore-visual-tour/search-bar.adoc[Search bar]
+*** xref:explore/explore-visual-tour/card-list.adoc[Card list]
+*** xref:explore/explore-visual-tour/scene-interactions.adoc[Scene interactions]
+
+** Perspectives
+*** xref:explore/explore-perspectives/perspectives.adoc[Perspectives - A business view of the graph]
+*** xref:explore/explore-perspectives/perspective-creation.adoc[Creation and use]
+*** xref:explore/explore-perspectives/refresh-perspectives.adoc[Refresh Perspectives]
+*** xref:explore/explore-perspectives/database-scans.adoc[Database scans]
+
+** Explore features in detail
+*** xref:explore/explore-features/graph-pattern-search.adoc[Graph pattern search]
+*** xref:explore/explore-features/search-phrases-advanced.adoc[Search phrases for advanced queries]
+*** xref:explore/explore-features/scene-actions.adoc[Scene actions]
+*** xref:explore/explore-features/full-text-search.adoc[Full-text search]
+*** xref:explore/explore-features/edit-graph-data.adoc[Edit graph data]
+*** xref:explore/explore-features/slicer.adoc[Slicer]
+** xref:explore/explore-default-actions.adoc[Default actions and shortcuts]
+
+* Query data
+** xref:query/introduction.adoc[What is Query?]
+** xref:query/visual-tour.adoc[Visual tour]
+** xref:query/operations.adoc[Query operations]
+** xref:query/command-reference.adoc[Command reference]
+
+* xref:apoc.adoc[APOC support]
+
+* Aura CLI
+** xref:aura-cli/index.adoc[Introduction]
+** xref:aura-cli/installation.adoc[Installation]
+** xref:aura-cli/initial-configuration.adoc[Initial configuration]
+** xref:aura-cli/auradb-tenants.adoc[Working with AuraDB tenants]
+** xref:aura-cli/auradb-instances.adoc[Managing AuraDB instances]
+** xref:aura-cli/configuration.adoc[Configuration]
+** xref:aura-cli/migration.adoc[Migration]
+
+* Dashboards
+** xref:dashboards/index.adoc[Overview]
+** xref:dashboards/getting-started.adoc[Getting started]
+** xref:dashboards/managing-dashboards.adoc[Managing dashboards]
+** xref:dashboards/import.adoc[Import]
+** xref:dashboards/ai-dashboards.adoc[AI dashboards]
+** xref:dashboards/parameters-and-filters.adoc[Parameters and filters]
+** xref:dashboards/sharing-dashboards.adoc[Sharing dashboards]
+** xref:dashboards/visualizations/index.adoc[Visualizations]
+*** xref:dashboards/visualizations/graph.adoc[Graph]
+*** xref:dashboards/visualizations/table.adoc[Table]
+*** xref:dashboards/visualizations/linechart.adoc[Line chart]
+*** xref:dashboards/visualizations/barchart.adoc[Bar chart]
+*** xref:dashboards/visualizations/piechart.adoc[Pie chart]
+*** xref:dashboards/visualizations/single-value.adoc[Single value]
+*** xref:dashboards/visualizations/text.adoc[Text]
+** xref:dashboards/faq-and-resources.adoc[FAQ and resources]
+
+* Metrics
+** xref:metrics/view-metrics.adoc[View metrics]
+** Metrics integration
+*** xref:metrics/metrics-integration/introduction.adoc[Introduction]
+*** xref:metrics/metrics-integration/process.adoc[Integration Process]
+*** xref:metrics/metrics-integration/status.adoc[Endpoint Status]
+*** xref:metrics/metrics-integration/examples.adoc[Examples]
+*** xref:metrics/metrics-integration/reference.adoc[Reference]
+
+* Logs
+// ** xref:logging/download-logs.adoc[Request and download logs]
+** xref:logging/query-log-analyzer.adoc[Query log analyzer]
+** xref:logging/security-log-analyzer.adoc[Security log analyzer]
+** xref:logging/log-forwarding.adoc[Security log forwarding]
+** xref:logging/log-downloads.adoc[Download logs]
-* xref:index.adoc[Overview]
+* Security
+** xref:security/mfa.adoc[Multi-factor authentication]
+** xref:security/single-sign-on.adoc[Single sign-on]
+** xref:security/ip-filtering.adoc[IP filtering]
+** xref:security/secure-connections.adoc[Secure connections]
+** xref:security/encryption.adoc[Encryption]
+** xref:security/tool-auth.adoc[Tool authentication with Aura user]
-* xref:platform/create-account.adoc[]
-* xref:platform/cloud-providers.adoc[Cloud provider marketplaces]
-* Security
-** xref:platform/security/secure-connections.adoc[]
-** xref:platform/security/single-sign-on.adoc[]
-** xref:platform/security/encryption.adoc[]
+* xref:user-management.adoc[User management]
-* xref:platform/user-management.adoc[]
-* xref:platform/apoc.adoc[]
-* xref:platform/metrics-integration.adoc[Metrics Integration]
+* xref:billing.adoc[Billing]
-* Logging
-** xref:platform/logging/download-logs.adoc[]
-** xref:platform/logging/log-forwarding.adoc[]
-// ** xref:platform/logging/query-log-analyzer.adoc[]
+* Connecting applications
+** xref:connecting-applications/overview.adoc[Drivers and libraries]
+** xref:connecting-applications/query-api.adoc[Using Query API]
-* Neo4j connectors
-** xref:platform/connectors/spark.adoc[]
-** xref:platform/connectors/kafka.adoc[]
-** xref:platform/connectors/bi.adoc[]
+* Neo4j Connectors
+** xref:connectors/spark.adoc[Neo4j Connector for Apache Spark]
+** xref:connectors/kafka.adoc[Neo4j Connector for Apache Kafka]
+** xref:connectors/bi.adoc[Neo4j Connector for BI]
* Aura API
-** xref:platform/api/overview.adoc[]
-** xref:platform/api/authentication.adoc[]
+** xref:api/overview.adoc[]
+** xref:api/authentication.adoc[]
** link:{neo4j-docs-base-uri}/aura/platform/api/specification/[API Specification]
-////
-Generic End
-////
-
-////
-AuraDB Start
-////
-* *Neo4j AuraDB*
-
-* xref:auradb/index.adoc[Overview]
-
-* Getting Started
-** xref:auradb/getting-started/create-database.adoc[]
-** xref:auradb/getting-started/connect-database.adoc[]
-** xref:auradb/getting-started/query-database.adoc[]
-
-* Importing
-** xref:auradb/importing/importing-data.adoc[]
-** xref:auradb/importing/import-database.adoc[]
-
-* Managing instances
-** xref:auradb/managing-databases/monitoring.adoc[]
-** xref:auradb/managing-databases/advanced-metrics.adoc[]
-** xref:auradb/managing-databases/backup-restore-export.adoc[]
-** xref:auradb/managing-databases/database-actions.adoc[]
-
-* xref:auradb/connecting-applications/overview.adoc[Connecting applications]
-////
-AuraDB End
-////
-
-////
-AuraDS Start
-////
-* *Neo4j AuraDS*
-
-* xref:aurads/index.adoc[Overview]
-* xref:aurads/architecture.adoc[]
-
-* xref:aurads/create-instance.adoc[]
-
-* xref:aurads/connecting/index.adoc[]
-** xref:aurads/connecting/neo4j-applications.adoc[]
-** xref:aurads/connecting/python.adoc[]
-
-* Usage examples
-** xref:aurads/tutorials/graph-catalog.adoc[]
-** xref:aurads/tutorials/algorithm-modes.adoc[]
-** xref:aurads/tutorials/memory-estimation.adoc[]
-** xref:aurads/tutorials/algorithm-progress.adoc[]
-** xref:aurads/tutorials/model-catalog.adoc[]
-** xref:aurads/tutorials/arrow-examples.adoc[]
-
-* xref:aurads/importing-data/index.adoc[]
-** xref:aurads/importing-data/import-db.adoc[]
-** xref:aurads/importing-data/data-importer.adoc[]
-** xref:aurads/importing-data/load-csv.adoc[]
-
-* Managing instances
-** xref:aurads/managing-instances/monitoring.adoc[]
-** xref:aurads/managing-instances/advanced-metrics.adoc[]
-** xref:aurads/managing-instances/backup-restore-export.adoc[]
-** xref:aurads/managing-instances/instance-actions.adoc[]
-////
-AuraDS End
-////
-
* *Tutorials*
* Upgrade and migration
** xref:tutorials/upgrade.adoc[]
** xref:tutorials/migration.adoc[]
+** xref:tutorials/migration-free.adoc[]
* Integrating with Neo4j Connectors
** xref:tutorials/spark.adoc[]
** xref:tutorials/bi.adoc[]
* xref:tutorials/performance-improvements.adoc[]
* xref:tutorials/troubleshooting.adoc[]
* xref:tutorials/create-auradb-instance-from-terminal.adoc[]
+
+////
+AuraDB End
+////
diff --git a/modules/ROOT/images/action-availability.png b/modules/ROOT/images/action-availability.png
new file mode 100644
index 000000000..b1c2381d4
Binary files /dev/null and b/modules/ROOT/images/action-availability.png differ
diff --git a/modules/ROOT/images/add_range.png b/modules/ROOT/images/add_range.png
new file mode 100644
index 000000000..95b991983
Binary files /dev/null and b/modules/ROOT/images/add_range.png differ
diff --git a/modules/ROOT/images/adjust_storage.png b/modules/ROOT/images/adjust_storage.png
new file mode 100644
index 000000000..a1cf1626a
Binary files /dev/null and b/modules/ROOT/images/adjust_storage.png differ
diff --git a/modules/ROOT/images/advanced-expansion.png b/modules/ROOT/images/advanced-expansion.png
new file mode 100644
index 000000000..42b21f7af
Binary files /dev/null and b/modules/ROOT/images/advanced-expansion.png differ
diff --git a/modules/ROOT/images/aura-cli/console-classic-api-keys.png b/modules/ROOT/images/aura-cli/console-classic-api-keys.png
new file mode 100644
index 000000000..e7b7ecffd
Binary files /dev/null and b/modules/ROOT/images/aura-cli/console-classic-api-keys.png differ
diff --git a/modules/ROOT/images/aura-cli/console-classic-home.png b/modules/ROOT/images/aura-cli/console-classic-home.png
new file mode 100644
index 000000000..e43c629ba
Binary files /dev/null and b/modules/ROOT/images/aura-cli/console-classic-home.png differ
diff --git a/modules/ROOT/images/aura-cli/unified-console-account-dropdown.png b/modules/ROOT/images/aura-cli/unified-console-account-dropdown.png
new file mode 100644
index 000000000..1b0b885a2
Binary files /dev/null and b/modules/ROOT/images/aura-cli/unified-console-account-dropdown.png differ
diff --git a/modules/ROOT/images/aura-cli/unified-console-api-keys.png b/modules/ROOT/images/aura-cli/unified-console-api-keys.png
new file mode 100644
index 000000000..78dd64592
Binary files /dev/null and b/modules/ROOT/images/aura-cli/unified-console-api-keys.png differ
diff --git a/modules/ROOT/images/aura-cli/unified-console-create-api-key.png b/modules/ROOT/images/aura-cli/unified-console-create-api-key.png
new file mode 100644
index 000000000..be651accf
Binary files /dev/null and b/modules/ROOT/images/aura-cli/unified-console-create-api-key.png differ
diff --git a/modules/ROOT/images/azure_privatelink_01_before_enabling.png b/modules/ROOT/images/azure_privatelink_01_before_enabling.png
index 42fee0246..f18a6e804 100644
Binary files a/modules/ROOT/images/azure_privatelink_01_before_enabling.png and b/modules/ROOT/images/azure_privatelink_01_before_enabling.png differ
diff --git a/modules/ROOT/images/azure_privatelink_03_browser_bloom_over_vpn.png b/modules/ROOT/images/azure_privatelink_03_browser_bloom_over_vpn.png
index a01c70c19..ce29558ec 100644
Binary files a/modules/ROOT/images/azure_privatelink_03_browser_bloom_over_vpn.png and b/modules/ROOT/images/azure_privatelink_03_browser_bloom_over_vpn.png differ
diff --git a/modules/ROOT/images/breadcrumbs.png b/modules/ROOT/images/breadcrumbs.png
new file mode 100644
index 000000000..6618ae406
Binary files /dev/null and b/modules/ROOT/images/breadcrumbs.png differ
diff --git a/modules/ROOT/images/captions.png b/modules/ROOT/images/captions.png
new file mode 100644
index 000000000..9f4890d00
Binary files /dev/null and b/modules/ROOT/images/captions.png differ
diff --git a/modules/ROOT/images/card-list.png b/modules/ROOT/images/card-list.png
new file mode 100644
index 000000000..c68942a1e
Binary files /dev/null and b/modules/ROOT/images/card-list.png differ
diff --git a/modules/ROOT/images/cmi_apm_config_input.png b/modules/ROOT/images/cmi_apm_config_input.png
new file mode 100644
index 000000000..eac6bc139
Binary files /dev/null and b/modules/ROOT/images/cmi_apm_config_input.png differ
diff --git a/modules/ROOT/images/cmi_error_status.png b/modules/ROOT/images/cmi_error_status.png
new file mode 100644
index 000000000..08a396f05
Binary files /dev/null and b/modules/ROOT/images/cmi_error_status.png differ
diff --git a/modules/ROOT/images/cmi_instance_config.png b/modules/ROOT/images/cmi_instance_config.png
new file mode 100644
index 000000000..f176fe4b8
Binary files /dev/null and b/modules/ROOT/images/cmi_instance_config.png differ
diff --git a/modules/ROOT/images/cmi_primaries_az_plot.png b/modules/ROOT/images/cmi_primaries_az_plot.png
new file mode 100644
index 000000000..ce5ce1b18
Binary files /dev/null and b/modules/ROOT/images/cmi_primaries_az_plot.png differ
diff --git a/modules/ROOT/images/cmi_process_overview.png b/modules/ROOT/images/cmi_process_overview.png
new file mode 100644
index 000000000..a4621cd8c
Binary files /dev/null and b/modules/ROOT/images/cmi_process_overview.png differ
diff --git a/modules/ROOT/images/cmi_project_config.png b/modules/ROOT/images/cmi_project_config.png
new file mode 100644
index 000000000..c4b8017f0
Binary files /dev/null and b/modules/ROOT/images/cmi_project_config.png differ
diff --git a/modules/ROOT/images/cmi_prometheus_job_config.png b/modules/ROOT/images/cmi_prometheus_job_config.png
new file mode 100644
index 000000000..a2580d701
Binary files /dev/null and b/modules/ROOT/images/cmi_prometheus_job_config.png differ
diff --git a/modules/ROOT/images/cmi_prometheus_jobs_example.png b/modules/ROOT/images/cmi_prometheus_jobs_example.png
new file mode 100644
index 000000000..ab739ec05
Binary files /dev/null and b/modules/ROOT/images/cmi_prometheus_jobs_example.png differ
diff --git a/modules/ROOT/images/cmi_prometheus_targets.png b/modules/ROOT/images/cmi_prometheus_targets.png
new file mode 100644
index 000000000..25abf05f9
Binary files /dev/null and b/modules/ROOT/images/cmi_prometheus_targets.png differ
diff --git a/modules/ROOT/images/cmi_status_table.png b/modules/ROOT/images/cmi_status_table.png
new file mode 100644
index 000000000..5b6436a16
Binary files /dev/null and b/modules/ROOT/images/cmi_status_table.png differ
diff --git a/modules/ROOT/images/connection-dropdown.png b/modules/ROOT/images/connection-dropdown.png
new file mode 100644
index 000000000..546143125
Binary files /dev/null and b/modules/ROOT/images/connection-dropdown.png differ
diff --git a/modules/ROOT/images/connectionauthentication.png b/modules/ROOT/images/connectionauthentication.png
new file mode 100644
index 000000000..13e515f6a
Binary files /dev/null and b/modules/ROOT/images/connectionauthentication.png differ
diff --git a/modules/ROOT/images/connectionbanner.png b/modules/ROOT/images/connectionbanner.png
new file mode 100644
index 000000000..215eb4498
Binary files /dev/null and b/modules/ROOT/images/connectionbanner.png differ
diff --git a/modules/ROOT/images/connectionbanner1.png b/modules/ROOT/images/connectionbanner1.png
new file mode 100644
index 000000000..9b144b6c1
Binary files /dev/null and b/modules/ROOT/images/connectionbanner1.png differ
diff --git a/modules/ROOT/images/connectionmodal.png b/modules/ROOT/images/connectionmodal.png
new file mode 100644
index 000000000..9aef938cc
Binary files /dev/null and b/modules/ROOT/images/connectionmodal.png differ
diff --git a/modules/ROOT/images/connectionmodalnonremote.png b/modules/ROOT/images/connectionmodalnonremote.png
new file mode 100644
index 000000000..496d1b2ba
Binary files /dev/null and b/modules/ROOT/images/connectionmodalnonremote.png differ
diff --git a/modules/ROOT/images/constraints-tab.png b/modules/ROOT/images/constraints-tab.png
new file mode 100644
index 000000000..138bfc920
Binary files /dev/null and b/modules/ROOT/images/constraints-tab.png differ
diff --git a/modules/ROOT/images/consumptionreport.png b/modules/ROOT/images/consumptionreport.png
new file mode 100644
index 000000000..5268c0b64
Binary files /dev/null and b/modules/ROOT/images/consumptionreport.png differ
diff --git a/modules/ROOT/images/context-double.png b/modules/ROOT/images/context-double.png
new file mode 100644
index 000000000..5cf83df1d
Binary files /dev/null and b/modules/ROOT/images/context-double.png differ
diff --git a/modules/ROOT/images/coordinate-layout.png b/modules/ROOT/images/coordinate-layout.png
new file mode 100644
index 000000000..6dcd2f0f6
Binary files /dev/null and b/modules/ROOT/images/coordinate-layout.png differ
diff --git a/modules/ROOT/images/create-node.png b/modules/ROOT/images/create-node.png
new file mode 100644
index 000000000..473692302
Binary files /dev/null and b/modules/ROOT/images/create-node.png differ
diff --git a/modules/ROOT/images/create-relationship.png b/modules/ROOT/images/create-relationship.png
new file mode 100644
index 000000000..9aae16f97
Binary files /dev/null and b/modules/ROOT/images/create-relationship.png differ
diff --git a/modules/ROOT/images/cypher-reference.png b/modules/ROOT/images/cypher-reference.png
new file mode 100644
index 000000000..722b7eb9e
Binary files /dev/null and b/modules/ROOT/images/cypher-reference.png differ
diff --git a/modules/ROOT/images/dashboards/ai-dashboard-data-focus.png b/modules/ROOT/images/dashboards/ai-dashboard-data-focus.png
new file mode 100644
index 000000000..aadb6ad8f
Binary files /dev/null and b/modules/ROOT/images/dashboards/ai-dashboard-data-focus.png differ
diff --git a/modules/ROOT/images/dashboards/ai-dashboard-dual-focus-1.png b/modules/ROOT/images/dashboards/ai-dashboard-dual-focus-1.png
new file mode 100644
index 000000000..ee3f49e51
Binary files /dev/null and b/modules/ROOT/images/dashboards/ai-dashboard-dual-focus-1.png differ
diff --git a/modules/ROOT/images/dashboards/ai-dashboard-dual-focus-2.png b/modules/ROOT/images/dashboards/ai-dashboard-dual-focus-2.png
new file mode 100644
index 000000000..5e28c36b8
Binary files /dev/null and b/modules/ROOT/images/dashboards/ai-dashboard-dual-focus-2.png differ
diff --git a/modules/ROOT/images/dashboards/ai-dashboard-prompt-data-focus.png b/modules/ROOT/images/dashboards/ai-dashboard-prompt-data-focus.png
new file mode 100644
index 000000000..06f94ed81
Binary files /dev/null and b/modules/ROOT/images/dashboards/ai-dashboard-prompt-data-focus.png differ
diff --git a/modules/ROOT/images/dashboards/ai-dashboard-prompt-dual-focus.png b/modules/ROOT/images/dashboards/ai-dashboard-prompt-dual-focus.png
new file mode 100644
index 000000000..565483366
Binary files /dev/null and b/modules/ROOT/images/dashboards/ai-dashboard-prompt-dual-focus.png differ
diff --git a/modules/ROOT/images/dashboards/ai-dashboard-prompt-visualization-focus.png b/modules/ROOT/images/dashboards/ai-dashboard-prompt-visualization-focus.png
new file mode 100644
index 000000000..8511beea5
Binary files /dev/null and b/modules/ROOT/images/dashboards/ai-dashboard-prompt-visualization-focus.png differ
diff --git a/modules/ROOT/images/dashboards/ai-dashboard-visualization-focus.png b/modules/ROOT/images/dashboards/ai-dashboard-visualization-focus.png
new file mode 100644
index 000000000..8ca2bfa5f
Binary files /dev/null and b/modules/ROOT/images/dashboards/ai-dashboard-visualization-focus.png differ
diff --git a/modules/ROOT/images/dashboards/create-first-dashboard.png b/modules/ROOT/images/dashboards/create-first-dashboard.png
new file mode 100644
index 000000000..9d1a7ca57
Binary files /dev/null and b/modules/ROOT/images/dashboards/create-first-dashboard.png differ
diff --git a/modules/ROOT/images/dashboards/dashboard-full.png b/modules/ROOT/images/dashboards/dashboard-full.png
new file mode 100644
index 000000000..43e571b4e
Binary files /dev/null and b/modules/ROOT/images/dashboards/dashboard-full.png differ
diff --git a/modules/ROOT/images/dashboards/import/import-dashboard-dialog.png b/modules/ROOT/images/dashboards/import/import-dashboard-dialog.png
new file mode 100644
index 000000000..0ac2fd815
Binary files /dev/null and b/modules/ROOT/images/dashboards/import/import-dashboard-dialog.png differ
diff --git a/modules/ROOT/images/dashboards/parameters-and-filters/filter-and-card.gif b/modules/ROOT/images/dashboards/parameters-and-filters/filter-and-card.gif
new file mode 100644
index 000000000..5b6ec4fb2
Binary files /dev/null and b/modules/ROOT/images/dashboards/parameters-and-filters/filter-and-card.gif differ
diff --git a/modules/ROOT/images/dashboards/parameters-and-filters/filter-units-in-stock-card.png b/modules/ROOT/images/dashboards/parameters-and-filters/filter-units-in-stock-card.png
new file mode 100644
index 000000000..069b7ed61
Binary files /dev/null and b/modules/ROOT/images/dashboards/parameters-and-filters/filter-units-in-stock-card.png differ
diff --git a/modules/ROOT/images/dashboards/parameters-and-filters/filter-units-in-stock-value.png b/modules/ROOT/images/dashboards/parameters-and-filters/filter-units-in-stock-value.png
new file mode 100644
index 000000000..53ecdbc0b
Binary files /dev/null and b/modules/ROOT/images/dashboards/parameters-and-filters/filter-units-in-stock-value.png differ
diff --git a/modules/ROOT/images/dashboards/parameters-and-filters/filter-units-in-stock.png b/modules/ROOT/images/dashboards/parameters-and-filters/filter-units-in-stock.png
new file mode 100644
index 000000000..8f0bae36e
Binary files /dev/null and b/modules/ROOT/images/dashboards/parameters-and-filters/filter-units-in-stock.png differ
diff --git a/modules/ROOT/images/dashboards/parameters-and-filters/parameter-in-query.png b/modules/ROOT/images/dashboards/parameters-and-filters/parameter-in-query.png
new file mode 100644
index 000000000..506823b8a
Binary files /dev/null and b/modules/ROOT/images/dashboards/parameters-and-filters/parameter-in-query.png differ
diff --git a/modules/ROOT/images/dashboards/parameters-and-filters/parameters-drawer.png b/modules/ROOT/images/dashboards/parameters-and-filters/parameters-drawer.png
new file mode 100644
index 000000000..adb20bc77
Binary files /dev/null and b/modules/ROOT/images/dashboards/parameters-and-filters/parameters-drawer.png differ
diff --git a/modules/ROOT/images/dashboards/sharing-dialog.png b/modules/ROOT/images/dashboards/sharing-dialog.png
new file mode 100644
index 000000000..6ef38ffcc
Binary files /dev/null and b/modules/ROOT/images/dashboards/sharing-dialog.png differ
diff --git a/modules/ROOT/images/dashboards/sharing-hover.png b/modules/ROOT/images/dashboards/sharing-hover.png
new file mode 100644
index 000000000..aba7e9262
Binary files /dev/null and b/modules/ROOT/images/dashboards/sharing-hover.png differ
diff --git a/modules/ROOT/images/dashboards/visualizations/visualization-bar-chart-stacked.png b/modules/ROOT/images/dashboards/visualizations/visualization-bar-chart-stacked.png
new file mode 100644
index 000000000..487b7b62c
Binary files /dev/null and b/modules/ROOT/images/dashboards/visualizations/visualization-bar-chart-stacked.png differ
diff --git a/modules/ROOT/images/dashboards/visualizations/visualization-bar-chart.png b/modules/ROOT/images/dashboards/visualizations/visualization-bar-chart.png
new file mode 100644
index 000000000..23e1525b5
Binary files /dev/null and b/modules/ROOT/images/dashboards/visualizations/visualization-bar-chart.png differ
diff --git a/modules/ROOT/images/dashboards/visualizations/visualization-graph.png b/modules/ROOT/images/dashboards/visualizations/visualization-graph.png
new file mode 100644
index 000000000..f5a06e2e2
Binary files /dev/null and b/modules/ROOT/images/dashboards/visualizations/visualization-graph.png differ
diff --git a/modules/ROOT/images/dashboards/visualizations/visualization-line-chart-multi.png b/modules/ROOT/images/dashboards/visualizations/visualization-line-chart-multi.png
new file mode 100644
index 000000000..ad8f0470d
Binary files /dev/null and b/modules/ROOT/images/dashboards/visualizations/visualization-line-chart-multi.png differ
diff --git a/modules/ROOT/images/dashboards/visualizations/visualization-line-chart.png b/modules/ROOT/images/dashboards/visualizations/visualization-line-chart.png
new file mode 100644
index 000000000..6aa8852c9
Binary files /dev/null and b/modules/ROOT/images/dashboards/visualizations/visualization-line-chart.png differ
diff --git a/modules/ROOT/images/dashboards/visualizations/visualization-pie-chart.png b/modules/ROOT/images/dashboards/visualizations/visualization-pie-chart.png
new file mode 100644
index 000000000..bc18d40a5
Binary files /dev/null and b/modules/ROOT/images/dashboards/visualizations/visualization-pie-chart.png differ
diff --git a/modules/ROOT/images/dashboards/visualizations/visualization-single-value.png b/modules/ROOT/images/dashboards/visualizations/visualization-single-value.png
new file mode 100644
index 000000000..0128e6b25
Binary files /dev/null and b/modules/ROOT/images/dashboards/visualizations/visualization-single-value.png differ
diff --git a/modules/ROOT/images/dashboards/visualizations/visualization-table-basic.png b/modules/ROOT/images/dashboards/visualizations/visualization-table-basic.png
new file mode 100644
index 000000000..1896437a5
Binary files /dev/null and b/modules/ROOT/images/dashboards/visualizations/visualization-table-basic.png differ
diff --git a/modules/ROOT/images/dashboards/visualizations/visualization-table-nodes-and-collections.png b/modules/ROOT/images/dashboards/visualizations/visualization-table-nodes-and-collections.png
new file mode 100644
index 000000000..9df6c3ecc
Binary files /dev/null and b/modules/ROOT/images/dashboards/visualizations/visualization-table-nodes-and-collections.png differ
diff --git a/modules/ROOT/images/dashboards/visualizations/visualization-text-card.png b/modules/ROOT/images/dashboards/visualizations/visualization-text-card.png
new file mode 100644
index 000000000..a651c6550
Binary files /dev/null and b/modules/ROOT/images/dashboards/visualizations/visualization-text-card.png differ
diff --git a/modules/ROOT/images/dashboards/visualizations/visualization-text-editor.png b/modules/ROOT/images/dashboards/visualizations/visualization-text-editor.png
new file mode 100644
index 000000000..4709383d8
Binary files /dev/null and b/modules/ROOT/images/dashboards/visualizations/visualization-text-editor.png differ
diff --git a/modules/ROOT/images/data-source-fields.png b/modules/ROOT/images/data-source-fields.png
new file mode 100644
index 000000000..d42dd3197
Binary files /dev/null and b/modules/ROOT/images/data-source-fields.png differ
diff --git a/modules/ROOT/images/data-source.png b/modules/ROOT/images/data-source.png
new file mode 100644
index 000000000..ea82889ae
Binary files /dev/null and b/modules/ROOT/images/data-source.png differ
diff --git a/modules/ROOT/images/data-sources-interaction.png b/modules/ROOT/images/data-sources-interaction.png
new file mode 100644
index 000000000..5ed384d8f
Binary files /dev/null and b/modules/ROOT/images/data-sources-interaction.png differ
diff --git a/modules/ROOT/images/database-drawer.png b/modules/ROOT/images/database-drawer.png
new file mode 100644
index 000000000..2ad7c3d4d
Binary files /dev/null and b/modules/ROOT/images/database-drawer.png differ
diff --git a/modules/ROOT/images/dataimporterdocs.png b/modules/ROOT/images/dataimporterdocs.png
new file mode 100644
index 000000000..dd9c26806
Binary files /dev/null and b/modules/ROOT/images/dataimporterdocs.png differ
diff --git a/modules/ROOT/images/datascan-generate.png b/modules/ROOT/images/datascan-generate.png
new file mode 100644
index 000000000..9073bd515
Binary files /dev/null and b/modules/ROOT/images/datascan-generate.png differ
diff --git a/modules/ROOT/images/datascan-refresh.png b/modules/ROOT/images/datascan-refresh.png
new file mode 100644
index 000000000..2702355da
Binary files /dev/null and b/modules/ROOT/images/datascan-refresh.png differ
diff --git a/modules/ROOT/images/dataservices.png b/modules/ROOT/images/dataservices.png
new file mode 100644
index 000000000..fd9c530d9
Binary files /dev/null and b/modules/ROOT/images/dataservices.png differ
diff --git a/modules/ROOT/images/degree-centrality.png b/modules/ROOT/images/degree-centrality.png
new file mode 100644
index 000000000..5e1191782
Binary files /dev/null and b/modules/ROOT/images/degree-centrality.png differ
diff --git a/modules/ROOT/images/delete-node.png b/modules/ROOT/images/delete-node.png
new file mode 100644
index 000000000..9512f4d98
Binary files /dev/null and b/modules/ROOT/images/delete-node.png differ
diff --git a/modules/ROOT/images/delete-relationship.png b/modules/ROOT/images/delete-relationship.png
new file mode 100644
index 000000000..7f4704a28
Binary files /dev/null and b/modules/ROOT/images/delete-relationship.png differ
diff --git a/modules/ROOT/images/develop.png b/modules/ROOT/images/develop.png
new file mode 100644
index 000000000..6fd574b3c
Binary files /dev/null and b/modules/ROOT/images/develop.png differ
diff --git a/modules/ROOT/images/di-ui.png b/modules/ROOT/images/di-ui.png
new file mode 100644
index 000000000..0e6374b3e
Binary files /dev/null and b/modules/ROOT/images/di-ui.png differ
diff --git a/modules/ROOT/images/dismiss-single-nodes.png b/modules/ROOT/images/dismiss-single-nodes.png
new file mode 100644
index 000000000..effc39352
Binary files /dev/null and b/modules/ROOT/images/dismiss-single-nodes.png differ
diff --git a/modules/ROOT/images/dropdown.png b/modules/ROOT/images/dropdown.png
new file mode 100644
index 000000000..79abc9526
Binary files /dev/null and b/modules/ROOT/images/dropdown.png differ
diff --git a/modules/ROOT/images/edit-a-filter.png b/modules/ROOT/images/edit-a-filter.png
new file mode 100644
index 000000000..1930f7b62
Binary files /dev/null and b/modules/ROOT/images/edit-a-filter.png differ
diff --git a/modules/ROOT/images/edit-label.png b/modules/ROOT/images/edit-label.png
new file mode 100644
index 000000000..2029b7873
Binary files /dev/null and b/modules/ROOT/images/edit-label.png differ
diff --git a/modules/ROOT/images/edit-properties.png b/modules/ROOT/images/edit-properties.png
new file mode 100644
index 000000000..d8f533437
Binary files /dev/null and b/modules/ROOT/images/edit-properties.png differ
diff --git a/modules/ROOT/images/emptyexplore.png b/modules/ROOT/images/emptyexplore.png
new file mode 100644
index 000000000..173f4523c
Binary files /dev/null and b/modules/ROOT/images/emptyexplore.png differ
diff --git a/modules/ROOT/images/emptyquery.png b/modules/ROOT/images/emptyquery.png
new file mode 100644
index 000000000..a427d6da0
Binary files /dev/null and b/modules/ROOT/images/emptyquery.png differ
diff --git a/modules/ROOT/images/enable-genai-assistance.png b/modules/ROOT/images/enable-genai-assistance.png
new file mode 100644
index 000000000..c3617bcc7
Binary files /dev/null and b/modules/ROOT/images/enable-genai-assistance.png differ
diff --git a/modules/ROOT/images/endpoints.png b/modules/ROOT/images/endpoints.png
new file mode 100644
index 000000000..ef29314a3
Binary files /dev/null and b/modules/ROOT/images/endpoints.png differ
diff --git a/modules/ROOT/images/expand-nodes.png b/modules/ROOT/images/expand-nodes.png
new file mode 100644
index 000000000..5101119fd
Binary files /dev/null and b/modules/ROOT/images/expand-nodes.png differ
diff --git a/modules/ROOT/images/expand.svg b/modules/ROOT/images/expand.svg
new file mode 100644
index 000000000..423d1bde0
--- /dev/null
+++ b/modules/ROOT/images/expand.svg
@@ -0,0 +1,9 @@
+
diff --git a/modules/ROOT/images/expandedresources.png b/modules/ROOT/images/expandedresources.png
new file mode 100644
index 000000000..80ccc0bd0
Binary files /dev/null and b/modules/ROOT/images/expandedresources.png differ
diff --git a/modules/ROOT/images/explore-copilot.png b/modules/ROOT/images/explore-copilot.png
new file mode 100644
index 000000000..37f586069
Binary files /dev/null and b/modules/ROOT/images/explore-copilot.png differ
diff --git a/modules/ROOT/images/explore-overview.png b/modules/ROOT/images/explore-overview.png
new file mode 100644
index 000000000..efb4c1939
Binary files /dev/null and b/modules/ROOT/images/explore-overview.png differ
diff --git a/modules/ROOT/images/explore-ui.png b/modules/ROOT/images/explore-ui.png
new file mode 100644
index 000000000..0560bcd0a
Binary files /dev/null and b/modules/ROOT/images/explore-ui.png differ
diff --git a/modules/ROOT/images/explore.png b/modules/ROOT/images/explore.png
new file mode 100644
index 000000000..02abc7e9f
Binary files /dev/null and b/modules/ROOT/images/explore.png differ
diff --git a/modules/ROOT/images/export-model.png b/modules/ROOT/images/export-model.png
new file mode 100644
index 000000000..63234a3f8
Binary files /dev/null and b/modules/ROOT/images/export-model.png differ
diff --git a/modules/ROOT/images/export-perspective.png b/modules/ROOT/images/export-perspective.png
new file mode 100644
index 000000000..b243b57af
Binary files /dev/null and b/modules/ROOT/images/export-perspective.png differ
diff --git a/modules/ROOT/images/export-saved-cypher.png b/modules/ROOT/images/export-saved-cypher.png
new file mode 100644
index 000000000..40d688bee
Binary files /dev/null and b/modules/ROOT/images/export-saved-cypher.png differ
diff --git a/modules/ROOT/images/file-filtering.png b/modules/ROOT/images/file-filtering.png
new file mode 100644
index 000000000..93a259148
Binary files /dev/null and b/modules/ROOT/images/file-filtering.png differ
diff --git a/modules/ROOT/images/files.png b/modules/ROOT/images/files.png
new file mode 100644
index 000000000..35dc79b23
Binary files /dev/null and b/modules/ROOT/images/files.png differ
diff --git a/modules/ROOT/images/filtering-dismiss.png b/modules/ROOT/images/filtering-dismiss.png
new file mode 100644
index 000000000..c6f22b25c
Binary files /dev/null and b/modules/ROOT/images/filtering-dismiss.png differ
diff --git a/modules/ROOT/images/filtering-histogram.png b/modules/ROOT/images/filtering-histogram.png
new file mode 100644
index 000000000..da9b68f07
Binary files /dev/null and b/modules/ROOT/images/filtering-histogram.png differ
diff --git a/modules/ROOT/images/full-text-search.jpg b/modules/ROOT/images/full-text-search.jpg
new file mode 100644
index 000000000..1b6a03cce
Binary files /dev/null and b/modules/ROOT/images/full-text-search.jpg differ
diff --git a/modules/ROOT/images/go-back.png b/modules/ROOT/images/go-back.png
new file mode 100644
index 000000000..3d4597a77
Binary files /dev/null and b/modules/ROOT/images/go-back.png differ
diff --git a/modules/ROOT/images/graph-result-frame.png b/modules/ROOT/images/graph-result-frame.png
new file mode 100644
index 000000000..599dff14b
Binary files /dev/null and b/modules/ROOT/images/graph-result-frame.png differ
diff --git a/modules/ROOT/images/hover.png b/modules/ROOT/images/hover.png
new file mode 100644
index 000000000..c88c42d0a
Binary files /dev/null and b/modules/ROOT/images/hover.png differ
diff --git a/modules/ROOT/images/icon-add.png b/modules/ROOT/images/icon-add.png
new file mode 100644
index 000000000..2bd6bfe27
Binary files /dev/null and b/modules/ROOT/images/icon-add.png differ
diff --git a/modules/ROOT/images/icon-clear.png b/modules/ROOT/images/icon-clear.png
new file mode 100644
index 000000000..0cdea3217
Binary files /dev/null and b/modules/ROOT/images/icon-clear.png differ
diff --git a/modules/ROOT/images/icon-dismiss.png b/modules/ROOT/images/icon-dismiss.png
new file mode 100644
index 000000000..20680b2f9
Binary files /dev/null and b/modules/ROOT/images/icon-dismiss.png differ
diff --git a/modules/ROOT/images/icon-duplicate.png b/modules/ROOT/images/icon-duplicate.png
new file mode 100644
index 000000000..295df2273
Binary files /dev/null and b/modules/ROOT/images/icon-duplicate.png differ
diff --git a/modules/ROOT/images/icon-expand-reveal.png b/modules/ROOT/images/icon-expand-reveal.png
new file mode 100644
index 000000000..1f9ad891f
Binary files /dev/null and b/modules/ROOT/images/icon-expand-reveal.png differ
diff --git a/modules/ROOT/images/icon-fit-selection.png b/modules/ROOT/images/icon-fit-selection.png
new file mode 100644
index 000000000..cfd00c7be
Binary files /dev/null and b/modules/ROOT/images/icon-fit-selection.png differ
diff --git a/modules/ROOT/images/icon-invert.png b/modules/ROOT/images/icon-invert.png
new file mode 100644
index 000000000..54906d594
Binary files /dev/null and b/modules/ROOT/images/icon-invert.png differ
diff --git a/modules/ROOT/images/icon-jumpto.png b/modules/ROOT/images/icon-jumpto.png
new file mode 100644
index 000000000..32461f762
Binary files /dev/null and b/modules/ROOT/images/icon-jumpto.png differ
diff --git a/modules/ROOT/images/icon-magnifying-glass.png b/modules/ROOT/images/icon-magnifying-glass.png
new file mode 100644
index 000000000..fae493ebb
Binary files /dev/null and b/modules/ROOT/images/icon-magnifying-glass.png differ
diff --git a/modules/ROOT/images/icon-path.png b/modules/ROOT/images/icon-path.png
new file mode 100644
index 000000000..b06a5171d
Binary files /dev/null and b/modules/ROOT/images/icon-path.png differ
diff --git a/modules/ROOT/images/icon-pencil.png b/modules/ROOT/images/icon-pencil.png
new file mode 100644
index 000000000..b857c536f
Binary files /dev/null and b/modules/ROOT/images/icon-pencil.png differ
diff --git a/modules/ROOT/images/icon-redo.png b/modules/ROOT/images/icon-redo.png
new file mode 100644
index 000000000..b492af1fd
Binary files /dev/null and b/modules/ROOT/images/icon-redo.png differ
diff --git a/modules/ROOT/images/icon-undo.png b/modules/ROOT/images/icon-undo.png
new file mode 100644
index 000000000..46bf2c150
Binary files /dev/null and b/modules/ROOT/images/icon-undo.png differ
diff --git a/modules/ROOT/images/image22.png b/modules/ROOT/images/image22.png
new file mode 100644
index 000000000..919b40807
Binary files /dev/null and b/modules/ROOT/images/image22.png differ
diff --git a/modules/ROOT/images/import-model.png b/modules/ROOT/images/import-model.png
new file mode 100644
index 000000000..d0e099d1d
Binary files /dev/null and b/modules/ROOT/images/import-model.png differ
diff --git a/modules/ROOT/images/import-perspective.png b/modules/ROOT/images/import-perspective.png
new file mode 100644
index 000000000..b9099a38a
Binary files /dev/null and b/modules/ROOT/images/import-perspective.png differ
diff --git a/modules/ROOT/images/import-ready.png b/modules/ROOT/images/import-ready.png
new file mode 100644
index 000000000..118d90f51
Binary files /dev/null and b/modules/ROOT/images/import-ready.png differ
diff --git a/modules/ROOT/images/import-saved-cypher.png b/modules/ROOT/images/import-saved-cypher.png
new file mode 100644
index 000000000..92d90b625
Binary files /dev/null and b/modules/ROOT/images/import-saved-cypher.png differ
diff --git a/modules/ROOT/images/inspect-details1.png b/modules/ROOT/images/inspect-details1.png
new file mode 100644
index 000000000..8c6a315e8
Binary files /dev/null and b/modules/ROOT/images/inspect-details1.png differ
diff --git a/modules/ROOT/images/inspectdetails.png b/modules/ROOT/images/inspectdetails.png
new file mode 100644
index 000000000..90ef19da5
Binary files /dev/null and b/modules/ROOT/images/inspectdetails.png differ
diff --git a/modules/ROOT/images/inspectdetails1.png b/modules/ROOT/images/inspectdetails1.png
new file mode 100644
index 000000000..72e3acfe1
Binary files /dev/null and b/modules/ROOT/images/inspectdetails1.png differ
diff --git a/modules/ROOT/images/instanceactions.png b/modules/ROOT/images/instanceactions.png
new file mode 100644
index 000000000..07cd1eaa9
Binary files /dev/null and b/modules/ROOT/images/instanceactions.png differ
diff --git a/modules/ROOT/images/instancedetails.png b/modules/ROOT/images/instancedetails.png
new file mode 100644
index 000000000..a2a162bb0
Binary files /dev/null and b/modules/ROOT/images/instancedetails.png differ
diff --git a/modules/ROOT/images/instancedetailsexpanded.png b/modules/ROOT/images/instancedetailsexpanded.png
new file mode 100644
index 000000000..43cfb1a91
Binary files /dev/null and b/modules/ROOT/images/instancedetailsexpanded.png differ
diff --git a/modules/ROOT/images/instanceemptystate.png b/modules/ROOT/images/instanceemptystate.png
new file mode 100644
index 000000000..6b51256ce
Binary files /dev/null and b/modules/ROOT/images/instanceemptystate.png differ
diff --git a/modules/ROOT/images/instances.png b/modules/ROOT/images/instances.png
new file mode 100644
index 000000000..389bcf9d0
Binary files /dev/null and b/modules/ROOT/images/instances.png differ
diff --git a/modules/ROOT/images/inviteusers.png b/modules/ROOT/images/inviteusers.png
new file mode 100644
index 000000000..ba8d5b996
Binary files /dev/null and b/modules/ROOT/images/inviteusers.png differ
diff --git a/modules/ROOT/images/ip-filtering.png b/modules/ROOT/images/ip-filtering.png
new file mode 100644
index 000000000..131a2204e
Binary files /dev/null and b/modules/ROOT/images/ip-filtering.png differ
diff --git a/modules/ROOT/images/lasso-tool.png b/modules/ROOT/images/lasso-tool.png
new file mode 100644
index 000000000..1254b10f1
Binary files /dev/null and b/modules/ROOT/images/lasso-tool.png differ
diff --git a/modules/ROOT/images/layouts-hierarchy.png b/modules/ROOT/images/layouts-hierarchy.png
new file mode 100644
index 000000000..f211d1b09
Binary files /dev/null and b/modules/ROOT/images/layouts-hierarchy.png differ
diff --git a/modules/ROOT/images/leftsidepanel.png b/modules/ROOT/images/leftsidepanel.png
new file mode 100644
index 000000000..2f2efed6c
Binary files /dev/null and b/modules/ROOT/images/leftsidepanel.png differ
diff --git a/modules/ROOT/images/legend-panel-intro.png b/modules/ROOT/images/legend-panel-intro.png
new file mode 100644
index 000000000..8eb024b69
Binary files /dev/null and b/modules/ROOT/images/legend-panel-intro.png differ
diff --git a/modules/ROOT/images/logsicons.png b/modules/ROOT/images/logsicons.png
new file mode 100644
index 000000000..2f0248d4b
Binary files /dev/null and b/modules/ROOT/images/logsicons.png differ
diff --git a/modules/ROOT/images/louvain.png b/modules/ROOT/images/louvain.png
new file mode 100644
index 000000000..bf8361db0
Binary files /dev/null and b/modules/ROOT/images/louvain.png differ
diff --git a/modules/ROOT/images/manage-endpoints.png b/modules/ROOT/images/manage-endpoints.png
new file mode 100644
index 000000000..feebd4acd
Binary files /dev/null and b/modules/ROOT/images/manage-endpoints.png differ
diff --git a/modules/ROOT/images/map.png b/modules/ROOT/images/map.png
new file mode 100644
index 000000000..6fbe98ac9
Binary files /dev/null and b/modules/ROOT/images/map.png differ
diff --git a/modules/ROOT/images/mark-as-production.png b/modules/ROOT/images/mark-as-production.png
new file mode 100644
index 000000000..5b8aa02f2
Binary files /dev/null and b/modules/ROOT/images/mark-as-production.png differ
diff --git a/modules/ROOT/images/marked-as-production.png b/modules/ROOT/images/marked-as-production.png
new file mode 100644
index 000000000..eb937ff0b
Binary files /dev/null and b/modules/ROOT/images/marked-as-production.png differ
diff --git a/modules/ROOT/images/marquee-tools.png b/modules/ROOT/images/marquee-tools.png
new file mode 100644
index 000000000..e2898dc51
Binary files /dev/null and b/modules/ROOT/images/marquee-tools.png differ
diff --git a/modules/ROOT/images/metrics.png b/modules/ROOT/images/metrics.png
new file mode 100644
index 000000000..1caa89af4
Binary files /dev/null and b/modules/ROOT/images/metrics.png differ
diff --git a/modules/ROOT/images/metrics_integration.png b/modules/ROOT/images/metrics_integration.png
deleted file mode 100644
index 95b4b4ec9..000000000
Binary files a/modules/ROOT/images/metrics_integration.png and /dev/null differ
diff --git a/modules/ROOT/images/model-panel.png b/modules/ROOT/images/model-panel.png
new file mode 100644
index 000000000..cbb45b50b
Binary files /dev/null and b/modules/ROOT/images/model-panel.png differ
diff --git a/modules/ROOT/images/moreinfo.png b/modules/ROOT/images/moreinfo.png
new file mode 100644
index 000000000..26039c526
Binary files /dev/null and b/modules/ROOT/images/moreinfo.png differ
diff --git a/modules/ROOT/images/mrr-deprecation-query-timeline.png b/modules/ROOT/images/mrr-deprecation-query-timeline.png
new file mode 100644
index 000000000..8c678537f
Binary files /dev/null and b/modules/ROOT/images/mrr-deprecation-query-timeline.png differ
diff --git a/modules/ROOT/images/mrr-deprecation-table.png b/modules/ROOT/images/mrr-deprecation-table.png
new file mode 100644
index 000000000..eb1e9644e
Binary files /dev/null and b/modules/ROOT/images/mrr-deprecation-table.png differ
diff --git a/modules/ROOT/images/mrr-driver-table.png b/modules/ROOT/images/mrr-driver-table.png
new file mode 100644
index 000000000..dff7355d7
Binary files /dev/null and b/modules/ROOT/images/mrr-driver-table.png differ
diff --git a/modules/ROOT/images/mrr-fetch-driver-stats.png b/modules/ROOT/images/mrr-fetch-driver-stats.png
new file mode 100644
index 000000000..6aebe7922
Binary files /dev/null and b/modules/ROOT/images/mrr-fetch-driver-stats.png differ
diff --git a/modules/ROOT/images/mrr-fetch-logs.png b/modules/ROOT/images/mrr-fetch-logs.png
new file mode 100644
index 000000000..884ab9016
Binary files /dev/null and b/modules/ROOT/images/mrr-fetch-logs.png differ
diff --git a/modules/ROOT/images/mrr-index-replacement.png b/modules/ROOT/images/mrr-index-replacement.png
new file mode 100644
index 000000000..c07b35202
Binary files /dev/null and b/modules/ROOT/images/mrr-index-replacement.png differ
diff --git a/modules/ROOT/images/mrr-live-migration-ready-for-test.png b/modules/ROOT/images/mrr-live-migration-ready-for-test.png
new file mode 100644
index 000000000..bedd86468
Binary files /dev/null and b/modules/ROOT/images/mrr-live-migration-ready-for-test.png differ
diff --git a/modules/ROOT/images/mrr-resolution-guide.png b/modules/ROOT/images/mrr-resolution-guide.png
new file mode 100644
index 000000000..86e09d134
Binary files /dev/null and b/modules/ROOT/images/mrr-resolution-guide.png differ
diff --git a/modules/ROOT/images/mrr-show-query-log-button.png b/modules/ROOT/images/mrr-show-query-log-button.png
new file mode 100644
index 000000000..c601ac188
Binary files /dev/null and b/modules/ROOT/images/mrr-show-query-log-button.png differ
diff --git a/modules/ROOT/images/mrr-test-instance-ready.png b/modules/ROOT/images/mrr-test-instance-ready.png
new file mode 100644
index 000000000..7b5bc4774
Binary files /dev/null and b/modules/ROOT/images/mrr-test-instance-ready.png differ
diff --git a/modules/ROOT/images/newinstance.png b/modules/ROOT/images/newinstance.png
new file mode 100644
index 000000000..e8f6ed5d3
Binary files /dev/null and b/modules/ROOT/images/newinstance.png differ
diff --git a/modules/ROOT/images/node-exclude.png b/modules/ROOT/images/node-exclude.png
new file mode 100644
index 000000000..ca5d024d7
Binary files /dev/null and b/modules/ROOT/images/node-exclude.png differ
diff --git a/modules/ROOT/images/node-id.png b/modules/ROOT/images/node-id.png
new file mode 100644
index 000000000..568e3202d
Binary files /dev/null and b/modules/ROOT/images/node-id.png differ
diff --git a/modules/ROOT/images/node-inspector.png b/modules/ROOT/images/node-inspector.png
new file mode 100644
index 000000000..226f0ab91
Binary files /dev/null and b/modules/ROOT/images/node-inspector.png differ
diff --git a/modules/ROOT/images/node-mapping.png b/modules/ROOT/images/node-mapping.png
new file mode 100644
index 000000000..d3b88a8ef
Binary files /dev/null and b/modules/ROOT/images/node-mapping.png differ
diff --git a/modules/ROOT/images/node-relationship.png b/modules/ROOT/images/node-relationship.png
new file mode 100644
index 000000000..ff1d8b9bc
Binary files /dev/null and b/modules/ROOT/images/node-relationship.png differ
diff --git a/modules/ROOT/images/node-styling.png b/modules/ROOT/images/node-styling.png
new file mode 100644
index 000000000..097c8bc65
Binary files /dev/null and b/modules/ROOT/images/node-styling.png differ
diff --git a/modules/ROOT/images/northwind-as-a-graph.png b/modules/ROOT/images/northwind-as-a-graph.png
new file mode 100644
index 000000000..602182bbd
Binary files /dev/null and b/modules/ROOT/images/northwind-as-a-graph.png differ
diff --git a/modules/ROOT/images/northwind-customer-perspective.png b/modules/ROOT/images/northwind-customer-perspective.png
new file mode 100644
index 000000000..bbeddbee9
Binary files /dev/null and b/modules/ROOT/images/northwind-customer-perspective.png differ
diff --git a/modules/ROOT/images/northwind-purchasing-perspective.png b/modules/ROOT/images/northwind-purchasing-perspective.png
new file mode 100644
index 000000000..a531ddb14
Binary files /dev/null and b/modules/ROOT/images/northwind-purchasing-perspective.png differ
diff --git a/modules/ROOT/images/northwind-sales-perspective.png b/modules/ROOT/images/northwind-sales-perspective.png
new file mode 100644
index 000000000..c59581bcc
Binary files /dev/null and b/modules/ROOT/images/northwind-sales-perspective.png differ
diff --git a/modules/ROOT/images/northwind-shipping-perspective.png b/modules/ROOT/images/northwind-shipping-perspective.png
new file mode 100644
index 000000000..00a5c660f
Binary files /dev/null and b/modules/ROOT/images/northwind-shipping-perspective.png differ
diff --git a/modules/ROOT/images/organization-users.png b/modules/ROOT/images/organization-users.png
new file mode 100644
index 000000000..656baa1b4
Binary files /dev/null and b/modules/ROOT/images/organization-users.png differ
diff --git a/modules/ROOT/images/organization.png b/modules/ROOT/images/organization.png
new file mode 100644
index 000000000..fca97980a
Binary files /dev/null and b/modules/ROOT/images/organization.png differ
diff --git a/modules/ROOT/images/organizationnav.png b/modules/ROOT/images/organizationnav.png
new file mode 100644
index 000000000..27525fabb
Binary files /dev/null and b/modules/ROOT/images/organizationnav.png differ
diff --git a/modules/ROOT/images/organizationsettings.png b/modules/ROOT/images/organizationsettings.png
new file mode 100644
index 000000000..19baec6b2
Binary files /dev/null and b/modules/ROOT/images/organizationsettings.png differ
diff --git a/modules/ROOT/images/param-assist.png b/modules/ROOT/images/param-assist.png
new file mode 100644
index 000000000..49547f30c
Binary files /dev/null and b/modules/ROOT/images/param-assist.png differ
diff --git a/modules/ROOT/images/param-drawer.png b/modules/ROOT/images/param-drawer.png
new file mode 100644
index 000000000..60f2626a2
Binary files /dev/null and b/modules/ROOT/images/param-drawer.png differ
diff --git a/modules/ROOT/images/param-settings.png b/modules/ROOT/images/param-settings.png
new file mode 100644
index 000000000..1e127585a
Binary files /dev/null and b/modules/ROOT/images/param-settings.png differ
diff --git a/modules/ROOT/images/parameter-chaining.png b/modules/ROOT/images/parameter-chaining.png
new file mode 100644
index 000000000..28a6b76f2
Binary files /dev/null and b/modules/ROOT/images/parameter-chaining.png differ
diff --git a/modules/ROOT/images/parameterized-search-phrase.png b/modules/ROOT/images/parameterized-search-phrase.png
new file mode 100644
index 000000000..2cf58fc57
Binary files /dev/null and b/modules/ROOT/images/parameterized-search-phrase.png differ
diff --git a/modules/ROOT/images/params-assist.png b/modules/ROOT/images/params-assist.png
new file mode 100644
index 000000000..2f778e40c
Binary files /dev/null and b/modules/ROOT/images/params-assist.png differ
diff --git a/modules/ROOT/images/password.png b/modules/ROOT/images/password.png
new file mode 100644
index 000000000..cba17f59e
Binary files /dev/null and b/modules/ROOT/images/password.png differ
diff --git a/modules/ROOT/images/perspective-components.png b/modules/ROOT/images/perspective-components.png
new file mode 100644
index 000000000..a205d70a0
Binary files /dev/null and b/modules/ROOT/images/perspective-components.png differ
diff --git a/modules/ROOT/images/perspective-creation.png b/modules/ROOT/images/perspective-creation.png
new file mode 100644
index 000000000..0ec55c2e9
Binary files /dev/null and b/modules/ROOT/images/perspective-creation.png differ
diff --git a/modules/ROOT/images/perspective-drawer.png b/modules/ROOT/images/perspective-drawer.png
new file mode 100644
index 000000000..cbad51bf3
Binary files /dev/null and b/modules/ROOT/images/perspective-drawer.png differ
diff --git a/modules/ROOT/images/perspective-export-import.png b/modules/ROOT/images/perspective-export-import.png
new file mode 100644
index 000000000..47285552f
Binary files /dev/null and b/modules/ROOT/images/perspective-export-import.png differ
diff --git a/modules/ROOT/images/perspective-refresh-magnified.png b/modules/ROOT/images/perspective-refresh-magnified.png
new file mode 100644
index 000000000..6f77ad276
Binary files /dev/null and b/modules/ROOT/images/perspective-refresh-magnified.png differ
diff --git a/modules/ROOT/images/plan-view.png b/modules/ROOT/images/plan-view.png
new file mode 100644
index 000000000..18c37f588
Binary files /dev/null and b/modules/ROOT/images/plan-view.png differ
diff --git a/modules/ROOT/images/playback.png b/modules/ROOT/images/playback.png
new file mode 100644
index 000000000..1dcb7929f
Binary files /dev/null and b/modules/ROOT/images/playback.png differ
diff --git a/modules/ROOT/images/prioritize.png b/modules/ROOT/images/prioritize.png
new file mode 100644
index 000000000..6aa8c6546
Binary files /dev/null and b/modules/ROOT/images/prioritize.png differ
diff --git a/modules/ROOT/images/privatelink_01_before_enabling.png b/modules/ROOT/images/privatelink_01_before_enabling.png
index a0d9f5d44..14e221b58 100644
Binary files a/modules/ROOT/images/privatelink_01_before_enabling.png and b/modules/ROOT/images/privatelink_01_before_enabling.png differ
diff --git a/modules/ROOT/images/privatelink_03_browser_bloom_over_vpn.png b/modules/ROOT/images/privatelink_03_browser_bloom_over_vpn.png
index 5b5ebeded..868616131 100644
Binary files a/modules/ROOT/images/privatelink_03_browser_bloom_over_vpn.png and b/modules/ROOT/images/privatelink_03_browser_bloom_over_vpn.png differ
diff --git a/modules/ROOT/images/privateserviceconnect_01_before_enabling.png b/modules/ROOT/images/privateserviceconnect_01_before_enabling.png
index 2542cc382..9937abfa6 100644
Binary files a/modules/ROOT/images/privateserviceconnect_01_before_enabling.png and b/modules/ROOT/images/privateserviceconnect_01_before_enabling.png differ
diff --git a/modules/ROOT/images/privateserviceconnect_03_browser_bloom_over_vpn.png b/modules/ROOT/images/privateserviceconnect_03_browser_bloom_over_vpn.png
index 3ada62bbe..fea760bc3 100644
Binary files a/modules/ROOT/images/privateserviceconnect_03_browser_bloom_over_vpn.png and b/modules/ROOT/images/privateserviceconnect_03_browser_bloom_over_vpn.png differ
diff --git a/modules/ROOT/images/proactive-blank-input.png b/modules/ROOT/images/proactive-blank-input.png
new file mode 100644
index 000000000..972ed50d0
Binary files /dev/null and b/modules/ROOT/images/proactive-blank-input.png differ
diff --git a/modules/ROOT/images/proactive-product-selected.png b/modules/ROOT/images/proactive-product-selected.png
new file mode 100644
index 000000000..85d4d216e
Binary files /dev/null and b/modules/ROOT/images/proactive-product-selected.png differ
diff --git a/modules/ROOT/images/project.png b/modules/ROOT/images/project.png
new file mode 100644
index 000000000..38582424a
Binary files /dev/null and b/modules/ROOT/images/project.png differ
diff --git a/modules/ROOT/images/projectsettings.png b/modules/ROOT/images/projectsettings.png
new file mode 100644
index 000000000..d341330d1
Binary files /dev/null and b/modules/ROOT/images/projectsettings.png differ
diff --git a/modules/ROOT/images/property-key-refresh.png b/modules/ROOT/images/property-key-refresh.png
new file mode 100644
index 000000000..363cabd36
Binary files /dev/null and b/modules/ROOT/images/property-key-refresh.png differ
diff --git a/modules/ROOT/images/query-connected-dropdown.png b/modules/ROOT/images/query-connected-dropdown.png
new file mode 100644
index 000000000..5fb52fba1
Binary files /dev/null and b/modules/ROOT/images/query-connected-dropdown.png differ
diff --git a/modules/ROOT/images/query-connection-dropdown.png b/modules/ROOT/images/query-connection-dropdown.png
new file mode 100644
index 000000000..27492693c
Binary files /dev/null and b/modules/ROOT/images/query-connection-dropdown.png differ
diff --git a/modules/ROOT/images/query-copilot.png b/modules/ROOT/images/query-copilot.png
new file mode 100644
index 000000000..062dfb167
Binary files /dev/null and b/modules/ROOT/images/query-copilot.png differ
diff --git a/modules/ROOT/images/query-limit.png b/modules/ROOT/images/query-limit.png
new file mode 100644
index 000000000..9b12ac653
Binary files /dev/null and b/modules/ROOT/images/query-limit.png differ
diff --git a/modules/ROOT/images/query-log-analyzer.png b/modules/ROOT/images/query-log-analyzer.png
new file mode 100644
index 000000000..654c59c0a
Binary files /dev/null and b/modules/ROOT/images/query-log-analyzer.png differ
diff --git a/modules/ROOT/images/query-settings.png b/modules/ROOT/images/query-settings.png
new file mode 100644
index 000000000..097a66488
Binary files /dev/null and b/modules/ROOT/images/query-settings.png differ
diff --git a/modules/ROOT/images/query-styling.png b/modules/ROOT/images/query-styling.png
new file mode 100644
index 000000000..4206d0bae
Binary files /dev/null and b/modules/ROOT/images/query-styling.png differ
diff --git a/modules/ROOT/images/query-ui.png b/modules/ROOT/images/query-ui.png
new file mode 100644
index 000000000..a8874f0e5
Binary files /dev/null and b/modules/ROOT/images/query-ui.png differ
diff --git a/modules/ROOT/images/query-upx.png b/modules/ROOT/images/query-upx.png
new file mode 100644
index 000000000..e8bcfbe5d
Binary files /dev/null and b/modules/ROOT/images/query-upx.png differ
diff --git a/modules/ROOT/images/query.png b/modules/ROOT/images/query.png
new file mode 100644
index 000000000..4c3111217
Binary files /dev/null and b/modules/ROOT/images/query.png differ
diff --git a/modules/ROOT/images/raw.png b/modules/ROOT/images/raw.png
new file mode 100644
index 000000000..4aef6f474
Binary files /dev/null and b/modules/ROOT/images/raw.png differ
diff --git a/modules/ROOT/images/refresh-data.png b/modules/ROOT/images/refresh-data.png
new file mode 100644
index 000000000..ab1dfa815
Binary files /dev/null and b/modules/ROOT/images/refresh-data.png differ
diff --git a/modules/ROOT/images/relationship-mapping.png b/modules/ROOT/images/relationship-mapping.png
new file mode 100644
index 000000000..353d68759
Binary files /dev/null and b/modules/ROOT/images/relationship-mapping.png differ
diff --git a/modules/ROOT/images/relationship-styling.png b/modules/ROOT/images/relationship-styling.png
new file mode 100644
index 000000000..421238b7b
Binary files /dev/null and b/modules/ROOT/images/relationship-styling.png differ
diff --git a/modules/ROOT/images/relationship.png b/modules/ROOT/images/relationship.png
new file mode 100644
index 000000000..4fb0c561d
Binary files /dev/null and b/modules/ROOT/images/relationship.png differ
diff --git a/modules/ROOT/images/relationships-of-a-node.png b/modules/ROOT/images/relationships-of-a-node.png
new file mode 100644
index 000000000..af286efd9
Binary files /dev/null and b/modules/ROOT/images/relationships-of-a-node.png differ
diff --git a/modules/ROOT/images/reveal-relationships.png b/modules/ROOT/images/reveal-relationships.png
new file mode 100644
index 000000000..41440444d
Binary files /dev/null and b/modules/ROOT/images/reveal-relationships.png differ
diff --git a/modules/ROOT/images/roles1.png b/modules/ROOT/images/roles1.png
new file mode 100644
index 000000000..a39bff67f
Binary files /dev/null and b/modules/ROOT/images/roles1.png differ
diff --git a/modules/ROOT/images/roles2.png b/modules/ROOT/images/roles2.png
new file mode 100644
index 000000000..9fe1a3e22
Binary files /dev/null and b/modules/ROOT/images/roles2.png differ
diff --git a/modules/ROOT/images/rule-based-styling-range.png b/modules/ROOT/images/rule-based-styling-range.png
new file mode 100644
index 000000000..1b8d6eef2
Binary files /dev/null and b/modules/ROOT/images/rule-based-styling-range.png differ
diff --git a/modules/ROOT/images/rule-based-styling-single.png b/modules/ROOT/images/rule-based-styling-single.png
new file mode 100644
index 000000000..98c8316cf
Binary files /dev/null and b/modules/ROOT/images/rule-based-styling-single.png differ
diff --git a/modules/ROOT/images/rule-based-styling-unique-values.png b/modules/ROOT/images/rule-based-styling-unique-values.png
new file mode 100644
index 000000000..21d9443a8
Binary files /dev/null and b/modules/ROOT/images/rule-based-styling-unique-values.png differ
diff --git a/modules/ROOT/images/rule-based-time.png b/modules/ROOT/images/rule-based-time.png
new file mode 100644
index 000000000..f350df814
Binary files /dev/null and b/modules/ROOT/images/rule-based-time.png differ
diff --git a/modules/ROOT/images/save-cypher.png b/modules/ROOT/images/save-cypher.png
new file mode 100644
index 000000000..39273656d
Binary files /dev/null and b/modules/ROOT/images/save-cypher.png differ
diff --git a/modules/ROOT/images/saved-cypher-drawer.png b/modules/ROOT/images/saved-cypher-drawer.png
new file mode 100644
index 000000000..627ede6f5
Binary files /dev/null and b/modules/ROOT/images/saved-cypher-drawer.png differ
diff --git a/modules/ROOT/images/scene-action-context.png b/modules/ROOT/images/scene-action-context.png
new file mode 100644
index 000000000..90453b0e7
Binary files /dev/null and b/modules/ROOT/images/scene-action-context.png differ
diff --git a/modules/ROOT/images/scene-action-relationship.png b/modules/ROOT/images/scene-action-relationship.png
new file mode 100644
index 000000000..2f8c7ba8c
Binary files /dev/null and b/modules/ROOT/images/scene-action-relationship.png differ
diff --git a/modules/ROOT/images/scene-action.png b/modules/ROOT/images/scene-action.png
new file mode 100644
index 000000000..c07277dfe
Binary files /dev/null and b/modules/ROOT/images/scene-action.png differ
diff --git a/modules/ROOT/images/search-bar-5.png b/modules/ROOT/images/search-bar-5.png
new file mode 100644
index 000000000..e4b30a8aa
Binary files /dev/null and b/modules/ROOT/images/search-bar-5.png differ
diff --git a/modules/ROOT/images/search-bar-6.png b/modules/ROOT/images/search-bar-6.png
new file mode 100644
index 000000000..6cce16151
Binary files /dev/null and b/modules/ROOT/images/search-bar-6.png differ
diff --git a/modules/ROOT/images/search-bar-7.png b/modules/ROOT/images/search-bar-7.png
new file mode 100644
index 000000000..443d614c0
Binary files /dev/null and b/modules/ROOT/images/search-bar-7.png differ
diff --git a/modules/ROOT/images/secondary-count-console.png b/modules/ROOT/images/secondary-count-console.png
new file mode 100644
index 000000000..e0334f7ec
Binary files /dev/null and b/modules/ROOT/images/secondary-count-console.png differ
diff --git a/modules/ROOT/images/select-related-nodes.png b/modules/ROOT/images/select-related-nodes.png
new file mode 100644
index 000000000..46acc5b81
Binary files /dev/null and b/modules/ROOT/images/select-related-nodes.png differ
diff --git a/modules/ROOT/images/selected-values.png b/modules/ROOT/images/selected-values.png
new file mode 100644
index 000000000..26b17bcb0
Binary files /dev/null and b/modules/ROOT/images/selected-values.png differ
diff --git a/modules/ROOT/images/shortest-path.png b/modules/ROOT/images/shortest-path.png
new file mode 100644
index 000000000..9ce07dd6a
Binary files /dev/null and b/modules/ROOT/images/shortest-path.png differ
diff --git a/modules/ROOT/images/show-me-a-graph.png b/modules/ROOT/images/show-me-a-graph.png
new file mode 100644
index 000000000..5d63820fd
Binary files /dev/null and b/modules/ROOT/images/show-me-a-graph.png differ
diff --git a/modules/ROOT/images/slicer.png b/modules/ROOT/images/slicer.png
new file mode 100644
index 000000000..044615599
Binary files /dev/null and b/modules/ROOT/images/slicer.png differ
diff --git a/modules/ROOT/images/snapshot-actions.png b/modules/ROOT/images/snapshot-actions.png
new file mode 100644
index 000000000..9197987e3
Binary files /dev/null and b/modules/ROOT/images/snapshot-actions.png differ
diff --git a/modules/ROOT/images/sources.png b/modules/ROOT/images/sources.png
new file mode 100644
index 000000000..7f4a81321
Binary files /dev/null and b/modules/ROOT/images/sources.png differ
diff --git a/modules/ROOT/images/sso.png b/modules/ROOT/images/sso.png
new file mode 100644
index 000000000..95b1a3114
Binary files /dev/null and b/modules/ROOT/images/sso.png differ
diff --git a/modules/ROOT/images/static-search-phrase.png b/modules/ROOT/images/static-search-phrase.png
new file mode 100644
index 000000000..3a4969a62
Binary files /dev/null and b/modules/ROOT/images/static-search-phrase.png differ
diff --git a/modules/ROOT/images/stream.png b/modules/ROOT/images/stream.png
new file mode 100644
index 000000000..93aeff934
Binary files /dev/null and b/modules/ROOT/images/stream.png differ
diff --git a/modules/ROOT/images/table.png b/modules/ROOT/images/table.png
new file mode 100644
index 000000000..77138ff97
Binary files /dev/null and b/modules/ROOT/images/table.png differ
diff --git a/modules/ROOT/images/timezones.png b/modules/ROOT/images/timezones.png
new file mode 100644
index 000000000..fb20af05d
Binary files /dev/null and b/modules/ROOT/images/timezones.png differ
diff --git a/modules/ROOT/images/tool-authentication.png b/modules/ROOT/images/tool-authentication.png
new file mode 100644
index 000000000..0fb5ae0d5
Binary files /dev/null and b/modules/ROOT/images/tool-authentication.png differ
diff --git a/modules/ROOT/images/tools.png b/modules/ROOT/images/tools.png
new file mode 100644
index 000000000..f6e1d018b
Binary files /dev/null and b/modules/ROOT/images/tools.png differ
diff --git a/modules/ROOT/images/tooltip-node-2.png b/modules/ROOT/images/tooltip-node-2.png
new file mode 100644
index 000000000..82d530f42
Binary files /dev/null and b/modules/ROOT/images/tooltip-node-2.png differ
diff --git a/modules/ROOT/images/upgradeprotobc.png b/modules/ROOT/images/upgradeprotobc.png
new file mode 100644
index 000000000..4eff31142
Binary files /dev/null and b/modules/ROOT/images/upgradeprotobc.png differ
diff --git a/modules/ROOT/images/upx-import.png b/modules/ROOT/images/upx-import.png
new file mode 100644
index 000000000..996fd2829
Binary files /dev/null and b/modules/ROOT/images/upx-import.png differ
diff --git a/modules/ROOT/images/upx-import2.png b/modules/ROOT/images/upx-import2.png
new file mode 100644
index 000000000..e659623a0
Binary files /dev/null and b/modules/ROOT/images/upx-import2.png differ
diff --git a/modules/ROOT/images/upx-query.png b/modules/ROOT/images/upx-query.png
new file mode 100644
index 000000000..069b582d8
Binary files /dev/null and b/modules/ROOT/images/upx-query.png differ
diff --git a/modules/ROOT/images/users.png b/modules/ROOT/images/users.png
new file mode 100644
index 000000000..a5e72d803
Binary files /dev/null and b/modules/ROOT/images/users.png differ
diff --git a/modules/ROOT/images/ux-research-promo.svg b/modules/ROOT/images/ux-research-promo.svg
new file mode 100644
index 000000000..b62f92e25
--- /dev/null
+++ b/modules/ROOT/images/ux-research-promo.svg
@@ -0,0 +1,16 @@
+
diff --git a/modules/ROOT/images/whiteboardfriendly.png b/modules/ROOT/images/whiteboardfriendly.png
new file mode 100644
index 000000000..29ea76275
Binary files /dev/null and b/modules/ROOT/images/whiteboardfriendly.png differ
diff --git a/modules/ROOT/pages/platform/api/authentication.adoc b/modules/ROOT/pages/api/authentication.adoc
similarity index 66%
rename from modules/ROOT/pages/platform/api/authentication.adoc
rename to modules/ROOT/pages/api/authentication.adoc
index 694fcdead..f4fac1dd5 100644
--- a/modules/ROOT/pages/platform/api/authentication.adoc
+++ b/modules/ROOT/pages/api/authentication.adoc
@@ -6,16 +6,23 @@ The Aura API uses OAuth 2.0 for API authentication.
== Creating credentials
-[NOTE]
-====
-Enterprise users have unrestricted access to creating API credentials.
-However, users with Free and Professional instances must have entered billing information or be a member of a marketplace tenant before they can create API credentials.
-====
+label:AuraDB-Free[]
+label:AuraDB-Professional[]
+label:AuraDS-Enterprise[]
+label:AuraDB-Virtual-Dedicated-Cloud[]
+
+AuraDB Virtual Dedicated Cloud and AuraDS Enterprise users have unrestricted access to creating API credentials.
+However, users with Free and Professional instances must have entered billing information or be a member of a marketplace project before they can create API credentials.
+API credentials are linked to the user account, inheriting its capabilities and roles.
+The API credentials never expire unless you delete them.
+
+To create credentials in the Aura Console, go to the https://console-preview.neo4j.io/account/api-keys[API Keys page] in your browser, or:
-. Navigate to the https://console.neo4j.io/#account[Neo4j Aura Console Account Details page] in your browser.
-. Select the *Create* button in the *Aura API Credentials* section.
-. Enter a *Client name*, and select *Create*.
-. Securely save the *Client ID* and *Client Secret* you are given in the resulting modal; you will need these for the next step.
+* Go to your profile menu in the top right corner of your screen.
+* Select *API Keys*.
+* Select the *Create* button.
+* Enter a Client *Name*, and select *Create*.
+* *Save* the Client ID and Client Secret you are given in the resulting modal; you will need these for the next step.
[CAUTION]
====
@@ -63,7 +70,7 @@ Authentication to the token endpoint uses HTTP Basic Authentication, where the c
|===
|Parameter |Value
-|grant_type
+|`grant_type`
|`client_credentials`
|===
@@ -74,7 +81,7 @@ Both the request and response contain sensitive information and must be kept sec
You are responsible for keeping the client credentials and access tokens confidential, whether in transit (by specifying HTTPS), if stored at rest, in log files, etc.
====
-==== Request examples
+==== Request examples
[.tabbed-example]
====
@@ -123,12 +130,14 @@ print(response.json())
----
{
"access_token": "", <1>
- "expires_in": 3600,
+ "expires_in": 3600, <2>
"token_type": "bearer"
}
----
<1> The `access_token` returned here is what you will provide as the Bearer Token in the `Authorization` header of Aura API requests.
+<2> The value of `expires_in` is the token expiration time in seconds.
+Once the token expires, the application must request a new one.
==== HTTP response codes
@@ -157,7 +166,17 @@ print(response.json())
|The request body is missing.
|===
+=== Excessive token requests
+
+Excessive token requests may cause inefficiencies or rate-limiting.
+You can optimize your API usage by following these steps:
+
+* *Retrieve the token once per hour:* As tokens remain valid for an hour, reduce the frequency of token requests and reuse the same token for multiple API calls.
+
+* *Implement token caching:* Store the token securely within your system to reuse it for subsequent requests during its validity period, minimizing unnecessary calls to the endpoint.
+
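+The following is a minimal sketch of this pattern using `curl` and `jq`; it is an illustration only, and it assumes that the OAuth token endpoint is `\https://api.neo4j.io/oauth/token` and that `CLIENT_ID` and `CLIENT_SECRET` hold your Aura API credentials:
+
+[source, shell]
+----
+# Reuse a cached token until shortly before it expires, then request a new one.
+TOKEN_FILE=aura_token.json
+now=$(date +%s)
+if [ ! -f "$TOKEN_FILE" ] || [ "$now" -ge "$(jq -r '.expires_at' "$TOKEN_FILE")" ]; then
+  curl -s --request POST "https://api.neo4j.io/oauth/token" \
+    --user "$CLIENT_ID:$CLIENT_SECRET" \
+    --data "grant_type=client_credentials" |
+    jq --argjson now "$now" '. + {expires_at: ($now + .expires_in - 60)}' > "$TOKEN_FILE"
+fi
+ACCESS_TOKEN=$(jq -r '.access_token' "$TOKEN_FILE")
+----
+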
=== Token expiration
-If you attempt to send a request to the Aura API, authenticated using an expired access token, you will receive a 403 Forbidden response.
-You will need to obtain a new token to continue using the API.
+Access tokens are temporary and expire after one hour.
+If you send a request to the Aura API using an expired token, you will receive a `403 Forbidden` response.
+To continue using the API, you must obtain a new token using the Aura API credentials.
diff --git a/modules/ROOT/pages/platform/api/overview.adoc b/modules/ROOT/pages/api/overview.adoc
similarity index 56%
rename from modules/ROOT/pages/platform/api/overview.adoc
rename to modules/ROOT/pages/api/overview.adoc
index 34a90350d..f62b8b106 100644
--- a/modules/ROOT/pages/platform/api/overview.adoc
+++ b/modules/ROOT/pages/api/overview.adoc
@@ -1,6 +1,7 @@
[[aura-api-overview]]
= Overview
:description: This page introduces the Aura API.
+:page-aliases: platform/api/overview.adoc
The Aura API allows you to programmatically perform actions on your Aura instances without the need to log in to the Console.
@@ -8,7 +9,7 @@ A complete list of the available endpoints can be seen and tested in the link:{n
[TIP]
====
-Before using the API, you must follow the steps outlined in xref:platform/api/authentication.adoc[] to create your credentials and authenticate your requests.
+Before using the API, you must follow the steps outlined in xref:api/authentication.adoc[] to create your credentials and authenticate your requests.
====
== API URL
@@ -21,9 +22,9 @@ The base URL for the Aura API is `\https://api.neo4j.io`.
The current version of the Aura API is `link:{neo4j-docs-base-uri}/aura/platform/api/specification/[v1]`
-As and when we need to introduce breaking changes to the API, we will release a new version to ensure we do not break existing integrations.
+If and when there is a need to introduce breaking changes to the API, a new version will be released to ensure no existing integrations are broken.
-In the future, as we deprecate legacy API versions, we will provide notice.
+In the future, when legacy API versions are deprecated, notice will be issued.
Once the expiry date for a deprecated version has passed, that version will no longer be available.
=== Example request
@@ -32,15 +33,21 @@ The following example shows how to use the base URL and versioning to make a req
`GET \https://api.neo4j.io/v1/instances`
-== Retries
+== Rate limits
+
+* *Non-paying customers get a lower rate limit of 25 requests per minute:* This includes AuraDB Free, and AuraDB Professional trial users without a billing method.
-In the event of `5xx` server error responses, you may consider retrying the request after a delay if it is safe to do so. The response may include a `Retry-After` header with a suggestion of a suitable minimum delay before attempting to retry.
+* *Paying customers get a higher rate limit of 125 requests per minute:* This includes AuraDB Professional with a billing method attached to the account, AuraDB Virtual Dedicated Cloud, AuraDB Business Critical, and AuraDS Enterprise.
+
+== Retries
-Rate limiting is set to 125 requests per minute.
+In the event of `5xx` server error responses, you may consider retrying the request after a delay if it is safe to do so.
+The response may include a `Retry-After` header with a suggestion of a suitable minimum delay before attempting to retry.
-You should consider your use of the Rate Limit before attempting to retry, and we recommend using an exponential backoff delay with a limited number of retries before giving up.
+You should consider your use of the rate limit before attempting to retry, and it is recommended that you use an exponential backoff delay with a limited number of retries before giving up.
-A request is only guaranteed to be safe to retry if it uses an idempotent HTTP method, such as `GET`. If for example, you attempt to retry a request for creating an instance, you may end up with duplicate instances and end up being charged extra as a result.
+A request is only guaranteed to be safe to retry if it uses an idempotent HTTP method, such as `GET`.
+If, for example, you retry a request for creating an instance, you may end up with duplicate instances and end up being charged extra as a result.
In the case of `429 Too Many Requests`, we would recommend slowing down the rate of all requests sent from your client application and considering a retry with a suitable minimum delay and backoff strategy.
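+
+As an illustration only, a minimal retry loop with exponential backoff might look like the following, assuming `ACCESS_TOKEN` holds a valid bearer token obtained as described in xref:api/authentication.adoc[]:
+
+[source, shell]
+----
+# Retry GET /v1/instances up to 5 times, doubling the delay after each 429 or 5xx response.
+delay=1
+for attempt in 1 2 3 4 5; do
+  status=$(curl -s -o response.json -w '%{http_code}' \
+    --header "Authorization: Bearer $ACCESS_TOKEN" \
+    "https://api.neo4j.io/v1/instances")
+  case "$status" in
+    2*) cat response.json; break ;;
+    429|5*) echo "Received $status, retrying in ${delay}s" >&2; sleep "$delay"; delay=$((delay * 2)) ;;
+    *) echo "Received $status, not retrying" >&2; break ;;
+  esac
+done
+----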
@@ -52,4 +59,4 @@ An `X-Request-Id` response header is returned with each request and can be used
The value of this header contains a unique ID that can be used to track the journey of a request.
-If you run into any issues with a particular request, you can https://support.neo4j.com/[raise a support ticket] and provide the `X-Request-Id`.
+If you run into any issues with a particular request, you can https://support.neo4j.com/[raise a support ticket] and provide the `X-Request-Id`.
\ No newline at end of file
diff --git a/modules/ROOT/pages/platform/apoc.adoc b/modules/ROOT/pages/apoc.adoc
similarity index 80%
rename from modules/ROOT/pages/platform/apoc.adoc
rename to modules/ROOT/pages/apoc.adoc
index 264f0082f..3edf59de5 100644
--- a/modules/ROOT/pages/platform/apoc.adoc
+++ b/modules/ROOT/pages/apoc.adoc
@@ -1,5 +1,7 @@
[[aura-apoc-support]]
= APOC support
+:description: This page lists supported APOC procedures in Neo4j Aura.
+:page-aliases: platform/apoc.adoc
APOC (Awesome Procedures on Cypher) is a Neo4j library that provides access to additional procedures and functions, extending the use of the Cypher query language. For more information on APOC, see https://neo4j.com/docs/apoc/[the APOC documentation].
diff --git a/modules/ROOT/pages/aura-cli/auradb-instances.adoc b/modules/ROOT/pages/aura-cli/auradb-instances.adoc
new file mode 100644
index 000000000..8e37f50b9
--- /dev/null
+++ b/modules/ROOT/pages/aura-cli/auradb-instances.adoc
@@ -0,0 +1,298 @@
+= Managing AuraDB instances
+:description: Manage AuraDB instance with the Neo4j Aura command line interface.
+
+[NOTE]
+====
+The Aura CLI makes heavy use of console commands containing the keyword `tenant`.
+To be consistent with that, all Aura CLI documentation pages use the term "tenant". Other parts of the documentation use the term "project" instead, which is the xref:new-console.adoc#_topology[topological evolution] of "tenant".
+====
+
+The Aura CLI provides a full set of commands to manage the lifecycle of an AuraDB instance.
+
+
+== Create
+
+Before you can create an AuraDB instance, make sure that your desired configuration is available in the tenant where the AuraDB instance will reside.
+You can obtain the available configurations by using the tenant commands described in xref:aura-cli/auradb-tenants.adoc[Working with AuraDB tenants].
+If you are using Aura Free, there is no need to do this, as the configurations are fixed.
+Create an AuraDB instance with:
+
+[source, shell]
+----
+aura-cli instance create --name YOUR_INSTANCE_NAME --type free-db --await
+----
+
+For other Aura tiers, provide the following:
+
+* Instance name
+* Memory
+* Cloud provider
+* Cloud region
+* Type
+
+You can find the values for these by using the tenant commands.
+Once you have them, proceed with:
+
+[source, shell]
+----
+aura-cli instance create --name YOUR_INSTANCE_NAME --cloud-provider YOUR_CLOUD_PROVIDER --region CLOUD_REGION --memory MEMORY --type AURA_INSTANCE_TYPE --tenant-id YOUR_TENANT_ID
+----
+
+You can skip `--tenant-id` if you have set a default tenant.
+
+The response provides the connection details for the requested AuraDB instance and contains the authentication details: the username and password.
+Make sure to record these safely and securely as they are only shown once.
+
+
+== List
+
+To see a list of AuraDB instances, use:
+
+[source, shell]
+----
+aura-cli instance list
+----
+
+Use the table format output option to improve readability:
+
+[source, shell]
+----
+aura-cli instance list --output table
+----
+
+From the list, you can then use the ID for an AuraDB instance to get detailed information about it, including the URL to use for metrics:
+
+[source, shell]
+----
+aura-cli instance get YOUR_INSTANCE_ID
+----
+
+
+== Update
+
+You can change name, memory, or both for a deployed AuraDB instance:
+
+[source, shell]
+----
+aura-cli instance update YOUR_INSTANCE_ID --name NEW_NAME --memory NEW_MEMORY
+----
+
+You may omit either `--name` or `--memory`, but at least one of them must be present.
+
+
+== Delete
+
+If you want to delete an instance, keep in mind that the operation starts immediately, without requiring confirmation.
+Therefore, use with caution:
+
+[source, shell]
+----
+aura-cli instance delete YOUR_INSTANCE_ID
+----
+
+
+== Pause and resume
+
+A paused AuraDB instance incurs a lower cost per hour than when it is running.
+Pausing AuraDB instances when they are not in use and resuming them when needed is an effective mechanism for cost control.
+
+
+=== Pause
+
+[source, shell]
+----
+aura-cli instance pause YOUR_INSTANCE_ID
+----
+
+
+=== Resume
+
+[source, shell]
+----
+aura-cli instance resume YOUR_INSTANCE_ID
+----
+
+
+== Snapshots
+
+A snapshot is a copy of an AuraDB instance's data at a specific point in time.
+It can then be restored with the `overwrite` command of the Aura CLI.
+
+
+=== Create
+
+An instance must be running for a snapshot to be created:
+
+[source, shell]
+----
+aura-cli instance snapshot create --instance-id YOUR_INSTANCE_ID
+----
+
+
+=== List
+
+All of the snapshots for an AuraDB instance are returned in the response.
+
+[source, shell]
+----
+aura-cli instance snapshot list --instance-id YOUR_INSTANCE_ID
+----
+
+
+=== Get
+
+To obtain the details for a single snapshot, use the `get` command:
+
+[source, shell]
+----
+aura-cli instance snapshot get SNAPSHOT_ID --instance-id YOUR_INSTANCE_ID
+----
+
+
+== Overwrite
+
+You can overwrite the content of one AuraDB instance with the content of another while leaving the configuration as is.
+This requires that the destination storage is large enough.
+You use a snapshot of the source AuraDB instance for this operation.
+Overwrites can be used to restore an AuraDB instance's databases, for duplication, for moving between regions, or for any situation where you want to use the content of one AuraDB instance in another.
+Keep in mind that the content of the destination AuraDB is **completely overwritten** and the operation does not require confirmation.
+Therefore, use with caution.
+
+The steps to overwrite an existing AuraDB instance with the snapshot from another differ for a historical snapshot and the latest snapshot, as described in the following sections.
+
+In both cases, the destination AuraDB instance must be running.
+
+
+=== Steps for a historical snapshot
+
+. Obtain the ID of the AuraDB instance that has the snapshot you wish to use, referred to as the "source instance":
++
+[source, shell]
+----
+aura-cli instance list --output table
+----
++
+. Decide which of its snapshots to use and note the snapshot ID, referred to as the source snapshot:
++
+[source, shell]
+----
+aura-cli instance snapshot list --instance-id SOURCE_INSTANCE_ID
+----
++
+. Locate the destination AuraDB instance and obtain its ID, referred to as the destination instance:
++
+[source, shell]
+----
+aura-cli instance list --output table
+----
++
+. Perform the overwrite:
++
+[source, shell]
+----
+aura-cli instance overwrite DESTINATION_INSTANCE_ID --source-instance-id SOURCE_INSTANCE_ID --source-snapshot-id SOURCE_SNAPSHOT_ID
+----
++
+If you receive a response that looks like the following, select a different snapshot:
++
+[source, shell]
+----
+Error: [Source snapshot SOURCE_SNAPSHOT_ID is not exportable ]
+----
++
+It is not possible at this time for the Aura CLI to indicate which snapshots are exportable.
+See xref:managing-instances/backup-restore-export.adoc#export-create[Export / Create] for more information about exportable snapshots.
++
+. The destination AuraDB instance content will now be overwritten.
+ Depending on the size, this will take several minutes to complete.
+ You can check the status with:
++
+[source, shell]
+----
+aura-cli instance get DESTINATION_INSTANCE_ID
+----
+
+When the status is "Running", the overwrite is complete.
+
+
+=== Steps for the latest snapshot
+
+. Locate the destination AuraDB instance and obtain its ID, referred to as the destination instance:
++
+[source, shell]
+----
+aura-cli instance list --output table
+----
++
+. Perform the overwrite:
++
+[source, shell]
+----
+aura-cli instance overwrite DESTINATION_INSTANCE_ID --source-instance-id SOURCE_INSTANCE_ID
+----
++
+. The destination AuraDB instance content will now be overwritten.
+ Depending on the size, this will take several minutes to complete.
+ You can check the status with:
++
+[source, shell]
+----
+aura-cli instance get DESTINATION_INSTANCE_ID
+----
+
+When the status is "Running", the overwrite is complete.
+
+
+== Customer-managed keys
+
+Encryption of data at rest is a standard feature of AuraDB and uses keys from a supported cloud key management service (KMS).
+AuraDB Virtual Dedicated Cloud customers may wish to use their own encryption keys, a capability that is referred to as Customer-Managed Encryption Keys (CMEK).
+For more information about customer-managed keys, see xref:security/encryption.adoc#customer-managed-keys[Encryption].
+It is recommended to familiarize yourself with this before proceeding.
+The Aura CLI allows management of this feature with these commands:
+
+* `create` - allows Aura to use the key defined in your Cloud Key Management System.
+* `delete` - removes the permission for Aura to use a key. This makes all data encrypted with that key inaccessible.
+* `list` - lists already defined CMEKs.
+* `get` - returns detailed information about an individual CMEK.
+
+
+=== Create
+
+To use this command, you must have created your customer-managed key in your cloud provider's Key Management System (KMS) and configured its permissions correctly.
+For more information, see xref:security/encryption.adoc[Encryption].
+
+[source, shell]
+----
+aura-cli customer-managed-key create --tenant-id YOUR_TENANT_ID --type AURADB_TYPE --region CLOUD_REGION_OF_THE_AURADB_INSTANCE --name YOUR_CUSTOM_KEY_NAME --key-id YOUR_CUSTOM_KEY_ARN --cloud-provider YOUR_CLOUD_PROVIDER_THAT_HAS_THE_CUSTOM_KEY
+----
+
+
+=== Delete
+
+Keep in mind that this command executes immediately, resulting in a loss of data access for any AuraDB instance that is using the CMEK.
+Therefore, use with caution:
+
+[source, shell]
+----
+aura-cli customer-managed-key delete YOUR_AURA_CMEK_ID
+----
+
+
+=== List
+
+List all configured CMEKs:
+
+[source, shell]
+----
+aura-cli customer-managed-key list --tenant-id YOUR_TENANT_ID --output table
+----
+
+=== Get
+
+Provide detailed information for a particular CMEK:
+
+[source, shell]
+----
+aura-cli customer-managed-key get YOUR_CMEK_ID
+----
\ No newline at end of file
diff --git a/modules/ROOT/pages/aura-cli/auradb-tenants.adoc b/modules/ROOT/pages/aura-cli/auradb-tenants.adoc
new file mode 100644
index 000000000..03dc52609
--- /dev/null
+++ b/modules/ROOT/pages/aura-cli/auradb-tenants.adoc
@@ -0,0 +1,33 @@
+= Working with AuraDB tenants
+:description: Work with AuraDB tenants in the Neo4j Aura command line interface.
+
+[NOTE]
+====
+The Aura CLI makes heavy use of console commands containing the keyword `tenant`.
+To be consistent with that, all Aura CLI documentation pages use the term "tenant". Other parts of the documentation use the term "project" instead, which is the xref:new-console.adoc#_topology[topological evolution] of "tenant".
+====
+
+The Aura CLI allows you to list the Aura tenants you can access and then obtain the available AuraDB instance configurations for each one.
+To list the tenants:
+
+[source, shell]
+----
+aura-cli tenant list
+----
+
+To get the available AuraDB instance configurations for an individual tenant, replace `TENANT-ID` in the following command with the ID of the tenant you are interested in.
+The output is substantial as all available AuraDB instance configurations are returned.
+Consider filtering the output, for example by using the link:https://jqlang.org/[jq] utility.
+
+[source, shell]
+----
+aura-cli tenant get TENANT-ID
+----
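+
+For example, a sketch of such filtering (the field names used here, such as `instance_configurations` and `type`, are assumptions about the output shape and may differ between CLI versions):
+
+[source, shell]
+----
+# Pretty-print the full response.
+aura-cli tenant get TENANT-ID | jq '.'
+# List only the instance types on offer (field names are assumed and may differ).
+aura-cli tenant get TENANT-ID | jq '[.instance_configurations[].type] | unique'
+----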
+
+If you have a single tenant, or one that you use most frequently, it is recommended that you set it as the default to avoid having to specify it repeatedly in other Aura CLI commands.
+Do this with:
+
+[source, shell]
+----
+aura-cli config set default-tenant TENANT-ID
+----
\ No newline at end of file
diff --git a/modules/ROOT/pages/aura-cli/configuration.adoc b/modules/ROOT/pages/aura-cli/configuration.adoc
new file mode 100644
index 000000000..6961b383e
--- /dev/null
+++ b/modules/ROOT/pages/aura-cli/configuration.adoc
@@ -0,0 +1,85 @@
+= Configuration of Aura CLI
+:description: Configure the Neo4j Aura command line interface.
+
+[NOTE]
+====
+The Aura CLI makes heavy use of console commands containing the keyword `tenant`.
+To be consistent with that, all Aura CLI documentation pages use the term "tenant". Other parts of the documentation use the term "project" instead, which is the xref:new-console.adoc#_topology[topological evolution] of "tenant".
+====
+
+Aura CLI has two commands for its own configuration:
+
+* `credential` - manages the sets of client IDs and client secrets that the Aura CLI uses to authenticate with the Aura API when performing its operations.
+* `config` - additional configuration options for the Aura CLI, such as turning Beta features on or off.
+
+
+== Credential
+
+
+=== Add
+
+Adds a set of credentials, client ID and client secret, that were obtained from the Aura Console:
+
+[source, shell]
+----
+aura-cli credential add --name YOUR_LABEL --client-id YOUR_CLIENT_ID --client-secret YOUR_CLIENT_SECRET
+----
+
+=== List
+
+Show all configured credentials that can be used by the Aura CLI:
+
+[source, shell]
+----
+aura-cli credential list
+----
+
+=== Remove
+
+Remove a set of credentials:
+
+[source, shell]
+----
+aura-cli credential remove --name NAME_TO_REMOVE
+----
+
+=== Use
+
+Set the default credentials for the Aura CLI to use:
+
+[source, shell]
+----
+aura-cli credential use --name NAME_TO_USE
+----
+
+== Config
+
+There are various configuration settings that can be controlled by this command, for example, enabling beta features.
+
+
+=== List
+
+Show the current configuration settings:
+
+[source, shell]
+----
+aura-cli config list
+----
+
+=== Get
+
+Show the value of a selected setting:
+
+[source, shell]
+----
+aura-cli config get SETTING_NAME
+----
+
+=== Set
+
+Set the value for a selected setting:
+
+[source, shell]
+----
+aura-cli config set SETTING_NAME SETTING_VALUE
+----
\ No newline at end of file
diff --git a/modules/ROOT/pages/aura-cli/index.adoc b/modules/ROOT/pages/aura-cli/index.adoc
new file mode 100644
index 000000000..1571750ba
--- /dev/null
+++ b/modules/ROOT/pages/aura-cli/index.adoc
@@ -0,0 +1,16 @@
+= Aura CLI
+:description: A guide to the Neo4j Aura command line interface.
+
+[NOTE]
+====
+The Aura CLI makes heavy use of console commands containing the keyword `tenant`.
+To be consistent with that, all Aura CLI documentation pages use the term "tenant". Other parts of the documentation use the term "project" instead, which is the xref:new-console.adoc#_topology[topological evolution] of "tenant".
+====
+
+The Neo4j Aura Command Line Interface (CLI) allows you to manage your Neo4j Aura resources from the command line.
+
+
+== Feedback
+
+Report feedback via link:https://github.com/neo4j/aura-cli/issues[GitHub issues].
+This covers enhancement requests, defects that you may encounter, and general feedback.
diff --git a/modules/ROOT/pages/aura-cli/initial-configuration.adoc b/modules/ROOT/pages/aura-cli/initial-configuration.adoc
new file mode 100644
index 000000000..acbf0680e
--- /dev/null
+++ b/modules/ROOT/pages/aura-cli/initial-configuration.adoc
@@ -0,0 +1,72 @@
+= Initial configuration
+:description: First configuration of the Neo4j Aura command line interface.
+
+[NOTE]
+====
+The Aura CLI makes heavy use of console commands containing the keyword `tenant`.
+To be consistent with that, all Aura CLI documentation pages use the term "tenant". Other parts of the documentation use the term "project" instead, which is the xref:new-console.adoc#_topology[topological evolution] of "tenant".
+====
+
+
+== Obtain an Aura API Key
+
+[NOTE]
+====
+You must have payment information in your account to access this functionality.
+This also applies to Aura Free.
+====
+
+
+=== Aura Console
+
+. Log in to the link:https://console.neo4j.io/[Neo4j Aura Console].
+ Navigate to the top right where your account name is displayed and use the down arrow to access the account menu.
++
+image::/aura-cli/unified-console-account-dropdown.png[]
++
+. In the menu, select **API Keys**, and then select **Create** to create a new key.
++
+image::/aura-cli/unified-console-api-keys.png[]
++
+. In the pop-up window, enter a name for the client and then select **Create**.
++
+image::/aura-cli/unified-console-create-api-key.png[]
++
+. Make sure to securely save the generated client ID and client secret as they are required to use the Aura CLI and they are not shown again.
+
+
+=== Aura Console classic
+
+. Log in to the link:https://console.neo4j.io/[Neo4j Aura Console].
+. Navigate to the top right where your account name is displayed and use the down arrow to access the account menu.
++
+image::/aura-cli/console-classic-home.png[]
++
+. Navigate to **Account Details**.
+. In the **Account Details** display, click **Create** under **Aura API** and **Credentials**.
++
+image::/aura-cli/console-classic-api-keys.png[]
++
+. In the pop-up window, enter a name for the client and then select **Create**.
+. Make sure to securely save the generated client ID and client secret as they are required to use the Aura CLI.
+
+
+== Configure the Aura CLI with an Aura API key
+
+Configure the Aura CLI with the Aura API client ID and client secret you obtained earlier.
+The Aura CLI refers to these as "credentials".
+You can have several sets of credentials and select which one to use.
+
+. At the command prompt, enter the following, using your own values:
++
+[source, shell]
+----
+aura-cli credential add --name YOUR_LABEL --client-id YOUR_CLIENT_ID --client-secret YOUR_CLIENT_SECRET
+----
++
+. To confirm the credentials are working, list your Aura instances:
++
+[source, shell]
+----
+aura-cli instance list --output table
+----
\ No newline at end of file
diff --git a/modules/ROOT/pages/aura-cli/installation.adoc b/modules/ROOT/pages/aura-cli/installation.adoc
new file mode 100644
index 000000000..d23362241
--- /dev/null
+++ b/modules/ROOT/pages/aura-cli/installation.adoc
@@ -0,0 +1,50 @@
+= Installation
+:description: Install the Neo4j Aura command line interface.
+
+[NOTE]
+====
+The Aura CLI makes heavy use of console commands containing the keyword `tenant`.
+To be consistent with that, all Aura CLI documentation pages use the term "tenant". Other parts of the documentation use the term "project" instead, which is the xref:new-console.adoc#_topology[topological evolution] of "tenant".
+====
+
+The Aura CLI is available on link:https://github.com/neo4j/aura-cli/releases[GitHub] and works with all versions of the Aura API.
+Select the latest release and download the archive suitable for your platform and architecture of choice.
+The CLI is fully compatible with Mac, Linux and Windows.
+
+. Navigate to link:https://github.com/neo4j/aura-cli/releases[https://github.com/neo4j/aura-cli/releases].
+. Download the compressed file that matches your OS.
+ Make a note of the folder where the file is located.
+. Once the file is downloaded, extract the contents.
+. Open a command prompt and move to the location where you extracted the files.
+. Complete the installation by moving the `aura-cli` executable into a directory on your path.
++
+Mac and Linux users:
++
+[source, shell]
+----
+sudo mv aura-cli /usr/local/bin
+----
++
+Windows users:
++
+[source, shell]
+----
+move aura-cli c:\windows\system32
+----
+
+Check the installation with:
+
+[source, shell]
+----
+aura-cli -v
+----
+
+You should see the version of the Aura CLI displayed.
+
+[NOTE]
+====
+If you are using a Mac, you may receive a warning from Apple that aura-cli could not be verified.
+If this happens, open **System Settings**, select **Privacy & Security** on the left, and scroll down on the right.
+Select **Open Anyway**.
+This is temporary and will be resolved shortly.
+====
\ No newline at end of file
diff --git a/modules/ROOT/pages/aura-cli/migration.adoc b/modules/ROOT/pages/aura-cli/migration.adoc
new file mode 100644
index 000000000..5510d666d
--- /dev/null
+++ b/modules/ROOT/pages/aura-cli/migration.adoc
@@ -0,0 +1,101 @@
+= Migrating to the Aura CLI
+:description: Migrate to the Neo4j Aura command line interface.
+
+[NOTE]
+====
+The Aura CLI makes heavy use of console commands containing the keyword `tenant`.
+To be consistent with that, all Aura CLI documentation pages use the term "tenant". Other parts of the documentation use the term "project" instead, which is the xref:new-console.adoc#_topology[topological evolution] of "tenant".
+====
+
+The Aura CLI has evolved from link:https://neo4j.com/labs/aura-cli/[Neo4j Labs] to a supported Neo4j product.
+The Neo4j Labs Aura CLI continues to be available for installation for the foreseeable future, but without any further development.
+It is recommended that you move to the Aura CLI as soon as you are able.
+This section outlines what you must consider when moving to the Aura CLI.
+
+
+== Behavior
+
+* The name of the CLI is now `aura-cli`.
+* Support for using environment variables has been removed, as their values can show up in process listings and be accidentally logged, making them vulnerable to exposure.
+ For sensitive values, such as the Aura API client ID and client secret, a secrets manager is recommended instead; see the sketch after this list.
+* Neo4j Labs Aura CLI used plural names for commands; the new Aura CLI has singular naming instead.
+* It is not possible to return the raw API response body with the Aura CLI.
+* Flags in the Aura CLI do not have a short form and must be specified in full.
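+
+As a sketch of that recommendation (the secret names and the use of the AWS CLI are examples only, not part of the Aura CLI):
+
+[source, shell]
+----
+# Read the Aura API credentials from a secrets manager and register them with the Aura CLI.
+CLIENT_ID=$(aws secretsmanager get-secret-value --secret-id aura-api-client-id --query SecretString --output text)
+CLIENT_SECRET=$(aws secretsmanager get-secret-value --secret-id aura-api-client-secret --query SecretString --output text)
+aura-cli credential add --name my-credentials --client-id "$CLIENT_ID" --client-secret "$CLIENT_SECRET"
+----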
+
+
+== Feature set
+
+At the time of general availability, the Aura CLI supports these new features compared to the previous Labs project:
+
+* Customer Managed keys
+* Beta of GraphQL Data APIs
+
+
+== Installation
+
+Download the Aura CLI from link:https://github.com/neo4j/aura-cli/releases[https://github.com/neo4j/aura-cli/releases].
+It is available as a native binary for Windows, Linux and Mac platforms.
+After downloading the relevant file and extracting the executable, the Aura CLI can be run immediately as there are no dependencies.
+
+
+== Commands
+
+[cols="3,^1,3", options="header"]
+|====
+| Labs Aura CLI | maps to | Aura CLI
+| config | → | config
+| credentials | → | credential
+| instances | → | instance
+| snapshots | → | instance → snapshot
+| tenants | → | tenant
+| tenants → get-metrics-integration | → | instance get (returns the URL for metrics integration)
+| | | **NEW**: customer-managed-key
+| | | **NEW**: data-api (beta)
+|====
+
+
+== Command and flag changes with Aura CLI
+
+As far as possible, the Aura CLI commands and their flags are the same as in the Labs-based Aura CLI.
+However, there are some changes as outlined in the following sections.
+
+
+== Config
+
+It is not possible to remove a setting and its value, although the value itself can be changed.
+If you want to delete a setting, remove it from the JSON configuration file, either `$HOME/Library/Preferences/neo4j/cli/config.json` (macOS) or `%LOCALAPPDATA%\neo4j\cli\config.json` (Windows).
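+
+A sketch of such a deletion on macOS (the setting name `beta-enabled` is only an example and may not match your configuration):
+
+[source, shell]
+----
+# Remove the "beta-enabled" setting from the Aura CLI configuration file.
+CONFIG="$HOME/Library/Preferences/neo4j/cli/config.json"
+jq 'del(."beta-enabled")' "$CONFIG" > "$CONFIG.tmp" && mv "$CONFIG.tmp" "$CONFIG"
+----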
+
+
+== Create a new AuraDB
+
+When creating an Aura instance with the Aura CLI, the memory size is specified as an integer followed by "GB", for example "8GB".
+Previously, this was just the integer.
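+
+As an illustration, using the same placeholders as in xref:aura-cli/auradb-instances.adoc[Managing AuraDB instances]:
+
+[source, shell]
+----
+# The memory value now carries the unit, for example 8GB rather than 8.
+aura-cli instance create --name YOUR_INSTANCE_NAME --cloud-provider YOUR_CLOUD_PROVIDER --region CLOUD_REGION --memory 8GB --type AURA_INSTANCE_TYPE --tenant-id YOUR_TENANT_ID
+----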
+
+
+== Delete an AuraDB
+
+The Aura CLI does not prompt for confirmation when deleting an AuraDB and starts the operation immediately.
+
+
+== Update an AuraDB
+
+Only the ID can be used to identify the AuraDB to update.
+The same properties can be changed as previously: its name and memory size.
+
+
+== Get AuraDB instance details
+
+This now includes the URL to obtain metrics.
+
+
+== Tenants
+
+This command no longer includes a flag to obtain metrics integration information.
+That information is now part of the details returned for an individual AuraDB instance.
+See Get AuraDB instance details.
+
+
+== Snapshots
+
+Management of snapshots is now a sub-command of `instance` and allows you to create snapshots, list them, and obtain their details.
+Restoration of a snapshot is performed with the `aura-cli instance overwrite` command.
\ No newline at end of file
diff --git a/modules/ROOT/pages/auradb/getting-started/connect-database.adoc b/modules/ROOT/pages/auradb/getting-started/connect-database.adoc
deleted file mode 100644
index af986b6d2..000000000
--- a/modules/ROOT/pages/auradb/getting-started/connect-database.adoc
+++ /dev/null
@@ -1,153 +0,0 @@
-[[aura-connect-instance]]
-= Connecting to an instance
-:description: This page describes how to connect to an instance using Neo4j AuraDB.
-
-There are several different methods of connecting to an instance in Neo4j AuraDB:
-
-* <<_neo4j_browser>> - A browser-based interface for querying and viewing data in an instance.
-* <<_neo4j_bloom>> - A graph exploration application for visually interacting with graph data.
-* <<_neo4j_workspace>> - A browser-based interface used to import, visualize, and query graph data.
-* <<_neo4j_desktop>> - An installable desktop application used to manage local and cloud instances.
-* <<_neo4j_cypher_shell>> - A command-line tool used to run Cypher queries against a Neo4j instance.
-
-== Neo4j Browser
-
-You can query an instance using Neo4j Browser.
-
-To open an instance with Browser:
-
-. Navigate to the https://console.neo4j.io/?product=aura-db[Neo4j Aura Console] in your browser.
-. Select the *Query* button on the instance you want to open.
-. Enter the *Username* and *Password* credentials in the window that opens.
-These are the same credentials you stored when xref:auradb/getting-started/create-database.adoc[creating the instance].
-. Select *Connect*.
-
-Once you have successfully connected, there are built-in guides you can complete to familiarize yourself with Neo4j Browser.
-
-For more information on using Neo4j Browser, please see the link:{neo4j-docs-base-uri}/browser-manual/[Browser manual].
-
-== Neo4j Bloom
-
-You can explore an instance using Neo4j Bloom.
-
-To open an instance with Bloom:
-
-. Navigate to the https://console.neo4j.io/?product=aura-db[Neo4j Aura Console] in your browser.
-. Select the *Explore* button on the instance you want to open.
-. Select *Neo4j Bloom* from the dropdown menu.
-. Enter the *Username* and *Password* credentials in the window that opens.
-These are the same credentials you stored when xref:auradb/getting-started/create-database.adoc[creating the instance].
-. Select *Connect*.
-
-For more details on using Neo4j Bloom, please see the link:{neo4j-docs-base-uri}/bloom-user-guide/[Neo4j Bloom documentation].
-
-[NOTE]
-.Perspectives in AuraDB
-====
-
-Due to the nature of AuraDB's infrastructure, it is not currently possible to share Perspectives in Bloom, as the data for a given Perspective is stored in local storage in the user's web browser.
-
-An alternative is to export your Perspective as a JSON file and import it into another Bloom session.
-
-To export a Perspective:
-
-. Open the Bloom interface for your Neo4j AuraDB instance.
-. Navigate to the _Perspectives Gallery_.
-. Click on the vertical ellipsis (*...*) and select *Export*.
-. Save the file to your local disk.
-
-You can import perspectives by clicking the blue "Import Perspective" button in the Perspective gallery.
-Please note that the Perspective exposes details about your graph's schema but not the actual data within.
-
-For more information, see link:{neo4j-docs-base-uri}/bloom-user-guide/current/bloom-perspectives/[Bloom Perspectives].
-
-*Deep links*
-
-As data for a given Perspective is stored in local storage in the user's web browser, if you want to access a deep link referencing perspectives, you will first need to import the perspectives into your local instance of Bloom.
-====
-
-== Neo4j Workspace
-
-Neo4j Workspace combines the functionality of Neo4j Browser, Neo4j Bloom, and Neo4j Data Importer into a single interface.
-
-To open an instance with Workspace:
-
-. Navigate to the https://console.neo4j.io/?product=aura-db[Neo4j Aura Console] in your browser.
-. Select the *Open* button on the instance you want to open.
-. Enter the *Database user* and *Password* credentials in the window that opens.
-These are the same credentials you stored when xref:auradb/getting-started/create-database.adoc[creating the instance].
-. Select *Connect*.
-
-For more information on using Neo4j Workspace, see the https://neo4j.com/product/workspace/[Product page].
-
-[NOTE]
-====
-Workspace is enabled by default on AuraDB Free and AuraDB Professional instances but needs to be enabled for AuraDB Enterprise instances.
-If you do not see the *Open* button on your instance, you can enable it by selecting the *Settings* cog in the top menu bar and toggling *Enable workspace*.
-====
-
-== Neo4j Desktop
-
-You can connect AuraDB instances to the Neo4j Desktop application, allowing the ability to have a single portal for interacting with all instances of Neo4j, whether local or located in the cloud.
-
-To connect to an instance using Neo4j Desktop:
-
-. Navigate to the https://console.neo4j.io/?product=aura-db[Neo4j Aura Console] in your browser.
-. Copy the *Connection URI* of the instance you want to connect to. The URI is below the instance status indicator.
-. In Neo4j Desktop, select the *Projects* tab and select an existing project or create a new one.
-. Select the *Add* dropdown and choose *Remote connection*.
-. Enter a name for the instance and enter the URL from the Neo4j Aura console from the second step.
-Once complete, select *Next*.
-. With *Username/Password* selected, enter your credentials and select *Next*.
-These are the same credentials you stored when xref:auradb/getting-started/create-database.adoc[creating the instance].
-. When available, activate the connection by clicking the *Connect* button.
-
-[NOTE]
-====
-* Neo4j Desktop only allows 1 connection at a time to an instance (local or remote).
-* Deactivating an instance in Neo4j Desktop won't shut it down or stop a remote instance - it will only temporarily close the connection to it in Neo4j Desktop.
-====
-
-As with other instances in Neo4j Desktop, you can install https://install.graphapp.io/[Graph Apps] for monitoring and other functionality.
-
-To do this, follow the same process to install the graph application you need, and open it from Neo4j Desktop or a web browser with the running and activated Neo4j AuraDB instance.
-
-== Neo4j Cypher Shell
-
-You can connect to an AuraDB instance using the Neo4j Cypher Shell command-line interface (CLI) and run Cypher commands against your instance from the command-line.
-
-To connect to an instance using Neo4j Cypher Shell:
-
-. Navigate to the https://console.neo4j.io/?product=aura-db[Neo4j Aura Console] in your browser.
-. Copy the *Connection URI* of the instance you want to connect to. The URI is below the instance status indicator.
-. Open a terminal and navigate to the folder where you have installed Cypher Shell.
-. Run the following `cypher-shell` command replacing:
-* *``* with the URI you copied in step 2.
-* *``* with the username for your instance.
-* *``* with the password for your instance.
-+
-[source, shell]
-----
-./cypher-shell -a -u -p
-----
-
-Once connected, you can run `:help` for a list of available commands.
-
-----
-Available commands:
- :begin Open a transaction
- :commit Commit the currently open transaction
- :exit Exit the logger
- :help Show this help message
- :history Print a list of the last commands executed
- :param Set the value of a query parameter
- :params Print all currently set query parameters and their values
- :rollback Rollback the currently open transaction
- :source Interactively executes cypher statements from a file
- :use Set the active instance
-
-For help on a specific command type:
- :help command
-----
-
-For more information on Cypher Shell, including how to install it, see the link:{neo4j-docs-base-uri}/operations-manual/current/tools/cypher-shell/[Cypher Shell documentation].
\ No newline at end of file
diff --git a/modules/ROOT/pages/auradb/getting-started/create-database.adoc b/modules/ROOT/pages/auradb/getting-started/create-database.adoc
deleted file mode 100644
index 7d3bbf224..000000000
--- a/modules/ROOT/pages/auradb/getting-started/create-database.adoc
+++ /dev/null
@@ -1,113 +0,0 @@
-[[aura-create-instance]]
-= Creating an instance
-:description: This page describes how to create a Neo4j AuraDB instance.
-
-The process of creating an instance differs depending on the type.
-
-You can select from the options below to display the relevant process.
-
-[.tabbed-example]
-====
-[.include-with-AuraDB-Free]
-=====
-
-To create an *AuraDB Free* instance in Neo4j AuraDB:
-
-. Navigate to the https://console.neo4j.io/?product=aura-db[Neo4j Aura Console] in your browser.
-. Select *New Instance*.
-. Select *Create Free instance*.
-. Copy and store the instance's *Username* and *Generated password* or download the credentials as a `.txt` file.
-. Tick the confirmation checkbox, and select *Continue*.
-
-[NOTE]
-======
-You can only create one AuraDB Free instance per account.
-======
-
-=====
-[.include-with-AuraDB-Professional]
-=====
-
-To create an *AuraDB Professional* instance in Neo4j AuraDB:
-
-. Navigate to the https://console.neo4j.io/?product=aura-db[Neo4j Aura Console] in your browser.
-. Select *New Instance* to open the *Create an instance* page. (Additionally, you will need to select *Select Professional instance* if you have yet to create an AuraDB Free instance.)
-. Select your preferred *Cloud provider* and *Region*. The region is the physical location of the instance. Set this as close to your location as possible. The closer the region is to your location, the faster the response time for any network interactions with the instance.
-. Set your *Instance size*, the memory, CPU, and storage allocated to the instance. The larger the instance size, the more it costs to run. Once selected, you can see the running cost at the bottom of the page.
-. Set your *Instance details*:
-* *Instance Name* - The name to give the instance. This name can be whatever you like.
-* *Neo4j Version* - The version of the Neo4j instance.
-. Tick the *I understand* checkbox next to the running cost confirmation.
-. Select *Create* when happy with your instance details and size.
-. Copy and store the instance's *Username* and *Generated password* or download the credentials as a `.txt` file.
-. Tick the confirmation checkbox, and select *Continue*.
-
-[NOTE]
-======
-Aura retains some of your provisioned resources for managing your instance.
-======
-
-=====
-[.include-with-AuraDB-Business-Critical]
-=====
-
-[NOTE]
-======
-Pay-as-you-go (PAYG) is available on all instance sizes up to 128GB. Prepaid is available for instance sizes of 16GB and above.
-======
-
-To create an *AuraDB Business Critical* instance in Neo4j AuraDB:
-
-. Navigate to the link:https://console.neo4j.io/?product=aura-db[Neo4j Aura Console] in your browser.
-. Select *New Instance* to open the *Create an instance* page.
-(Additionally, you need to select *Select Business Critical instance* if you have yet to create an AuraDB Professional instance.)
-. Select your preferred *Cloud provider* and *Region*.
-The region is the physical location of the instance.
-Set this as close to your location as possible.
-The closer the region is to your location, the faster the response time for any network interactions with the instance.
-. Set your *Instance size*, the memory, CPU, and storage allocated to the instance.
-Once selected, you can see the running cost at the bottom of the page.
-. Set your *Instance details*:
-* *Instance Name* - The name of the instance.
-This name can be whatever you like.
-* *Neo4j Version* - The version of the Neo4j instance.
-. Tick the *I understand* checkbox next to the running cost confirmation.
-. Select *Create* when happy with your instance details and size.
-. Copy and store the instance's *Username* and *Generated password* or download the credentials as a `.txt` file.
-. Tick the confirmation checkbox, and select *Continue*.
-
-=====
-[.include-with-AuraDB-Enterprise]
-=====
-
-To create an *AuraDB Enterprise* instance in Neo4j AuraDB:
-
-. Navigate to the https://console.neo4j.io/?product=aura-db[Neo4j Aura Console] in your browser.
-. Select *New Instance* to open the *Create an instance* page.
-. Set your *Instance size*, the memory, CPU, and storage allocated to the instance. The larger the instance size, the more it costs to run. Please refer to your enterprise contract for pricing.
-. Set your *Instance details*:
-* *Instance Name* - The name to give the instance. This name can be whatever you like.
-* *Neo4j Version* - The version of the Neo4j instance.
-* *Region* - The physical location of the instance. Set this as close to your location as possible. The closer the region to your location, the faster the response time for any network interactions with the instance.
-. Tick the *I understand* checkbox.
-. Select *Create Instance* when happy with your instance details and size.
-. Copy and store the instance's *Username* and *Generated password* or download the credentials as a `.txt` file.
-. Tick the confirmation checkbox, and select *Continue*.
-
-[NOTE]
-======
-Aura retains some of your provisioned resources for managing your instance.
-======
-
-=====
-====
-
-[NOTE]
-====
-Multi-database is not currently supported within Neo4j AuraDB.
-====
-
-
-
-
-
diff --git a/modules/ROOT/pages/auradb/getting-started/query-database.adoc b/modules/ROOT/pages/auradb/getting-started/query-database.adoc
deleted file mode 100644
index d94965fef..000000000
--- a/modules/ROOT/pages/auradb/getting-started/query-database.adoc
+++ /dev/null
@@ -1,12 +0,0 @@
-[[aura-query-instance]]
-= Querying an instance
-:description: This page describes how to query data using Cypher.
-
-You can query data in an AuraDB instance using Cypher.
-
-Cypher is the declarative graph query language created by Neo4j and can be used to query, update, and administer your AuraDB instance.
-
-You can run Cypher statements through Neo4j Browser and Neo4j Cypher Shell.
-For more information on how to open an AuraDB instance in Browser and Cypher Shell, see xref:auradb/getting-started/connect-database.adoc[].
-
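-As a minimal illustration, the following statements create a small graph and then query it back; the `Person` label and `KNOWS` relationship type are placeholders, not part of any predefined schema:
-
-[source, cypher]
-----
-// Create two nodes connected by a relationship
-CREATE (a:Person {name: 'Alice'})-[:KNOWS]->(b:Person {name: 'Bob'});
-
-// Query the graph for the pattern just created
-MATCH (p:Person)-[:KNOWS]->(friend:Person)
-RETURN p.name AS person, friend.name AS knows;
-----
-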
-For more information on Cypher and Aura, see link:{neo4j-docs-base-uri}/cypher-manual/current/introduction/cypher_aura/[the Neo4j Cypher Manual].
\ No newline at end of file
diff --git a/modules/ROOT/pages/auradb/importing/import-database.adoc b/modules/ROOT/pages/auradb/importing/import-database.adoc
deleted file mode 100644
index c69444538..000000000
--- a/modules/ROOT/pages/auradb/importing/import-database.adoc
+++ /dev/null
@@ -1,10 +0,0 @@
-[[aura-importing-database]]
-= Importing an existing database
-:description: This page describes how to import an existing Neo4j database into an AuraDB instance.
-
-[NOTE]
-====
-The process of importing or loading data requires you to xref:auradb/getting-started/create-database.adoc[create an AuraDB instance] beforehand.
-====
-
-include::partial$import-database.adoc[]
diff --git a/modules/ROOT/pages/auradb/importing/importing-data.adoc b/modules/ROOT/pages/auradb/importing/importing-data.adoc
deleted file mode 100644
index 6e099eaee..000000000
--- a/modules/ROOT/pages/auradb/importing/importing-data.adoc
+++ /dev/null
@@ -1,46 +0,0 @@
-[[aura-importing-data]]
-= Importing data
-:description: This page describes how to get data into a Neo4j AuraDB instance.
-
-[NOTE]
-====
-The process of importing or loading data requires you to xref:auradb/getting-started/create-database.adoc[create an AuraDB instance] beforehand.
-====
-
-There are two ways you can import data from a *_.csv_* file into an AuraDB instance:
-
-* <<_load_csv>> - A Cypher statement that you run from Neo4j Browser or Neo4j Cypher Shell.
-* <<_neo4j_data_importer>> - A visual application that you launch from the Console.
-
-== Load CSV
-
-The link:{neo4j-docs-base-uri}/cypher-manual/current/clauses/load-csv/[`LOAD CSV`] Cypher statement can be used from within Neo4j Browser and Cypher Shell.
-For instructions on how to open an AuraDB instance with Browser or Cypher Shell, see xref:auradb/getting-started/connect-database.adoc[].
-
-There are some limitations to consider when using this method to load a *_.csv_* file into an AuraDB instance:
-
-* For security reasons, you must host your *_.csv_* file on a publicly accessible HTTP or HTTPS server. Examples of such servers include AWS signed URLs, GitHub, Google Drive, and Dropbox.
-
-* The `LOAD CSV` command is built to handle small to medium-sized data sets, such as anything up to 10 million nodes and relationships. You should avoid using this command for any data sets exceeding this limit.
-
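-As a minimal sketch, the following statement loads Neo4j's publicly hosted movies example file (`https://data.neo4j.com/intro/movies/movies.csv`, with `title`, `released`, and `tagline` columns) and merges one `Movie` node per row:
-
-[source, cypher]
-----
-LOAD CSV WITH HEADERS
-FROM 'https://data.neo4j.com/intro/movies/movies.csv' AS row
-MERGE (m:Movie {title: row.title})
-  ON CREATE SET m.released = toInteger(row.released), m.tagline = row.tagline
-RETURN count(*);
-----
-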
-== Neo4j Data Importer
-
-Neo4j Data Importer is a UI-based tool for importing data that lets you:
-
-. Load data from flat files (`.csv` and `.tsv`).
-. Define a graph model and map data to it.
-. Import the data into an AuraDB instance.
-
-To load data with Neo4j Data Importer:
-
-. Navigate to the https://console.neo4j.io/?product=aura-db[Neo4j Aura Console^] in your browser.
-. Select the *Import* button on the instance you want to open.
-
-Alternatively, you can access Data Importer from the *Import* tab of xref:auradb/getting-started/connect-database#_neo4j_workspace[Neo4j Workspace].
-
-For more information on Neo4j Data Importer, see the link:{neo4j-docs-base-uri}/data-importer/current/[Neo4j Data Importer documentation].
-
-[NOTE]
-====
-You must provide your AuraDB instance password before importing from the Neo4j Data Importer.
-====
\ No newline at end of file
diff --git a/modules/ROOT/pages/auradb/index.adoc b/modules/ROOT/pages/auradb/index.adoc
deleted file mode 100644
index d6ab9fdac..000000000
--- a/modules/ROOT/pages/auradb/index.adoc
+++ /dev/null
@@ -1,27 +0,0 @@
-[[auradb]]
-= Neo4j AuraDB overview
-:description: This section describes how to use Neo4j AuraDB.
-
-Neo4j AuraDB is a fully managed cloud graph database service.
-
-Built to leverage relationships in data, AuraDB enables lightning-fast queries for real-time analytics and insights.
-AuraDB is reliable, secure, and fully automated, enabling you to focus on building graph applications without worrying about database administration.
-
-== Plans
-
-AuraDB offers the following subscription plans: *AuraDB Free*, *AuraDB Professional*, *AuraDB Business Critical*, and *AuraDB Enterprise*.
-The full list of features available in each plan is available on the link:https://neo4j.com/pricing/[Neo4j Pricing page].
-
-== Updates and upgrades
-
-AuraDB does not have any scheduled maintenance windows.
-It is designed to be always on and available, with all corrections, fixes, and upgrades automatically applied in the background.
-
-Releases for the Neo4j database are also deployed when they become available.
-Operations are non-disruptive, and you shouldn't experience any downtime as a result.
-
-== Support
-
-For a breakdown of the support offered across plan types as well as the support holiday schedule, see the https://support.neo4j.com/s/article/360053850514-Neo4j-Aura-Customer-Support-Holiday-Schedule[Aura Support page].
-
-Additionally, you can access the https://status.neo4j.io/[Aura Status page] to check the current operational status of Aura and subscribe to updates.
diff --git a/modules/ROOT/pages/auradb/managing-databases/backup-restore-export.adoc b/modules/ROOT/pages/auradb/managing-databases/backup-restore-export.adoc
deleted file mode 100644
index 386b0469e..000000000
--- a/modules/ROOT/pages/auradb/managing-databases/backup-restore-export.adoc
+++ /dev/null
@@ -1,72 +0,0 @@
-[[aura-backup-restore-export]]
-= Backup, export and restore
-:description: This page describes how to backup, export and restore your data from a snapshot.
-
-The data in your AuraDB instance can be backed up, exported, and restored using snapshots.
-
-A snapshot is a copy of the data in an instance at a specific point in time.
-
-The *Snapshots* tab within an AuraDB instance shows a list of available snapshots.
-
-To access the *Snapshots* tab:
-
-. Navigate to the https://console.neo4j.io/?product=aura-db[Neo4j Aura Console] in your browser.
-. Select the instance you want to access.
-. Select the *Snapshots* tab.
-
-[NOTE]
-====
-Only the latest snapshot is available for Free instances.
-Snapshots are available for 7 days for Professional instances, 30 days for Business Critical instances, and 60 days for Enterprise instances.
-====
-
-== Snapshot types
-
-=== Scheduled
-
-label:AuraDB-Professional[]
-label:AuraDB-Enterprise[]
-
-A *Scheduled* snapshot is a snapshot that is automatically triggered when you first create an instance, when changes to the underlying system occur (for example, a new patch release), and at a cadence depending on your plan type.
-
-Scheduled snapshots are run automatically once a day for Professional instances and once an hour for Business Critical and Enterprise instances.
-
-[NOTE]
-====
-For AuraDB Enterprise instances running Neo4j v4.x, scheduled snapshots run automatically once every 6 hours from day 0 to 7.
-From day 8 to 60, snapshots run once a day.
-====
-
-=== On Demand
-
-An *On Demand* snapshot is a snapshot that you manually trigger by selecting *Take snapshot* from the *Snapshots* tab of an instance.
-
-== Snapshot actions
-
-=== Restore
-
-[CAUTION]
-====
-Restoring a snapshot overwrites the data in your instance, replacing it with the data contained in the snapshot.
-====
-
-You can restore data in your instance to a previous snapshot by selecting *Restore* next to the snapshot you want to restore.
-
-Restoring a snapshot requires you to confirm the action by typing RESTORE and selecting *Restore*.
-
-=== Export/Create
-
-By selecting the ellipses (*...*) button next to an existing snapshot, you can:
-
-* *Export* - Download the instance as a *_.dump_* file, allowing you to store a local copy and work on your data offline.
-* *Create instance from snapshot* - Create a new AuraDB instance using the data from the snapshot.
-
-[NOTE]
-====
-The ability to Export or Create an instance from a Scheduled Enterprise snapshot is limited to 14 days.
-
-Additionally, for Enterprise instances running Neo4j version 5, the ability to export or create an instance from a Scheduled snapshot is limited to the first full snapshot, taken once per day.
-====
-
-
-
diff --git a/modules/ROOT/pages/auradb/managing-databases/database-actions.adoc b/modules/ROOT/pages/auradb/managing-databases/database-actions.adoc
deleted file mode 100644
index 5b197b386..000000000
--- a/modules/ROOT/pages/auradb/managing-databases/database-actions.adoc
+++ /dev/null
@@ -1,221 +0,0 @@
-[[aura-db-actions]]
-= Instance actions
-:description: This page describes the following instance actions: rename, reset, upgrade, resize, pause, resume, clone to a new database, clone to an existing database, or delete an instance.
-
-You can perform several instance actions from an AuraDB instance card on the https://console.neo4j.io/?product=aura-db[Neo4j Aura Console] homepage.
-
-== Rename an instance
-
-You can change the name of an existing instance using the *Rename* action.
-
-To rename an instance:
-
-. Select the ellipsis (*...*) button on the instance you want to rename.
-. Select *Rename* from the resulting menu.
-. Enter a new name for the instance.
-. Select *Rename*.
-
-== Reset an instance
-
-label:AuraDB-Free[]
-label:AuraDB-Professional[]
-
-You can clear all data in an instance using the *Reset to blank* action.
-
-To reset an instance:
-
-. Select the ellipsis (*...*) button on the instance you want to reset.
-. Select *Reset to blank* from the resulting menu.
-. Select *Reset*.
-
-== Upgrade an instance
-
-=== Upgrade from Free to Professional
-
-You can upgrade an AuraDB Free instance to an AuraDB Professional instance using the *Upgrade to Professional* action.
-
-Upgrading your instance clones your Free instance data to a new Professional instance, leaving your existing Free instance untouched.
-
-To upgrade a Free instance:
-
-. Select the ellipsis (*...*) button on the free instance you want to upgrade.
-. Select *Upgrade to Professional* from the resulting menu.
-. Set your desired settings for the new instance. For more information on AuraDB instance creation settings, see xref:auradb/getting-started/create-database.adoc[].
-. Tick the *I understand* checkbox and select *Upgrade Instance*.
-
-=== Upgrade from Professional to Business Critical
-
-You can upgrade an AuraDB Professional instance to an AuraDB Business Critical instance using the *Upgrade to Business Critical* action.
-
-Upgrading your instance clones your Professional instance data to a new Business Critical instance, leaving your existing Professional instance untouched.
-
-To upgrade a Professional instance to Business Critical:
-
-. Select the ellipsis (*...*) button on the Professional instance you want to upgrade.
-. Select *Upgrade to Business Critical*.
-. Set your desired settings for the new instance.
-For more information on AuraDB instance creation settings, see xref:auradb/getting-started/create-database.adoc[].
-. Tick the *I understand* checkbox and select *Upgrade Instance*.
-
-== Resize an instance
-
-label:AuraDB-Professional[]
-label:AuraDB-Enterprise[]
-label:AuraDB-Business-Critical[]
-
-You can change the size of an existing instance using the *Resize* action.
-
-To resize an instance:
-
-. Select the ellipsis (*...*) button on the instance you want to resize.
-. Select *Resize* from the resulting menu.
-. Select the new size you want your instance to be.
-. Tick the *I understand* checkbox and select *Upgrade instance*.
-
-An instance remains available during the resize operation.
-
-== Pause an instance
-
-label:AuraDB-Professional[]
-label:AuraDB-Enterprise[]
-label:AuraDB-Business-Critical[]
-
-[NOTE]
-====
-You cannot manually pause an AuraDB Free instance; they are paused automatically after 72 hours of inactivity. footnote:[Inactivity is when you perform no queries on the instance.]
-====
-
-You can pause an instance when not needed and resume it at any time.
-
-To pause an instance:
-
-. Select the pause button on the instance you want to pause.
-. Tick the *I understand* checkbox and select *Pause* to confirm.
-
-After confirming, the instance begins pausing, and a play button replaces the pause button.
-
-[NOTE]
-====
-Paused instances run at a discounted rate compared to standard consumption, as outlined in the confirmation window.
-You can pause an instance for up to 30 days, after which point AuraDB automatically resumes the instance.
-====
-
-=== Resume a paused instance
-
-To resume an instance:
-
-. Select the play button on the instance you want to resume.
-. Tick the *I understand* checkbox and select *Resume* to confirm.
-
-After confirming, the instance begins resuming, which may take a few minutes.
-
-[WARNING]
-====
-AuraDB Free instances do not automatically resume after 30 days. If an AuraDB Free instance remains paused for more than 30 days, Aura deletes the instance, and all information is lost.
-====
-
-== Clone an instance
-
-You can clone an existing instance to create a new instance with the same data.
-You can clone across regions, from AuraDB to AuraDS and vice versa, and from Neo4j version 4 to Neo4j version 5.
-
-There are four options to clone an instance:
-
-* Clone to a new AuraDB instance
-* Clone to an existing AuraDB instance
-* Clone to a new AuraDS database
-* Clone to an existing AuraDS database
-
-You can access all the cloning options from the ellipsis (*...*) button on the AuraDB instance.
-
-[NOTE]
-====
-You cannot clone from a Neo4j version 5 instance to a Neo4j version 4 instance.
-====
-
-=== Clone to a new AuraDB instance
-
-. Select the ellipsis (*...*) button on the instance you want to clone.
-. Select *Clone To New* and then *AuraDB Professional/Business Critical/Enterprise* from the contextual menu.
-. Set your desired settings for the new database. For more information on AuraDB database creation, see xref:auradb/getting-started/create-database.adoc[].
-. Check the *I understand* box and select *Clone Database*.
-+
-[WARNING]
-====
-Make sure that the username and password are stored safely before continuing.
-Credentials cannot be recovered afterwards.
-====
-
-=== Clone to an existing AuraDB instance
-
-When you clone an instance to an existing instance, the database connection URI stays the same, but the data is replaced with the data from the cloned instance.
-
-[WARNING]
-====
-Cloning into an existing instance will replace all existing data.
-If you want to keep the current data, take a snapshot and export it.
-====
-
-. Select the ellipsis (*...*) button on the instance you want to clone.
-. Select *Clone To Existing* and then *AuraDB* from the contextual menu.
-. If necessary, change the database name.
-. Select the existing AuraDB database to clone to from the dropdown menu.
-+
-[NOTE]
-====
-Existing instances that are not large enough to clone into will not be available for selection.
-In the dropdown menu, they will be grayed out and have the string `(Instance is not large enough to clone into)` appended to their name.
-====
-+
-. Check the *I understand* box and select *Clone*.
-
-=== Clone to a new AuraDS instance
-
-. Select the ellipsis (*...*) button on the instance you want to clone.
-. Select *Clone To New* and then *AuraDS* from the contextual menu.
-. Set the desired name for the new instance.
-. Check the *I understand* box and select *Clone Instance*.
-+
-[WARNING]
-====
-Make sure that the username and password are stored safely before continuing.
-Credentials cannot be recovered afterwards.
-====
-
-=== Clone to an existing AuraDS instance
-
-When you clone an instance to an existing instance, the database connection URI stays the same, but the data is replaced with the data from the cloned instance.
-
-[WARNING]
-====
-Cloning into an existing instance will replace all existing data.
-If you want to keep the current data, take a snapshot and export it.
-====
-
-. Select the ellipsis (*...*) button on the instance you want to clone.
-. Select *Clone To Existing* and then *AuraDS* from the contextual menu.
-. If necessary, change the instance name.
-. Select the existing AuraDS instance to clone to from the dropdown menu.
-+
-[NOTE]
-====
-Existing instances that are not large enough to clone into will not be available for selection.
-In the dropdown menu, they are grayed out and have the string `(Instance is not large enough to clone into)` appended to their name.
-====
-+
-. Tick the *I understand* checkbox and select *Clone*.
-
-
-== Delete an instance
-
-You can delete an instance if you no longer want to be billed for it.
-
-To delete an instance:
-
-. Select the red trashcan icon on the instance you want to delete.
-. Type the exact name of the instance (as instructed) to confirm your decision, and select *Destroy*.
-
-[WARNING]
-====
-There is no way to recover data from a deleted AuraDB instance.
-====
diff --git a/modules/ROOT/pages/aurads/architecture.adoc b/modules/ROOT/pages/aurads/architecture.adoc
deleted file mode 100644
index 10cee950e..000000000
--- a/modules/ROOT/pages/aurads/architecture.adoc
+++ /dev/null
@@ -1,48 +0,0 @@
-[[architecture]]
-= Architecture
-:description: This page describes AuraDS architecture.
-:!figure-caption:
-
-AuraDS makes it easy to run graph algorithms on Neo4j by integrating two main components:
-
-* *Neo4j Database*, where graph data are loaded and stored, and where Cypher queries and all database operations (for example user management and query termination) are executed;
-* *Graph Data Science*, a software component installed in the Neo4j Database, whose main purpose is to run graph algorithms on in-memory projections of Neo4j Database data.
-
-== Graph Data Science concepts
-
-Graph Data Science (GDS) includes procedures to project and manage graphs, run algorithms, and train machine learning models.
-
-.Graph Catalog
-
-The link:{neo4j-docs-base-uri}/graph-data-science/current/management-ops/[graph catalog^] is used to store and manage projected graphs via GDS procedures.
-
-.Algorithms
-
-GDS contains many link:{neo4j-docs-base-uri}/graph-data-science/current/operations-reference/algorithm-references/[graph algorithms^], invoked as Cypher procedures and run on projected graphs.
-
-GDS algorithms are broken down into three tiers of maturity:
-
-- *Alpha*: experimental algorithms that may be changed or removed at any time. Algorithms in this tier are prefixed with `gds.alpha.`.
-
-- *Beta*: algorithms promoted from the Alpha tier to candidates for the Production tier. Algorithms in this tier are prefixed with `gds.beta.`.
-
-- *Production*: algorithms that have been rigorously tested for stability and scalability. Algorithms in this tier are prefixed with `gds.`.
-
-.Model Catalog
-
-Some machine learning algorithms (for example Node Classification and GraphSAGE) need to use trained models in their computation. The link:{neo4j-docs-base-uri}/graph-data-science/current/model-catalog/[model catalog^] is used to store and manage named trained models.
-
-.Pipeline Catalog
-
-The link:{neo4j-docs-base-uri}/graph-data-science/current/pipeline-catalog/pipeline-catalog/[pipeline catalog^] is used to manage machine learning pipelines. A pipeline groups together all the stages of a supported machine learning task (for example Node classification), from graph feature extraction to model training, in a single end-to-end workflow.
-
-== Graph data flow
-
-Since GDS algorithms can only run in memory, the typical data flow involves:
-
-. Reading the graph data from Neo4j Database
-. Loading (_projecting_) the data into an in-memory graph
-. Running an algorithm on a projected graph
-. Writing the results back to Neo4j Database (if the algorithm runs in xref:aurads/tutorials/algorithm-modes#_write[write] mode)
-
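-The following Cypher sketch illustrates this flow; it assumes an already-loaded graph with `Person` nodes and `KNOWS` relationships (both placeholders) and uses PageRank as an arbitrary example algorithm:
-
-[source, cypher]
-----
-// Steps 1-2: project Person nodes and KNOWS relationships into an in-memory graph
-CALL gds.graph.project('example-graph', 'Person', 'KNOWS');
-
-// Steps 3-4: run PageRank in write mode, writing the scores back to the database
-CALL gds.pageRank.write('example-graph', {writeProperty: 'pageRank'});
-
-// Drop the in-memory graph when it is no longer needed
-CALL gds.graph.drop('example-graph');
-----
-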
-image::architecture.png[]
diff --git a/modules/ROOT/pages/aurads/connecting/index.adoc b/modules/ROOT/pages/aurads/connecting/index.adoc
deleted file mode 100644
index 86ffdcee1..000000000
--- a/modules/ROOT/pages/aurads/connecting/index.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-[[aurads-connecting]]
-= Connecting to AuraDS
-
-Once you have xref:aurads/create-instance.adoc[created] an AuraDS instance, you can start using it with any xref:aurads/connecting/neo4j-applications.adoc[Neo4j application] or directly xref:aurads/connecting/python.adoc[from your code].
-Keep your username and password handy, as you will need them to connect to your instance.
\ No newline at end of file
diff --git a/modules/ROOT/pages/aurads/connecting/neo4j-applications.adoc b/modules/ROOT/pages/aurads/connecting/neo4j-applications.adoc
deleted file mode 100644
index af987278c..000000000
--- a/modules/ROOT/pages/aurads/connecting/neo4j-applications.adoc
+++ /dev/null
@@ -1,135 +0,0 @@
-[[aurads-access]]
-= Connecting with Neo4j applications
-:description: This page describes how to access an AuraDS instance through Neo4j applications.
-
-There are several ways to interact with and use graph data in AuraDS.
-
-* <<_neo4j_browser>> - A browser-based interface for querying and viewing graph data with rudimentary visualization.
-* <<_neo4j_bloom>> - A graph exploration application for visually interacting with graph data.
-* <<_neo4j_workspace>> - A browser-based interface used to import, visualize, and query graph data.
-* <<_neo4j_desktop>> - An installable desktop application used to manage local and cloud databases.
-* <<_neo4j_cypher_shell>> - A command-line tool used to run Cypher queries against a Neo4j instance.
-
-[TIP]
-====
-For first-time users, we recommend using Neo4j Browser.
-====
-
-== Neo4j Browser
-
-To open an AuraDS instance with Neo4j Browser:
-
-. Navigate to the https://console.neo4j.io/?product=aura-ds[Neo4j Aura Console^] in your browser.
-. Select the *Query* button on the instance you want to open.
-. Enter the *Username* and *Password* credentials in the Neo4j Browser window that opens.
-These are the same credentials you stored when you xref:aurads/create-instance.adoc[created the instance].
-. Select *Connect*.
-
-Once you have successfully connected, there are built-in guides you can complete to familiarize yourself with Neo4j Browser. See the link:{neo4j-docs-base-uri}/browser-manual/[Browser manual^] for more information.
-
-== Neo4j Bloom
-
-To open an AuraDS instance with Neo4j Bloom:
-
-. Navigate to the https://console.neo4j.io/?product=aura-ds[Neo4j Aura Console^] in your browser.
-. Select the *Explore* button on the instance you want to open.
-. Enter the *Username* and *Password* credentials in the Neo4j Bloom window that opens.
-These are the same credentials you stored when you xref:aurads/create-instance.adoc[created the instance].
-. Select *Connect*.
-
-See the link:{neo4j-docs-base-uri}/bloom-user-guide/[Neo4j Bloom documentation^] for more details.
-
-[NOTE]
-.Perspectives in AuraDS
-====
-
-Due to the nature of AuraDS's infrastructure, it is not currently possible to share Perspectives in Bloom, as the data for a given Perspective is stored in local storage in the user's web browser.
-
-An alternative is to export your Perspective as a JSON file and import it into another Bloom session.
-
-To export a Perspective:
-
-. Open the Bloom interface for your Neo4j AuraDS instance.
-. Navigate to the _Perspectives Gallery_.
-. Click on the vertical ellipsis (*...*) and select *Export*.
-. Save the file to your local disk.
-
-You can import Perspectives by clicking the blue *Import Perspective* button in the Perspectives Gallery.
-Note that a Perspective exposes details about your graph's schema but not the actual data within.
-
-For more information, see link:{neo4j-docs-base-uri}/bloom-user-guide/current/bloom-perspectives/[Bloom Perspectives^].
-
-*Deep links*
-
-Because the data for a given Perspective is stored in local storage in the user's web browser, you first need to import the relevant Perspectives into your local instance of Bloom before you can use a deep link that references them.
-
-====
-
-== Neo4j Workspace
-
-Neo4j Workspace combines the functionality of Neo4j Browser, Neo4j Bloom, and Neo4j Data Importer into a single interface.
-
-To open an instance with Workspace:
-
-. Navigate to the https://console.neo4j.io/?product=aura-ds[Neo4j Aura Console] in your browser.
-. Select the *Open* button on the instance you want to open.
-. Enter the *Database user* and *Password* credentials in the window that opens.
-These are the same credentials you stored when you xref:aurads/create-instance.adoc[created the instance].
-. Select *Connect*.
-
-For more information on using Neo4j Workspace, see the https://neo4j.com/product/workspace/[Product page].
-
-[NOTE]
-====
-Workspace is enabled by default on AuraDB Free and AuraDB Professional instances but needs to be enabled for AuraDB Enterprise instances.
-If you do not see the *Open* button on your instance, you can enable it by selecting the *Settings* cog in the top menu bar and toggling *Enable workspace*.
-====
-
-== Neo4j Desktop
-
-You can connect AuraDS instances to the Neo4j Desktop application, giving you a single portal for interacting with all your Neo4j instances, whether local or in the cloud.
-
-To connect to an AuraDS instance using Neo4j Desktop:
-
-. Navigate to the https://console.neo4j.io/?product=aura-ds[Neo4j Aura Console^] in your browser.
-. Copy the *Connection URI* of the instance you want to connect to. The URI is shown on the page that opens when you select the instance.
-. In Neo4j Desktop, select the *Projects* tab and select an existing project or create a new one.
-. Select the *Add* dropdown and choose *Remote connection*.
-. Enter a name for the instance and the connection URI you copied in step 2.
-Once complete, select *Next*.
-. With *Username/Password* selected, enter your credentials and select *Next*.
-These are the same credentials you stored when you xref:aurads/create-instance.adoc[created the instance].
-. When available, activate the connection by clicking the *Connect* button.
-
-[NOTE]
-====
-* Neo4j Desktop only allows 1 connection at a time to an instance (local or remote).
-* Deactivating an instance in Neo4j Desktop won't shut it down or stop a remote instance - it will only temporarily close the connection to it in Neo4j Desktop.
-====
-
-As with other databases in Neo4j Desktop, you can install https://install.graphapp.io/[Graph Apps^] for monitoring and other functionality. To do this, follow the same process to install the graph application you need, and open it from Neo4j Desktop or a web browser with the running and activated Neo4j AuraDS instance.
-
-== Neo4j Cypher Shell
-
-You can connect to an AuraDS instance using the Neo4j Cypher Shell command-line interface (CLI) and run Cypher commands against your instance from the command line. Refer to the link:{neo4j-docs-base-uri}/operations-manual/current/tools/cypher-shell/[Operations manual^] for instructions on how to install the Cypher Shell.
-
-To connect to an AuraDS instance using Neo4j Cypher Shell:
-
-. Navigate to the https://console.neo4j.io/?product=aura-ds[Neo4j Aura Console^] in your browser.
-. Copy the *Connection URI* of the instance you want to connect to. The URI is shown on the page that opens when you select the instance.
-. Open a terminal and navigate to the folder where you have installed the Cypher Shell.
-. Run the following `cypher-shell` command replacing:
-* *`<NEO4J_URI>`* with the URI you copied in step 2
-* *`<NEO4J_USERNAME>`* with the username for your instance
-* *`<NEO4J_PASSWORD>`* with the password for your instance
-+
-[source, shell]
-----
-./cypher-shell -a <NEO4J_URI> -u <NEO4J_USERNAME> -p <NEO4J_PASSWORD>
-----
-
-Once connected, you can run `:help` for a list of available commands.
-
-For more information on Cypher Shell, including how to install it, see the link:{neo4j-docs-base-uri}/operations-manual/current/tools/cypher-shell/[Cypher Shell documentation^].
diff --git a/modules/ROOT/pages/aurads/connecting/python.adoc b/modules/ROOT/pages/aurads/connecting/python.adoc
deleted file mode 100644
index 2007d6e72..000000000
--- a/modules/ROOT/pages/aurads/connecting/python.adoc
+++ /dev/null
@@ -1,265 +0,0 @@
-[[connecting-python]]
-= Connecting with Python
-:description: This page describes how to connect to AuraDS using Python.
-:notebook-name: Connecting_with_Python_(GDS_client).ipynb
-
-include::partial$aurads/colab.adoc[]
-
-This tutorial shows how to interact with AuraDS using the link:{neo4j-docs-base-uri}/graph-data-science/current/python-client/[Graph Data Science (GDS) client^] or the link:{neo4j-docs-base-uri}/python-manual/current/[Python Driver^]. In the following sections you can switch between client and driver code by clicking on the appropriate tab.
-
-A running AuraDS instance must be available along with access credentials (generated in the xref:aurads/create-instance.adoc[] section) and its connection URI (found in the instance detail page, starting with `neo4j+s://`).
-
-== Installation
-
-Both the GDS client and the Python driver can be installed using `pip`.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, shell]
-----
-pip install graphdatascience
-----
-
-The latest stable version of the client can be found on https://pypi.org/project/graphdatascience[PyPI^].
-=====
-
-[.include-with-Python-driver]
-=====
-[source, shell]
-----
-pip install neo4j
-----
-
-The latest stable version of the driver can be found on https://pypi.org/project/neo4j[PyPI^].
-=====
-====
-
-If `pip` is not available, you can try replacing it with `python -m pip` or `python3 -m pip`.
-
-== Import and setup
-
-Both the GDS client and the Python driver require the connection URI and the credentials as shown in the introduction.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-The client is imported as the `GraphDataScience` class:
-
-[source, python]
-----
-# Client import
-from graphdatascience import GraphDataScience
-----
-
-Use the `aura_ds=True` constructor argument to automatically apply the recommended non-default configuration settings of the Python driver.
-
-[source, python]
-----
-# Replace with the actual URI, username, and password
-AURA_CONNECTION_URI = "neo4j+s://xxxxxxxx.databases.neo4j.io"
-AURA_USERNAME = "neo4j"
-AURA_PASSWORD = "..."
-
-# Client instantiation
-gds = GraphDataScience(
- AURA_CONNECTION_URI,
- auth=(AURA_USERNAME, AURA_PASSWORD),
- aura_ds=True
-)
-----
-=====
-
-[.include-with-Python-driver]
-=====
-The driver is imported as the `GraphDatabase` class:
-
-[source, python]
-----
-# Driver import
-from neo4j import GraphDatabase
-----
-
-[source, python]
-----
-# Replace with the actual URI, username and password
-AURA_CONNECTION_URI = "neo4j+s://xxxxxxxx.databases.neo4j.io"
-AURA_USERNAME = "neo4j"
-AURA_PASSWORD = "..."
-
-# Driver instantiation
-driver = GraphDatabase.driver(
- AURA_CONNECTION_URI,
- auth=(AURA_USERNAME, AURA_PASSWORD)
-)
-----
-=====
-====
-
-== Running a query
-
-Once created, the client (or the driver) can be used to run Cypher queries and call Cypher procedures. In this example the `gds.version()` function is used to retrieve the version of GDS running on the instance.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python]
-----
-# Call a GDS method directly
-print(gds.version())
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python]
-----
-# Cypher query
-gds_version_query = """
- RETURN gds.version() AS version
-"""
-
-# Create a driver session
-with driver.session() as session:
- # Use .data() to access the results array
- results = session.run(gds_version_query).data()
- print(results)
-----
-=====
-====
-
-The following code retrieves all the procedures available in the library and shows the details of five of them.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python]
-----
-# Assign the result of the client call to a variable
-results = gds.list()
-
-# Print the result (a Pandas DataFrame)
-print(results[:5])
-----
-
-Since the result is a Pandas DataFrame, you can use methods such as `to_string` and `to_json` to pretty-print it.
-
-[source, python]
-----
-# Print the result (a Pandas DataFrame) as a console-friendly string
-print(results[:5].to_string())
-----
-
-[source, python]
-----
-# Print the result (a Pandas DataFrame) as a prettified JSON string
-print(results[:5].to_json(orient="table", indent=2))
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python]
-----
-# Import the json module for pretty visualization
-import json
-
-# Cypher query
-list_all_gds_procedures_query = """
- CALL gds.list()
-"""
-
-# Create a driver session
-with driver.session() as session:
- # Use .data() to access the results array
- results = session.run(list_all_gds_procedures_query).data()
-
- # Print the prettified result
- print(json.dumps(results[:5], indent=2))
-----
-=====
-====
-
-=== Serializing Neo4j `DateTime` in JSON dumps
-
-In some cases the result of a procedure call may contain Neo4j `DateTime` objects. In order to serialize such objects into JSON, a default handler must be provided.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-# Import for the JSON helper function
-from neo4j.time import DateTime
-
-# Helper function for serializing Neo4j DateTime in JSON dumps
-def default(o):
- if isinstance(o, (DateTime)):
- return o.isoformat()
-
-# Run the graph generation algorithm
-g, _ = gds.beta.graph.generate(
- "example-graph", 10, 3, relationshipDistribution="POWER_LAW"
-)
-
-# Drop the graph keeping the result of the operation, which contains
-# some DateTime fields ("creationTime" and "modificationTime")
-result = gds.graph.drop(g)
-
-# Print the result as JSON, converting the DateTime fields with
-# the handler defined above
-print(result.to_json(indent=2, default_handler=default))
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Import to prettify results
-import json
-
-# Import for the JSON helper function
-from neo4j.time import DateTime
-
-# Helper function for serializing Neo4j DateTime in JSON dumps
-def default(o):
- if isinstance(o, (DateTime)):
- return o.isoformat()
-
-# Example query to run a graph generation algorithm
-create_example_graph_query = """
- CALL gds.beta.graph.generate(
- 'example-graph', 10, 3, {relationshipDistribution: 'POWER_LAW'}
- )
-"""
-
-# Example query to delete a graph
-delete_example_graph_query = """
- CALL gds.graph.drop('example-graph')
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run the graph generation algorithm
- session.run(create_example_graph_query).data()
-
- # Drop the generated graph keeping the result of the operation
- results = session.run(delete_example_graph_query).data()
-
- # Prettify the results using the handler defined above
- print(json.dumps(results, indent=2, sort_keys=True, default=default))
-----
-=====
-====
-
-== Closing the connection
-
-include::partial$aurads/close-connection.adoc[]
-
-include::partial$aurads/references.adoc[]
diff --git a/modules/ROOT/pages/aurads/create-instance.adoc b/modules/ROOT/pages/aurads/create-instance.adoc
deleted file mode 100644
index 3f09a6c3b..000000000
--- a/modules/ROOT/pages/aurads/create-instance.adoc
+++ /dev/null
@@ -1,25 +0,0 @@
-[[aurads-create]]
-= Creating an AuraDS instance
-:description: This page describes how to create a Neo4j AuraDS instance.
-
-. Navigate to the https://console.neo4j.io/?product=aura-ds[Neo4j Aura Console^].
-. Select *New instance* to open the *Create an instance* page.
-. Fill in the instance details:
-* *Instance Name* footnote:[In AuraDS Professional, this field becomes available after selecting the *Calculate Estimate* button.] - The name to give to the instance. A descriptive name makes it easier to find a specific instance among many.
-* *Region* - The physical location of the instance. Set this as close to your location as possible. The closer the region is to your location, the faster the response time for any network interactions with the instance.
-* *Number of nodes/relationships* - The estimated number of nodes and relationships that the instance should support.
-. Select one or more algorithm categories from the *Which algorithms are you going to use?* section (or select *I'm not sure which algorithms to use*) to help estimate the most appropriate instance size. An overview of each algorithm category can be found link:{neo4j-docs-base-uri}/graph-data-science/current/algorithms/[here^].
-. Select *Calculate Estimate* to get an estimate of the resources needed to run the graph (memory, CPU, storage) along with the expected price.
-. Select *Create* to proceed.
-. Copy and store the *Username* and *Generated password* credentials to access the instance just created. Alternatively, you can download the credentials as a `.txt` file.
-+
-WARNING: Make sure that the username and password are stored safely before continuing. Credentials cannot be recovered afterwards.
-+
-. Tick the confirmation checkbox and select *Continue*.
-
-[NOTE]
-====
-Multi-database is not supported within Neo4j AuraDS.
-====
-
-The process will take a few minutes to complete. Upon completion, you will be able to xref:aurads/connecting/index.adoc[connect to the instance].
diff --git a/modules/ROOT/pages/aurads/importing-data/data-importer.adoc b/modules/ROOT/pages/aurads/importing-data/data-importer.adoc
deleted file mode 100644
index 34d80b797..000000000
--- a/modules/ROOT/pages/aurads/importing-data/data-importer.adoc
+++ /dev/null
@@ -1,25 +0,0 @@
-[[aurads-data-importer]]
-= Using Neo4j Data Importer
-:description: This page describes how to use Neo4j Data Importer with a Neo4j AuraDS instance.
-
-[NOTE]
-====
-The process of importing or loading data requires you to xref:aurads/create-instance.adoc[create an AuraDS instance] beforehand.
-====
-
-Neo4j Data Importer is a UI-based tool for importing data that lets you:
-
-. Load data from flat files (`.csv` and `.tsv`).
-. Define a graph model and map data to it.
-. Import the data into an AuraDS instance.
-
-To load data with Neo4j Data Importer:
-
-. Navigate to the https://console.neo4j.io/?product=aura-ds[Neo4j Aura Console^] in your browser.
-. Select the *Import* button on the instance you want to open.
-
-Alternatively, you can access Data Importer from the *Import* tab of xref:aurads/connecting/neo4j-applications#_neo4j_workspace[Neo4j Workspace].
-
-Once you have opened Neo4j Data Importer, you can follow the built-in tutorial to learn how to use the tool.
-
-For more information on Neo4j Data Importer, see the link:{neo4j-docs-base-uri}/data-importer/current/[Neo4j Data Importer documentation].
\ No newline at end of file
diff --git a/modules/ROOT/pages/aurads/importing-data/import-db.adoc b/modules/ROOT/pages/aurads/importing-data/import-db.adoc
deleted file mode 100644
index ceec972c5..000000000
--- a/modules/ROOT/pages/aurads/importing-data/import-db.adoc
+++ /dev/null
@@ -1,10 +0,0 @@
-[[aurads-import-db]]
-= Importing an existing database
-:description: This page describes how to import an existing Neo4j database into an AuraDS instance.
-
-[NOTE]
-====
-The process of importing or loading data requires you to xref:aurads/create-instance.adoc[create an AuraDS instance] beforehand.
-====
-
-include::partial$import-database.adoc[]
\ No newline at end of file
diff --git a/modules/ROOT/pages/aurads/importing-data/index.adoc b/modules/ROOT/pages/aurads/importing-data/index.adoc
deleted file mode 100644
index 5a6d97aa5..000000000
--- a/modules/ROOT/pages/aurads/importing-data/index.adoc
+++ /dev/null
@@ -1,9 +0,0 @@
-[[aurads-importing-data]]
-= Importing data
-
-There are several ways to import data into AuraDS:
-
-* Importing an xref:aurads/importing-data/import-db.adoc[existing Neo4j database]
-* Using the xref:aurads/importing-data/data-importer.adoc[Data Importer]
-* Using Cypher's xref:aurads/importing-data/load-csv.adoc[`LOAD CSV`] clause
-* Using the xref:aurads/tutorials/arrow-examples.adoc[Arrow Flight server]
\ No newline at end of file
diff --git a/modules/ROOT/pages/aurads/importing-data/load-csv.adoc b/modules/ROOT/pages/aurads/importing-data/load-csv.adoc
deleted file mode 100644
index 751bae4fa..000000000
--- a/modules/ROOT/pages/aurads/importing-data/load-csv.adoc
+++ /dev/null
@@ -1,340 +0,0 @@
-[[aurads-load-csv]]
-= Loading CSV files
-:description: This page describes how to load CSV files into a Neo4j AuraDS instance.
-:star: *
-:notebook-name: Loading_CSV_files_(GDS_client).ipynb
-
-include::partial$aurads/colab.adoc[]
-
-A CSV file can be loaded into an AuraDS instance using the link:{neo4j-docs-base-uri}/cypher-manual/current/clauses/load-csv/[`LOAD CSV`^] Cypher clause. For security reasons it is not possible to load local CSV files, which must instead be publicly accessible on HTTP or HTTPS servers such as GitHub, Google Drive, or Dropbox. Another way to make CSV files available is to upload them to a cloud storage bucket (such as Google Cloud Storage or Amazon S3) and configure the bucket as a static website.
-
-In this example we will load three CSV files:
-
-* `movies.csv`: a list of movies with their title, release year and a short description
-* `people.csv`: a list of actors with their year of birth
-* `actors.csv`: a list of acting roles, where actors are matched with the movies they had a role in
-
-WARNING: The `LOAD CSV` command is built to handle small to medium-sized data sets, such as anything up to 10 million nodes and relationships. You should avoid using this command for any data sets exceeding this limit.
-
-include::partial$aurads/setup.adoc[]
-
-== Create constraints
-
-Adding constraints before loading any data usually improves loading performance: besides enforcing an integrity check, a unique constraint also creates an index on the constrained property, so that `MATCH` and `MERGE` operations during loading are faster.
-
-[WARNING]
-====
-For best performance when using `MERGE` or `MATCH` with `LOAD CSV`, make sure an index or a unique constraint has been created on the property used for merging.
-Read the link:{neo4j-docs-base-uri}/cypher-manual/current/constraints/[Cypher documentation] for more information on constraints.
-====
-
-In this example we add uniqueness constraints on both movie titles and actors' names.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-# Make movie titles unique
-gds.run_cypher("""
- CREATE CONSTRAINT FOR (movie:Movie) REQUIRE movie.title IS UNIQUE
-""")
-
-# Make person names unique
-gds.run_cypher("""
- CREATE CONSTRAINT FOR (person:Person) REQUIRE person.name IS UNIQUE
-""")
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CREATE CONSTRAINT FOR (movie:Movie) REQUIRE movie.title IS UNIQUE;
-CREATE CONSTRAINT FOR (person:Person) REQUIRE person.name IS UNIQUE;
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-movie_title_constraint = """
- CREATE CONSTRAINT FOR (movie:Movie) REQUIRE movie.title IS UNIQUE
-"""
-
-person_name_constraint = """
- CREATE CONSTRAINT FOR (person:Person) REQUIRE person.name IS UNIQUE
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Make movie titles unique
- session.run(movie_title_constraint).data()
- # Make person names unique
- session.run(person_name_constraint).data()
-----
-=====
-====
-
-== Add nodes from CSV files
-
-We are now ready to load the CSV files from their URIs and create nodes from the data they contain. In the following examples, `LOAD CSV` is used with `WITH HEADERS` to access `row` fields by their corresponding column name. Furthermore:
-
-* `MERGE` is used with the indexed properties to take advantage of the constraints created in the <<_create_constraints>> section.
-* `ON CREATE SET` is used to set the value of a node property when a new one is created.
-* `RETURN count({star})` is used to show the number of processed rows.
-
-Note that the CSV files in this example are curated, so some assumptions are made for simplicity. In a real-world scenario, for example, a CSV file could contain multiple rows that would attempt to assign different property values to the same node; in this case, an link:{neo4j-docs-base-uri}/cypher-manual/current/clauses/merge/#merge-merge-with-on-match[`ON MATCH SET`^] clause must be added to ensure this case is dealt with appropriately.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-gds.run_cypher("""
- LOAD CSV
- WITH HEADERS
- FROM 'https://data.neo4j.com/intro/movies/movies.csv' AS row
- MERGE (m:Movie {title: row.title})
- ON CREATE SET m.released = toInteger(row.released), m.tagline = row.tagline
- RETURN count({star})
-""")
-
-gds.run_cypher("""
- LOAD CSV
- WITH HEADERS
- FROM 'https://data.neo4j.com/intro/movies/people.csv' AS row
- MERGE (p:Person {name: row.name})
- ON CREATE SET p.born = toInteger(row.born)
- RETURN count({star})
-""")
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-LOAD CSV
- WITH HEADERS
- FROM 'https://data.neo4j.com/intro/movies/movies.csv' AS row
-MERGE (m:Movie {title: row.title})
- ON CREATE SET m.released = toInteger(row.released), m.tagline = row.tagline
-RETURN count({star});
-
-LOAD CSV
- WITH HEADERS
- FROM 'https://data.neo4j.com/intro/movies/people.csv' AS row
-MERGE (p:Person {name: row.name})
- ON CREATE SET p.born = toInteger(row.born)
-RETURN count({star});
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-load_movies_csv = """
- LOAD CSV
- WITH HEADERS
- FROM 'https://data.neo4j.com/intro/movies/movies.csv' AS row
- MERGE (m:Movie {title: row.title})
- ON CREATE SET m.released = toInteger(row.released), m.tagline = row.tagline
- RETURN count({star})
-"""
-
-load_people_csv = """
- LOAD CSV
- WITH HEADERS
- FROM 'https://data.neo4j.com/intro/movies/people.csv' AS row
- MERGE (p:Person {name: row.name})
- ON CREATE SET p.born = toInteger(row.born)
- RETURN count({star})
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Load the CSV files
- session.run(load_movies_csv).data()
- session.run(load_people_csv).data()
-----
-=====
-====
-
-== Add relationships from CSV files
-
-Similarly to what we have done for nodes, we now create relationships from the `actors.csv` file.
-In the following example, `LOAD CSV` is used with the `WITH HEADERS` option to access the fields in each `row` by their corresponding column name.
-
-[TIP]
-====
-The default field delimiter for `LOAD CSV` is the comma (`,`).
-Use the link:{neo4j-docs-base-uri}/cypher-manual/current/clauses/load-csv/#load-csv-import-data-from-a-csv-file-with-a-custom-field-delimiter[`FIELDTERMINATOR` option] to set a different delimiter.
-
-If the CSV file is large, use the link:{neo4j-docs-base-uri}/cypher-manual/current/clauses/load-csv/#load-csv-importing-large-amounts-of-data[`CALL IN TRANSACTIONS` clause] to commit a number of rows per transaction instead of the whole file.
-====
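-
-As a minimal sketch of combining these two options, the following statement reads a semicolon-delimited file and commits 500 rows per transaction; the URL and the `name` column are hypothetical placeholders, and in Neo4j Browser the statement must be prefixed with `:auto` so that it runs in an implicit transaction:
-
-[source, cypher]
-----
-LOAD CSV
-  WITH HEADERS
-  FROM 'https://example.com/people.csv' AS row FIELDTERMINATOR ';'
-CALL {
-  WITH row
-  MERGE (p:Person {name: row.name})
-} IN TRANSACTIONS OF 500 ROWS
-----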
-
-Furthermore:
-
-* `MATCH` and `MERGE` are used to find nodes (taking advantage of the constraints created in the <<_create_constraints>> section) and create a relationship between them.
-* `ON CREATE SET` is used to set the value of a relationship property when a new one is created.
-* `RETURN count({star})` is used to show the number of processed rows.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-gds.run_cypher("""
- LOAD CSV
- WITH HEADERS
- FROM 'https://data.neo4j.com/intro/movies/actors.csv' AS row
- MATCH (p:Person {name: row.person})
- MATCH (m:Movie {title: row.movie})
- MERGE (p)-[actedIn:ACTED_IN]->(m)
- ON CREATE SET actedIn.roles = split(row.roles, ';')
- RETURN count({star})
-""")
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-LOAD CSV
- WITH HEADERS
- FROM 'https://data.neo4j.com/intro/movies/actors.csv' AS row
-MATCH (p:Person {name: row.person})
-MATCH (m:Movie {title: row.movie})
-MERGE (p)-[actedIn:ACTED_IN]->(m)
- ON CREATE SET actedIn.roles = split(row.roles, ';')
-RETURN count({star})
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-load_actors_csv = """
- LOAD CSV
- WITH HEADERS
- FROM 'https://data.neo4j.com/intro/movies/actors.csv' AS row
- MATCH (p:Person {name: row.person})
- MATCH (m:Movie {title: row.movie})
- MERGE (p)-[actedIn:ACTED_IN]->(m)
- ON CREATE SET actedIn.roles = split(row.roles, ';')
- RETURN count({star})
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Load the CSV file
- session.run(load_actors_csv).data()
-----
-=====
-====
-
-== Run a Cypher query
-
-Once all the nodes and relationships have been created, we can run a query to check that the data have been inserted correctly. The following query looks for movies featuring `Keanu Reeves`, orders them by release year, and collects their titles.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-gds.run_cypher("""
- MATCH (person:Person {name: "Keanu Reeves"})-[:ACTED_IN]->(movie)
- RETURN movie.released, COLLECT(movie.title) AS movies
- ORDER BY movie.released
-""")
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-MATCH (person:Person {name: "Keanu Reeves"})-[:ACTED_IN]->(movie)
-RETURN movie.released, COLLECT(movie.title) AS movies
-ORDER BY movie.released
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-query = """
- MATCH (person:Person {name: "Keanu Reeves"})-[:ACTED_IN]->(movie)
- RETURN movie.released, COLLECT(movie.title) AS movies
- ORDER BY movie.released
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run the Cypher query
- result = session.run(query).data()
-
- # Print the formatted result
- print(json.dumps(result, indent=2))
-----
-=====
-====
-
-== Cleanup
-
-When the data are no longer useful, the database can be cleaned up.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-# Delete data
-gds.run_cypher("""
- MATCH (n)
- DETACH DELETE n
-""")
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-MATCH (n)
-DETACH DELETE n
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-delete_data = """
- MATCH (n)
- DETACH DELETE n
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Delete the data
- session.run(delete_data).data()
-----
-=====
-====
-
-=== Closing the connection
-
-include::partial$aurads/close-connection.adoc[]
diff --git a/modules/ROOT/pages/aurads/importing-data/spark.adoc b/modules/ROOT/pages/aurads/importing-data/spark.adoc
deleted file mode 100644
index 47cdd04d9..000000000
--- a/modules/ROOT/pages/aurads/importing-data/spark.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-[[aurads-import-from-spark]]
-= Importing data from Spark
-:description: This page describes how to import data from Spark into a Neo4j AuraDS instance.
-
-WARNING: Provisional page. To be reviewed for content and UI.
diff --git a/modules/ROOT/pages/aurads/index.adoc b/modules/ROOT/pages/aurads/index.adoc
deleted file mode 100644
index bdfece984..000000000
--- a/modules/ROOT/pages/aurads/index.adoc
+++ /dev/null
@@ -1,30 +0,0 @@
-[[aurads]]
-= Neo4j AuraDS overview
-:description: This section introduces Neo4j AuraDS.
-:check-mark: icon:check[]
-:table-caption!:
-
-AuraDS is the fully managed version of Neo4j Graph Data Science.
-
-AuraDS instances:
-
-* are automatically upgraded and patched;
-* can be seamlessly scaled up or down;
-* can be paused to reduce costs.
-
-== Plans
-
-AuraDS offers the *AuraDS Professional* and *AuraDS Enterprise* subscription plans.
-The full list of features for each plan is available on the link:https://neo4j.com/pricing/#graph-data-science[Neo4j Pricing page].
-
-== Updates and upgrades
-
-AuraDS updates and upgrades are handled by the platform, and as such do not require user intervention. Security patches and new versions of GDS and Neo4j are installed within short time windows during which the affected instances are unavailable.
-
-The operations are non-destructive, so graph projections, models, and data present on an instance are not affected. No operation is applied until all the running GDS algorithms have completed.
-
-== Support
-
-For a breakdown of the support offered across plan types as well as the support holiday schedule, see the https://aura.support.neo4j.com/hc/en-us/articles/360053850514[Aura Support page].
-
-Additionally, you can access the https://status.neo4j.io/[Aura Status page] to check the current operational status of Aura and subscribe to updates.
\ No newline at end of file
diff --git a/modules/ROOT/pages/aurads/managing-instances/advanced-metrics.adoc b/modules/ROOT/pages/aurads/managing-instances/advanced-metrics.adoc
deleted file mode 100644
index 0582bcc56..000000000
--- a/modules/ROOT/pages/aurads/managing-instances/advanced-metrics.adoc
+++ /dev/null
@@ -1,55 +0,0 @@
-[[aura-monitoring]]
-= Advanced metrics
-
-Advanced metrics is a feature that gives access to a broad range of instance and database metrics.
-
-To access *Advanced metrics*:
-
-. Navigate to the https://console.neo4j.io/?product=aura-db[Neo4j Aura Console] in your browser.
-. Select the instance you want to access.
-. Select the *Metrics* tab.
-. Select the *Advanced metrics* button.
-
-The metrics are laid out across three tabs according to their category:
-
-* *Resources* - Overall system resources, such as CPU, RAM and disk usage.
-* *Instance* - Information about the Neo4j instance(s) running the database.
-* *Database* - Metrics concerning the database itself, such as usage statistics and entity counts.
-
-When viewing metrics, you can select from the following time intervals:
-
-* 30 minutes
-* 6 hours
-* 24 hours
-* 3 days
-* 7 days
-* 14 days
-* 30 days
-
-== Chart interactions
-
-[NOTE]
-====
-Memory and storage charts can be toggled between absolute and relative values using the *%* toggle.
-====
-
-=== Zoom
-
-To zoom in to a narrower time interval, click and drag inside any chart to select the desired range.
-The data will automatically update to match the increased resolution.
-
-To reset zoom, double-click anywhere inside the chart or use the option in the context menu.
-
-=== Expand
-
-Any chart can be expanded to take up all the available screen real estate by clicking the *expand* button (shown as two opposing arrows).
-To exit this mode, click the *x* button on the expanded view.
-
-=== Context menu
-
-To access the chart context menu, select the *...* button on any chart.
-
-* *More info* - Selecting *More info* brings up an explanation of the particular metric.
-For some metrics it also provides hints about possible actions to take if that metric falls outside the expected range.
-
-* *Reset zoom* - If the zoom level has been altered by selecting and dragging across a chart, *Reset zoom* resets the zoom back to the selected interval.
diff --git a/modules/ROOT/pages/aurads/managing-instances/backup-restore-export.adoc b/modules/ROOT/pages/aurads/managing-instances/backup-restore-export.adoc
deleted file mode 100644
index e6f2f83ae..000000000
--- a/modules/ROOT/pages/aurads/managing-instances/backup-restore-export.adoc
+++ /dev/null
@@ -1,51 +0,0 @@
-[[aurads-backup-restore-export]]
-= Backup, export, and restore
-:description: This page describes how to back up, export, and restore your data from a snapshot.
-
-The data in your AuraDS instance can be backed up, exported, and restored using _snapshots_.
-A snapshot is a copy of the data in an instance at a specific point in time.
-
-The *Snapshots* tab within an AuraDS instance shows a list of available snapshots.
-To access the *Snapshots* tab:
-
-. Navigate to the https://console.neo4j.io/?product=aura-ds[Neo4j Aura Console^] in your browser.
-. Select the instance you want to access.
-. Select the *Snapshots* tab.
-
-== Snapshot types
-
-There are two different types of snapshot:
-
-* *Scheduled* - Runs when you first create an instance, when changes to the underlying system occur (for example, a new patch release), and automatically once a day.
-* *On-demand* - Runs when you select *Take snapshot* from the *Snapshots* tab of an instance.
-
-[NOTE]
-====
-Scheduled daily snapshots are kept for 7 days for Professional instances and 14 days for Enterprise instances.
-On-demand snapshots are kept in the system for 180 days for all instances.
-====
-
-== Snapshot actions
-
-=== Take a snapshot
-
-You can manually trigger an *On-demand* snapshot by selecting *Take snapshot* in the *Snapshots* tab.
-The snapshot status is shown as `In progress` until the snapshot has been created; then, the `Status` becomes `Completed`.
-
-=== Restore
-
-You can restore data in your instance to a previous snapshot by selecting *Restore* next to the snapshot you want to restore.
-
-Restoring a snapshot requires you to confirm the action by typing RESTORE and selecting *Restore*.
-
-[CAUTION]
-====
-Restoring a snapshot overwrites the data in your instance, replacing it with the data contained in the snapshot.
-====
-
-=== Backup and Export
-
-By selecting the ellipses (...) button next to a snapshot, you can:
-
-* *Export* - Download the database as a compressed file, allowing you to store a local copy and work on your data offline. The compressed archive contains a *_.dump_* file that can be imported directly or pushed to the cloud.
-* *Create instance from snapshot* - Create a new AuraDS instance using the data from the snapshot. This opens a window where you can assign a name to the instance that will be created.
diff --git a/modules/ROOT/pages/aurads/managing-instances/instance-actions.adoc b/modules/ROOT/pages/aurads/managing-instances/instance-actions.adoc
deleted file mode 100644
index 00aa5f620..000000000
--- a/modules/ROOT/pages/aurads/managing-instances/instance-actions.adoc
+++ /dev/null
@@ -1,168 +0,0 @@
-[[aurads-instance-actions]]
-= Instance actions
-:description: This page describes the available actions for an AuraDS instance.
-
-You can perform several actions on an AuraDS instance from the https://console.neo4j.io/?product=aura-ds[Neo4j Aura Console^] homepage.
-
-== Renaming an instance
-
-You can change the name of an existing instance by using the *Rename* action.
-
-To rename an instance:
-
-. Select the ellipsis (*...*) button on the instance you want to rename.
-. Select *Rename* from the resulting menu.
-. Enter a new name for the instance.
-. Select *Rename*.
-
-== Resizing an instance
-
-You can change the size of an existing instance by using the *Resize* action.
-
-To resize an instance:
-
-. Select the ellipsis (...) on the instance you want to resize.
-. Select *Resize* from the resulting menu.
-. Select the new size you want your instance to be.
-. Tick the *I understand* checkbox and select *Submit*.
-
-An instance becomes unavailable for a short period of time during the resize operation.
-
-== Pausing an instance
-
-You can pause an instance during periods when you don't need it and resume it at any time.
-
-To pause an instance:
-
-. Select the pause icon on the instance you want to pause.
-. Select *Pause* to confirm.
-
-After confirming, the instance will begin pausing, and a *Resume* button will replace the *Pause* button.
-
-[NOTE]
-====
-Paused instances run at a discounted rate compared to standard consumption, as outlined in the confirmation window.
-You can pause an instance for up to 30 days, after which point AuraDS automatically resumes the instance.
-====
-
-== Resuming an instance
-
-To resume an instance:
-
-. Select the play icon on the instance you want to resume.
-. Tick the *I understand* checkbox and select *Resume* to confirm.
-
-After confirming, the instance will begin resuming, which may take a few minutes.
-
-== Cloning an instance
-
-You can clone an existing instance to create a new instance with the same data.
-You can clone across regions, from AuraDB to AuraDS and vice versa, and from Neo4j version 4 to Neo4j version 5.
-
-There are four options to clone an instance:
-
-* Clone to a new AuraDS instance
-* Clone to an existing AuraDS instance
-* Clone to a new AuraDB database
-* Clone to an existing AuraDB database
-
-You can access all the cloning options from the ellipsis (*...*) button on the AuraDS instance.
-
-[NOTE]
-====
-You cannot clone from a Neo4j version 5 instance to a Neo4j version 4 instance.
-====
-
-=== Clone to a new AuraDS instance
-
-. Select the ellipsis (*...*) button on the instance you want to clone.
-. Select *Clone To New* and then *AuraDS* from the contextual menu.
-. Set the desired name for the new instance.
-. Check the *I understand* box and select *Clone Instance*.
-+
-[WARNING]
-====
-Make sure that the username and password are stored safely before continuing.
-Credentials cannot be recovered afterwards.
-====
-
-=== Clone to an existing AuraDS instance
-
-When you clone an instance to an existing instance, the database connection URI stays the same, but the data is replaced with the data from the cloned instance.
-
-[WARNING]
-====
-Cloning into an existing instance will replace all existing data.
-If you want to keep the current data, take a snapshot and export it.
-====
-
-. Select the ellipsis (*...*) button on the instance you want to clone.
-. Select *Clone To Existing* and then *AuraDS* from the contextual menu.
-. If necessary, change the instance name.
-. Select the existing AuraDS instance to clone to from the dropdown menu.
-+
-[NOTE]
-====
-Existing instances that are not large enough to clone into will not be available for selection.
-In the dropdown menu, they are grayed out and have the string `(Instance is not large enough to clone into)` appended to their name.
-====
-+
-. Tick the *I understand* checkbox and select *Clone*.
-
-=== Clone to a new AuraDB instance
-
-[NOTE]
-====
-An AuraDS instance can only be cloned to an AuraDB Professional database (not Free).
-====
-
-. Select the ellipsis (*...*) button on the instance you want to clone.
-. Select *Clone To New* and then *AuraDB* from the contextual menu.
-. Set your desired settings for the new database. For more information on AuraDB database creation, see xref:auradb/getting-started/create-database.adoc[].
-. Check the *I understand* box and select *Clone Database*.
-+
-[WARNING]
-====
-Make sure that the username and password are stored safely before continuing.
-Credentials cannot be recovered afterwards.
-====
-
-=== Clone to an existing AuraDB instance
-
-[NOTE]
-====
-An AuraDS instance can only be cloned to an AuraDB Professional database (not Free).
-====
-
-[WARNING]
-====
-Cloning into an existing instance will replace all existing data.
-If you want to keep the current data, take a snapshot and export it.
-====
-
-. Select the ellipsis (*...*) button on the instance you want to clone.
-. Select *Clone To Existing* and then *AuraDB* from the contextual menu.
-. If necessary, change the database name.
-. Select the existing AuraDB database to clone to from the dropdown menu.
-+
-[NOTE]
-====
-Existing instances that are not large enough to clone into will not be available for selection.
-In the dropdown menu, they will be grayed out and have the string `(Instance is not large enough to clone into)` appended to their name.
-====
-+
-. Check the *I understand* box and select *Clone*.
-
-== Deleting an instance
-
-You can delete an instance if you no longer want to be billed for it.
-
-[WARNING]
-====
-There is no way to recover data from a deleted AuraDS instance.
-====
-
-To delete an instance:
-
-* Select the red trashcan icon on the instance you want to delete.
-* Type the exact name of the instance (as instructed) to confirm your decision, and select *Destroy*.
diff --git a/modules/ROOT/pages/aurads/managing-instances/monitoring.adoc b/modules/ROOT/pages/aurads/managing-instances/monitoring.adoc
deleted file mode 100644
index 55e7e68a9..000000000
--- a/modules/ROOT/pages/aurads/managing-instances/monitoring.adoc
+++ /dev/null
@@ -1,29 +0,0 @@
-[[aurads-monitoring]]
-= Monitoring
-
-To access the *Metrics* tab:
-
-. Navigate to the https://console.neo4j.io/?product=aura-ds[Neo4j Aura Console^] in your browser.
-. Select the name of the instance you want to access.
-. Select the *Metrics* tab.
-
-You can monitor the following metrics of an AuraDS instance:
-
-* *CPU Usage (%)* - The amount of CPU used by the instance as a percentage.
-* *Storage Used (%)* - The amount of disk storage space used by the instance as a percentage.
-* *Heap Usage (%)* - The amount of Java Virtual Machine (JVM) memory used by the instance as a percentage.
-* *Out of Memory Errors* - The number of Out of Memory (OOM) errors encountered by the instance.
-* *Garbage Collection Time (%)* - The amount of time the instance spends reclaiming heap space as a percentage.
-
-[NOTE]
-====
-More information on each metric, as well as suggestions for managing them, can be found within the *Metrics* tab itself.
-====
-
-When viewing metrics, you can select from the following time intervals:
-
-* 6 hours
-* 24 hours
-* 3 days
-* 7 days
-* 30 days
\ No newline at end of file
diff --git a/modules/ROOT/pages/aurads/tutorials/algorithm-modes.adoc b/modules/ROOT/pages/aurads/tutorials/algorithm-modes.adoc
deleted file mode 100644
index f853156ab..000000000
--- a/modules/ROOT/pages/aurads/tutorials/algorithm-modes.adoc
+++ /dev/null
@@ -1,524 +0,0 @@
-[[algorithm-modes]]
-= Executing the different algorithm modes
-:description: This page describes how to use the different algorithm modes.
-:generated-graph-size: 100
-:notebook-name: Executing_the_different_algorithm_modes_(GDS_client).ipynb
-
-include::partial$aurads/colab.adoc[]
-
-This example explains link:{neo4j-docs-base-uri}/graph-data-science/current/common-usage/running-algos[execution modes^] for GDS algorithms and how to use each one of them.
-
-include::partial$aurads/setup.adoc[]
-
-== Create an example graph
-
-We start by creating some basic graph data.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-gds.run_cypher("""
- CREATE
- (home:Page {name:'Home'}),
- (about:Page {name:'About'}),
- (product:Page {name:'Product'}),
- (links:Page {name:'Links'}),
- (a:Page {name:'Site A'}),
- (b:Page {name:'Site B'}),
- (c:Page {name:'Site C'}),
- (d:Page {name:'Site D'}),
-
- (home)-[:LINKS {weight: 0.2}]->(about),
- (home)-[:LINKS {weight: 0.2}]->(links),
- (home)-[:LINKS {weight: 0.6}]->(product),
- (about)-[:LINKS {weight: 1.0}]->(home),
- (product)-[:LINKS {weight: 1.0}]->(home),
- (a)-[:LINKS {weight: 1.0}]->(home),
- (b)-[:LINKS {weight: 1.0}]->(home),
- (c)-[:LINKS {weight: 1.0}]->(home),
- (d)-[:LINKS {weight: 1.0}]->(home),
- (links)-[:LINKS {weight: 0.8}]->(home),
- (links)-[:LINKS {weight: 0.05}]->(a),
- (links)-[:LINKS {weight: 0.05}]->(b),
- (links)-[:LINKS {weight: 0.05}]->(c),
- (links)-[:LINKS {weight: 0.05}]->(d)
-""")
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CREATE
- (home:Page {name:'Home'}),
- (about:Page {name:'About'}),
- (product:Page {name:'Product'}),
- (links:Page {name:'Links'}),
- (a:Page {name:'Site A'}),
- (b:Page {name:'Site B'}),
- (c:Page {name:'Site C'}),
- (d:Page {name:'Site D'}),
-
- (home)-[:LINKS {weight: 0.2}]->(about),
- (home)-[:LINKS {weight: 0.2}]->(links),
- (home)-[:LINKS {weight: 0.6}]->(product),
- (about)-[:LINKS {weight: 1.0}]->(home),
- (product)-[:LINKS {weight: 1.0}]->(home),
- (a)-[:LINKS {weight: 1.0}]->(home),
- (b)-[:LINKS {weight: 1.0}]->(home),
- (c)-[:LINKS {weight: 1.0}]->(home),
- (d)-[:LINKS {weight: 1.0}]->(home),
- (links)-[:LINKS {weight: 0.8}]->(home),
- (links)-[:LINKS {weight: 0.05}]->(a),
- (links)-[:LINKS {weight: 0.05}]->(b),
- (links)-[:LINKS {weight: 0.05}]->(c),
- (links)-[:LINKS {weight: 0.05}]->(d)
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query
-create_example_graph_on_disk_query = """
- CREATE
- (home:Page {name:'Home'}),
- (about:Page {name:'About'}),
- (product:Page {name:'Product'}),
- (links:Page {name:'Links'}),
- (a:Page {name:'Site A'}),
- (b:Page {name:'Site B'}),
- (c:Page {name:'Site C'}),
- (d:Page {name:'Site D'}),
-
- (home)-[:LINKS {weight: 0.2}]->(about),
- (home)-[:LINKS {weight: 0.2}]->(links),
- (home)-[:LINKS {weight: 0.6}]->(product),
- (about)-[:LINKS {weight: 1.0}]->(home),
- (product)-[:LINKS {weight: 1.0}]->(home),
- (a)-[:LINKS {weight: 1.0}]->(home),
- (b)-[:LINKS {weight: 1.0}]->(home),
- (c)-[:LINKS {weight: 1.0}]->(home),
- (d)-[:LINKS {weight: 1.0}]->(home),
- (links)-[:LINKS {weight: 0.8}]->(home),
- (links)-[:LINKS {weight: 0.05}]->(a),
- (links)-[:LINKS {weight: 0.05}]->(b),
- (links)-[:LINKS {weight: 0.05}]->(c),
- (links)-[:LINKS {weight: 0.05}]->(d)
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run query
- result = session.run(create_example_graph_on_disk_query).data()
-
- # Prettify the result
- print(json.dumps(result, indent=2, sort_keys=True))
-----
-=====
-====
-
-We then project an in-memory graph from the data just created.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-g, result = gds.graph.project(
- "example-graph",
- "Page",
- "LINKS",
- relationshipProperties="weight"
-)
-
-print(result)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.graph.project(
- 'example-graph',
- 'Page',
- 'LINKS',
- {
- relationshipProperties: 'weight'
- }
-)
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query
-create_example_graph_in_memory_query = """
- CALL gds.graph.project(
- 'example-graph',
- 'Page',
- 'LINKS',
- {
- relationshipProperties: 'weight'
- }
- )
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run query
- result = session.run(create_example_graph_in_memory_query).data()
-
- # Prettify the result
- print(json.dumps(result, indent=2, sort_keys=True))
-----
-=====
-====
-
-== Execution modes
-
-Every production-tier algorithm can be run in four different modes:
-
-* `stats`
-* `stream`
-* `mutate`
-* `write`
-
-An additional `estimate` mode is explained in detail in the xref:aurads/tutorials/memory-estimation.adoc[] section.
-
-In the following sections we use the PageRank algorithm to show the usage of each execution mode.
-
-=== Stats
-
-The link:{neo4j-docs-base-uri}/graph-data-science/current/common-usage/running-algos/#running-algos-stats[`stats`^] mode can be useful for evaluating an algorithm's performance without mutating the in-memory graph. When running an algorithm in this mode, a single row containing a summary of the algorithm statistics (for example, counts or percentile distributions) is returned.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-result = gds.pageRank.stats(
- g,
- maxIterations=20,
- dampingFactor=0.85
-)
-
-print(result)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.pageRank.stats(
- 'example-graph',
- {maxIterations: 20, dampingFactor: 0.85}
-)
-YIELD ranIterations,
- didConverge,
- preProcessingMillis,
- computeMillis,
- postProcessingMillis,
- centralityDistribution,
- configuration
-RETURN *
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query
-page_rank_stats_example_graph_query = """
- CALL gds.pageRank.stats(
- 'example-graph',
- {maxIterations: 20, dampingFactor: 0.85}
- )
- YIELD ranIterations,
- didConverge,
- preProcessingMillis,
- computeMillis,
- postProcessingMillis,
- centralityDistribution,
- configuration
- RETURN *
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run query
- result = session.run(page_rank_stats_example_graph_query).data()
-
- # Prettify the result
- print(json.dumps(result, indent=2, sort_keys=True))
-----
-=====
-====
-
-The result contains the time taken to run the algorithm (`computeMillis`) along with other details such as the centrality distribution and the configuration parameters.
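-
-With the GDS client, the summary is returned as a single record, so individual values can be read by key. A minimal sketch, assuming the `result` variable from the GDS-client tab above:
-
-[source, python, subs=attributes+]
-----
-# Read individual values from the stats summary
-print(result["computeMillis"])
-print(result["centralityDistribution"])
-----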
-
-=== Stream
-
-The link:{neo4j-docs-base-uri}/graph-data-science/current/common-usage/running-algos/#running-algos-stream[`stream`^] mode returns the results of an algorithm as Cypher result rows. This is similar to how standard Cypher reading queries operate.
-
-With the PageRank example, this mode returns a node ID and the computed PageRank score for each node. The link:{neo4j-docs-base-uri}/graph-data-science/current/management-ops/utility-functions/#utility-functions-node-path[`gds.util.asNode`^] procedure can then be used to find a node from its node ID (see the sketch after the following example).
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-results = gds.pageRank.stream(
- g,
- maxIterations=20,
- dampingFactor=0.85
-)
-
-print(results)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.pageRank.stream(
- 'example-graph',
- {maxIterations: 20, dampingFactor: 0.85}
-)
-YIELD nodeId, score
-RETURN *
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query to get the internal node ID and score for each node
-page_rank_stream_example_graph_query = """
- CALL gds.pageRank.stream(
- 'example-graph',
- {maxIterations: 20, dampingFactor: 0.85}
- )
- YIELD nodeId, score
- RETURN *
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run query
- results = session.run(page_rank_stream_example_graph_query).data()
-
- # Prettify the results
- print(json.dumps(results, indent=2, sort_keys=True))
-----
-=====
-====
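-
-As mentioned above, the internal node IDs can be resolved with the `gds.util.asNode` procedure. A minimal sketch using the GDS client, reusing the `name` property from the example graph (the descending ordering is illustrative):
-
-[source, python, subs=attributes+]
-----
-gds.run_cypher("""
-    CALL gds.pageRank.stream(
-        'example-graph',
-        {maxIterations: 20, dampingFactor: 0.85}
-    )
-    YIELD nodeId, score
-    RETURN gds.util.asNode(nodeId).name AS name, score
-    ORDER BY score DESC
-""")
-----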
-
-Since an algorithm can run for a long time and the connection may drop unexpectedly, we suggest using the `mutate` or `write` mode instead, to make sure that the computation completes and the results are saved.
-
-=== Mutate
-
-The link:{neo4j-docs-base-uri}/graph-data-science/current/common-usage/running-algos/#running-algos-mutate[`mutate`^] mode operates on the in-memory graph and updates it with a new property specified with the `mutateProperty` configuration parameter. The new property must not already exist in the in-memory graph.
-
-This mode is useful when chaining the execution of several algorithms, each of which relies on the results of the previous one.
-
-In the case of PageRank, the result of this mode is a score for each node. In this example we add the calculated score to each node of the in-memory graph as the value of a new property called `pageRankScore`.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-result = gds.pageRank.mutate(
- g,
- mutateProperty="pageRankScore",
- maxIterations=20,
- dampingFactor=0.85
-)
-
-print(result)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.pageRank.mutate(
- 'example-graph',
- {mutateProperty: 'pageRankScore', maxIterations: 20, dampingFactor: 0.85}
-)
-YIELD nodePropertiesWritten, ranIterations
-RETURN *
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query to mutate the graph
-page_rank_mutate_example_graph_query = """
- CALL gds.pageRank.mutate(
- 'example-graph',
- {mutateProperty: 'pageRankScore', maxIterations: 20, dampingFactor: 0.85}
- )
- YIELD nodePropertiesWritten, ranIterations
- RETURN *
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run query
- result = session.run(page_rank_mutate_example_graph_query).data()
-
- # Prettify the result
- print(json.dumps(result, indent=2, sort_keys=True))
-----
-=====
-====
-
-=== Write
-
-The link:{neo4j-docs-base-uri}/graph-data-science/current/common-usage/running-algos/#running-algos-write[`write`^] mode writes the results of the algorithm computation back to the Neo4j database. The written data can be node properties (such as PageRank scores), new relationships (such as Node Similarity similarities), or relationship properties (only for newly created relationships).
-
-Similarly to the previous example, here we add the calculated score of the PageRank algorithm to each node of the Neo4j database as the value of a new property called `pageRankScore`.
-
-TIP: To use the result of a `write` mode computation with another algorithm, a new in-memory graph must be created from the Neo4j database (see the sketch after the following example).
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-result = gds.pageRank.write(
- g,
- writeProperty="pageRankScore",
- maxIterations=20,
- dampingFactor=0.85
-)
-
-print(result)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.pageRank.write(
- 'example-graph',
- {writeProperty: 'pageRankScore', maxIterations: 20, dampingFactor: 0.85}
-)
-YIELD nodePropertiesWritten, ranIterations
-RETURN *
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query to write the results back to the Neo4j database
-page_rank_write_example_graph_query = """
- CALL gds.pageRank.write(
- 'example-graph',
- {writeProperty: 'pageRankScore', maxIterations: 20, dampingFactor: 0.85}
- )
- YIELD nodePropertiesWritten, ranIterations
- RETURN *
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run query
- result = session.run(page_rank_write_example_graph_query).data()
-
- # Prettify the result
- print(json.dumps(result, indent=2, sort_keys=True))
-----
-=====
-====
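-
-As noted in the tip above, reusing the written `pageRankScore` property with another algorithm requires a new projection that includes it. A minimal sketch using the GDS client; the graph name `example-graph-with-scores` is illustrative:
-
-[source, python, subs=attributes+]
-----
-# Re-project the graph, this time including the property written by the `write` mode
-g_scores, result = gds.graph.project(
-    "example-graph-with-scores",
-    {"Page": {"label": "Page", "properties": "pageRankScore"}},
-    "LINKS"
-)
-
-print(result)
-----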
-
-== Cleanup
-
-After going through the example, both the in-memory graph and the data in the Neo4j database can be deleted.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-result = gds.graph.drop(g)
-print(result)
-
-gds.run_cypher("""
- MATCH (n)
- DETACH DELETE n
-""")
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.graph.drop('example-graph');
-
-MATCH (n)
-DETACH DELETE n;
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-delete_example_in_memory_graph_query = """
- CALL gds.graph.drop('example-graph')
-"""
-
-delete_example_graph = """
- MATCH (n)
- DETACH DELETE n
-"""
-
-with driver.session() as session:
- # Delete in-memory graph
- result = session.run(delete_example_in_memory_graph_query).data()
-
- # Prettify the result
- print(json.dumps(result, indent=2, sort_keys=True, default=default))
-
- # Delete data from Neo4j
- result = session.run(delete_example_graph).data()
-
- # Prettify the result
- print(json.dumps(result, indent=2, sort_keys=True, default=default))
-----
-=====
-====
-
-=== Closing the connection
-
-include::partial$aurads/close-connection.adoc[]
-
-include::partial$aurads/references.adoc[]
\ No newline at end of file
diff --git a/modules/ROOT/pages/aurads/tutorials/algorithm-progress.adoc b/modules/ROOT/pages/aurads/tutorials/algorithm-progress.adoc
deleted file mode 100644
index 706c4195c..000000000
--- a/modules/ROOT/pages/aurads/tutorials/algorithm-progress.adoc
+++ /dev/null
@@ -1,231 +0,0 @@
-[[algorithm-progress]]
-= Monitoring the progress of a running algorithm
-:description: This page describes how to monitor the progress of a running algorithm.
-:generated-graph-size: 1000000
-:notebook-name: Monitoring_the_progress_of_a_running_algorithm_(GDS_client).ipynb
-
-include::partial$aurads/colab.adoc[]
-
-Running algorithms on large graphs can be computationally expensive. This example shows how to use the link:{neo4j-docs-base-uri}/graph-data-science/current/common-usage/logging/#logging-progress-logging[`gds.beta.listProgress`^] procedure to monitor the progress of an algorithm, both to get an idea of the processing speed and to determine when the computation is completed.
-
-include::partial$aurads/setup.adoc[]
-
-include::partial$aurads/generated-graph.adoc[]
-
-== Run an algorithm and check the progress
-
-We need to run an algorithm that takes some time to converge. In this example we use the Label Propagation algorithm, which we start in a separate thread so that we can check its progress in the same Python process.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-# Import to run the long-running algorithm in a thread
-import threading
-# Import to use the sleep method
-import time
-
-
-# Method to call the label propagation algorithm from a thread
-def run_label_prop():
- print("Running label propagation")
-
- result = gds.labelPropagation.mutate(
- g,
- mutateProperty="communityID"
- )
-
- print(result)
-
-
-# Method to get and pretty-print the algorithm progress
-def run_list_progress():
- result = gds.beta.listProgress()
-
- print(result)
-
-
-# Create a thread for the label propagation algorithm and start it
-label_prop_query_thread = threading.Thread(target=run_label_prop)
-label_prop_query_thread.start()
-
-# Sleep for a few seconds so the label propagation query has time to get going
-print('Sleeping for 5 seconds')
-time.sleep(5)
-
-# Check the algorithm progress
-run_list_progress()
-
-# Sleep for a few more seconds
-print('Sleeping for 10 more seconds')
-time.sleep(10)
-
-# Check the algorithm progress again
-run_list_progress()
-
-# Block and wait for the algorithm thread to finish
-label_prop_query_thread.join()
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.labelPropagation.mutate(
- 'example-graph',
- {mutateProperty: 'communityID'}
-)
-YIELD preProcessingMillis,
- computeMillis,
- mutateMillis,
- postProcessingMillis,
- nodePropertiesWritten,
- communityCount,
- ranIterations,
- didConverge,
- communityDistribution,
- configuration
-RETURN *
-
-// The following query has to be run in another Cypher shell, so run this command
-// in a different terminal first:
-//
-// ./cypher-shell -a $AURA_CONNECTION_URI -u $AURA_USERNAME -p $AURA_PASSWORD
-
-CALL gds.beta.listProgress()
-YIELD jobId, taskName, progress, progressBar
-RETURN *
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Import to run the long-running algorithm in a thread
-import threading
-# Import to use the sleep method
-import time
-
-
-# Method to call the label propagation algorithm from a thread
-def run_label_prop():
- label_prop_mutate_example_graph_query = """
- CALL gds.labelPropagation.mutate(
- 'example-graph',
- {mutateProperty: 'communityID'}
- )
- YIELD preProcessingMillis,
- computeMillis,
- mutateMillis,
- postProcessingMillis,
- nodePropertiesWritten,
- communityCount,
- ranIterations,
- didConverge,
- communityDistribution,
- configuration
- RETURN *
- """
-
- # Create the driver session
- with driver.session() as session:
- # Run query
- print("Running label propagation")
- results = session.run(label_prop_mutate_example_graph_query).data()
- # Prettify the first result
- print(json.dumps(results[0], indent=2, sort_keys=True))
-
-
-# Method to get and pretty-print the algorithm progress
-def run_list_progress():
- gds_list_progress_query = """
- CALL gds.beta.listProgress()
- YIELD jobId, taskName, progress, progressBar
- RETURN *
- """
-
- # Create the driver session
- with driver.session() as session:
- # Run query
- print('running list progress')
- results = session.run(gds_list_progress_query).data()
- # Prettify the first result
- print('list progress results: ')
- print(json.dumps(results[0], indent=2, sort_keys=True))
-
-
-# Create a thread for the label propagation algorithm and start it
-label_prop_query_thread = threading.Thread(target=run_label_prop)
-label_prop_query_thread.start()
-
-# Sleep for a few seconds so the label propagation query has time to get going
-print('Sleeping for 5 seconds')
-time.sleep(5)
-
-# Check the algorithm progress
-run_list_progress()
-
-# Sleep for a few more seconds
-print('Sleeping for 10 more seconds')
-time.sleep(10)
-
-# Check the algorithm progress again
-run_list_progress()
-
-# Block and wait for the algorithm thread to finish
-label_prop_query_thread.join()
-----
-=====
-====
-
-== Cleanup
-
-The in-memory graph can now be deleted.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-result = gds.graph.drop(g)
-
-print(result)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.graph.drop('example-graph')
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-delete_example_in_memory_graph_query = """
-CALL gds.graph.drop('example-graph')
-"""
-
-with driver.session() as session:
- # Run query
- results = session.run(delete_example_in_memory_graph_query).data()
-
- # Prettify the results
- print(json.dumps(results, indent=2, sort_keys=True, default=default))
-----
-=====
-====
-
-=== Closing the connection
-
-include::partial$aurads/close-connection.adoc[]
-
-include::partial$aurads/references.adoc[]
\ No newline at end of file
diff --git a/modules/ROOT/pages/aurads/tutorials/arrow-examples.adoc b/modules/ROOT/pages/aurads/tutorials/arrow-examples.adoc
deleted file mode 100644
index 99e751ea2..000000000
--- a/modules/ROOT/pages/aurads/tutorials/arrow-examples.adoc
+++ /dev/null
@@ -1,152 +0,0 @@
-[[connecting-arrow]]
-= Loading and streaming back data with Apache Arrow
-:description: This page describes how to use Apache Arrow on AuraDS.
-:notebook-name: Arrow_examples.ipynb
-
-include::partial$aurads/colab.adoc[]
-
-The Enterprise Edition of GDS installed on AuraDS includes an link:https://neo4j.com/docs/graph-data-science/current/installation/configure-apache-arrow-server/[Arrow Flight server], configured and running by default.
-The Arrow Flight server speeds up data-intensive processes such as:
-
-* Creating a graph directly from in-memory data.
-* Streaming node and relationship properties.
-* Streaming the relationship topology of a graph.
-
-There are two ways to use the Arrow Flight server with GDS:
-
-. By using the GDS Python client, which includes an Arrow Flight client.
-. By implementing a custom Arrow Flight client as explained in the link:https://neo4j.com/docs/graph-data-science/current/management-ops/graph-creation/graph-project-apache-arrow/[GDS manual].
-
-In the following examples we use the GDS client as it is the most convenient option.
-All the loading and streaming methods can be used without Arrow, but are more efficient if Arrow is available.
-
-== Setup
-
-[source, python]
-----
-%pip install 'graphdatascience>=1.7'
-
-from graphdatascience import GraphDataScience
-
-# Replace with the actual connection URI and credentials
-AURA_CONNECTION_URI = "neo4j+s://xxxxxxxx.databases.neo4j.io"
-AURA_USERNAME = "neo4j"
-AURA_PASSWORD = ""
-
-# When initialized, the client tries to use Arrow if it is available on the server.
-# This behaviour is controlled by the `arrow` parameter, which is set to `True` by default.
-gds = GraphDataScience(AURA_CONNECTION_URI, auth=(AURA_USERNAME, AURA_PASSWORD), aura_ds=True)
-
-# Necessary if Arrow is enabled (as it is by default on Aura)
-gds.set_database("neo4j")
-----
-
-You can call the `gds.debug.arrow()` method to verify that Arrow is enabled and running:
-
-[source, python]
-----
-gds.debug.arrow()
-----
-
-== Loading data
-
-You can load data directly into a graph using the link:https://neo4j.com/docs/graph-data-science-client/current/graph-object/#construct[`gds.graph.construct`] client method.
-
-The data must be passed as Pandas `DataFrame` objects, so we need to install and import the link:https://pandas.pydata.org/[`pandas`] library.
-
-[source, python]
-----
-%pip install pandas
-
-import pandas as pd
-----
-
-We can then create a graph as in the following example.
-The format of each `DataFrame` with the required columns is specified in the link:https://neo4j.com/docs/graph-data-science-client/current/graph-object/#construct[GDS manual].
-
-[source, python, role=nocollapse]
-----
-nodes = pd.DataFrame(
- {
- "nodeId": [0, 1, 2],
- "labels": ["Article", "Article", "Article"],
- "pages": [3, 7, 12],
- }
-)
-
-relationships = pd.DataFrame(
- {
- "sourceNodeId": [0, 1],
- "targetNodeId": [1, 2],
- "relationshipType": ["CITES", "CITES"],
- "times": [2, 1]
- }
-)
-
-article_graph = gds.graph.construct(
- "article-graph",
- nodes,
- relationships
-)
-----
-
-Now we can check that the graph has been created:
-
-[source, python]
-----
-gds.graph.list()
-----
-
-== Streaming node and relationship properties
-
-After creating the graph, you can read the node and relationship properties link:https://neo4j.com/docs/graph-data-science-client/current/graph-object/#graph-object-streaming-properties[as streams].
-
-[source, python]
-----
-# Read all the values for the node property `pages`
-gds.graph.nodeProperties.stream(article_graph, "pages")
-----
-
-[source, python]
-----
-# Read all the values for the relationship property `times`
-gds.graph.relationshipProperties.stream(article_graph, "times")
-----
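-
-The relationship topology (source and target node IDs) can also be streamed. A minimal sketch, assuming the `article_graph` object from above and a client version that provides `gds.graph.relationships.stream`:
-
-[source, python]
-----
-# Stream the source and target node IDs of all relationships
-gds.graph.relationships.stream(article_graph)
-----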
-
-== Performance
-
-To see the difference in performance when Arrow is available, we can measure the time needed to load a dataset into a graph.
-In this example we use a built-in link:https://neo4j.com/docs/graph-data-science-client/current/common-datasets/#_ogbn_graphs[OGBN dataset], so we need to install the `ogb` extra.
-
-[source, python]
-----
-%pip install 'graphdatascience[ogb]>=1.7'
-
-# Load and immediately drop the dataset to download and cache the data
-ogbn_arxiv = gds.graph.ogbn.load("ogbn-arxiv")
-ogbn_arxiv.drop()
-----
-
-We can then time the loading process.
-On an 8 GB AuraDS instance, this should take less than 30 s.
-
-[source, python]
-----
-%%timeit -n 1 -r 1
-
-# This call uses the cached dataset, so only the actual loading is timed
-ogbn_arxiv = gds.graph.ogbn.load("ogbn-arxiv")
-----
-
-With Arrow disabled by adding `arrow=False` to the `GraphDataScience` constructor, the same loading process would take more than 1 minute.
-Therefore, with this dataset, Arrow provides at least a 2x speedup.
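-
-To reproduce the comparison, a client with Arrow disabled can be created as follows. This is a minimal sketch reusing the connection details from the setup above; the variable name is illustrative:
-
-[source, python]
-----
-# Create a second client that does not use Arrow, for timing comparisons
-gds_no_arrow = GraphDataScience(
-    AURA_CONNECTION_URI,
-    auth=(AURA_USERNAME, AURA_PASSWORD),
-    aura_ds=True,
-    arrow=False
-)
-----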
-
-== Cleanup
-
-[source, python]
-----
-article_graph.drop()
-ogbn_arxiv.drop()
-
-gds.close()
-----
\ No newline at end of file
diff --git a/modules/ROOT/pages/aurads/tutorials/graph-catalog.adoc b/modules/ROOT/pages/aurads/tutorials/graph-catalog.adoc
deleted file mode 100644
index 30179d236..000000000
--- a/modules/ROOT/pages/aurads/tutorials/graph-catalog.adoc
+++ /dev/null
@@ -1,465 +0,0 @@
-[[graph-catalog]]
-= Projecting graphs and using the Graph Catalog
-:description: This page describes how to project graphs and use the Graph Catalog.
-:notebook-name: Projecting_graphs_and_using_the_Graph_Catalog_(GDS_client).ipynb
-
-include::partial$aurads/colab.adoc[]
-
-This example shows how to:
-
-* load Neo4j on-disk data into in-memory projected graphs;
-* use the link:{neo4j-docs-base-uri}/graph-data-science/current/management-ops/[Graph Catalog^] to manage projected graphs.
-
-include::partial$aurads/setup.adoc[]
-
-== Load data from Neo4j with native projections
-
-Native projections are used to load a graph stored on disk into memory. The link:{neo4j-docs-base-uri}/graph-data-science/current/management-ops/graph-creation/graph-project/[`gds.graph.project`^] procedure lets you project a graph by selecting the node labels, relationship types, and properties to include.
-
-The `gds.graph.project` procedure can use a <<_project_using_the_shorthand_syntax,"shorthand syntax">>, where the node and relationship projections are simply passed as single values or arrays, or an <<_project_using_the_extended_syntax,"extended syntax">>, where each node or relationship projection has its own configuration. The extended syntax is especially useful if additional transformations of the data or the graph structure are needed. Both methods are shown in this section, using the following graph as an example.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query to create an example graph on disk
-gds.run_cypher("""
- MERGE (a:EngineeringManagement {name: 'Alistair'})
- MERGE (j:EngineeringManagement {name: 'Jennifer'})
- MERGE (d:Developer {name: 'Leila'})
- MERGE (a)-[:MANAGES {start_date: 987654321}]->(d)
- MERGE (j)-[:MANAGES {start_date: 123456789, end_date: 987654321}]->(d)
-""")
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-MERGE (a:EngineeringManagement {name: 'Alistair'})
-MERGE (j:EngineeringManagement {name: 'Jennifer'})
-MERGE (d:Developer {name: 'Leila'})
-MERGE (a)-[:MANAGES {start_date: 987654321}]->(d)
-MERGE (j)-[:MANAGES {start_date: 123456789, end_date: 987654321}]->(d)
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query to create an example graph on disk
-write_example_graph_query = """
- MERGE (a:EngineeringManagement {name: 'Alistair'})
- MERGE (j:EngineeringManagement {name: 'Jennifer'})
- MERGE (d:Developer {name: 'Leila'})
- MERGE (a)-[:MANAGES {start_date: 987654321}]->(d)
- MERGE (j)-[:MANAGES {start_date: 123456789, end_date: 987654321}]->(d)
-"""
-
-# Create the driver session
-with driver.session() as session:
- session.run(write_example_graph_query)
-----
-=====
-====
-
-=== Project using the shorthand syntax
-
-In this example we use the shorthand syntax to simply project all node labels and relationship types.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-# Project a graph using the shorthand syntax
-shorthand_graph, result = gds.graph.project(
- "shorthand-example-graph",
- ["EngineeringManagement", "Developer"],
- ["MANAGES"]
-)
-
-print(result)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.graph.project(
- 'shorthand-example-graph',
- ['EngineeringManagement', 'Developer'],
- ['MANAGES']
-)
-YIELD graphName, nodeCount, relationshipCount
-RETURN *
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-shorthand_graph_create_call = """
- CALL gds.graph.project(
- 'shorthand-example-graph',
- ['EngineeringManagement', 'Developer'],
- ['MANAGES']
- )
- YIELD graphName, nodeCount, relationshipCount
- RETURN *
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Call to project a graph using the shorthand syntax
- result = session.run(shorthand_graph_create_call).data()
-
- # Prettify the result
- print(json.dumps(result, indent=2, sort_keys=True))
-----
-=====
-====
-
-=== Project using the extended syntax
-
-In this example we use the extended syntax for link:{neo4j-docs-base-uri}/graph-data-science/current/management-ops/graph-creation/graph-project/#node-projection-syntax[node^] and link:{neo4j-docs-base-uri}/graph-data-science/current/management-ops/graph-creation/graph-project/#relationship-projection-syntax[relationship^] projections to:
-
-* transform the `EngineeringManagement` and `Developer` labels to `PersonEM` and `PersonD` respectively;
-* transform the _directed_ `MANAGES` relationship into the `KNOWS` _undirected_ relationship;
-* keep the `start_date` and `end_date` relationship properties, adding a default value of `999999999` to `end_date`.
-
-The projected graph becomes the following:
-
-[source, cypher]
-----
-(:PersonEM {first_name: 'Alistair'})-
- [:KNOWS {start_date: 987654321, end_date: 999999999}]-
- (:PersonD {first_name: 'Leila'})-
- [:KNOWS {start_date: 123456789, end_date: 987654321}]-
- (:PersonEM {first_name: 'Jennifer'})
-----
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-# Project a graph using the extended syntax
-extended_form_graph, result = gds.graph.project(
- "extended-form-example-graph",
- {
- "PersonEM": {
- "label": "EngineeringManagement"
- },
- "PersonD": {
- "label": "Developer"
- }
- },
- {
- "KNOWS": {
- "type": "MANAGES",
- "orientation": "UNDIRECTED",
- "properties": {
- "start_date": {
- "property": "start_date"
- },
- "end_date": {
- "property": "end_date",
- "defaultValue": 999999999
- }
- }
- }
- }
-)
-
-print(result)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.graph.project(
- 'extended-form-example-graph',
- {
- PersonEM: {
- label: 'EngineeringManagement'
- },
- PersonD: {
- label: 'Developer'
- }
- },
- {
- KNOWS: {
- type: 'MANAGES',
- orientation: 'UNDIRECTED',
- properties: {
- start_date: {
- property: 'start_date'
- },
- end_date: {
- property: 'end_date',
- defaultValue: 999999999
- }
- }
- }
- }
-)
-YIELD graphName, nodeCount, relationshipCount
-RETURN *
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-extended_form_graph_create_call = """
- CALL gds.graph.project(
- 'extended-form-example-graph',
- {
- PersonEM: {
- label: 'EngineeringManagement'
- },
- PersonD: {
- label: 'Developer'
- }
- },
- {
- KNOWS: {
- type: 'MANAGES',
- orientation: 'UNDIRECTED',
- properties: {
- start_date: {
- property: 'start_date'
- },
- end_date: {
- property: 'end_date',
- defaultValue: 999999999
- }
- }
- }
- }
- )
- YIELD graphName, nodeCount, relationshipCount
- RETURN *
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Call to project a graph using the extended syntax
- result = session.run(extended_form_graph_create_call).data()
-
- # Prettify the results
- print(json.dumps(result, indent=2, sort_keys=True))
-----
-=====
-====
-
-== Use the Graph Catalog
-
-The Graph Catalog can be used to retrieve information on and manage the projected graphs.
-
-=== List all the graphs
-
-The link:{neo4j-docs-base-uri}/graph-data-science/current/management-ops/graph-list/[`gds.graph.list`^] procedure can be used to list all the graphs currently stored in memory.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-# List all in-memory graphs
-all_graphs = gds.graph.list()
-
-print(all_graphs)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.graph.list()
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-show_in_memory_graphs_call = """
- CALL gds.graph.list()
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run the Cypher procedure
- results = session.run(show_in_memory_graphs_call).data()
-
- # Prettify the results
- print(json.dumps(results, indent=2, sort_keys=True, default=default))
-----
-=====
-====
-
-=== Check that a graph exists
-
-The link:{neo4j-docs-base-uri}/graph-data-science/current/management-ops/graph-exists/[`gds.graph.exists`^] procedure can be called to check for the existence of a graph by its name.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-# Check whether the "shorthand-example-graph" graph exists in memory
-graph_exists = gds.graph.exists("shorthand-example-graph")
-
-print(graph_exists)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.graph.exists('shorthand-example-graph')
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-check_graph_exists_call = """
-    CALL gds.graph.exists('shorthand-example-graph')
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run the Cypher procedure and print the result
- print(session.run(check_graph_exists_call).data())
-----
-=====
-====
-
-=== Drop a graph
-
-When a graph is no longer needed, it can be dropped to free up memory using the link:{neo4j-docs-base-uri}/graph-data-science/current/management-ops/graph-drop/[`gds.graph.drop`^] procedure.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-# Drop a graph object and keep the result of the call
-result = gds.graph.drop(shorthand_graph)
-
-# Print the result
-print(result)
-
-# Drop a graph object and just print the result of the call
-gds.graph.drop(extended_form_graph)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.graph.drop('shorthand-example-graph');
-
-CALL gds.graph.drop('extended-form-example-graph');
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-delete_shorthand_graph_call = """
- CALL gds.graph.drop('shorthand-example-graph')
-"""
-
-delete_extended_form_graph_call = """
- CALL gds.graph.drop('extended-form-example-graph')
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Drop a graph and keep the result of the call
- result = session.run(delete_shorthand_graph_call).data()
-
- # Prettify the result
- print(json.dumps(result, indent=2, sort_keys=True, default=default))
-
- # Drop a graph discarding the result of the call
- session.run(delete_extended_form_graph_call).data()
-----
-=====
-====
-
-== Cleanup
-
-When the projected graphs are dropped, the underlying data on disk are not deleted. If such data are no longer needed, they must be deleted manually with a Cypher query.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-# Delete on-disk data
-gds.run_cypher("""
- MATCH (example)
- WHERE example:EngineeringManagement OR example:Developer
- DETACH DELETE example
-""")
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-MATCH (example)
-WHERE example:EngineeringManagement OR example:Developer
-DETACH DELETE example;
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-delete_example_graph_query = """
- MATCH (example)
- WHERE example:EngineeringManagement OR example:Developer
- DETACH DELETE example
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run Cypher call
- print(session.run(delete_example_graph_query).data())
-----
-=====
-====
-
-=== Closing the connection
-
-include::partial$aurads/close-connection.adoc[]
-
-include::partial$aurads/references.adoc[]
\ No newline at end of file
diff --git a/modules/ROOT/pages/aurads/tutorials/memory-estimation.adoc b/modules/ROOT/pages/aurads/tutorials/memory-estimation.adoc
deleted file mode 100644
index aea34605c..000000000
--- a/modules/ROOT/pages/aurads/tutorials/memory-estimation.adoc
+++ /dev/null
@@ -1,216 +0,0 @@
-[[memory-estimation]]
-= Estimating memory usage and resizing an instance
-:description: This page describes how to estimate the memory needed to run an algorithm.
-:generated-graph-size: 50000000
-:notebook-name: Estimating_memory_usage_and_resizing_an_instance_(GDS_client).ipynb
-
-include::partial$aurads/colab.adoc[]
-
-This example shows how to:
-
-* use the link:{neo4j-docs-base-uri}/graph-data-science/current/common-usage/memory-estimation/[memory estimation^] mode to estimate the memory requirements for an algorithm before running it
-* resize an AuraDS instance to accommodate the algorithm memory requirements
-
-include::partial$aurads/setup.adoc[]
-
-include::partial$aurads/generated-graph.adoc[]
-
-NOTE: The graph is fairly large, so the generation procedure will take a few minutes to complete.
-
-== Run the `estimate` mode
-
-Estimating the memory requirements of an algorithm on an in-memory graph is useful to determine whether the current AuraDS instance has enough resources to run the algorithm to completion.
-
-The Graph Data Science library has guard rails built in: if an algorithm is estimated to use more RAM than is available, an exception is raised. In this case, the AuraDS instance can be resized before running the algorithm again.
-
-In the following example we get a memory estimate for running the Label Propagation algorithm on the generated graph. The estimated memory is between 381 MiB and 4477 MiB, which is more than an 8 GB instance has available (4004 MiB).
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-result = gds.labelPropagation.mutate.estimate(
- g,
- mutateProperty="communityID"
-)
-
-print(result)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.labelPropagation.mutate.estimate(
- 'example-graph',
- {mutateProperty: 'communityID'}
-)
-YIELD nodeCount,
- relationshipCount,
- bytesMin,
- bytesMax,
- requiredMemory
-RETURN *
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query
-page_rank_mutate_estimate_example_graph_query = """
- CALL gds.labelPropagation.mutate.estimate(
- 'example-graph',
- {mutateProperty: 'communityID'}
- )
- YIELD nodeCount,
- relationshipCount,
- bytesMin,
- bytesMax,
- requiredMemory
- RETURN *
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run query
-    results = session.run(label_prop_mutate_estimate_example_graph_query).data()
-
- # Prettify the result
- print(json.dumps(results, indent=2, sort_keys=True))
-----
-=====
-====
-
-The `mutate` procedure hits the guard rails on an 8 GB instance, raising an exception that suggests resizing the AuraDS instance.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-result = gds.labelPropagation.mutate(
- g,
- mutateProperty="communityID"
-)
-
-print(result)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.labelPropagation.mutate(
- 'example-graph',
- {mutateProperty: 'communityID'}
-)
-YIELD preProcessingMillis,
- computeMillis,
- mutateMillis,
- postProcessingMillis,
- nodePropertiesWritten,
- communityCount,
- ranIterations,
- didConverge,
- communityDistribution,
- configuration
-RETURN *
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query
-page_rank_mutate_example_graph_query = """
- CALL gds.labelPropagation.mutate(
- 'example-graph',
- {mutateProperty: 'communityID'}
- )
- YIELD preProcessingMillis,
- computeMillis,
- mutateMillis,
- postProcessingMillis,
- nodePropertiesWritten,
- communityCount,
- ranIterations,
- didConverge,
- communityDistribution,
- configuration
- RETURN *
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run query
-    results = session.run(label_prop_mutate_example_graph_query).data()
-
- # Prettify the result
- print(json.dumps(results, indent=2, sort_keys=True))
-----
-=====
-====
-
-== Resize the AuraDS instance
-
-You need to resize the instance to the next available size (16 GB) to continue. An AuraDS instance can be resized from the https://console.neo4j.io/?product=aura-ds[Neo4j Aura Console^] homepage. For more information, see the xref:aurads/managing-instances/instance-actions#_resizing_an_instance[Instance actions] section.
-
-NOTE: Resizing an AuraDS instance incurs a short amount of downtime.
-
-After resizing, wait a few seconds until the projected graph is reloaded, then run the `mutate` step again. This time no exception is thrown and the step completes successfully.
-
-== Cleanup
-
-The in-memory graph can now be deleted.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-result = gds.graph.drop(g)
-
-print(result)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.graph.drop('example-graph')
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-delete_example_in_memory_graph_query = """
- CALL gds.graph.drop('example-graph')
-"""
-
-with driver.session() as session:
- # Run query
- results = session.run(delete_example_in_memory_graph_query).data()
-
- # Prettify the results
- print(json.dumps(results, indent=2, sort_keys=True, default=default))
-----
-=====
-====
-
-=== Closing the connection
-
-include::partial$aurads/close-connection.adoc[]
-
-include::partial$aurads/references.adoc[]
\ No newline at end of file
diff --git a/modules/ROOT/pages/aurads/tutorials/model-catalog.adoc b/modules/ROOT/pages/aurads/tutorials/model-catalog.adoc
deleted file mode 100644
index eef718244..000000000
--- a/modules/ROOT/pages/aurads/tutorials/model-catalog.adoc
+++ /dev/null
@@ -1,693 +0,0 @@
-[[model-catalog]]
-= Persisting and sharing machine learning models
-:description: This page describes how to use the model catalog.
-:leading-underscore: _
-:notebook-name: Persisting_and_sharing_machine_learning_models_(GDS_client).ipynb
-
-include::partial$aurads/colab.adoc[]
-
-This example shows how to train, save, publish, and drop a machine learning model using the link:{neo4j-docs-base-uri}/graph-data-science/current/model-catalog/[Model Catalog].
-
-include::partial$aurads/setup.adoc[]
-
-== Create an example graph
-
-We start by creating some basic graph data first.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-gds.run_cypher("""
- MERGE (dan:Person:ExampleData {name: 'Dan', age: 20, heightAndWeight: [185, 75]})
- MERGE (annie:Person:ExampleData {name: 'Annie', age: 12, heightAndWeight: [124, 42]})
- MERGE (matt:Person:ExampleData {name: 'Matt', age: 67, heightAndWeight: [170, 80]})
- MERGE (jeff:Person:ExampleData {name: 'Jeff', age: 45, heightAndWeight: [192, 85]})
- MERGE (brie:Person:ExampleData {name: 'Brie', age: 27, heightAndWeight: [176, 57]})
- MERGE (elsa:Person:ExampleData {name: 'Elsa', age: 32, heightAndWeight: [158, 55]})
- MERGE (john:Person:ExampleData {name: 'John', age: 35, heightAndWeight: [172, 76]})
-
- MERGE (dan)-[:KNOWS {relWeight: 1.0}]->(annie)
- MERGE (dan)-[:KNOWS {relWeight: 1.6}]->(matt)
- MERGE (annie)-[:KNOWS {relWeight: 0.1}]->(matt)
- MERGE (annie)-[:KNOWS {relWeight: 3.0}]->(jeff)
- MERGE (annie)-[:KNOWS {relWeight: 1.2}]->(brie)
- MERGE (matt)-[:KNOWS {relWeight: 10.0}]->(brie)
- MERGE (brie)-[:KNOWS {relWeight: 1.0}]->(elsa)
- MERGE (brie)-[:KNOWS {relWeight: 2.2}]->(jeff)
- MERGE (john)-[:KNOWS {relWeight: 5.0}]->(jeff)
-
- RETURN True AS exampleDataCreated
-""")
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-MERGE (dan:Person:ExampleData {name: 'Dan', age: 20, heightAndWeight: [185, 75]})
-MERGE (annie:Person:ExampleData {name: 'Annie', age: 12, heightAndWeight: [124, 42]})
-MERGE (matt:Person:ExampleData {name: 'Matt', age: 67, heightAndWeight: [170, 80]})
-MERGE (jeff:Person:ExampleData {name: 'Jeff', age: 45, heightAndWeight: [192, 85]})
-MERGE (brie:Person:ExampleData {name: 'Brie', age: 27, heightAndWeight: [176, 57]})
-MERGE (elsa:Person:ExampleData {name: 'Elsa', age: 32, heightAndWeight: [158, 55]})
-MERGE (john:Person:ExampleData {name: 'John', age: 35, heightAndWeight: [172, 76]})
-
-MERGE (dan)-[:KNOWS {relWeight: 1.0}]->(annie)
-MERGE (dan)-[:KNOWS {relWeight: 1.6}]->(matt)
-MERGE (annie)-[:KNOWS {relWeight: 0.1}]->(matt)
-MERGE (annie)-[:KNOWS {relWeight: 3.0}]->(jeff)
-MERGE (annie)-[:KNOWS {relWeight: 1.2}]->(brie)
-MERGE (matt)-[:KNOWS {relWeight: 10.0}]->(brie)
-MERGE (brie)-[:KNOWS {relWeight: 1.0}]->(elsa)
-MERGE (brie)-[:KNOWS {relWeight: 2.2}]->(jeff)
-MERGE (john)-[:KNOWS {relWeight: 5.0}]->(jeff)
-
-RETURN True AS exampleDataCreated
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query
-create_example_graph_on_disk_query = """
- MERGE (dan:Person:ExampleData {name: 'Dan', age: 20, heightAndWeight: [185, 75]})
- MERGE (annie:Person:ExampleData {name: 'Annie', age: 12, heightAndWeight: [124, 42]})
- MERGE (matt:Person:ExampleData {name: 'Matt', age: 67, heightAndWeight: [170, 80]})
- MERGE (jeff:Person:ExampleData {name: 'Jeff', age: 45, heightAndWeight: [192, 85]})
- MERGE (brie:Person:ExampleData {name: 'Brie', age: 27, heightAndWeight: [176, 57]})
- MERGE (elsa:Person:ExampleData {name: 'Elsa', age: 32, heightAndWeight: [158, 55]})
- MERGE (john:Person:ExampleData {name: 'John', age: 35, heightAndWeight: [172, 76]})
-
- MERGE (dan)-[:KNOWS {relWeight: 1.0}]->(annie)
- MERGE (dan)-[:KNOWS {relWeight: 1.6}]->(matt)
- MERGE (annie)-[:KNOWS {relWeight: 0.1}]->(matt)
- MERGE (annie)-[:KNOWS {relWeight: 3.0}]->(jeff)
- MERGE (annie)-[:KNOWS {relWeight: 1.2}]->(brie)
- MERGE (matt)-[:KNOWS {relWeight: 10.0}]->(brie)
- MERGE (brie)-[:KNOWS {relWeight: 1.0}]->(elsa)
- MERGE (brie)-[:KNOWS {relWeight: 2.2}]->(jeff)
- MERGE (john)-[:KNOWS {relWeight: 5.0}]->(jeff)
-
- RETURN True AS exampleDataCreated
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run query
- result = session.run(create_example_graph_on_disk_query).data()
-
- # Prettify the result
- print(json.dumps(result, indent=2, sort_keys=True))
-----
-=====
-====
-
-We then project an in-memory graph from the data just created.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-g, result = gds.graph.project(
- "example_graph_for_graphsage",
- {
- "Person": {
- "label": "ExampleData",
- "properties": ["age", "heightAndWeight"]
- }
- },
- {
- "KNOWS": {
- "type": "KNOWS",
- "orientation": "UNDIRECTED",
- "properties": ["relWeight"]
- }
- }
-)
-
-print(result)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.graph.project(
- 'example_graph_for_graphsage',
- {
- Person: {
- label: 'ExampleData',
- properties: ['age', 'heightAndWeight']
- }
- },
- {
- KNOWS: {
- type: 'KNOWS',
- orientation: 'UNDIRECTED',
- properties: ['relWeight']
- }
- }
-)
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query
-create_example_graph_in_memory_query = """
- CALL gds.graph.project(
- 'example_graph_for_graphsage',
- {
- Person: {
- label: 'ExampleData',
- properties: ['age', 'heightAndWeight']
- }
- },
- {
- KNOWS: {
- type: 'KNOWS',
- orientation: 'UNDIRECTED',
- properties: ['relWeight']
- }
- }
- )
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run query
- result = session.run(create_example_graph_in_memory_query).data()
-
- # Prettify the result
- print(json.dumps(result, indent=2, sort_keys=True))
-----
-=====
-====
-
-== Train a model
-
-Machine learning algorithms that support the `train` mode produce trained models which are stored in the Model Catalog. Similarly, `predict` procedures can use such trained models to produce predictions. In this example we train a model for the link:{neo4j-docs-base-uri}/graph-data-science/current/machine-learning/node-embeddings/graph-sage/[GraphSAGE algorithm^] using the `train` mode.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-model, result = gds.beta.graphSage.train(
- g,
- modelName="example_graph_model_for_graphsage",
- featureProperties=["age", "heightAndWeight"],
- aggregator="mean",
- activationFunction="sigmoid",
- sampleSizes=[25, 10]
-)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
- CALL gds.beta.graphSage.train(
- 'example_graph_for_graphsage',
- {
- modelName: 'example_graph_model_for_graphsage',
- featureProperties: ['age', 'heightAndWeight'],
- aggregator: 'mean',
- activationFunction: 'sigmoid',
- sampleSizes: [25, 10]
- }
- )
- YIELD modelInfo as info
- RETURN
- info.name as modelName,
- info.metrics.didConverge as didConverge,
- info.metrics.ranEpochs as ranEpochs,
- info.metrics.epochLosses as epochLosses
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query
-train_graph_sage_on_in_memory_graph_query = """
- CALL gds.beta.graphSage.train(
- 'example_graph_for_graphsage',
- {
- modelName: 'example_graph_model_for_graphsage',
- featureProperties: ['age', 'heightAndWeight'],
- aggregator: 'mean',
- activationFunction: 'sigmoid',
- sampleSizes: [25, 10]
- }
- )
- YIELD modelInfo as info
- RETURN
- info.name as modelName,
- info.metrics.didConverge as didConverge,
- info.metrics.ranEpochs as ranEpochs,
- info.metrics.epochLosses as epochLosses
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run query
- result = session.run(train_graph_sage_on_in_memory_graph_query).data()
-
- # Prettify the result
- print(json.dumps(result, indent=2, sort_keys=True))
-----
-=====
-====
-
-== View the model catalog
-
-We can use the link:{neo4j-docs-base-uri}/graph-data-science/current/model-catalog/list/[`gds.beta.model.list`^] procedure to get information on all the models currently available in the catalog. Along with information on the graph schema, the model name, and the training configuration, the result of the call contains the following fields:
-
-* `loaded`: flag denoting if the model is in memory (`true`) or available on disk (`false`)
-* `stored`: flag denoting whether the model has been persisted to disk
-* `shared`: flag denoting whether the model has been published, making it accessible to all users
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-results = gds.beta.model.list()
-
-print(results)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.beta.model.list()
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query
-list_model_catalog_query = """
- CALL gds.beta.model.list()
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run query
- results = session.run(list_model_catalog_query).data()
-
- # Prettify the results
- print(json.dumps(results, indent=2, sort_keys=True, default=default))
-----
-=====
-====
-
-== Save a model to disk
-
-The link:{neo4j-docs-base-uri}/graph-data-science/current/model-catalog/store/[`gds.alpha.model.store`^] procedure can be used to persist a model to disk. This is useful both to keep models for later reuse and to free up memory.
-
-[WARNING]
-====
-Not all the models can be saved to disk. A list of the supported models can be found on the link:https://neo4j.com/docs/graph-data-science/current/model-catalog/store/#catalog-model-store[GDS manual].
-
-*If a model cannot be saved to disk, it will be lost when the AuraDS instance is restarted.*
-====
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-result = gds.alpha.model.store(model)
-
-print(result)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.alpha.model.store("example_graph_model_for_graphsage")
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query
-save_graph_sage_model_to_disk_query = """
- CALL gds.alpha.model.store("example_graph_model_for_graphsage")
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run query
- result = session.run(save_graph_sage_model_to_disk_query).data()
-
- # Prettify the result
- print(json.dumps(result, indent=2, sort_keys=True))
-----
-=====
-====
-
-If we list the model catalog again after persisting a model, we can see that the `stored` flag for that model has been set to `true`.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-results = gds.beta.model.list()
-
-print(results)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-CALL gds.beta.model.list()
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query
-list_model_catalog_query = """
- CALL gds.beta.model.list()
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run query
- results = session.run(list_model_catalog_query).data()
-
- # Prettify the results
- print(json.dumps(results, indent=2, sort_keys=True, default=default))
-----
-=====
-====
-
-== Share a model with other users
-
-After a model has been created, it can be useful to make it available to other users for different use cases.
-
-IMPORTANT: A model can only be shared with other users of the same AuraDS instance.
-
-=== Create a new user
-
-In order to see how this works in practice on AuraDS, we first of all need to link:{neo4j-docs-base-uri}/operations-manual/current/authentication-authorization/manage-users/[create another user^] to share the model with.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-# Switch to the "system" database to run the
-# "CREATE USER" admin command
-gds.set_database("system")
-
-gds.run_cypher("""
- CREATE USER testUser IF NOT EXISTS
- SET PASSWORD 'password'
- SET PASSWORD CHANGE NOT REQUIRED
-""")
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-:connect system
-
-CREATE USER testUser IF NOT EXISTS
-SET PASSWORD 'password'
-SET PASSWORD CHANGE NOT REQUIRED
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query
-create_a_new_user_query = """
- CREATE USER testUser IF NOT EXISTS
- SET PASSWORD 'password'
- SET PASSWORD CHANGE NOT REQUIRED
-"""
-
-# Create the driver session using the "system" database
-with driver.session(database="system") as session:
- # Run query
- result = session.run(create_a_new_user_query).data()
-
- # Prettify the result
- print(json.dumps(result, indent=2, sort_keys=True))
-----
-=====
-====
-
-=== Publish the model
-
-A model can be _published_ (made accessible to other users) using the link:{neo4j-docs-base-uri}/graph-data-science/current/model-catalog/publish/[`gds.alpha.model.publish`] procedure. Upon publication, the model name is updated by appending `{leading-underscore}public` to its original name.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-# Switch back to the default "neo4j" database
-# to publish the model
-gds.set_database("neo4j")
-
-model_public = gds.alpha.model.publish(model)
-
-print(model_public)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-:connect neo4j
-
-CALL gds.alpha.model.publish('example_graph_model_for_graphsage')
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Cypher query
-publish_graph_sage_model_to_disk_query = """
- CALL gds.alpha.model.publish('example_graph_model_for_graphsage')
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run query
- result = session.run(publish_graph_sage_model_to_disk_query).data()
-
- # Prettify the result
- print(json.dumps(result, indent=2, sort_keys=True, default=default))
-----
-=====
-====
-
-=== View the model as a different user
-
-In order to verify that the published model is visible to the user we have just created, we need to create a new client (or driver) session. We can then use it to run the `gds.beta.model.list` procedure again under the new user and verify that the model is included in the list.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-test_user_gds = GraphDataScience(
- AURA_CONNECTION_URI,
- auth=("testUser", "password"),
- aura_ds=True
-)
-
-results = test_user_gds.beta.model.list()
-
-print(results)
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-// First, open a new Cypher shell with the following command:
-//
-// ./cypher-shell -a $AURA_CONNECTION_URI -u testUser -p password
-
-CALL gds.beta.model.list()
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-test_user_driver = GraphDatabase.driver(
- AURA_CONNECTION_URI,
- auth=("testUser", "password")
-)
-
-# Create the driver session
-with test_user_driver.session() as session:
- # Run query
- results = session.run(list_model_catalog_query).data()
-
- # Prettify the results
- print(json.dumps(results, indent=2, sort_keys=True, default=default))
-----
-=====
-====
-
-== Cleanup
-
-The in-memory graphs, the data in the Neo4j database, the models, and the test user can now all be deleted.
-
-[.tabbed-example]
-====
-[.include-with-GDS-client]
-=====
-[source, python, subs=attributes+]
-----
-# Delete the example dataset
-gds.run_cypher("""
- MATCH (example:ExampleData)
- DETACH DELETE example
-""")
-
-# Delete the projected graph from memory
-gds.graph.drop(g)
-
-# Drop the model from memory
-gds.beta.model.drop(model_public)
-
-# Delete the model from disk
-gds.alpha.model.delete(model_public)
-
-# Switch to the "system" database to delete the example user
-gds.set_database("system")
-
-gds.run_cypher("""
- DROP USER testUser
-""")
-----
-=====
-
-[.include-with-Cypher]
-=====
-[source, cypher, subs=attributes+]
-----
-// Delete the example dataset from the database
-MATCH (example:ExampleData)
-DETACH DELETE example;
-
-// Delete the projected graph from memory
-CALL gds.graph.drop("example_graph_for_graphsage");
-
-// Drop the model from memory
-CALL gds.beta.model.drop("example_graph_model_for_graphsage_public");
-
-// Delete the model from disk
-CALL gds.alpha.model.delete("example_graph_model_for_graphsage_public");
-
-// Delete the example user
-DROP USER testUser;
-----
-=====
-
-[.include-with-Python-driver]
-=====
-[source, python, subs=attributes+]
-----
-# Delete the example dataset from the database
-delete_example_graph_query = """
- MATCH (example:ExampleData)
- DETACH DELETE example
-"""
-
-# Delete the projected graph from memory
-drop_in_memory_graph_query = """
- CALL gds.graph.drop("example_graph_for_graphsage")
-"""
-
-# Drop the model from memory
-drop_example_models_query = """
- CALL gds.beta.model.drop("example_graph_model_for_graphsage_public")
-"""
-
-# Delete the model from disk
-delete_example_models_query = """
- CALL gds.alpha.model.delete("example_graph_model_for_graphsage_public")
-"""
-
-# Delete the example user
-drop_example_user_query = """
- DROP USER testUser
-"""
-
-# Create the driver session
-with driver.session() as session:
- # Run queries
- print(session.run(delete_example_graph_query).data())
- print(session.run(drop_in_memory_graph_query).data())
- print(session.run(drop_example_models_query).data())
- print(session.run(delete_example_models_query).data())
-
-# Create another driver session on the system database
-# to drop the test user
-with driver.session(database='system') as session:
- print(session.run(drop_example_user_query).data())
-
-driver.close()
-test_user_driver.close()
-----
-=====
-====
-
-=== Closing the connection
-
-include::partial$aurads/close-connection.adoc[]
-
-include::partial$aurads/references.adoc[]
\ No newline at end of file
diff --git a/modules/ROOT/pages/billing.adoc b/modules/ROOT/pages/billing.adoc
new file mode 100644
index 000000000..08b6513a9
--- /dev/null
+++ b/modules/ROOT/pages/billing.adoc
@@ -0,0 +1,60 @@
+[[aura-Billing]]
+= Billing
+:description: Consumption reporting allows customers to monitor their billing and credit consumption.
+
+label:AuraDB-Professional[]
+label:AuraDB-Business-Critical[]
+label:AuraDB-Virtual-Dedicated-Cloud[]
+
+Neo4j offers cloud services through two billing models: prepaid consumption plans, where credits are deducted from the available balance each month, and pay-as-you-go, where usage is invoiced and charged to your credit card monthly in arrears.
+
+At the top-right corner of the Billing page, there's a summary of the total amount due for the current month.
+This value is shown in Aura Credits (ACU), providing a real-time estimate of your projected billing.
+
+== Consumption report
+
+The consumption report gives real-time insights into resource usage for the current project, including both running and paused states.
+You can also export a CSV file containing usage for a selected period.
+
+The default time frame is month-to-date.
+The consumption report is updated hourly.
+
+.Consumption report example
+[.shadow]
+image::consumptionreport.png[]
+
+== Required roles
+
+The report is available to the following roles: `Organization Owner`, `Organization Admin`, and `Project Admin`.
+
+To view the report, a payment method must be attached to the account.
+If no payment method is attached, the *Billing* page of the project shows an *Add new payment information* button.
+
+=== Filters
+
+* Filter the usage data by predefined and custom date intervals.
+* Look back for a period of up to one year.
+* Filter by `Last 24 hours`, `Last 7 days`, `Last 30 days`, `Last 90 days` or a `Custom range`.
+
+=== Instances and Sessions
+
+There are two tabs for reviewing consumption by type:
+
+* The *Instances* tab includes a billing summary for database instances.
+* The *Sessions* tab includes a billing summary for any Graph Analytics sessions.
+
+=== Product name and pricing
+
+Refer to link:https://console-preview.neo4j.io/pricing[Aura Pricing] for the specific product names and their prices.
+If a database is paused, its charge is reduced to 20% of the standard hourly rate.
+
+=== Billing status
+
+The billing status can be `ongoing` (currently being billed) or `ended` (historical usage).
+
+=== Units of measure
+
+Usage is displayed in the specified unit of measure.
+GB-hours usage is calculated by multiplying the number of hours a database is running (whether actively used or not) by the memory size in gigabytes (GB).
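+For example, a hypothetical 4 GB instance that runs for 720 hours in a month consumes 4 GB × 720 hours = 2,880 GB-hours, regardless of how actively it is queried.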
+
diff --git a/modules/ROOT/pages/platform/cloud-providers.adoc b/modules/ROOT/pages/cloud-providers.adoc
similarity index 69%
rename from modules/ROOT/pages/platform/cloud-providers.adoc
rename to modules/ROOT/pages/cloud-providers.adoc
index 1ee0bb5f1..3b5f820b7 100644
--- a/modules/ROOT/pages/platform/cloud-providers.adoc
+++ b/modules/ROOT/pages/cloud-providers.adoc
@@ -1,19 +1,21 @@
[[aura-cloud-providers]]
= Aura with cloud provider marketplaces
+:description: This page provides information about Neo4j Aura with cloud marketplaces.
+:page-aliases: platform/cloud-providers.adoc
-== Aura Enterprise and Business Critical
+== AuraDB Virtual Dedicated Cloud, AuraDS Enterprise, and AuraDB Business Critical
-label:AuraDB-Enterprise[]
+label:AuraDB-Virtual-Dedicated-Cloud[]
label:AuraDS-Enterprise[]
label:AuraDB-Business-Critical[]
-You can purchase Neo4j Aura Enterprise and Neo4j Aura Business Critical via private offer through the following cloud provider marketplaces:
+You can purchase AuraDB Virtual Dedicated Cloud, AuraDS Enterprise, and AuraDB Business Critical via private offer through the following cloud provider marketplaces:
* Amazon Web Services (AWS)
* Microsoft Azure (Azure)
* Google Cloud Platform (GCP)
-To discuss private offers, contact marketplace-sales@neo4j.com.
+mailto:marketplace-sales@neo4j.com[Contact us] to discuss private offers.
== Aura Professional
@@ -23,8 +25,14 @@ label:AuraDS-Professional[]
You can purchase Neo4j Aura Professional on a pay-as-you-go basis through the following cloud provider marketplaces:
* Amazon Web Services (AWS)
+* Microsoft Azure (Azure)
* Google Cloud Platform (GCP)
+[NOTE]
+====
+Credits from cloud marketplace providers cannot be used for payment towards Neo4j Aura.
+====
+
Purchasing Neo4j Aura Professional through a cloud provider marketplace gives you access to integrated billing and usage reporting in your chosen cloud provider's console.
=== AWS
@@ -32,14 +40,13 @@ Purchasing Neo4j Aura Professional through a cloud provider marketplace gives yo
[discrete]
==== 1. Purchase the service
-To get started, visit the https://aws.amazon.com/marketplace/pp/prodview-2t3o7mnw5ypee[Neo4j Aura Professional AWS Marketplace page] and select *View Purchase options*.
+To get started, visit the link:https://aws.amazon.com/marketplace/pp/prodview-2t3o7mnw5ypee?trk=176b570f-20dd-4b84-aa7e-cae53990fe91&sc_channel=el&source=neo4j[Neo4j Aura Professional AWS Marketplace page] and select *View Purchase options*.
-From here you will need to select the *Neo4j Aura Professional* Contract option, decide if you would like to auto-renew your contract when it ends, and then select *Create contract* and *Pay now*.
+From here you need to select the *Neo4j Aura Professional* Contract option, decide if you would like to auto-renew your contract when it ends, and then select *Create contract* and *Pay now*.
+
+
+Note that while you are shown a $0 yearly contract option, pricing is pay-as-you-go based on usage and not a fixed subscription service.
-[NOTE]
-====
-While you are shown a $0 yearly contract option, pricing is pay-as-you-go based on usage and not a fixed subscription service.
-====
[discrete]
==== 2. Set up your account
@@ -49,10 +56,7 @@ To start using Neo4j Aura, select *Click here to set up your account* to be dire
If you are not already logged in to the Aura Console, you will be taken to the Neo4j Aura login/sign-up page.
From here you can either log in with an existing Neo4j Aura account or create a new one.
-[NOTE]
-====
You do not need to use the same email address for your Neo4j Aura account as your AWS account.
-====
If you are creating a Neo4j Aura account for the first time, you will need to confirm your email address and accept the Neo4j Aura Terms of Service before you can access the Aura Console.
@@ -76,7 +80,8 @@ Purchasing from the GCP Marketplace requires the `Billing Account Administrator
==== 2. Choose a project
If you purchase the service at the top level of your GCP account, you'll need to choose a target project.
-You will only need to _purchase_ Neo4j Aura for GCP once, as you can then enable it on a project-by-project basis. However, you still need to choose a target project when you first purchase the service.
+You will only need to _purchase_ Neo4j Aura for GCP once, as you can then enable it on a project-by-project basis.
+However, you still need to choose a target project when you first purchase the service.
[discrete]
==== 3. Enable the service
@@ -95,6 +100,9 @@ When this is first set up, you should have no billing history.
[discrete]
==== 4. Complete the set up
+pass:[]
+pass:[]
+
To start using Neo4j Aura, select *MANAGE VIA NEO4J, INC.* to be directed to the Aura Console.
[NOTE]
@@ -103,6 +111,9 @@ When you click "MANAGE VIA NEO4J, INC.", you will be alerted that "You're leavin
When you click *Confirm*, if the Aura Console fails to open you may need to address any popup blockers in your browser and try again.
====
+pass:[]
+pass:[]
+
For security purposes, Neo4j and GCP do not share your login credentials.
You will need to log in to the Neo4j Console with the same Google account you have used on GCP.
@@ -123,14 +134,16 @@ To get started, visit the https://azuremarketplace.microsoft.com/en-us/marketpla
[discrete]
==== 3. Subscribe to Neo4j Aura Professional
-* Select the resource group that the Aura Professional subscription will apply to. Then, create a name for the SaaS subscription so you can easily identify it later.
-* Your billing term will be a 1-month subscription at $0 cost. Aura Professional has a consumption based pricing model, so you will only be charged for the amount you consume in Gigabyte hours (Gb/h)
+* Select the resource group that the Aura Professional subscription will apply to.
+Then, create a name for the SaaS subscription so you can easily identify it later.
+* Your billing term will be a 1-month subscription at $0 cost.
+Aura Professional has a consumption-based pricing model, so you will only be charged for the amount you consume in gigabyte-hours (GB-hours).
* Set recurring billing to *On*
* Click *Review + subscribe*
[NOTE]
====
* Ensure your Azure account is upgraded before continuing.
-* Enable *marketplace purchases* in Azure. See more info on the https://learn.microsoft.com/en-us/azure/cost-management-billing/manage/enable-marketplace-purchases[Azure website]
+* Enable *marketplace purchases* in Azure.
+See more info on the https://learn.microsoft.com/en-us/azure/cost-management-billing/manage/enable-marketplace-purchases[Azure website]
====
-
diff --git a/modules/ROOT/pages/auradb/connecting-applications/overview.adoc b/modules/ROOT/pages/connecting-applications/overview.adoc
similarity index 84%
rename from modules/ROOT/pages/auradb/connecting-applications/overview.adoc
rename to modules/ROOT/pages/connecting-applications/overview.adoc
index 4129d2bf9..8c80a7478 100644
--- a/modules/ROOT/pages/auradb/connecting-applications/overview.adoc
+++ b/modules/ROOT/pages/connecting-applications/overview.adoc
@@ -1,10 +1,11 @@
[[aura-connecting-overview]]
= Connecting applications
:description: This section covers how to use drivers and libraries to connect your application to AuraDB.
+:page-aliases: auradb/connecting-applications/overview.adoc
You can use the official link:{neo4j-docs-base-uri}/create-applications/[drivers and libraries] provided by Neo4j to connect your application to AuraDB using a variety of programming languages.
Regardless of what language you use, you will need to provide the following information to connect to an AuraDB instance:
-* `uri` - The *Connection URI* for your AuraDB instance. You can copy this from the instance card or details page in the Console.
+* `uri` - The *Connection URI* for your AuraDB instance. You can copy this from the instance card or details page in the console.
* `username` and `password` - The *Username* and *Password* for your AuraDB instance. You can copy or download these during the instance creation process.
\ No newline at end of file
diff --git a/modules/ROOT/pages/connecting-applications/query-api.adoc b/modules/ROOT/pages/connecting-applications/query-api.adoc
new file mode 100644
index 000000000..5a2d48a09
--- /dev/null
+++ b/modules/ROOT/pages/connecting-applications/query-api.adoc
@@ -0,0 +1,15 @@
+[[aura-query-api-tutorial]]
+= Using the Query API with Aura
+:description: Use the Query API with Aura to execute Cypher statements against a Neo4j server through HTTPS requests.
+
+== Introduction
+
+The Query API enables interaction with an Aura database using Cypher via HTTPS.
+
+For more information, see the link:https://neo4j.com/docs/query-api/current/[Query API documentation], but note that:
+
+* For Aura instances, the host follows the format `<instance-id>.databases.neo4j.io`.
+
+* The Query API documentation mainly refers to self-managed instances that use port `7474`, whereas Aura supports HTTPS only, which defaults to port `443`.
+
+* As HTTPS implies port `443` in the request, you do not need to include the port in the URL.
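+
+As a minimal sketch (assuming an instance host of the form `<instance-id>.databases.neo4j.io`, the default `neo4j` database, and the `/db/{databaseName}/query/v2` endpoint described in the Query API documentation), a Cypher statement can be sent with any HTTPS client, for example Python's `requests` library:
+
+[source, python]
+----
+import requests
+
+# Hypothetical connection details -- replace with your own instance values
+AURA_HOST = "<instance-id>.databases.neo4j.io"  # no port needed: HTTPS implies 443
+AURA_USERNAME = "neo4j"
+AURA_PASSWORD = "<password>"
+
+# Query API v2 endpoint for the default "neo4j" database
+url = f"https://{AURA_HOST}/db/neo4j/query/v2"
+
+response = requests.post(
+    url,
+    auth=(AURA_USERNAME, AURA_PASSWORD),
+    json={"statement": "RETURN 1 AS answer"},
+)
+
+# The response body is JSON containing the result rows
+print(response.json())
+----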
\ No newline at end of file
diff --git a/modules/ROOT/pages/platform/connectors/bi.adoc b/modules/ROOT/pages/connectors/bi.adoc
similarity index 87%
rename from modules/ROOT/pages/platform/connectors/bi.adoc
rename to modules/ROOT/pages/connectors/bi.adoc
index 5156c89c5..9fa35a4e1 100644
--- a/modules/ROOT/pages/platform/connectors/bi.adoc
+++ b/modules/ROOT/pages/connectors/bi.adoc
@@ -1,12 +1,13 @@
[[connecting-bi]]
= Neo4j Connector for BI
:description: This page describes how to connect to AuraDS using the BI Connector.
+:page-aliases: platform/connectors/bi.adoc
:product: AuraDS
-[TIP]
-====
-Tutorial: xref:tutorials/bi.adoc[Using the BI Connector with Aura]
-====
+// [TIP]
+// ====
+// Tutorial: xref:tutorials/bi.adoc[Using the BI Connector with Aura]
+// ====
The Neo4j Connector for Business Intelligence (BI) delivers access to Neo4j graph data from BI tools such as Tableau, Power BI, Looker, TIBCO, Spotfire Server, Microstrategy, and more. It can be used to run SQL queries on a Neo4j graph and retrieve data in tabular format.
diff --git a/modules/ROOT/pages/platform/connectors/kafka.adoc b/modules/ROOT/pages/connectors/kafka.adoc
similarity index 94%
rename from modules/ROOT/pages/platform/connectors/kafka.adoc
rename to modules/ROOT/pages/connectors/kafka.adoc
index 126661e43..e2bec9935 100644
--- a/modules/ROOT/pages/platform/connectors/kafka.adoc
+++ b/modules/ROOT/pages/connectors/kafka.adoc
@@ -1,8 +1,9 @@
[[connecting-kafka]]
= Neo4j Connector for Apache Kafka
:description: This page describes how to connect to Aura using Kafka.
+:page-aliases: platform/connectors/kafka.adoc
-Many users and customers want to integrate Kafka and other streaming solutions with Neo4j, either to ingest data into the graph from other sources or to send update events to the event log for later consumption.
+Many users and customers want to integrate Kafka and other streaming solutions with Neo4j, either to ingest data into the graph from other sources or to send update events to the event log for later consumption.
Aura supports the use of the https://neo4j.com/docs/kafka/[Kafka Connect Neo4j Connector^], which allows you to ingest data into Neo4j from Kafka topics or send change events from Neo4j into Kafka topics.
Connecting to Aura only requires to make a few changes to the https://neo4j.com/docs/kafka/kafka-connect/source/[source^] and https://neo4j.com/docs/kafka/kafka-connect/sink/[sink^] configuration examples:
diff --git a/modules/ROOT/pages/platform/connectors/spark.adoc b/modules/ROOT/pages/connectors/spark.adoc
similarity index 88%
rename from modules/ROOT/pages/platform/connectors/spark.adoc
rename to modules/ROOT/pages/connectors/spark.adoc
index 4ae03c0f3..0755d1621 100644
--- a/modules/ROOT/pages/platform/connectors/spark.adoc
+++ b/modules/ROOT/pages/connectors/spark.adoc
@@ -1,11 +1,11 @@
[[connecting-spark]]
= Neo4j Connector for Apache Spark
:description: This page describes how to connect to Aura using Spark.
-
-[TIP]
-====
-Tutorial: xref:tutorials/spark.adoc[Using the Apache Spark Connector with Aura]
-====
+:page-aliases: platform/connectors/spark.adoc
+// [TIP]
+// ====
+// Tutorial: xref:tutorials/spark.adoc[Using the Apache Spark Connector with Aura]
+// ====
The Neo4j Connector for Apache Spark is intended to make integrating graphs with Spark easy. There are two ways to use the connector:
diff --git a/modules/ROOT/pages/dashboards/ai-dashboards.adoc b/modules/ROOT/pages/dashboards/ai-dashboards.adoc
new file mode 100644
index 000000000..a969c5734
--- /dev/null
+++ b/modules/ROOT/pages/dashboards/ai-dashboards.adoc
@@ -0,0 +1,80 @@
+= Creating a dashboard with AI
+
+. In the **Create with AI** dialog, you can enter an optional natural language text prompt to let Neo4j create a customized dashboard.
+. Once you are happy with the prompt, **Create** the dashboard.
+
+[NOTE]
+====
+When creating a dashboard with AI, the AI analyzes your link:https://neo4j.com/docs/operations-manual/current/procedures/#procedure_db_schema_visualization[database schema], that is, the node labels and relationship types of your database.
+It cannot read the actual data in your database.
+====
+
+If you do not enter a prompt, Neo4j AI will still make use of your database schema and come up with a suitable dashboard.
+
+Note that the following examples might differ when you reproduce them.
+This is due to the non-deterministic nature of LLM processing.
+
+
+== Creating a data-focused dashboard
+
+Copy the following prompt or create your own and use it to create a dashboard:
+
+.Prompt for a data-focused dashboard
+[source]
+----
+Create a dashboard on my retail database, giving me an overview over recent top customers, top selling products, currently available stock, a category overview and sales volume per quarter.
+----
+
+The more information you include, the better the result.
+This holds for information about the database itself as well as entities in the data model.
+
+.Create a data-focused dashboard
+image::dashboards/ai-dashboard-prompt-data-focus.png[]
+
+Here is a sample result:
+
+.A data-focused dashboard
+image::dashboards/ai-dashboard-data-focus.png[]
+
+
+== Creating a visualization-focused dashboard
+
+Copy the following prompt or create your own and use it to create a dashboard:
+
+.Prompt for a visualization-focused dashboard
+[source]
+----
+Create a dashboard with only graph visualizations to analyze my graph.
+----
+
+.Create visualization-focused dashboard
+image::dashboards/ai-dashboard-prompt-visualization-focus.png[]
+
+Here is a sample result:
+
+.A visualization-focused dashboard
+image::dashboards/ai-dashboard-visualization-focus.png[]
+
+
+== Combining data- and visualization focus
+
+Copy the following prompt or create your own and use it to create a dashboard:
+
+.Prompt for a dashboard with both a data and a visualization focus
+[source]
+----
+Create a dashboard with 5 bar charts analyzing products for categories and 5 graph visualizations for these categories.
+----
+
+.Create a dashboard with both a data and a visualization focus
+image::dashboards/ai-dashboard-prompt-dual-focus.png[]
+
+Here is a sample result:
+
+.A dashboard with both a data and a visualization focus (1)
+image::dashboards/ai-dashboard-dual-focus-1.png[]
+
+.A dashboard with both a data and a visualization focus (2)
+image::dashboards/ai-dashboard-dual-focus-2.png[]
+
+
diff --git a/modules/ROOT/pages/dashboards/faq-and-resources.adoc b/modules/ROOT/pages/dashboards/faq-and-resources.adoc
new file mode 100644
index 000000000..654d12675
--- /dev/null
+++ b/modules/ROOT/pages/dashboards/faq-and-resources.adoc
@@ -0,0 +1,21 @@
+= FAQ and resources
+:description: Frequently asked questions.
+
+On this page you can find some pointers to common questions and materials.
+
+== FAQ
+
+
+=== Something went wrong, how can I report a bug or issue?
+
+You can use the link:https://support.neo4j.com/[Neo4j support portal] to report any bugs or issues with Neo4j Dashboards.
+
+=== Where can I submit feature requests for Dashboards?
+
+Feature requests and other feedback can be submitted via the link:https://feedback.neo4j.com/dashboards[Aura feedback page].
+
+// == Resources
+
+// Tutorials
+// Blog posts
+// Training materials (GraphAcademy?)
diff --git a/modules/ROOT/pages/dashboards/getting-started.adoc b/modules/ROOT/pages/dashboards/getting-started.adoc
new file mode 100644
index 000000000..a049d5cad
--- /dev/null
+++ b/modules/ROOT/pages/dashboards/getting-started.adoc
@@ -0,0 +1,92 @@
+= Getting started
+:description: This page contains a working example of Neo4j dashboards.
+
+Set up a working example of Neo4j dashboards.
+
+== Prerequisites
+
+You need:
+
+. A Neo4j Aura account
+. A Neo4j Aura database instance
+
+See xref::/getting-started/create-account.adoc[Create an account] and xref::/getting-started/create-instance.adoc[Create an instance] for details.
+
+== Add a sample data set
+
+Import the Northwind dataset to your instance:
+
+* In Aura, find the **Learn** button at the top right.
+* In the **Beginner** page, select the **Learn the basics** guide.
+* When prompted to **Connect to instance**, select the instance where you would like to import sample data.
+* In step 4 of 11 of the guide, import via **Get the Northwind dataset**.
+* Run the import from the **Import** page via **Run import**.
+
+The examples on this page refer to the Northwind dataset.
+
+
+== Create a dashboard
+
+Create a new dashboard:
+
+. In the **Dashboards** page, create a new dashboard with **Create dashboard** at the top right.
+. If prompted to **Connect to instance**, select your instance.
+. The UI takes you directly to the new dashboard. To change the title, select the title text and edit it, then confirm.
+. Your dashboard has a single page titled "Main page". To change the name, hover over it, then use the more menu *[...]* and select **Edit page name**.
+. Use **Add card** to create cards which represent visualizations.
+
+
+=== Add a card with a bar chart
+
+Create a bar chart which displays the number of orders per customer.
+
+In the dashboard page tab:
+
+. Use **Add card** at the bottom right of the page.
+. Optionally change the title of the card by clicking and editing, then confirm.
+. In the new card, use the more menu *[...]* at the top right, then select **Bar chart** as the **Chart type**.
+. **Edit** the Cypher query of the card and paste the following Cypher query to the input field, then **Save**:
++
+[source,cypher]
+----
+MATCH (c:Customer)-[:PURCHASED]->(o:Order)
+RETURN c.contactName AS Customer, count(o) AS Orders
+ORDER BY Orders DESC LIMIT 10
+----
+
+Your bar chart should look like this:
+
+.Example bar chart
+image::dashboards/visualizations/visualization-bar-chart.png[]
+
+
+=== Add a card with a line chart
+
+Create a line chart which displays the number of products ordered on each order date.
+
+In the dashboard page tab:
+
+. Use **Add card** at the bottom right of the page.
+. Optionally change the title of the card by clicking and editing, then confirm.
+. In the new card, use the more menu *[...]* at the top right, then select **Line chart** as the **Chart type**.
+. **Edit** the Cypher query of the card and paste the following Cypher query to the input field, then **Save**:
++
+[source,cypher]
+----
+MATCH (o:Order)-[:ORDERS]->(p:Product)
+RETURN datetime(replace(o.orderDate, " ", "T")) AS Date,
+ count(p) as Categories
+LIMIT 20
+----
+
+Your line chart should look like this:
+
+.Example line chart
+image::dashboards/visualizations/visualization-line-chart.png[]
+
+
+== Next steps
+
+See xref::/dashboards/managing-dashboards.adoc[] for more dashboard options.
+
+See xref::/dashboards/visualizations/index.adoc[] to learn about the different charts and visualizations of Neo4j dashboards.
\ No newline at end of file
diff --git a/modules/ROOT/pages/dashboards/import.adoc b/modules/ROOT/pages/dashboards/import.adoc
new file mode 100644
index 000000000..4e4fc9ef5
--- /dev/null
+++ b/modules/ROOT/pages/dashboards/import.adoc
@@ -0,0 +1,79 @@
+= Import
+:description: This page holds information about import options for dashboards.
+
+When you create a dashboard, you can choose to **Import**.
+
+In the **Import dashboard** dialog, you have several options to source the dashboard JSON for the import:
+
+* Browse for a JSON file on your machine.
+* Select a dashboard JSON that is stored in your database.
+* Drag and drop or paste a dashboard JSON to the editor.
+
+.Import dashboard dialog
+image::dashboards/import/import-dashboard-dialog.png[]
+
+[NOTE]
+====
+Note that you only have the option to select a stored dashboard JSON when there are stored dashboards in your database.
+
+It is not possible to delete stored dashboards from the database through the dashboard application.
+====
+
+If the dashboard JSON selected for the import is syntactically correct, the **Import dashboard** dialog displays a **Summary** and **Details** for the import.
+This means that the import can be run.
+
+
+== Supported reports
+
+[cols="9,^4",options="header",grid="cols"]
+|===
+| NeoDash | Aura dashboards
+
+| Table | ✅
+| Graph | ✅
+| Bar chart | ✅
+| Pie chart | ✅
+| Line chart | ✅
+| Map | Converted to graph
+| Single value | ✅
+| Raw JSON | ❌
+| Parameter select | Only node and relationship properties
+| Form | ❌
+| iFrame | ❌
+| Markdown | ✅
+
+|===
+
+Advanced NeoDash reports added via the Advanced visualization extension are not supported.
+
+
+== Summary
+
+Data fields relevant to the import are displayed under **Summary**.
+
+The latest versions of both NeoDash Labs and NeoDash commercial are supported.
+However, the import procedure always attempts to import a dashboard JSON, even if it comes from a different version.
+
+The summary includes:
+
+* the number of imported pages, cards, parameters and filters.
+* the number of cards and filters that were _migrated_, _converted_ or _skipped_.
+** _Migrated_ cards and filters have a type that is supported by Aura dashboards.
+** _Converted_ cards and filters have a type that is mapped to the closest supported type in Aura dashboards.
+ For example, a NeoDash commercial map report is converted to an Aura dashboard graph visualization.
+** The import procedure _skips_ any types it can neither migrate nor convert.
+
+Note that not all settings from NeoDash reports have a counterpart in Aura dashboards.
+Any such settings do not affect the import and are not preserved by the import procedure.
+
+
+== Details
+
+Expand the **Details** to inspect incidents reported during the import.
+
+Entries include the location of the incident in the dashboard JSON, that is, the specific page and report, as well as a description of what went wrong or is of note.
+
+Descriptions can be, for example:
+
+* Unsupported parameter select types, resulting in skipped filters.
+* Missing entity and/or property types, missing parameter names.
\ No newline at end of file
diff --git a/modules/ROOT/pages/dashboards/index.adoc b/modules/ROOT/pages/dashboards/index.adoc
new file mode 100644
index 000000000..c01cdac41
--- /dev/null
+++ b/modules/ROOT/pages/dashboards/index.adoc
@@ -0,0 +1,19 @@
+[[dashboards]]
+= What are dashboards?
+:description: Dashboards as a part of the new Aura console experience.
+
+Neo4j dashboards let you compose different visualizations such as tables and graphs in tabbed pages to have relevant data at a glance.
+
+.A sample dashboard
+image::dashboards/dashboard-full.png[]
+
+
+Neo4j dashboards' main features include:
+
+* Neo4j dashboards are a part of the Aura console experience and are stored automatically in the Neo4j cloud storage.
+* Neo4j dashboards operate directly on your graph data via Cypher queries.
+* Manage and customize your dashboards, dashboard pages and cards.
+* Customize visualizations via settings and their corresponding Cypher query.
+// * Parameterize visualizations or entire dashboards
+// * Apply rule-based styling to your visualizations
+// * Share your dashboards across your team or your company
\ No newline at end of file
diff --git a/modules/ROOT/pages/dashboards/managing-dashboards.adoc b/modules/ROOT/pages/dashboards/managing-dashboards.adoc
new file mode 100644
index 000000000..d46cb0368
--- /dev/null
+++ b/modules/ROOT/pages/dashboards/managing-dashboards.adoc
@@ -0,0 +1,60 @@
+= Managing dashboards
+:description: Create and modify Neo4j dashboards.
+
+In Neo4j Aura, use **Dashboards** under **Tools** in the left-side navigation to access the dashboard page.
+
+[NOTE]
+====
+The number of dashboards you can create depends on your Aura tier:
+
+* AuraDB Free: three dashboards
+* AuraDB Professional: 25 dashboards
+* AuraDB Business Critical and Virtual Dedicated Cloud: unlimited dashboards
+====
+
+
+== Creating a dashboard
+
+If you have no dashboards, select **Create dashboard** and either **Create from scratch** or **Import**.
+Alternatively, select **Create with AI**.
+
+.Create your first dashboard
+image::dashboards/create-first-dashboard.png[]
+
+If you already have a dashboard and want to create another, use **Create dashboard** at the top right, and select **Create from scratch**, **Import** or **Create with AI**.
+
+**Create from scratch** takes you directly to a new and empty dashboard.
+
+See xref:/dashboards/import.adoc[] to learn more about importing dashboards.
+
+See xref::/dashboards/ai-dashboards.adoc[] for advice on how to create a dashboard via a prompt.
+
+
+== Editing a dashboard
+
+. Select a dashboard tile from the **Dashboards** page.
+. To change the title, select the title text and edit it, then confirm.
+
+
+=== Dashboard pages
+
+Select a dashboard tile from the **Dashboards** page.
+Dashboard pages are organized as tabs.
+A new dashboard contains a single page titled "Main page".
+You can edit or duplicate a page by hovering over its name and using the more menu *[...]*.
+Add more pages via **+** next to the existing pages' names.
+Additional pages can also be deleted from the more menu *[...]*.
+
+
+=== Dashboard cards
+
+A dashboard page can hold any number of cards.
+Each card can hold a xref::/dashboards/visualizations/index.adoc[visualization], also referred to as the **Chart type**.
+To add a card, use **Add card** at the bottom right.
+Use the more menu *[...]* at the top right of a card to edit the card.
+Use the six dots at the top left of a card to drag and drop it within the grid of the page.
+
+
+== Deleting a dashboard
+
+In the **Dashboards** page, use the more menu *[...]* of a dashboard tile and select **Delete**, then confirm.
\ No newline at end of file
diff --git a/modules/ROOT/pages/dashboards/parameters-and-filters.adoc b/modules/ROOT/pages/dashboards/parameters-and-filters.adoc
new file mode 100644
index 000000000..4a5e00bcb
--- /dev/null
+++ b/modules/ROOT/pages/dashboards/parameters-and-filters.adoc
@@ -0,0 +1,114 @@
+= Parameters and filters
+:description: Use parameters in your dashboards to further customize queries or use them in filters for node and relationship properties.
+
+
+== Parameters
+
+Parameters are associated with a specific dashboard.
+Each parameter has a value and a type, both of which you can set initially.
+
+Display the parameters of a dashboard by using the parentheses button to show the **Parameters** drawer.
+
+.The parameters drawer
+[.shadow]
+image::dashboards/parameters-and-filters/parameters-drawer.png[]
+
+The drawer shows the dashboard parameters in alphabetical order.
+Each parameter has an icon next to it hinting at its data type:
+
+* A text icon for strings.
+* A hashtag icon for numbers.
+* A calendar icon for dates.
+* A filter icon for unspecified data types.
+
+
+=== Creating a parameter
+
+. From the **Parameters** drawer, use the **Add** button.
+. In the dialog, enter a parameter name, select a data type and optionally an initial value for the parameter, then **Save**.
+
+
+=== Editing a parameter
+
+. From the **Parameters** drawer, hover over the parameter you wish to edit.
+. In the dialog, edit the parameter name, data type and/or initial value, then **Save**.
+
+
+=== Using a parameter in a query
+
+To use a parameter in a query, prefix the parameter name with `$`.
+For example, to use a parameter with a number value of 4 and the name "unit_param", use `$unit_param` in your query:
+
+.Cypher query for a table which displays products with a certain number of units in stock
+[source,cypher]
+----
+MATCH (p:Product)
+WHERE p.unitsInStock = $unit_param
+RETURN p.productName as product, p.unitsInStock AS units
+ LIMIT 10
+----
+
+.Using a parameter in a query to display products with a certain number of units in stock
+image::dashboards/parameters-and-filters/parameter-in-query.png[]
+
+
+=== Deleting a parameter
+
+From the **Parameters** drawer, hover over the parameter you wish to delete and use the trash bin button.
+
+
+== Filters
+
+Filters are special dashboard xref:dashboards/managing-dashboards.adoc#_dashboard_cards[cards].
+
+.A filter card for the product node property `unitsInStock`
+image::dashboards/parameters-and-filters/filter-units-in-stock-card.png[]
+
+You can add them to your dashboards in a similar fashion to regular cards.
+You can use a filter to dynamically set parameter values inside queries.
+
+
+=== Adding a filter
+
+. In the **Dashboards** page, use **Add filter** at the bottom right.
+. In the **Add filter** dialog, select a filter type: a node or relationship type, and a property.
+. Use either an existing parameter or create a new one, then **Save**.
+
+.Creating a filter for the product node property `unitsInStock`
+image::dashboards/parameters-and-filters/filter-units-in-stock.png[]
+
+[NOTE]
+====
+Making filters dependent on a node or relationship property is necessary to let the dashboard infer values for the filter card to choose from.
+====
+
+
+=== Editing a filter
+
+. In the **Dashboards** page, use the more menu *[...]* of the filter you wish to edit and select **Edit filter**.
+. Edit your filter and **Save**.
+
+
+=== Deleting a filter
+
+In the **Dashboards** page, use the more menu *[...]* of the filter you wish to delete and select **Delete**, then confirm.
+
+
+//=== Highlight all cards using a filter
+//
+//In the **Dashboards** page, use the target icon of a filter to highlight all cards that make use of this particular filter.
+
+// screenshot
+
+
+=== Using a filter in a card
+
+Use the filter parameter as you normally would in the query of the card.
+
+For example, the table from xref:#_using_a_parameter_in_a_query[] and the filter from xref:#_adding_a_filter[] are connected through the same parameter.
+You can select a value from the filter card drop-down to let the dashboard assign this value to the parameter `$unit_param` and dynamically update the table:
+
+.Selecting a new value for the unitsInStock filter
+image::dashboards/parameters-and-filters/filter-and-card.gif[]
+
+All cards which make use of the parameter are updated with the new value.
\ No newline at end of file
diff --git a/modules/ROOT/pages/dashboards/sharing-dashboards.adoc b/modules/ROOT/pages/dashboards/sharing-dashboards.adoc
new file mode 100644
index 000000000..162fee6b8
--- /dev/null
+++ b/modules/ROOT/pages/dashboards/sharing-dashboards.adoc
@@ -0,0 +1,29 @@
+= Sharing dashboards
+:description: Share your dashboards with people in your project.
+
+You can share your dashboards with other users in your project.
+You can either invite individual users from your project or share your dashboard with everyone in the project.
+
+A shared dashboard grants READ permission to the users you shared it with.
+
+Either use the more menu *[...]* of a dashboard in the **Dashboards** page and select **Share** or select **Share** at the top right when looking at a particular dashboard.
+
+.Dashboard sharing dialog
+image::dashboards/sharing-dialog.png[]
+
+The **Share** dialog lets you invite more users by selecting email addresses from the **Invite users** dropdown and adding them to the list of **People with access**.
+
+[NOTE]
+====
+The users you can invite are the same users you can find in the **Users** page under **Project**.
+====
+
+You can also toggle **General access** to grant access to anyone in the project.
+If toggled on, it takes precedence over the possibly more restrictive list of people with access.
+The list is preserved and applies again as soon as you toggle **General access** off.
+
+When a dashboard is shared, a team icon appears at the top right of the dashboard in the **Dashboards** page.
+Hover over the icon to see whether the dashboard is shared with the entire project or the number of people it is shared with.
+
+.Dashboard sharing icon
+image::dashboards/sharing-hover.png[]
\ No newline at end of file
diff --git a/modules/ROOT/pages/dashboards/visualizations/barchart.adoc b/modules/ROOT/pages/dashboards/visualizations/barchart.adoc
new file mode 100644
index 000000000..ba56b68ce
--- /dev/null
+++ b/modules/ROOT/pages/dashboards/visualizations/barchart.adoc
@@ -0,0 +1,66 @@
+= Bar chart
+:description: The Neo4j dashboard bar chart visualization.
+
+A bar chart displays different categories and values in a bar layout.
+Choose the following:
+
+* *Category*: a text field. Categories are the bar labels.
+* *Value*: a numeric field. This determines the height of the bars.
+
+//* *Group*: A second optional text field. When grouping is enabled in the advanced settings, the group can be used to draw a stacked bar chart, with several groups per category.
+
+[TIP]
+====
+Select a horizontal segment of the bar chart to zoom in.
+Use the reload button to reset the bar chart zoom.
+====
+
+== Examples
+
+
+=== Simple bar chart
+
+.Cypher query for a bar chart which displays the customers with the most orders
+[source,cypher]
+----
+MATCH (c:Customer)-[:PURCHASED]->(o:Order)
+RETURN c.contactName AS Customer, count(o) AS Orders
+ORDER BY Orders DESC LIMIT 10
+----
+
+.A bar chart displaying the customers with the most orders
+image::dashboards/visualizations/visualization-bar-chart.png[]
+
+
+=== Stacked bar chart
+
+.Cypher query for a bar chart which separates customer orders by freight weight
+[source,cypher]
+----
+MATCH (c:Customer)-[:PURCHASED]->(o:Order)
+WITH c, count(o) AS Orders, collect(o) as os
+RETURN c.contactName AS Customer,
+       size([x IN os WHERE toFloat(x.freight) > 20.0 | x]) AS freightGT20,
+       size([x IN os WHERE toFloat(x.freight) <= 20.0 | x]) AS freightLT20
+ORDER BY Orders DESC LIMIT 10
+----
+
+.A bar chart separating customer orders by freight weight
+image::dashboards/visualizations/visualization-bar-chart-stacked.png[]
+
+
+== Configuration
+
+Select your **Category** for the x-axis and one or more **Value**s for the y-axis.
+
+You can also select:
+
+* The **Bar arrangement** (grouped or stacked).
+* The **Orientation** (vertical or horizontal).
+* The **Scale** (linear or logarithmic).
+* Whether or not to show the legend of the bar chart.
+
+[TIP]
+====
+If you are configuring a stacked bar chart, use the entries in the **Value** dropdown to deselect and reselect your value fields to control the order in which the corresponding bars are displayed.
+====
\ No newline at end of file
diff --git a/modules/ROOT/pages/dashboards/visualizations/graph.adoc b/modules/ROOT/pages/dashboards/visualizations/graph.adoc
new file mode 100644
index 000000000..61deb8afe
--- /dev/null
+++ b/modules/ROOT/pages/dashboards/visualizations/graph.adoc
@@ -0,0 +1,52 @@
+= Graph
+:description: The Neo4j dashboard graph visualization.
+
+
+Neo4j dashboard graphs render returned nodes, relationships and paths.
+Configure the graph layout and nodes, relationships, labels and colors to your liking.
+
+You can drag and drop nodes in the displayed graph.
+
+
+== Example
+
+
+.Cypher query for a graph which displays products that are part of the "Produce" category
+[source,cypher]
+----
+MATCH (p:Product)-[o:PART_OF]->(c:Category)
+WHERE c.categoryName = "Produce"
+RETURN p, o, c
+----
+
+.A graph displaying the products that are part of the "Produce" category
+image::dashboards/visualizations/visualization-graph.png[]
+
+
+== Configuration
+
+
+=== Layout
+
+Neo4j dashboard graphs have two layout options:
+
+. Force-based layout, which spreads nodes evenly.
+. Hierarchical layout, which respects the direction of relationships. Select one of: left to right, right to left, top to bottom, bottom to top.
+
+
+=== Styling
+
+Style your graph:
+
+. Select node and relationship colors.
+. Assign a width to relationship arrows.
+. Assign node circle sizes.
+. Select node and relationship captions from existing data fields.
+
+Use the **Styling** drop down to select the scope of your styling options:
+
+. Neo4j Console: this styling is used in your entire console.
+. Dashboard: this styling is limited to the current dashboard.
+. Card: this styling is only applied to the current card and graph visualization.
+
+Finally, you can toggle the visibility of the side panel and the context menu of the graph.
\ No newline at end of file
diff --git a/modules/ROOT/pages/dashboards/visualizations/index.adoc b/modules/ROOT/pages/dashboards/visualizations/index.adoc
new file mode 100644
index 000000000..a660d8b1e
--- /dev/null
+++ b/modules/ROOT/pages/dashboards/visualizations/index.adoc
@@ -0,0 +1,49 @@
+= Visualizations
+:description: The different visualization options in Neo4j dashboards.
+
+A visualization is embedded in a dashboard card.
+Visualizations have different types, each of which expects different types of data.
+
+
+== Writing Cypher queries
+
+Each visualization uses a Cypher query specified in the dashboard card to retrieve data from your Neo4j database and display it.
+Edit the query associated with a dashboard card by using the more menu *[...]* at the top right of a card and then **Edit card**.
+
+
+Cypher syntax is generally supported; see link:https://neo4j.com/docs/cypher-manual/current/introduction/cypher-aura/[Cypher and Aura] and xref::/query/introduction.adoc[Query data].
+
+Keep the following best practices in mind when writing your Cypher queries:
+
+. Use a `LIMIT` clause in your query to keep the result size manageable.
+. Ensure that you return the correct data types for the correct visualization type.
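+
+For example, a query along the following lines returns one text column and one numeric column and caps the result size with `LIMIT` (a minimal sketch based on the Northwind example data set used elsewhere in these pages):
+
+[source,cypher]
+----
+// One text column (Category) and one numeric column (Products), limited to 20 rows
+MATCH (p:Product)-[:PART_OF]->(c:Category)
+RETURN c.categoryName AS Category, count(p) AS Products
+ORDER BY Products DESC
+LIMIT 20
+----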
+
+//
+// For example, a graph report expects nodes and relationships, whereas a line chart expects numbers.
+//
+//
+
+////
+== Row limiting
+
+NeoDash has a built-in post-query row limiter.
+This means that results are truncated to a maximum number of rows, depending on the report type.
+The row limiter ensures that visualizations do not become too complex for the browser to display.
+
+Note that even though the row limiter is enabled by default, rows are only limited after the query is executed.
+Therefore, it is recommended to use the `LIMIT` clause in your query at all times.
+
+== Parameters
+
+Parameters can be set in a dashboard by using a xref::/user-guide/reports/parameter-select.adoc[] report.
+Set parameters are then available in any Cypher query across the dashboard.
+
+In addition, session parameters are available based on the currently active database connection.
+
+|===
+|Parameter | Description
+| $session_uri | The URI of the current active database connection.
+| $session_database | The Neo4j database that was connected to when the user logged in.
+| $session_username | The username used to authenticate to Neo4j.
+|===
+////
\ No newline at end of file
diff --git a/modules/ROOT/pages/dashboards/visualizations/linechart.adoc b/modules/ROOT/pages/dashboards/visualizations/linechart.adoc
new file mode 100644
index 000000000..31dc77c4f
--- /dev/null
+++ b/modules/ROOT/pages/dashboards/visualizations/linechart.adoc
@@ -0,0 +1,61 @@
+= Line chart
+:description: The Neo4j dashboard line chart visualization.
+
+You can use a line chart to draw one or more lines in a two-dimensional plane.
+It has two numeric fields:
+
+* **Dimension**: The values used on the x-axis.
+* **Value**: The values used on the y-axis.
+
+// Needs confirmation:
+// The line chart supports plotting both simple numbers and time values on the x-axis.
+// If you select a Neo4j datetime property on the x-axis, the chart is automatically drawn as a time series.
+
+[TIP]
+====
+Select a horizontal segment of the line chart to zoom in.
+Use the reload button to reset the line chart zoom.
+====
+
+== Examples
+
+
+=== Basic line chart
+
+.Cypher query for a line chart which displays the number of product categories by order dates
+[source,cypher]
+----
+MATCH (o:Order)-[:ORDERS]->(p:Product)
+RETURN datetime(replace(o.orderDate, " ", "T")) AS Date,
+ count(p) as Categories
+LIMIT 20
+----
+
+.A line chart displaying the number of product categories by order dates
+image::dashboards/visualizations/visualization-line-chart.png[]
+
+
+=== Multi-line chart
+
+.Cypher query for a line chart which displays the number of product categories and the product count by order dates
+[source,cypher]
+----
+MATCH (o:Order)-[or:ORDERS]->(p:Product)
+RETURN datetime(replace(o.orderDate, " ", "T")) AS Date,
+ count(p) as Categories,
+ sum(or.quantity) as Quantity
+LIMIT 20
+----
+
+.A line chart displaying the number of product categories and the product count by order dates
+image::dashboards/visualizations/visualization-line-chart-multi.png[]
+
+== Configuration
+
+Select your **Dimension** for the x-axis and one or multiple **Value**s for the y-axis.
+
+You can also select:
+
+* The **Orientation** (vertical or horizontal).
+* The **Scale** (linear or logarithmic).
+* Whether or not to show the legend of the line chart.
\ No newline at end of file
diff --git a/modules/ROOT/pages/dashboards/visualizations/piechart.adoc b/modules/ROOT/pages/dashboards/visualizations/piechart.adoc
new file mode 100644
index 000000000..063e264b9
--- /dev/null
+++ b/modules/ROOT/pages/dashboards/visualizations/piechart.adoc
@@ -0,0 +1,26 @@
+= Pie chart
+:description: The Neo4j dashboard pie chart visualization.
+
+A pie chart displays different categories and values as slices of a circular disc.
+Choose the following:
+
+* *Category*: a text field. Categories are the labels of the pie slices.
+* *Value*: a numeric field. This determines the size of the slices.
+
+== Example
+
+
+.Cypher query for a pie chart which displays the number of products per category
+[source,cypher]
+----
+MATCH (p:Product)-[:PART_OF]->(c:Category)
+RETURN c.categoryName AS Category, count(p) AS Products LIMIT 20
+----
+
+.A pie chart displaying the number of products per category
+image::dashboards/visualizations/visualization-pie-chart.png[]
+
+
+== Configuration
+
+Select your **Category** for the pie slice labels and a **Value** for the size of the pie slices.
\ No newline at end of file
diff --git a/modules/ROOT/pages/dashboards/visualizations/single-value.adoc b/modules/ROOT/pages/dashboards/visualizations/single-value.adoc
new file mode 100644
index 000000000..2707edfd5
--- /dev/null
+++ b/modules/ROOT/pages/dashboards/visualizations/single-value.adoc
@@ -0,0 +1,22 @@
+= Single value
+:description: The Neo4j dashboard single value visualization.
+
+You can display a single value in a card.
+
+
+== Example
+
+.Cypher query for a single value which displays the number of products
+[source,cypher]
+----
+MATCH (p:Product)
+RETURN count(p) AS productCount
+----
+
+.A single value displaying the number of products
+image::dashboards/visualizations/visualization-single-value.png[]
+
+== Configuration
+
+Select a **Size**, **Text position** and **Text color** for the single value.
+A **Size** of **Auto** will size the displayed value according to the card size.
\ No newline at end of file
diff --git a/modules/ROOT/pages/dashboards/visualizations/table.adoc b/modules/ROOT/pages/dashboards/visualizations/table.adoc
new file mode 100644
index 000000000..f5c41b85e
--- /dev/null
+++ b/modules/ROOT/pages/dashboards/visualizations/table.adoc
@@ -0,0 +1,46 @@
+= Table
+:description: The Neo4j dashboard table visualization.
+
+
+Neo4j dashboard tables can render all data returned by a Cypher query as a result table.
+
+Results are paginated and you can select the number of results per page.
+Sort by a result column by clicking the column header.
+
+When you hover over a table cell, a copy icon appears towards the end of the cell, which lets you copy the cell content to the clipboard.
+
+
+== Example
+
+//=== Basic table
+
+.Cypher query for a table which displays the number of products per category
+[source,cypher]
+----
+MATCH (p:Product)-[:PART_OF]->(c:Category)
+RETURN c.categoryName AS Category, count(p) AS Products LIMIT 20
+----
+
+.A table displaying the number of products per category
+image::dashboards/visualizations/visualization-table-basic.png[]
+
+
+
+////
+=== Table with nodes and collections
+
+.Cypher query for a table which displays the category nodes and collections of products
+[source,cypher]
+----
+MATCH (p:Product)-[:PART_OF]->(c:Category)
+RETURN c as CategoryNode, collect(p) AS Products LIMIT 20
+----
+
+.A table displaying the category nodes and collections of products
+image::dashboards/visualizations/visualization-table-nodes-and-collections.png[]
+
+////
+
+== Configuration
+
+You can toggle rich rendering of cells, which adds syntax highlighting and makes the result data types clear.
\ No newline at end of file
diff --git a/modules/ROOT/pages/dashboards/visualizations/text.adoc b/modules/ROOT/pages/dashboards/visualizations/text.adoc
new file mode 100644
index 000000000..7d66b62da
--- /dev/null
+++ b/modules/ROOT/pages/dashboards/visualizations/text.adoc
@@ -0,0 +1,13 @@
+= Text
+:description: The Neo4j dashboard text visualization.
+
+You can display Markdown-formatted text in a card.
+
+
+== Example
+
+.The Markdown editor
+image::dashboards/visualizations/visualization-text-editor.png[]
+
+.A card displaying Markdown text
+image::dashboards/visualizations/visualization-text-card.png[]
\ No newline at end of file
diff --git a/modules/ROOT/pages/explore/explore-default-actions.adoc b/modules/ROOT/pages/explore/explore-default-actions.adoc
new file mode 100644
index 000000000..d0a87cd39
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-default-actions.adoc
@@ -0,0 +1,176 @@
+:description: This chapter contains default actions and keyboard shortcuts.
+
+[[default-actions]]
+= Default actions and shortcuts
+A summary of the actions and their associated keyboard shortcuts is below.
+
+//Check Mark
+:check-mark: icon:check[]
+
+//Cross Mark
+:cross-mark: icon:times[]
+
+.Explore Actions and their Keyboard Shortcuts
+[cols=".^5a,.^20,.^50,.^25a,.^15a", options=header]
+|===
+|
+| Action
+| Description
+| Shortcut
+| Typed in search bar
+
+| image:icon-magnifying-glass.png[width=25]
+| Inspect
+| Opens the detail view of a selected node, showing all its properties and labels
+| _Mac_: kbd:[⌘+I]
+
+_Windows_: kbd:[Ctrl+I]
+| {cross-mark}
+
+| image:select-related-nodes.png[width=25]
+| Select related nodes
+| Selects all nodes that can be connected to the selected node
+| _Mac_: kbd:[⌘+⇧+R]
+
+_Windows_: kbd:[Ctrl+⇧+R]
+| {cross-mark}
+
+// | image:icon-pencil.png[width=25] | Edit node | Allows editing of all the properties of the selected node at once | _Mac_: kbd:[⌘+⌥+E]
+
+// _Windows_: kbd:[Ctrl+Alt+E] | {cross-mark}
+
+| image:icon-invert.png[width=25]
+| Invert Selection
+| Inverts the current selection
+| _Mac_: kbd:[⌘+⌥+A]
+
+_Windows_: kbd:[Ctrl+Alt+A]
+| {check-mark}
+
+| image:icon-fit-selection.png[width=25]
+| Fit to selection
+| Zooms in and centers the selection on the canvas
+| _Mac_: kbd:[⌘+F]
+
+_Windows_: kbd:[Ctrl+F]
+| {check-mark}
+
+| image:icon-expand-reveal.png[width=25]
+| Expand
+| Expands all the neighbours of the selected nodes
+| _Mac_: kbd:[⌘+E]
+
+_Windows_: kbd:[Ctrl+E]
+| {check-mark}
+
+| image:icon-expand-reveal.png[width=25]
+| Reveal
+| List/Detail view specific action.
+Reveals all the selected nodes or relationships on the canvas.
+|
+| {cross-mark}
+
+| image:icon-path.png[width=25]
+| Path
+| Shows the shortest path between two selected nodes
+|
+| {cross-mark}
+
+| image:icon-dismiss.png[width=25]
+| Dismiss
+| Hides all selected nodes and relationships
+| _Mac_: kbd:[⌘+H]
+
+_Windows_: kbd:[Ctrl+H]
+| {check-mark}
+
+| image:icon-dismiss.png[width=25]
+| Dismiss other nodes
+| Hides everything that is not selected
+| _Mac_: kbd:[⌘+⇧+H]
+
+_Windows_: kbd:[Ctrl+⇧+H]
+| {check-mark}
+
+| image:icon-add.png[width=25]
+| Create relationship
+| Allows the creation of a relationship between two selected nodes.
+The direction is set in the sequence in which the two nodes are clicked.
+|
+| {cross-mark}
+
+| image:icon-add.png[width=25]
+| Create node
+| Allows the creation of a node in a specified category.
+The newly created node will inherit all the labels that category has.
+|
+| {cross-mark}
+
+| image:icon-duplicate.png[width=25]
+| Duplicate
+| Duplicates a selected node with all the properties it has.
+The newly duplicated node is always selected and has no relationships to other nodes.
+| _Mac_: kbd:[⌘+D]
+
+_Windows_: kbd:[Ctrl+D]
+| {cross-mark}
+
+| image:icon-clear.png[width=25]
+| Clear Scene
+| Clears the whole scene and collapses the list view
+| _Mac_: kbd:[⌘+⌫]
+
+_Windows_: kbd:[Ctrl+⌫]
+| {cross-mark}
+
+| image:refresh-data.png[width=25]
+| Refresh Data
+| Refreshes the data in the Scene
+|
+| {cross-mark}
+
+| image:dismiss-single-nodes.png[width=25]
+| Dismiss single nodes
+| Hides all nodes that have no relationships to other nodes in the Scene
+| _Mac_: kbd:[⌘+⇧+S]
+
+_Windows_: kbd:[Ctrl+⇧+S]
+| {cross-mark}
+
+| image:icon-undo.png[width=25]
+| Undo
+|
+|
+| {check-mark}
+
+| image:icon-redo.png[width=25]
+| Redo
+|
+|
+| {check-mark}
+
+| image:icon-jumpto.png[width=25]
+| Jump to node/relationship
+| Zooms in and centers the desired node or relationship on the canvas
+|
+| {cross-mark}
+
+|
+| Select All
+| Selects all nodes and relationships on the canvas
+| _Mac_: kbd:[⌘+A]
+
+_Windows_: kbd:[Ctrl+A]
+| {cross-mark}
+
+|
+| Zoom In
+|
+| _Mac_: kbd:[⌘ + +]
+| {cross-mark}
+
+|
+| Zoom Out
+|
+| _Mac_: kbd:[⌘ + -]
+| {cross-mark}
+|===
diff --git a/modules/ROOT/pages/explore/explore-features/edit-graph-data.adoc b/modules/ROOT/pages/explore/explore-features/edit-graph-data.adoc
new file mode 100644
index 000000000..b5e920cd1
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-features/edit-graph-data.adoc
@@ -0,0 +1,69 @@
+:description: This section describes how to edit graph data in Explore.
+
+= Edit graph data
+
+Explore allows you to edit your graph data directly from the scene.
+Consequently, the only data you can modify is what is visible in the current scene.
+You can also create new nodes and relationships in your scene, which are added to your database.
+
+[NOTE]
+====
+Editing data in Explore requires write permission to the database.
+====
+
+* *Edit labels* - You can add or remove labels from a node when you inspect its properties in the Inspector.
+Only labels available in the database can be added.
+
+[.shadow]
+image::edit-label.png[width=300]
+
+* *Edit or add properties* - You can add, edit or delete properties on a node when you inspect its properties in the Inspector.
+Only property keys enabled for viewing in Explore (as defined in the Perspective) will be visible and editable.
+Relationship properties can also be edited in the relationship's Inspector.
+
+[.shadow]
+image::edit-properties.png[width=300]
+
+* *Create new relationships* - New relationships can be created from the canvas directly.
+To create a new relationship, select the source and destination nodes, taking care to select the source node first.
+The right-click context menu then shows `Create relationship` enabled, with a sub-menu listing the available relationship types.
+Only relationship types available in the database can be added.
+Note that database constraints, if they exist, affect the ability to create relationships.
+See the link:https://neo4j.com/docs/cypher-manual/current/constraints/[Cypher Manual -> Constraints] for more information on constraints.
+
+[.shadow]
+image::create-relationship.png[width=300]
+
+* *Create new nodes* - New nodes can also be created from the canvas.
+To create an empty new node, use the canvas context menu and select an existing category to which the node should be assigned.
+Another option is to duplicate an existing node from its context menu.
+If you attempt to create a node of a particular label that has existence or uniqueness constraints for one or more properties, the Inspector shows which properties require (unique) values before you can create the node.
+See the link:https://neo4j.com/docs/cypher-manual/current/constraints/[Cypher Manual -> Constraints] for more information on constraints.
+
+[.shadow]
+image::create-node.png[width=300]
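+
+For example, a uniqueness constraint such as the following sketch (the constraint name and property are illustrative, not taken from the screenshots above) requires a unique `productName` value before a new `Product` node can be created:
+
+[source,cypher]
+----
+// Illustrative uniqueness constraint: every Product node must have a unique productName
+CREATE CONSTRAINT product_name_unique IF NOT EXISTS
+FOR (p:Product) REQUIRE p.productName IS UNIQUE
+----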
+
+* *Delete a relationship* - A relationship can be deleted from the canvas as well.
+With the desired relationship selected, the context menu includes an option to delete the relationship.
+If more than one relationship is selected, you can delete the selection.
+
+[.shadow]
+image::delete-relationship.png[width=300]
+
+* *Delete nodes* - Similarly, nodes can also be deleted directly from the canvas.
+The context menu for nodes allows you to delete selected node(s) in the same way as for relationships.
+
+[.shadow]
+image::delete-node.png[width=300]
+
+[NOTE]
+====
+You can only delete elements from the database if your role has the required permissions.
+See link:https://neo4j.com/docs/operations-manual/current/authentication-authorization/[Operations Manual -> Authentication and authorization] for more information on role-based access control.
+====
+
+[WARNING]
+====
+Deleting nodes and relationships from the canvas permanently deletes them from the database.
+Be careful with this option as it cannot be undone.
+====
diff --git a/modules/ROOT/pages/explore/explore-features/full-text-search.adoc b/modules/ROOT/pages/explore/explore-features/full-text-search.adoc
new file mode 100644
index 000000000..e39266699
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-features/full-text-search.adoc
@@ -0,0 +1,29 @@
+:description: This section describes full-text search in Explore.
+
+[[bloom-fulltext-search]]
+= Full-text search
+
+Explore always allows users to run a full-text search query against the database for their search input.
+This is useful when suggestions provided by Explore do not satisfy the user’s need.
+Full-text search using the input is the last suggestion provided to the user in the suggestions list.
+
+[.shadow]
+image::full-text-search.jpg[width=400]
+
+Explore can take advantage of native full-text indexes in the database.
+Additionally, for small graphs with low cardinality in data values (e.g. the Movies graph), Explore is able to search in property values without requiring an explicit index.
+The full-text string entered by the user is searched as one unit in these cases.
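+
+If you want full-text search to be backed by an index, you can create one in the database; the following is a minimal sketch, with an illustrative index name and property:
+
+[source,cypher]
+----
+// Illustrative full-text index over Product names
+CREATE FULLTEXT INDEX product_fulltext IF NOT EXISTS
+FOR (n:Product) ON EACH [n.productName]
+----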
+
+[NOTE]
+====
+Full-text searching can be a time-consuming operation.
+Depending on the database, the state of indexes, and the search input requested, you may have a noticeable lag in response time because the queries may take a long time to return.
+That's why full-text search is kept as the last option in the suggestions list, to avoid unintentional use.
+====
+
+[WARNING]
+====
+When there is a delay in getting the search suggestions to return, the full-text search is the only option available to the user in the suggestions list.
+Take care not to select the full-text search suggestion inadvertently: you may have to wait a long time before results are returned, and the full-text search can put a slightly larger workload on the database server.
+====
diff --git a/modules/ROOT/pages/explore/explore-features/graph-pattern-search.adoc b/modules/ROOT/pages/explore/explore-features/graph-pattern-search.adoc
new file mode 100644
index 000000000..59772714b
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-features/graph-pattern-search.adoc
@@ -0,0 +1,115 @@
+:description: This section describes how to do a graph pattern search in Neo4j Explore.
+
+[[graph-pattern-search]]
+= Graph pattern search
+
+Explore provides an easy and flexible way to explore your graph through graph patterns.
+It uses a vocabulary built from your graph and Perspective elements (categories, labels, relationship types, property keys and property values).
+Uncategorized labels and relationships or properties hidden in the Perspective are not considered in the vocabulary.
+To build a graph pattern search in Explore, you use this vocabulary and either build the pattern step by step or type in a near-natural language phrase.
+
+// There are two different search experiences in Explore, the default search and the classic search.
+// To switch between the two, use the toggle in the xref::/Explore-visual-tour/settings-drawer.adoc[Settings] drawer.
+
+== Step-by-step pattern building with proactive suggestions
+
+One approach to building graph patterns is to use the proactive suggestions feature of Explore.
+This is useful when you need assistance with picking elements of your graph schema (e.g. relationship types from a label or categories that connect together).
+
+When you go to the search bar, Explore presents proactive options to begin your search.
+You can select from any node labels available in the Perspective or use a blank `(any)` node.
+Further, if you know which relationship you are interested in, but not which node labels it connects, you can select based on the relationship type and use the wildcard option for the node label (the `(any)` node).
+Additionally, you can filter the suggestions by Search phrases, nodes, or relationships.
+
+// But, as explained in the following section on xref::/explore-features/graph-pattern-search.adoc#language-graph-patterns[Near-natural language and graph patterns], you can always type your own query as well.
+
+[.shadow]
+image::proactive-blank-input.png[width=400]
+
+If you select a node label, `Product` for example, Explore lets you choose whether you want to filter further on the start node by its relationships or refine by properties and/or property values.
+Explore gives you a hint about the datatype of the property value directly in the search bar.
+
+[.shadow]
+image::proactive-product-selected.png[width=400]
+
+If you select `Properties`, you can see all properties for the `Product` label and if you pick `discontinued` for example, you can then specify the condition, for example `true`.
+This results in a pattern that starts with a discontinued product and filters out all other nodes, both the ones with other labels and the `Product` nodes where the `discontinued` property does not equal `true`.
+
+From here, you can either press the play icon to display all discontinued products, or you can continue defining your graph pattern.
+
+When you are happy with the start node, select `Relationship` to see a list of available relationship types for your specified start node, both incoming and outgoing.
+Similarly, you can further filter on properties for the relationship, if available.
+If you are not interested in the type of relationship, you can use the wildcard `(any)` relationship.
+
+The last step is to specify the end node, which naturally follows the same steps as the start node.
+The wildcard option, `(any)`, is available here as well.
+
+Press the play icon when you are ready to execute the search.
+
+=== A note on property-value suggestions
+
+`Category`, `label` and `relationship type` matches are looked up in Explore's in-memory metadata of available graph and Perspective elements.
+For property matches, Explore queries the database instead to find suggestions.
+To do so, Explore relies on property indexes being set up in the database for any properties that should be searchable in Explore.
+
+For bigger graphs, all properties of a node with a certain label are treated as indexed if there are fewer than 1000 nodes with that label.
+However, if a property has the same value on more than 10% of the nodes, it is not searchable, whether indexed or not, for performance reasons.
+For small graphs with low cardinality in data values (e.g. the Movies graph, found in the https://neo4j.com/developer/example-data[example data sets]), Explore is able to search for property values without requiring an index.
+
+Depending on the search input, the number of indexes, and the speed of typing in the search box, it is possible that Explore runs a large number of index lookup queries to find relevant matches.
+Optimizations are built in to delay firing queries while waiting for the user to complete the input, and to cancel unneeded queries if the input changes.
+
+Explore also attempts to hide pattern permutations from the suggestions list, if they are not found in the database.
+This may not be applicable in all situations.
+It is possible for database performance issues or network latency between the user’s machine and the Neo4j server to cause delays in showing search suggestions.
+
+//As of 2.12, this doesn't work
+// [[language-graph-patterns]]
+// == Near-natural language and graph patterns
+
+// Assume that you want to find `Products` that are connected to `Orders` by any relationship.
+// Using a near-natural language search expression, you can type in the search in several different ways.
+
+// [NOTE]
+// ====
+// To use the full-text search, a full-text index needs to be present in the database.
+// ====
+
+// For example, if you type `Product Order` in the search bar, you get the following suggestion:
+
+// [.shadow]
+// image::product-order.png[width=400]
+
+// This is straightforward, a `Product` node connected via the wildcard `(any)` relationship to an `Order` node.
+// You can execute or further refine by adding more relationships to the pattern, or by defining conditions based on the properties of the `Order` nodes.
+
+// But if you instead type `order with product` in the search bar and run it as a full-text search, Explore returns seven nodes:
+
+// [.shadow]
+// image::full-text-search.png[width=800]
+
+// If you inspect these nodes individually, you can see that all of them has either `order` and/or `product` among their property values.
+// A full-text search requires at least three characters in the search bar.
+// Explore matches them exactly and if you enter multiple words, the returned elements contain at least one of them.
+
+// If the results of a full-text search exceeds the node query limit, Explore presents you with a pop-up which lets you select which elements to add to the Scene instead of blocking all results.
+// Typing `order of a product` in the search bar yields many matches and if your limit is set below 1000, it results in the following:
+
+// [.shadow]
+// image::search-pop-up.png[width=600]
+
+
+== Case sensitivity of input
+
+The Neo4j database is *case-sensitive*.
+By default, Explore matches property values in a *case-sensitive* fashion, if they *begin with* any of the tokens input by the user.
+
+By contrast, metadata elements such as labels, categories, relationship types, or property keys are matched in a *case-insensitive* fashion.
+Metadata elements are also matched if they simply contain one of the search tokens.
+
+[NOTE]
+====
+Case-insensitive matching of property values requires full-text indexes on all properties that will be searched.
+Without full-text indexes, Explore uses case-sensitive searching.
+====
+
diff --git a/modules/ROOT/pages/explore/explore-features/scene-actions.adoc b/modules/ROOT/pages/explore/explore-features/scene-actions.adoc
new file mode 100644
index 000000000..4028568ca
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-features/scene-actions.adoc
@@ -0,0 +1,41 @@
+:description: This section describes Scene actions in Explore.
+
+[[scene-actions]]
+= Scene actions
+
+_Scene actions_ are parameterized Cypher queries, much like xref:explore/explore-visual-tour/search-bar.adoc#search-phrase[Search phrase]s.
+The difference is that in a Scene action, the parameters can be the elements in your current selection rather than all available and matching elements in your graph.
+Scene actions are defined in the Perspective drawer under _Saved Cypher_, but they are invoked from the context menu.
+
+The Scene actions are listed in the order they were created.
+Explore lets you reorder them by dragging and dropping them in the Perspective drawer.
+The order of Scene actions in the Perspective drawer is also reflected in the context menu.
+
+[.shadow]
+image::scene-action-context.png[width=500]
+
+In the following example, using the Northwind graph, a Scene action, _Discontinued products_, is created (and saved) based on the selected nodes in the scene.
+This Scene action is available from the context menu when node(s) are selected and displays products that have been discontinued from the selected suppliers.
+
+[.shadow]
+image::scene-action.png[width=400]
+
+It is possible to make the Scene action available for only some categories; you control this in the _Action Availability_ dropdown menu.
+
+[.shadow]
+image::action-availability.png[width=300]
+
+If a relationship is selected instead of nodes, the _Discontinued products_ Scene action is not available, which is a result of the `WHERE elementId(n) in $nodes` on the second line of the Cypher query.
+
+[.shadow]
+image::scene-action-relationship.png[width=400]
+
+If you write a Scene action whose query targets relationships rather than nodes, it is defined in a similar fashion, using `WHERE elementId(r) IN $relationships`.
+Explore reminds you if you forget this clause.
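+
+Put together, a Scene action such as _Discontinued products_ could look roughly like the following sketch; the `SUPPLIES` relationship type is an assumption about the Northwind model, and the full query is not shown in the screenshots above:
+
+[source,cypher]
+----
+// Sketch of a node-based Scene action: $nodes holds the element IDs of the selected nodes
+MATCH (n:Supplier)-[:SUPPLIES]->(p:Product)
+WHERE elementId(n) IN $nodes AND p.discontinued = true
+RETURN n, p
+----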
+
+[NOTE]
+====
+Only the distinction between `$nodes` and `$relationships` matters to a Scene action's availability for a selected element.
+Any further refinement, such as the `p.discontinued=true` in the example, is ignored from this point of view.
+For example, if you select a `Supplier` node that is not connected to any discontinued products, the Scene action _Discontinued products_ is still available, but running it does not yield any results.
+====
diff --git a/modules/ROOT/pages/explore/explore-features/search-phrases-advanced.adoc b/modules/ROOT/pages/explore/explore-features/search-phrases-advanced.adoc
new file mode 100644
index 000000000..cfdbfcda9
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-features/search-phrases-advanced.adoc
@@ -0,0 +1,116 @@
+:description: This section describes more advanced Search phrases in Explore.
+
+[[search-phrases-advanced]]
+= Search phrases for advanced queries
+
+As mentioned in xref:explore/explore-visual-tour/search-bar.adoc#search-phrase[Search phrase], a Search phrase allows you to save a pre-defined graph query.
+Search phrases are defined in the Perspective drawer and automatically saved when created.
+Your saved Search phrases can be accessed from the Perspective drawer as well.
+
+== Static Search phrase
+
+[.shadow]
+image::static-search-phrase.png[width=400]
+
+In this example using the Northwind graph, a static Search phrase has been saved with a Cypher query that spans multiple nodes and relationships.
+The first box titled `Search phrase` specifies the phrase that the user will type in partially or fully.
+The description appears underneath all the Search phrase matches displayed to the user.
+
+Explore matches any part of the Search phrase field in a case-insensitive fashion.
+For example, typing `germ`, `ORDER`, or `SeaFoo` shows a match for `Germans ordering Seafood`.
+
+== Dynamic Search phrases
+
+[.shadow]
+image::parameterized-search-phrase.png[width=400]
+
+Parameters can be used in Search phrases to make them dynamic.
+In this example using the Northwind graph, there are two parameters (indicated with a `$` sign in front) added to the Search phrase.
+These allow for further user input to determine which query should be run.
+There are three options available for suggestions to these parameters:
+
+* *No suggestions* - If selected, the suggestions list will not show when using the Search phrase.
+* *Label-key* - Allows picking a label:key pair for the suggestions list.
+* *Cypher query* - A custom-written Cypher query for the suggestions list.
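+
+For reference, the Cypher query behind a Search phrase such as `Customers from $country ordering $category` could look roughly like the following sketch; the relationship types and the `country` property are assumptions based on the Northwind examples in this documentation:
+
+[source,cypher]
+----
+// Sketch of a parameterized Search phrase query: $country and $category are provided by the user
+MATCH (c:Customer)-[:PURCHASED]->(o:Order)-[:ORDERS]->(p:Product)-[:PART_OF]->(cat:Category)
+WHERE c.country = $country AND cat.categoryName = $category
+RETURN c, o, p, cat
+----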
+
+[[parameter-data-types]]
+=== Parameter data types
+
+The data type for every parameter must be specified.
+Explore supports `string`, `integer`, `float` and `boolean` data types.
+Additionally, Explore supports the temporal types `Date`, `Time`, `DateTime`, `LocalDate`, and `LocalDateTime`.
+Temporal types with time zones, i.e. `Time` and `DateTime`, can also be used for rule-based styling or filters.
+You can search for them and get search suggestions and also edit them in the Inspector (provided that you have write access to the graph).
+
+User input for a parameter gets converted to the data type specified for it.
+
+If you want to set up parameters for other data types supported in Cypher, you can use a combination of `string`, `integer`, `float` and `boolean` inputs to build other data types for Cypher.
+See link:https://neo4j.com/docs/cypher-manual/current/values-and-types/[Cypher Manual -> Values and types] for more information on data types.
+
+A couple of scenarios are described below, but there are a number of others that you may come across.
+
+* *Temporal (date or time) type*: When you have temporal properties, you can use `Date`, `Time`, `DateTime`, `LocalDate`, or `LocalDateTime` Cypher functions along with a string parameter.
+For example:
++
+[source, cypher, indent=0]
+----
+MATCH (n:Employee) WHERE n.startDate = date($inDate)
+RETURN n
+----
++
+where `$inDate` would be a `string` input like `2019-05-23`.
+
+* *Spatial type*: For spatial properties, you can use point or distance Cypher functions along with float parameters in a Search phrase. For example:
++
+[source, cypher, indent=0]
+----
+MATCH (n:Store) WHERE n.location = point({latitude: $lat, longitude: $long})
+RETURN n
+----
++
+where `$lat` and `$long` would have `float` inputs like `37.55` and `-122.31`.
+
+
+=== Chaining of parameters
+
+The user-input for one parameter can be used to filter the list of suggestions provided for a subsequent parameter.
+This is referred to as _parameter chaining_.
+For example, consider the Search phrase used above with multiple parameters, `Customers from $country ordering $category`.
+In this case, perhaps you want to restrict the list of category suggestions based on the country the user picked; parameter chaining helps you achieve this.
+To use it, construct the list of category suggestions with a Cypher query that uses the `$country` parameter to filter categories.
+See the image below for an example of what this could look like.
+
+[.shadow]
+image::parameter-chaining.png[width=400]
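+
+A suggestion query of this kind could look roughly like the following sketch; as before, the relationship types and the `country` property are assumptions based on the Northwind examples in this documentation:
+
+[source,cypher]
+----
+// Sketch of a chained suggestion query: only categories ordered from the chosen country are suggested
+MATCH (:Customer {country: $country})-[:PURCHASED]->(:Order)-[:ORDERS]->(:Product)-[:PART_OF]->(cat:Category)
+RETURN DISTINCT cat.categoryName
+----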
+
+== Search phrases caveats
+
+* Explore limits the number of records processed for the visualization to 10,000 unless a smaller limit has been set in the query.
+This is to prevent the app from hanging or crashing for queries that return too many records.
++
+[.shadow]
+image::query-limit.png[width=400]
++
+* It is recommended that Search phrases either return a path or a set of nodes.
++
+[WARNING]
+====
+Returning only relationships may cause unexpected behavior, in addition to producing no changes in the graph visualization.
+====
++
+For example, the following query:
++
+[source, cypher, indent=0]
+----
+MATCH ()-[r:CONNECTED_TO]->() RETURN r
+----
++
+should be refactored to:
++
+[source, cypher, indent=0]
+----
+MATCH p = ()-[r:CONNECTED_TO]->() RETURN p
+----
+
+* Furthermore, be aware that it is possible to modify data with a Search phrase, as any valid Cypher query can be used.
+ Using Search phrases for this purpose is not recommended, as an end user might not be aware of the consequences of running a Search phrase that includes write transactions.
diff --git a/modules/ROOT/pages/explore/explore-features/slicer.adoc b/modules/ROOT/pages/explore/explore-features/slicer.adoc
new file mode 100644
index 000000000..4a0f31eef
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-features/slicer.adoc
@@ -0,0 +1,59 @@
+[[slicer]]
+= Slicer
+:description: This section introduces the Slicer functionality in Explore.
+
+The Slicer is a feature that lets you quickly and interactively change what is visible in your scene.
+It allows you to visualize differences in the numerical values of properties via a timeline.
+You can do this by manually scrubbing the timeline or by using the playback function.
+
+//replace image when get hands on plugin
+[.shadow]
+image::slicer.png[width=800]
+
+To use an example from the Northwind dataset, let's say you want to place a large order of any kind of beer but want to make sure that there are enough units in stock before you place your order.
+Once you have the products of choice in your scene, access the Slicer via the Slicer button, which opens a panel at the bottom.
+Click _Add Range_ and select which property you want to use.
+
+[.shadow]
+image::add_range.png[width=400]
+
+Note that only properties with _numerical_ values are available:
+
+* `dateTime`
+* `date`
+* `time`
+* `localTime`
+* `localDateTime`
+* `duration`
+* `integer`
+* `float`
+
+If your property is temporal and includes multiple time zones, it is possible to translate them into the same time zone.
+
+The property values of `unitsInStock` are integers and, once selected, all available values for this property are displayed on a timeline.
+If you hover over a bar on the timeline, you can see how many visible nodes have each value.
+
+You can manually scrub along the timeline or use the playback function to visualize the changes in property values.
+Let's say you need at least 100 units of beer; you select values >100 on the timeline to see which kinds of beer are available.
+
+[.shadow]
+image::selected-values.png[width=800]
+
+The playback function lets you visualize your selected ranges in real time.
+Start by selecting one or more values by manually expanding or narrowing your selection, then press the play button and watch nodes appear/disappear in the scene based on the value of their `unitsInStock` property.
+
+[.shadow]
+image::playback.png[width=400]
+
+You can select between three different modes for playback:
+
+* Slide range to end - This option plays in increments of the size of the range you have selected on the timeline.
+* Start of range to end - This option starts by displaying your selected range and successively expands until all values are displayed.
+* Within range - This option starts at the beginning of your selection and successively decreases until it reaches the end of your selection.
+
+Sometimes it may be desirable to filter out data by one property first and then further refine by another property.
+The Slicer lets you add up to five different ranges.
+
+While you are using the Slicer, you can't interact with the scene in any other way than selecting/deselecting nodes or relationships.
+To be able to interact again, you need to close the Slicer, which can be done in two ways: with the _Keep Scene and Close_ button or the *X* next to it.
+The difference between the two is that the button keeps the scene as-is while the *X* restores the scene to what it was before you opened the Slicer.
diff --git a/modules/ROOT/pages/explore/explore-perspectives/database-scans.adoc b/modules/ROOT/pages/explore/explore-perspectives/database-scans.adoc
new file mode 100644
index 000000000..64a950713
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-perspectives/database-scans.adoc
@@ -0,0 +1,27 @@
+:description: This section describes database scans in Explore.
+
+[[database-scans]]
+= Database scans
+
+A database scan is performed each time a perspective is created or refreshed, in order to determine what property keys are present for each label and relationship type.
+This information is used in various places in Explore, such as the categories, Search phrase editor and styling rules.
+
+When the database contains > 10,000,000 nodes and relationships combined, Explore provides two scan options, _complete_ scan or _quick_ scan.
+The complete scan scans all the nodes and relationships in the database, whereas the quick one only scans a random sample.
+The quick scan is faster than the complete scan, but it may not find all property keys if they are present on only a few nodes or relationships.
+A complete scan can take a long time if the database is large and may result in an Explore timeout.
+If you opt for a complete scan of a large database and experience an Explore timeout, select the quick scan and try again.
+For smaller graphs, Explore always performs a complete scan of the database without giving these options.
+
+[.shadow]
+image::datascan-generate.png[width=400]
+[.shadow]
+image::datascan-refresh.png[width=400]
+
+If you know that you have a consistent schema for your database and/or if your database is large, a quick scan is recommended.
+On the other hand, if you have an inconsistent schema and/or if you can't find some of your properties after a quick scan, a complete scan is advisable.
+
+[NOTE]
+====
+The database scan/refresh is triggered by the creation of a new Perspective or by the refresh of an existing Perspective and cannot be done manually.
+====
diff --git a/modules/ROOT/pages/explore/explore-perspectives/perspective-creation.adoc b/modules/ROOT/pages/explore/explore-perspectives/perspective-creation.adoc
new file mode 100644
index 000000000..f300e1caa
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-perspectives/perspective-creation.adoc
@@ -0,0 +1,111 @@
+:description: This section describes how to create and use Perspectives in Explore.
+
+[[perspective-creation]]
+= Creation and use
+
+== Default Perspective
+
+When you connect Explore for the first time to a non-empty database, a Perspective is automatically created for you (see *Generate Perspective* below for details on how this is done).
+This is called *Default Perspective* and it cannot be modified from the UI.
+However, if there are changes to the database, Explore updates the Default Perspective accordingly.
+If you want to customize the view, you can create a new Perspective.
+
+== Creating a Perspective
+
+[.shadow]
+image::perspective-creation.png[width=800,align="center"]
+
+Perspectives can be selected or created from the Perspective gallery, found in the Perspective drawer.
+There are two options when you opt to create a new Perspective:
+
+. *Generate Perspective* - With this option, Explore performs a scan of the database and analyzes the labels within the graph, identifying the smallest number of labels that uniquely categorize all nodes.
+Explore then creates the Perspective, auto-fills in the requisite category definitions, selects properties for captioning categories, populates the list of available relationship types, and sets default styles.
+Once created, the Perspective definition can be edited and reconfigured differently at any time.
++
+When Explore connects to a database for the first time, auto-generating the Perspective might be the best option in most cases.
++
+As mentioned above, if the database contains more than 10,000,000 nodes and relationships combined, a complete scan takes a long time and you can elect to run a quick scan instead.
+See xref:explore/explore-perspectives/database-scans.adoc[Database scans] for more details.
+
+
+. *Blank Perspective* - With this option, you can build a Perspective from scratch by defining each category and configuring properties and styling for it.
+Explore still adds the list of available relationship types.
++
+Choose this option when you have a large number of labels in the data, but your Perspective only needs to contain a small subset of them.
+It is more efficient to create the Perspective manually rather than auto-generating it and reconfiguring to remove many categories.
+
+[NOTE]
+====
+The first time you open Explore with a new database, Explore automatically generates a perspective (Auto-perspective).
+As described above, Explore samples a node from each category it finds and uses the properties of the sampled node.
+As you query more data, Explore adds any new properties as they are found.
+====
+
+
+*Perspective Export and Import*
+
+You can also export the Perspective definition in a serialized JSON file format.
+This is useful to either save the definition as of a certain time, or to migrate Perspectives between different environments.
+The JSON file can then be imported using Explore connected to the same or a different database.
+
+
+Both exporting and importing a Perspective can be done from the Perspective gallery.
+If you have multiple Perspectives, you can filter them by typing in the search box located on the top of the Perspective gallery.
+
+[.shadow]
+image::perspective-export-import.png[width=800]
+
+== Components of a Perspective
+
+[.shadow]
+image::perspective-components.png[width="800"]
+
+In the Perspective designer, you can specify *Categories*, *Relationships* and tailored *Cypher queries* for a Perspective.
+
+[discrete]
+[[perspective-categories]]
+=== Categories
+
+Within a Perspective, a category defines each business entity – Person, Place or Thing – which is visible through the Perspective.
+Typically, a single graph label maps to a particular category, but multiple labels may also be mapped to a single category.
+
+When you create a category, Explore analyzes the graph to find the related properties and other labels that occur on nodes that have the category-defining label.
+If desired, you can select which properties to exclude from the visualization.
+Explore assigns a default color for the category, but you can change the default color and node sizes from the xref:explore/explore-visual-tour/legend-panel.adoc[Legend panel].
+You can also give the category an icon from an extensive library of searchable icons.
+Rule-based styling can also be applied at any stage.
+
+[TIP]
+====
+Keep in mind, when you manually create a Perspective, that Explore assigns nodes to categories in the order the categories appear in the list.
+The category labels above take precedence over the ones below.
+A new category is by default added to the top of the list, but the list can be rearranged by dragging the categories up or down, allowing you to control the order of importance.
+====
+
+[NOTE]
+====
+If a node has multiple labels, and the labels are mapped to different categories, the category which is defined first in the Perspective definition is used by Explore for that node.
+Hence the styling of the node is driven by the first category to which any of its labels are mapped.
+For example, if _Tom Hanks_ has the `Person` and `Actor` labels, and there are two respective categories defined for `Actor` and `Person` in that order, the styling for the _Tom Hanks_ node will be derived from the `Actor` category.
+However, when searching for all `Person` nodes, _Tom Hanks_ will still be returned in the query results since it has a `Person` label on it.
+====
+
+[discrete]
+=== Relationships
+
+Based on the Perspective's purpose, it may be useful to limit the relationship types that are available for user exploration.
+The Perspective designer lets you choose one or more relationship types from the list of available types in the graph, and hide them.
+By default, all relationship types are visible.
+
+Similar to category styling, relationship type styling options for color, thickness, and rule-based styles are available in the xref:explore/explore-visual-tour/legend-panel.adoc[Legend panel].
+
+[discrete]
+=== Saved Cypher
+
+In the *Saved Cypher* tab of the Perspective designer, you can define _Search phrases_ and _Scene actions_.
+xref:explore/explore-visual-tour/search-bar.adoc#search-phrase[Search phrase]s are defined and scoped for a particular Perspective, as they usually apply to a specific business view of the graph.
+They are stored with the rest of the Perspective definition and run from the Search bar.
+See xref:explore/explore-features/search-phrases-advanced.adoc[Search phrases for advanced queries] for how to define Search phrases.
+
+_Scene actions_ are Cypher queries you can run on the elements available in your current scene.
+They are run from the context menu when at least one element is selected, see xref:explore/explore-features/scene-actions.adoc[Scene actions] for more information.
\ No newline at end of file
diff --git a/modules/ROOT/pages/explore/explore-perspectives/perspectives.adoc b/modules/ROOT/pages/explore/explore-perspectives/perspectives.adoc
new file mode 100644
index 000000000..26ff33482
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-perspectives/perspectives.adoc
@@ -0,0 +1,73 @@
+:description: This section describes Perspectives in Explore.
+
+[[perspectives]]
+= Perspectives
+
+In Explore, a Perspective defines a certain business view or domain that can be found in the target Neo4j graph.
+A single Neo4j graph can be viewed through different Perspectives, each tailored for a different business purpose.
+
+Perspectives define:
+
+* Categorization of business entities.
+* Property visibility and value type.
+* Relationship visibility.
+* Styling (color, icon, caption).
+* Custom Search phrases (see later section).
+
+== A business view of the graph
+
+Within a graph there are often multiple smaller graphs which are connected to each other.
+Sometimes you need to see everything.
+Other times it's helpful to have a more focused view.
+Defining a Perspective allows you to select what parts of the graph to show and how to show them.
+
+The dataset used in the following examples is the _Northwind_ link:https://neo4j.com/developer/example-data[example data set].
+Northwind is a fictional company with a database that tracks their product catalog, sales orders, and sales staff.
+
+.Northwind ER Diagram
+image::image22.png[width=800]
+
+When importing Northwind directly into a Neo4j graph there is a 1:1 correspondence of tables to labels, which means that for each record a node has been created with a label that matches the original name of the table.
+Each foreign key reference gets converted into a graph relationship.
+
+After connecting to the instance that contains this Neo4j database and launching Explore for the first time, Explore automatically generates a Perspective based on the data it finds.
+If you have any previously defined Perspectives, they are available for selection.
+You can always ask Explore to auto-generate a Perspective.
+The auto-generated Perspective is a good place to start.
+Explore categorizes the nodes into entities, selects useful captions, and assigns color-coding.
+
+Keep in mind though, that when Explore auto-generates a Perspective, a complete scan of the database is performed.
+If your database is large, i.e. > 10,000,000 nodes and relationships combined, this will take a long time and you can opt for a quick scan instead.
+See xref:explore/explore-perspectives/database-scans.adoc[Database scans] for more information.
+The Northwind example data set is _not_ large and Explore can quickly auto-generate a Perspective.
+
+By contrast, when Explore creates an Auto-perspective the first time you open it with a new database, it samples a node from each category it finds and uses the properties of the sampled node.
+As with any other perspective, as you query more data, Explore adds any new properties as they are found.
+
+You are able to search and explore the entire Northwind graph.
+
+.Northwind as a graph
+[.shadow]
+image::northwind-as-a-graph.png[width=800]
+
+While everyone in the organization could benefit from a graph view, not everyone needs to see everything.
+For instance, the shipping department of Northwind may only need to see orders, products, and customers.
+You can create another Perspective that highlights only those categories.
+
+.Northwind Shipping Perspective
+[.shadow]
+image::northwind-shipping-perspective.png[width=800]
+
+Similarly, you can create Perspectives that are specific to the sales department, purchasing department, or customer service department.
+
+.Northwind Sales Perspective
+[.shadow]
+image::northwind-sales-perspective.png[width=800]
+
+[.shadow]
+.Northwind Purchasing Perspective
+image::northwind-purchasing-perspective.png[width=800]
+
+[.shadow]
+.Northwind Customer Perspective
+image::northwind-customer-perspective.png[width=800]
\ No newline at end of file
diff --git a/modules/ROOT/pages/explore/explore-perspectives/refresh-perspectives.adoc b/modules/ROOT/pages/explore/explore-perspectives/refresh-perspectives.adoc
new file mode 100644
index 000000000..bbe28b097
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-perspectives/refresh-perspectives.adoc
@@ -0,0 +1,27 @@
+:description: This section describes how to refresh a Perspective in Explore.
+[[refresh-perspectives]]
+= Refresh Perspectives
+
+When changes are made to the database, for example by adding, removing, or changing property data types, these are automatically reflected in any existing Perspectives.
+You can remove any unwanted automatically added categories; Explore does not automatically re-add them.
+
+However, if you use Explore with the plugin, such changes are *not* reflected automatically.
+In order to make the changes available, click the "Refresh Perspective" button in the top right corner of the Perspective drawer.
+
+Keep in mind that this operation may take anywhere from a few seconds to several minutes, depending on the size of your database.
+
+[.shadow]
+image::perspective-refresh-magnified.png[width=500]
+
+Once the Perspective is refreshed, new relationship types and property keys are added and data types for property keys are updated.
+Any relationship types or property keys that were hidden before the refresh remain hidden after it.
+If any labels, relationship types or property keys are deleted from the database, they are deleted from the Perspective as well.
+
+[.shadow]
+image::property-key-refresh.png[width=800]
+
+However, new categories are not automatically created if new labels are added to the database, though the additional labels are available in the Perspective.
+Similarly, if a label is removed from the database, the associated category is not automatically removed from the Perspective when you refresh.
+
+Also, note that if labels or properties have changed in the database, the change may affect styling rules and Search phrases.
+Those have to be updated manually after a Perspective refresh.
\ No newline at end of file
diff --git a/modules/ROOT/pages/explore/explore-quick-start.adoc b/modules/ROOT/pages/explore/explore-quick-start.adoc
new file mode 100644
index 000000000..9180fe223
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-quick-start.adoc
@@ -0,0 +1,50 @@
+:description: This section presents tips for a quicker start with Explore.
+[[explore-quick-start]]
+= Quick start
+
+Explore is designed to be simple and intuitive enough for a business person or knowledge worker to pick up and use.
+Follow these quick start tips if you would like to play with the interface on your own before returning to this guide to learn about the more advanced and nuanced features.
+
+You need an active connection to an instance that contains data; one of the link:https://neo4j.com/developer/example-data[example data sets] works.
+Additionally, Explore needs a xref:explore/explore-perspectives/perspectives.adoc[Perspective] as a business view of the graph to which it connects.
+The first time that Explore connects to a graph, it shows a selection of already defined Perspectives or, if you have the access rights, offers to auto-generate a new one.
+Auto-generation is a good place to jump quickly into graph exploration.
+Keep in mind that a complete scan of the database will be performed when you auto-generate a Perspective and if your database is large, this can take a long time.
+In that case, you may opt for a quick scan instead.
+See xref:explore/explore-perspectives/database-scans.adoc[Database scans] for more information.
+
+Graph exploration begins by searching for interesting parts of the graph.
+
+[.shadow]
+image::explore-ui.png[width=800]
+
+== Start by searching
+
+By loading the _Northwind_ https://neo4j.com/developer/example-data[example data set], you can start interacting with the graph right away.
+If you need help loading the dataset, the process is described in detail in xref:import/introduction.adoc[Import].
+
+Once loaded, try searching for:
+
+* everything in a category, like _Products_.
+* particular things in a category, like _Products with productName Louisiana_.
+* qualified patterns, like _Suppliers of Products with productName Louisiana_.
+* long patterns, like _Customer purchase details about order for product with productName Louisiana_.
+* generic patterns, like _Categories of Products with Suppliers_.
+
+
+[NOTE]
+====
+For faster search performance, it is highly recommended to set up indexes in the Neo4j database for all properties that should be searchable in Explore.
+See link:{neo4j-docs-base-uri}/cypher-manual/current/indexes/[Cypher Manual -> Indexes].
+====
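+
+For example, to make `Product` nodes searchable by name in the Northwind data set, you could create an index like the following (a minimal sketch; the label and property name follow the Northwind example and may differ in your graph):
+
+[source, cypher]
+----
+// Create a range index so Explore can look up products by name quickly
+CREATE INDEX product_name_index IF NOT EXISTS
+FOR (p:Product) ON (p.productName)
+----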
+
+== Interact with the visualization
+
+With a currently displayed graph visualization you can:
+
+* zoom in and out using the buttons or scroll using a mouse or touchpad.
+* double-click a node to see details.
+* right-click to bring up a context menu and try the available options.
+* click in an empty spot and drag to pan the visualization.
+* use the marquee tools to select nodes and relationships in the visualization.
+* click on the legend panel to select or deselect all nodes in a category.
\ No newline at end of file
diff --git a/modules/ROOT/pages/explore/explore-visual-tour/card-list.adoc b/modules/ROOT/pages/explore/explore-visual-tour/card-list.adoc
new file mode 100644
index 000000000..f4d0dc294
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-visual-tour/card-list.adoc
@@ -0,0 +1,29 @@
+:description: This section describes the card list in Explore.
+
+[[card-list]]
+= Card list
+
+The card list, when expanded, shows details about the currently shown nodes on the canvas.
+You can choose between viewing all or only the selected nodes (if any).
+Each node appears as a little card which shows a few of the available properties on the node.
+The search bar in the card list lets you filter the list further by typing a search term to show only the matching cards.
+The list can be even further refined by selecting _Nodes_ or _Relationships_ to return only those.
+
+[.shadow]
+image::card-list.png[width=700]
+
+Cards in the list can be used to interact with nodes on the canvas.
+Select one or more cards in the list to select them on the canvas, or vice versa.
+Interact with the selected cards using shortcut actions like Expand or Dismiss that appear on the bottom of the list, or use the right-click context menu.
+
+Double-click on a card to see the Inspector, which shows its properties in detail.
+Note that hovering over a property shows you what type it is, for example string, integer, or float.
+Properties can be edited if you have write access to the graph; see xref:explore/explore-features/edit-graph-data.adoc[Edit graph data] for more information.
+
+[.shadow]
+image::node-inspector.png[width=300]
+
+The Inspector also shows a node’s relationships and neighbors as cards, which, in turn, can be used to navigate to, interact with, or see details about these relationships and neighbors.
+
+[.shadow]
+image::relationships-of-a-node.png[width=300]
diff --git a/modules/ROOT/pages/explore/explore-visual-tour/explore-overview.adoc b/modules/ROOT/pages/explore/explore-visual-tour/explore-overview.adoc
new file mode 100644
index 000000000..d2e4b72a8
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-visual-tour/explore-overview.adoc
@@ -0,0 +1,33 @@
+[[explore-overview]]
+= Explore overview
+:description: This section describes how to use the Explore tool.
+
+[.shadow]
+image::explore-ui.png[width=800]
+
+== Perspective drawer
+
+The xref:explore/explore-visual-tour/perspective-drawer.adoc[*Perspective drawer*] is where you can define the business context depicted in the scene.
+
+== Scene
+
+Explore's main workspace is a graph scene, where you'll see the graph visualization.
+The scene contains just the parts of the graph which you've found through search or exploration.
+
+Click directly on nodes to move them manually into place.
+Right-click on nodes, relationships, or the background to bring up context menus to perform actions.
+See xref:explore/explore-visual-tour/scene-interactions.adoc[Scene interactions] for more information.
+
+To export your current Scene, click the `Export visualization` icon in the upper right corner of your screen.
+You can take a screenshot and save it as a _.png_ file, export the contents as a CSV file, or share the Scene.
+
+
+
+== Overlays
+
+Overlays provide supplemental views for working with the graph scene.
+
+* *Legend panel* - shows all the business entities (categories and relationship types) available in the current Perspective.
+This panel also lets you define the style for categories and relationship types using default or rule-based styles.
+* *Search bar* - accepts a near-natural language search query input and offers suggestions on graph patterns that match the input.
+* *Card list* - shows details about the nodes and relationships in the scene.
\ No newline at end of file
diff --git a/modules/ROOT/pages/explore/explore-visual-tour/legend-panel.adoc b/modules/ROOT/pages/explore/explore-visual-tour/legend-panel.adoc
new file mode 100644
index 000000000..f2abf22ea
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-visual-tour/legend-panel.adoc
@@ -0,0 +1,94 @@
+:description: This section describes the Legend panel in Explore.
+
+[[legend-panel]]
+= Legend panel
+
+[.shadow]
+image::legend-panel-intro.png[width=400]
+
+The legend panel shows a list of all categories and relationship types available in the current Perspective, along with the style used to render their nodes and relationships respectively.
+When the list contains many elements, you can use a filter to limit the legend to show only elements present in the scene, or find those not present in the scene, or search for an element of interest.
+Click on a category or relationship type in the legend to select all nodes or relationships of that type.
+A count shows the number of items of a type that are currently visible somewhere in the scene.
+Styles applied to nodes and relationships can also be changed from this legend panel.
+You have the flexibility to define the style for an entire category or relationship type, or use data-driven rules to apply styles to specific nodes or relationships.
+By using the arrow-button you can quickly collapse or expand the legend panel.
+
+== Default styling
+
+Using the default style, you can change the color and size of nodes or relationships that belong to a category or relationship type.
+Additionally, you can change the property selected by default to caption the selected category of nodes or type of relationships, or add more caption properties.
+You can also customize the caption's font size, make it bold, italic, or underlined, and change its placement on the node.
+The same options are available for relationship captions, except for bold, italic, and underline.
+
+[.shadow]
+image::captions.png[width=400]
+
+For node categories, you can assign an icon to further differentiate the category.
+
+In cases where a node has multiple labels mapped to different categories, the styling is determined by the category defined first in the Perspective.
+See xref:explore/explore-perspectives/perspective-creation.adoc#perspective-categories[Categories] for more information.
+
+
+== Rule-based styling
+
+Explore allows you to set up rule-based styling based on the properties present in your graph.
+Rule availability and application vary by the type of graph element and its available properties.
+Rule-based styling is supported for string, numeric, and boolean properties.
+Temporal properties are also supported: `Date`, `Time`, `LocalTime`, `DateTime`, and `LocalDateTime`.
+See xref:explore/explore-features/search-phrases-advanced.adoc#parameter-data-types[Parameter data types] for more information on temporal properties.
+
+There are three different modes for rule-based styling: _single_, _range_, and _unique values_.
+
+[discrete]
+=== Single
+
+[.shadow]
+image::rule-based-styling-single.png[width=800]
+
+This mode allows you to set up a rule that applies a single color, size, and/or caption based on a condition.
+For properties with numeric values, a histogram provides an overview of the values present in the current Scene.
+The slider lets you select a value and apply rule-based styling based on this.
+
+For example, as shown above, a rule defined on a `discontinued` property of a `Product` category only applies to `Product` nodes that have a `discontinued` value set to `true`.
+In this case, all affected nodes are presented in blue and have their `discontinued` value as their caption.
+
+If the property is a temporal type using timezones (`Time` and `DateTime`), you can base your styling on a selected timezone and translate all time values to that zone by checking the box _Translate timezones to_ and selecting a timezone.
+(Note that _Z_ indicates _Zulu timezone_, i.e. GMT, time offset +00:00.)
+If you leave the box unchecked, timezones are ignored.
+
+[.shadow]
+image::rule-based-time.png[width=300]
+
+[NOTE]
+====
+Histograms are only available for the single mode of rule-based styling and for properties with numerical values of either `integer`, `float`, or temporal types.
+If the selected property does not have a numerical value, the histogram is not available.
+====
+
+[discrete]
+=== Range
+
+[.shadow]
+image::rule-based-styling-range.png[width=800]
+
+For numeric properties, you can set up a rule that applies a range of colors or sizes to a range of values.
+In the image above, a _Range_-rule has been used to style nodes with the `unitPrice` integer property with a spectrum of colors from green to red, as well as size nodes from small to big.
+
+For temporal properties using timezones (`Time` and `DateTime`), you have the same options to normalize to one timezone or to ignore timezones altogether, as described above for single-value rules.
+
+[discrete]
+=== Unique values
+
+[.shadow]
+image::rule-based-styling-unique-values.png[width=800]
+
+Activate this when you want to assign a unique color to each property value of a given property key.
+
+
+[TIP]
+====
+Rules override the default style setting; if no rule is satisfied, the default style is applied.
+If multiple rules affecting the same attribute (e.g. node color) are specified, the rule that appears first in the list is applied to that attribute.
+Subsequent rules may still be applied if they affect other attributes (e.g. node size).
+====
\ No newline at end of file
diff --git a/modules/ROOT/pages/explore/explore-visual-tour/perspective-drawer.adoc b/modules/ROOT/pages/explore/explore-visual-tour/perspective-drawer.adoc
new file mode 100644
index 000000000..5654d794f
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-visual-tour/perspective-drawer.adoc
@@ -0,0 +1,12 @@
+:description: This section describes the Perspective drawer in Explore.
+
+[[perspective-drawer]]
+= Perspective drawer
+
+[.shadow]
+image::perspective-drawer.png[width=400]
+
+The _Perspective_ is the business view, or the context, of the graph to which Explore connects.
+The Perspective drawer is used to select or define this business view or context before graph exploration can begin.
+The business context set in the Perspective controls everything available in the Explore tool, e.g. which nodes and relationships are accessible, how they are categorized and styled, and what details about them can be seen or changed.
+Perspectives are discussed in detail in xref:explore/explore-perspectives/perspectives.adoc[Perspectives].
diff --git a/modules/ROOT/pages/explore/explore-visual-tour/scene-interactions.adoc b/modules/ROOT/pages/explore/explore-visual-tour/scene-interactions.adoc
new file mode 100644
index 000000000..2ce8ec9aa
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-visual-tour/scene-interactions.adoc
@@ -0,0 +1,189 @@
+:description: This section describes Scene interactions in Explore.
+
+[[scene-interactions]]
+= Scene interactions
+
+Several interactions are available from the canvas to help you explore your graph visualization.
+Some of the commonly expected ones are:
+
+* Zoom in and out using your mouse or touchpad scroll functions, or use the buttons at the bottom right of the canvas.
+The size of the text on nodes is dynamic in relation to the size of the node.
+* Hover over a node or a relationship to see its label and selected properties.
+For nodes, but _not_ relationships, you can control what properties to display.
+This is done in two steps: first, you select which properties should be available for display from the Perspective designer.
+Click on a label from the list of Categories and check/uncheck Exclude from the available properties.
+Next, in the Legend panel, select the colored circle to open the styling menu and then find the *Text* option under *Default*.
+Then you select which property or properties to show on hover.
++
+[.shadow]
+image::hover.png[width=500]
++
+* Left-click on a single node or relationship to select or deselect it. Multiple objects can be selected by holding the appropriate keyboard modifier key (Cmd or Ctrl key) before selecting.
+* Double-click on a node or relationship to open the Inspector to inspect the element’s details.
+* Left-click in an empty spot on the canvas and drag immediately to pan the visualization.
+* Use the marquee tools to select multiple nodes and relationships.
++
+[.shadow]
+image::marquee-tools.png[width=500]
++
+* Right-click anywhere on the canvas to bring up context-sensitive menus for the object(s) clicked on.
+You can right-click on a single node or relationship, on a group of highlighted nodes and/or relationships, or on an empty spot on the canvas.
++
+[.shadow]
+image::context-double.png[width=500]
++
+The following sections describe some of the notable graph interactions available in these context menus.
+
+[[select-related]]
+== Select related nodes
+
+If you want to work with a node and its closest connected neighbors, you can select it and choose _Select related nodes_ from the context menu.
+Once selected, you can then dismiss the other (unselected) nodes and only have your nodes of interest in the Scene.
+This action is also possible with multiple nodes selected.
+
+[[dismiss-single]]
+== Dismiss single nodes
+
+As mentioned above, it is often convenient to display only connected nodes.
+Without selecting any nodes or relationships, the context menu lets you _Dismiss single nodes_ to remove all nodes that are not related to any other visible nodes in the Scene.
+
+[[reveal-relationships]]
+== Reveal relationships
+
+With a single node or a group of nodes selected, you can reveal direct relationships between them that are not already visible in your Scene.
+This option is available if relationships exist between the selected nodes, provided that they are not already displayed.
+Accordingly, if only one node is selected, `Reveal relationships` is available only if the node in question has a relationship to itself.
+
+[.shadow]
+image::reveal-relationships.png[width=500]
+
+[[expand-nodes]]
+== Expand nodes
+
+With a single node or a group of nodes selected, you can expand the nodes to visualize their immediate neighbors.
+Then, select from the result and expand further to navigate local areas of the graph.
+Expansion can be done from the right-click context menu of a node or from the Inspector when viewing a node's relationships or neighbors.
+// When expanding neighbors of a node, the number of nodes returned is limited to the number specified in the Node query limit in the xref:explore/explore-visual-tour/settings-drawer.adoc[Settings drawer].
+
+[.shadow]
+image::expand-nodes.png[width=500]
+
+The right-click context menu provides additional options to expand selectively along a specific relationship type and direction, or to open the Advanced Expansion dialog and choose to expand along several specific paths, or to specific neighboring node types, or a combination.
+You can also choose to limit the number of nodes that should be returned in the result.
+If you set a limit in the context menu, this overrides any Node query limit set in the Settings drawer.
+
+[.shadow]
+image::advanced-expansion.png[width=500]
+
+== Shortest path
+
+A powerful feature of Neo4j graphs is the ability to see how two entities may be connected without knowing the exact path of relationships between them.
+To do so in Explore, you can run a shortest path command between two nodes.
+Select the two nodes of interest, right-click on one of them to bring up the context menu, and select the Shortest Path option.
+
+[.shadow]
+image::shortest-path.png[width=500]
+
+[NOTE]
+Explore searches for shortest paths within 20 hops and shows the first shortest path found by the database.
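+
+For reference, the result is roughly what the following Cypher returns (a sketch only; Explore's internal query may differ, and the node identifiers here are hypothetical Northwind-style examples):
+
+[source, cypher]
+----
+// Find one shortest path (up to 20 hops) between two specific nodes
+MATCH (a:Customer {customerID: 'ALFKI'}), (b:Product {productName: 'Chai'})
+MATCH p = shortestPath((a)-[*..20]-(b))
+RETURN p
+----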
+
+== Layouts
+
+When you want to know more about _how_ various nodes are related to each other, Explore allows you to change the layout of your Scene.
+By default, the nodes and relationships in a Scene are presented in a _force-based_ layout.
+
+The _hierarchical_ layout option, available from the layout menu at the bottom right-hand corner of the canvas, presents the nodes in the Scene in a hierarchical order instead.
+The nodes are then arranged by the directionality of their relationships, determined by the way relationship directions are set up in the database.
+When the hierarchical layout is activated, you can change the orientation with the edit-button directly to the right of the layout menu.
+
+[.shadow]
+image::layouts-hierarchy.png[width=800]
+
+The third option, the _coordinate layout_, arranges and fixes the nodes on the canvas by their `integer`, `float`, or `point` type properties (provided that the nodes have them), and is used, for example, for geographical entities.
+You can select which node property to use from the dropdown menu.
+
+If no property is selected for the x-axis, Explore automatically looks for any `point` property and if no such property exists, it looks for any property named _latitude_, then _lat_, and then _x_.
+For the y-axis, the order is: `point`, any property named _longitude_, _long_, and lastly _y_.
+
+If some nodes already in the Scene do not have applicable properties when switching to the coordinate layout, they are placed on one side.
+
+Only properties with _numerical_ values are available, i.e. _not_ `string` properties.
+For `point` properties, both two-dimensional Cartesian and geographic (longitude and latitude) points are supported.
+When geographic points are used, the Mercator projection is applied as the map projection.
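+
+If your nodes do not yet have a `point` property, you can often derive one from existing numeric properties; a minimal sketch, assuming hypothetical `City` nodes with `latitude` and `longitude` properties:
+
+[source, cypher]
+----
+// Combine numeric latitude/longitude properties into a geographic point property
+MATCH (c:City)
+WHERE c.latitude IS NOT NULL AND c.longitude IS NOT NULL
+SET c.location = point({latitude: c.latitude, longitude: c.longitude})
+----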
+
+You can scale both axes to find the right level of granularity for your dataset.
+
+[.shadow]
+image::coordinate-layout.png[width=800]
+
+If you want to go back to the force-directed layout, just select the force-based layout from the layout menu.
+
+When using the force-based layout, Cytoscape is enabled by default.
+This means that smaller graphs (fewer than 100 nodes and 300 relationships) are laid out using a Cytoscape layout, which is faster and makes the elements in the Scene more readable.
+It can be disabled via the edit-button.
+
+
+
+== Map
+
+The map is located in the bottom right corner of the canvas and gives you an overview of the entire scene.
+It helps you navigate, especially when your graph is large and/or when your nodes contain a lot of information and you often need to zoom in and out.
+
+The map shows all the nodes present in the scene, as well as the currently visible selection and where this selection is located in relation to the whole scene.
+It allows you to pan the Scene by dragging the box that contains the current selection, and you can zoom in and out using the `+` and `-` buttons.
+
+_Presentation mode_ hides the search bar, legend panel and other elements to take the current Scene into full screen.
+If you need the legend panel while in presentation mode, use the arrow-button to expand it.
+
+Your current zoom percentage is shown, and clicking that number resets the zoom to 100%.
+In addition, you can choose to fit the entire graph to the screen or hide the map.
+
+[.shadow]
+image::map.png[width=300]
+
+== Filtering
+
+When you have a Scene full of nodes and relationships, it can be difficult to identify exactly the sub-graph you need.
+Applying a _filter_ can help you fine-tune the results from your Search phrase and help you find what you are looking for in your data.
+When a filter is applied, all filtered elements are greyed out in the Scene; they are still visible but you cannot interact with them.
+You can also completely remove the filtered elements from the Scene by using the `Dismiss Filtered elements` action.
+
+[.shadow]
+image::filtering-dismiss.png[width=800]
+
+Filters can be accessed from the Filter drawer and are created based on the elements present in your scene, their categories, types, and properties.
+The filter can be as coarse or as fine as you like.
+
+To start, you select the node category or relationship type to base the filter on.
+At that point, you have the option to filter out all other categories present in the Scene.
+For more fine-grained filtering, you can further specify properties to filter on.
+Filtering is supported for `integer`, `float`, `boolean`, `string`, `Date`, `Time`, `LocalTime`, `DateTime`, and `LocalDateTime` properties.
+If the chosen property is numeric, a histogram gives you an overview of the values present in the current Scene.
+The sliders allow you to set values for your filter.
+When you are satisfied, you use the toggle to apply the filter.
+
+[.shadow]
+image::filtering-histogram.png[width=800]
+
+You can create as many filters as you like; they remain in the Filter drawer until you delete them.
+When you have multiple filters, they are collapsed in the drawer and you click on them to expand them and see their details.
+
+=== Filtering on temporal properties
+
+If your data contains temporal values, either on nodes or relationships, you can filter on these properties.
+Explore's support of temporal value types is aligned with the types supported in Neo4j.
+For more information, see the link:https://neo4j.com/docs/cypher-manual/current/values-and-types/temporal/[Cypher Manual -> Temporal (Date/Time) values].
+
+Timezones are supported for both `Time` and `DateTime` values.
+You can configure your filter to use local time, ignoring any timezones, or to normalize to one time zone.
+If you check the box _Translate timezones to_, you can select which timezone to normalize to, based on your region.
+(Note that _Z_ indicates _Zulu timezone_, i.e. GMT, time offset +00:00.)
+
+[.shadow]
+image::timezones.png[width=300]
+
+== Editing in Explore
+
+If you have the required write permissions to your graph database, you can edit your graph data in Explore.
+You can edit labels and properties as well as create new nodes and relationships directly from your scene.
+For more information, see xref:explore/explore-features/edit-graph-data.adoc[Edit graph data].
diff --git a/modules/ROOT/pages/explore/explore-visual-tour/search-bar.adoc b/modules/ROOT/pages/explore/explore-visual-tour/search-bar.adoc
new file mode 100644
index 000000000..dd665a278
--- /dev/null
+++ b/modules/ROOT/pages/explore/explore-visual-tour/search-bar.adoc
@@ -0,0 +1,114 @@
+:description: This section describes the search bar in Explore.
+
+[[search-bar]]
+= Search bar
+
+Explore is a search-first environment for you to explore your graph.
+To provide this experience, the search bar supports several types of search input.
+When you enter a term or phrase in the search bar, Explore provides a list of suggestions for queries you can run, based on matches in the following input types:
+
+* Search phrase
+* Graph pattern
+* Full-text search
+* Actions
+
+Additionally, you can use the co-pilot to help you write search patterns.
+See the Explore co-pilot section further down for more information.
+
+[.shadow]
+image::search-bar-5.png[width=500]
+[.shadow]
+image::search-bar-6.png[width=500]
+[.shadow]
+image::search-bar-7.png[width=500]
+
+[TIP]
+====
+You can click on a suggestion in the list to select it, or scroll to the suggestion and press ENTER to run it.
+====
+
+
+== Sample Search phrase
+
+When Explore autogenerates a Perspective (see xref:explore/explore-perspectives/perspective-creation.adoc[Creating a Perspective] for more information) or when you create your own (non-empty) Perspective, Explore provides a sample Search phrase to help you see the data in your graph.
+The Search phrase is called _Show me a graph_ and is available in the search bar.
+It returns a sample of your data.
+
+[.shadow]
+image::show-me-a-graph.png[width=500]
+
+== Graph pattern
+
+Graph patterns are a relaxed, near-natural language grammar based on a vocabulary drawn from node labels, relationship types, property keys, and indexed property values, enriched by categories or other configuration as defined in the applied Perspective (see xref:explore/explore-perspectives/perspectives.adoc[Perspectives] for more detail).
+Terms that Explore detects are used to create potential pattern matches, which are added to the suggestions list, from which you can pick the one you wish to query.
+See xref:explore/explore-features/graph-pattern-search.adoc[Graph pattern search] for tips on graph pattern searching.
+
+[[search-phrase]]
+== Search phrase
+
+A Search phrase is essentially an alias for a pre-defined graph query, which is saved within a Perspective.
+Search phrases allow for user-friendly access to queries that need to be run frequently, or can't be conveniently expressed as a search pattern.
+Search phrases also allow for highly customized domain-specific questions to be asked, and can:
+
+* be paired with a parameterized Cypher query.
+* call algorithms, or anything else that can be called using procedures.
+* modify the graph (requires write access).
+
+See the xref:explore/explore-features/search-phrases-advanced.adoc[Search phrases for advanced queries] tutorial topic for tips on using Search phrases.
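+
+As an illustration, a Search phrase such as _Products supplied by $supplier_ could be backed by a parameterized Cypher query along these lines (a sketch; the label, relationship type, and parameter name are hypothetical Northwind-style examples):
+
+[source, cypher]
+----
+// $supplier is filled in by the user when running the Search phrase
+MATCH (s:Supplier)-[:SUPPLIES]->(p:Product)
+WHERE s.companyName = $supplier
+RETURN s, p
+----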
+
+
+== Full-text search
+
+When Explore can’t find an appropriate suggestion for the entered search term, you can run a full-text search against the Neo4j database.
+Explore uses the native full-text indexes in the database for this feature.
+You will need to set up a full-text index to enable full-text search in Explore.
+Without any full-text index configured, Explore will fall back to searching in all available indexed string properties.
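+
+For example, a full-text index covering product names and descriptions could be created like this (a sketch; the index name, label, and properties are hypothetical):
+
+[source, cypher]
+----
+// Full-text index used by Explore's full-text search
+CREATE FULLTEXT INDEX productSearch IF NOT EXISTS
+FOR (n:Product) ON EACH [n.productName, n.description]
+----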
+
+See the xref:explore/explore-features/full-text-search.adoc[Full-text search] tutorial topic for tips on using the full-text search option.
+
+
+== Actions
+
+Actions are phrases that trigger user-interface commands when typed in the search bar, e.g. `Clear Scene` empties the canvas of the currently shown nodes and relationships.
+Some of the available Actions are:
+
+* Invert selection - selects every unselected node and deselects any selected nodes.
+* Fit to selection - zooms in on the selection and centers it on the canvas.
+* Expand selection - shows everything directly connected to the selected nodes.
+* Clear Scene - empties the canvas.
+* Dismiss - removes everything selected.
+* Dismiss others - removes everything not selected.
+* Refresh Data - refreshes the data on the canvas.
+* Redo - repeats the latest action.
+* Undo - undoes the latest action.
+
+See xref:explore/explore-default-actions.adoc[Default actions and shortcuts] for the complete list and associated keyboard shortcuts.
+
+== Explore co-pilot
+
+The co-pilot is a feature to help you write search patterns.
+The search bar already offers near-natural search, but the co-pilot is even closer to natural language.
+It makes it easier to mimic Cypher queries without having to know the Cypher syntax.
+The co-pilot is available in the search bar once you have enabled it in the Org settings.
+See xref:visual-tour/index.adoc#org-settings[Organization settings] for more information.
+You enter your query in a separate bar and the co-pilot suggests a pattern in the search bar, based on the input.
+
+[.shadow]
+image::explore-copilot.png[width=800]
+
+The co-pilot is an experimental feature and does not provide guaranteed accuracy.
+To get the best results, it is important to be familiar with the data model, especially the labels and relationships in the graph.
+The more detailed you are when using the co-pilot, the more accurate the results will be.
+
+For example, if you are looking for the shipper who ships the product _Ikura_, you could ask "Who ships Ikura".
+But that doesn't yield any results so you need to be more specific.
+You know that _Ikura_ is a **product** and that the **shipper** is the one shipping it, so a better suggestion is "Which Shipper ships the Product Ikura".
+The main difference between the two questions for the co-pilot is that the latter includes category names, which are the labels in the graph.
+If you use the data model as a basis for your questions, you will get better results from the co-pilot.
+
diff --git a/modules/ROOT/pages/explore/introduction.adoc b/modules/ROOT/pages/explore/introduction.adoc
new file mode 100644
index 000000000..b54ddbb57
--- /dev/null
+++ b/modules/ROOT/pages/explore/introduction.adoc
@@ -0,0 +1,27 @@
+:description: This section gives a short introduction to the Explore tool.
+[[explore-introduction]]
+= Explore
+
+The Explore tool, powered by Neo4j Bloom, lets you visually interact with graph data.
+
+A graph puts information into context.
+People, places, and things.
+Products, services, and accounts.
+Transactions, identities, and events.
+Explore shows the patterns you intuitively know are there in your data, and reveals new patterns you may not have expected.
+
+[[explore-features]]
+The core Explore features are:
+
+* *Perspective* - the lens through which you view graph data; it can be customized for different business purposes.
+See xref:explore/explore-perspectives/perspectives.adoc[] for more information.
+* *Visualization* - high performance, GPU-powered physics and rendering.
+* *Exploration* - directly interacts with the data to explore connections and details.
+* *Inspection* - see all the record details and browse to connected records.
+* *Editing* - create records, connect data, update information.
+* *Search* - find information, using advanced near-natural language Search phrases.
+
+Explore also exists as a standalone tool, _Neo4j Bloom_.
+Most features are identical, but some differences exist.
+See the full link:{neo4j-docs-base-uri}/bloom-user-guide/current[Neo4j Bloom] documentation for more information.
\ No newline at end of file
diff --git a/modules/ROOT/pages/getting-started/connect-instance.adoc b/modules/ROOT/pages/getting-started/connect-instance.adoc
new file mode 100644
index 000000000..5700b7c2b
--- /dev/null
+++ b/modules/ROOT/pages/getting-started/connect-instance.adoc
@@ -0,0 +1,63 @@
+[[connect-to-instance]]
+= Connect to an instance
+:description: This page describes how to connect to an instance in the new Neo4j Aura console.
+:page-aliases: auradb/getting-started/connect-database.adoc, aurads/connecting/index.adoc
+
+To interact with a database in an instance, you need to establish a connection.
+
+. Go to *Import*, *Explore* or *Query*.
+. Select *Status* and connect to an instance from there.
+. You may need to provide your *Username* and *Password* credentials.
+
+[.shadow]
+.Connection banner
+image::connectionbanner1.png[]
+
+[.shadow]
+.Connection modal
+image::connectionmodalnonremote.png[width=300]
+
+[cols="20%,80%"]
+|===
+| Field | Description
+
+|Connection method
+|The protocol used for the communication between the Neo4j database server and the client application or tool.
+The default is `neo4j+s://`.
+// For more information about connection schemes, see link:https://neo4j.com/docs/operations-manual/current/configuration/connectors/[Operations Manual -> Configure network connectors] and link:https://neo4j.com/docs/bolt/current/bolt/[Bolt Protocol].
+
+// |Connection URL
+// |You can get this from your instance details
+
+|Single sign-on
+|If single sign-on is configured for your instance, you can use it to connect.
+
+|Database user
+|The database username, `neo4j` by default.
+
+|Password
+|The password generated when you initially created the instance.
+|===
+
+== Connection method
+
+To connect to an instance, select one of the following connection methods: *neo4j+s://* or *https://*
+
+The connection method protocol is used for the communication between the Neo4j database server and the client application or tool.
+For more information about connection schemes, see link:https://neo4j.com/docs/operations-manual/current/configuration/connectors/[Operations Manual -> Configure network connectors] and link:https://neo4j.com/docs/bolt/current/bolt/[Bolt Protocol].
+
+=== *neo4j+s://*
+
+Establishes a TLS-encrypted connection to Neo4j using the Bolt protocol over WebSockets, with full certificate validation.
+Use for optimal performance.
+
+=== *https://*
+
+TLS encrypted connection to Neo4j over HTTPS (Query API), with full certificate validation.
+Use in environments where port restrictions may prevent access to the Bolt port.
+
+== Errors
+
+In an enterprise environment with restrictive networking, connection errors sometimes occur because non-standard ports (such as the Bolt port) are blocked by firewalls when using *neo4j+s://*.
+
+Switching to *https://* can address connection issues and ensure seamless connectivity without extra configuration.
diff --git a/modules/ROOT/pages/getting-started/create-account.adoc b/modules/ROOT/pages/getting-started/create-account.adoc
new file mode 100644
index 000000000..b84e93a43
--- /dev/null
+++ b/modules/ROOT/pages/getting-started/create-account.adoc
@@ -0,0 +1,11 @@
+[[aura-create-account]]
+= Create an account
+:description: This page describes how to create a Neo4j Aura account.
+:page-aliases: platform/create-account.adoc
+
+To access Neo4j Aura, you need to have an Aura account.
+Navigate to link:https://console.neo4j.io[Neo4j Aura console] and follow the instructions for registration.
+Once you have accepted the terms and conditions and verified your email address, you can start using the console.
+
+The console exists in two versions, the new console and the classic experience, as mentioned in xref:index.adoc[About Neo4j Aura console].
+You can toggle between the two versions via the account dropdown in the top right corner.
\ No newline at end of file
diff --git a/modules/ROOT/pages/getting-started/create-instance.adoc b/modules/ROOT/pages/getting-started/create-instance.adoc
new file mode 100644
index 000000000..d95a9be7f
--- /dev/null
+++ b/modules/ROOT/pages/getting-started/create-instance.adoc
@@ -0,0 +1,30 @@
+[[create-instance]]
+= Create an instance
+:description: This page describes how to create an instance in the new Neo4j Aura console.
+:page-aliases: auradb/getting-started/create-database.adoc, aurads/create-instance.adoc
+
+There are two types of Aura product instances:
+
+* **AuraDB** (the default Aura instance type)
+* **AuraDS** (the xref:graph-analytics/index.adoc#aura-ds[fully managed version] of Neo4j Graph Data Science)
+
+Select the type of instance using the switch on the top right, then choose the tier that best suits your needs.
+
+For AuraDB, you can upgrade your tier, and resize your instance's memory and storage later if your needs change.
+For pre-paid AuraDB instances on GCP, adjustable storage independent of memory is available; see xref:managing-instances/instance-actions.adoc#_adjust_storage[Adjust storage] for more information.
+
+You can optionally enable additional features:
+
+* xref:graph-analytics/index.adoc#aura-gds-plugin[Graph Analytics plugin] to add graph analytics capabilities (**AuraDB Professional only**)
+* xref:managing-instances/instance-details.adoc#aura-vector-optimization[Vector optimization] to enhance performance for vector-based operations
+
+After you select **Next**, your instance is created.
+A password is generated for the instance; make sure to either copy or download it, as it will **not** be recoverable afterwards.
+The password is required to access your instance later.
+
+[NOTE]
+====
+You can only create **one Free instance** per account.
+To create more instances, you need to upgrade your tier.
+See link:https://neo4j.com/pricing/[Neo4j Pricing] for more information on the different tiers.
+====
\ No newline at end of file
diff --git a/modules/ROOT/pages/getting-started/migrate-metadata.adoc b/modules/ROOT/pages/getting-started/migrate-metadata.adoc
new file mode 100644
index 000000000..3507315f6
--- /dev/null
+++ b/modules/ROOT/pages/getting-started/migrate-metadata.adoc
@@ -0,0 +1,51 @@
+[[aura-migrate-metadata]]
+= Migrate metadata from Workspace
+:description: This describes how to migrate metadata from Neo4j Workspace to the new Neo4j Aura Console.
+
+If you have an Aura instance created in the classic Aura console (refer to xref:new-console.adoc[New Neo4j Aura console] for comparison), you can migrate the metadata from Workspace to the new console and continue working with your data in the new experience.
+The metadata includes the data model and saved Cypher queries.
+
+== Data model
+
+From the *Import* tab in Workspace, open the more menu (*...*) and download the model, with or without data.
+
+[.shadow]
+image::export-model.png[width=300]
+
+Then navigate to the new console and select *Import* -> *Graph models*.
+Once you select *New graph model*, you can access the more menu (*...*), similar to Workspace, and select *Open model* with or without data.
+
+[.shadow]
+image::import-model.png[width=600]
+
+Note that if you have downloaded your data with the model, you can also take a different route via *Import* -> *Data sources*, select *New data source*, and then import locally from files.
+This leads you to the same *Import* frame as the first route and you can use the more menu (*...*) to open the model *with* your data.
+
+== Saved Cypher
+
+Any saved Cypher snippets can be downloaded from the *Query* tab in Workspace.
+From the Saved Cypher drawer, use the *Export* button to download selected queries as a _.csv_ file.
+
+[.shadow]
+image::export-saved-cypher.png[width=300]
+
+In the new console, navigate to the *Query* tab and open the *Saved Cypher* drawer.
+Use the *Import* button and select the _.csv_ file you downloaded from Workspace.
+
+[.shadow]
+image::import-saved-cypher.png[width=400]
+
+== Perspectives
+
+Perspectives, except for the default Perspective (which is automatically re-created in the new console), can be exported from the Perspective drawer in Workspace.
+Use the *Export* option on the Perspective you want to save.
+It is exported as a _.json_ file.
+
+[.shadow]
+image::export-perspective.png[width=300]
+
+In the new console, navigate to the *Explore* tab and open the *Perspective* drawer.
+Use the *Import* option and select the _.json_ file you downloaded from Workspace.
+
+[.shadow]
+image::import-perspective.png[width=600]
diff --git a/modules/ROOT/pages/graph-analytics/index.adoc b/modules/ROOT/pages/graph-analytics/index.adoc
new file mode 100644
index 000000000..504a12b8c
--- /dev/null
+++ b/modules/ROOT/pages/graph-analytics/index.adoc
@@ -0,0 +1,170 @@
+[[graph-analytics-aura]]
+= Graph analytics in Aura
+:description: This page describes how to run graph analytics in Aura.
+:page-aliases: aurads/index.adoc
+:gds-sessions-page: {neo4j-docs-base-uri}/graph-data-science/current/aura-graph-analytics/
+
+If your data is in Aura or you plan to move it to Aura, you have several options to run graph analytics:
+
+* Enable the <<aura-gds-plugin,Graph Analytics plugin>> on an existing AuraDB instance (Professional).
+* Use <<aura-gds-serverless,Aura Graph Analytics>> from an existing AuraDB Professional, Business Critical, or Virtual Dedicated Cloud instance.
+* Create an <<aura-ds,AuraDS instance>> (Professional, Enterprise).
+
+If your data is in a self-managed Neo4j DBMS or a non-Neo4j data source, you can still use <<aura-gds-serverless,Aura Graph Analytics>>.
+
+[[aura-gds-plugin]]
+== Graph Analytics plugin
+
+label:AuraDB-Professional[]
+
+The Graph Analytics plugin allows you to use the link:https://neo4j.com/docs/graph-data-science/current/introduction/[Graph Data Science library] in any AuraDB Professional instance with the following requirements:
+
+* Neo4j version 5 or later
+* 4 GB of memory or more
+* All supported cloud providers and regions
+
+You can enable or disable Graph Analytics when necessary, both during xref:getting-started/create-instance.adoc[instance creation] and on an xref:managing-instances/instance-details.adoc[existing instance].
+It can also be changed with the link:https://neo4j.com/docs/aura/platform/api/specification/#/instances/patch-instance-id[Aura API].
+
+The plugin shares compute and memory resources with the AuraDB server, so you do not incur any additional costs when you enable it.
+
+[[get-started-plugin]]
+=== Getting started
+
+With the Graph Analytics plugin enabled:
+
+* If you use the Neo4j Browser or run Cypher queries through a Neo4j driver, start with the link:{neo4j-docs-base-uri}/graph-data-science/current/getting-started/[Graph Data Science] tutorials.
+* If you use Python, start with the link:{neo4j-docs-base-uri}/graph-data-science-client/current/tutorials/tutorials/[Python client tutorials].
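+
+As a quick illustration, with the plugin enabled you can call Graph Data Science procedures directly from Cypher; a minimal sketch, assuming hypothetical `Person` nodes with a `name` property and `KNOWS` relationships:
+
+[source, cypher]
+----
+// Project a graph into the in-memory graph catalog
+CALL gds.graph.project('people', 'Person', 'KNOWS');
+
+// Run PageRank in stream mode and return the highest-scoring nodes
+CALL gds.pageRank.stream('people')
+YIELD nodeId, score
+RETURN gds.util.asNode(nodeId).name AS name, score
+ORDER BY score DESC
+LIMIT 10;
+----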
+
+[[aura-gds-serverless]]
+== Aura Graph Analytics
+
+Aura Graph Analytics allows you to use the Graph Data Science library regardless of where your source data is stored.
+It runs in Aura as a link:{gds-sessions-page}[dedicated service] optimized for analytics workloads, with no memory or compute resources shared with your data store.
+
+You can enable, disable, and configure Aura Graph Analytics on the organization level in the xref:visual-tour/index.adoc#graph-analytics-org-settings[organization settings].
+The details on any running sessions are available in the xref:visual-tour/index.adoc#graph-analytics-page[Graph Analytics] page.
+
+=== From an AuraDB instance
+
+label:AuraDB-Professional[] label:AuraDB-Business-Critical[] label:AuraDB-Virtual-Dedicated-Cloud[]
+
+[IMPORTANT]
+====
+Aura Graph Analytics is currently unavailable via any of the xref:cloud-providers.adoc[cloud provider marketplaces].
+====
+
+You can use Aura Graph Analytics from any AuraDB Professional, Business Critical, or Virtual Dedicated Cloud instance with the following requirements:
+
+* Neo4j version 5 or later
+* All supported cloud providers and regions
+
+[[get-started-serverless]]
+=== Getting started
+
+With Aura Graph Analytics enabled in your organization, you only need to create xref:api/authentication.adoc#_creating_credentials[Aura API credentials] before you can get started.
+
+With the Aura API credentials available:
+
+* If you use the Neo4j Browser or run Cypher queries through a Neo4j driver, start with the link:{neo4j-docs-base-uri}/graph-data-science/current/aura-graph-analytics/quickstart/[examples] in the Graph Data Science documentation.
+* If you use Python, start with one of the Python client tutorials depending on whether your data is in
+link:{neo4j-docs-base-uri}/graph-data-science-client/current/tutorials/graph-analytics-serverless/[AuraDB], in a link:{neo4j-docs-base-uri}/graph-data-science-client/current/tutorials/graph-analytics-serverless-self-managed/[self-managed Neo4j database], or in a link:{neo4j-docs-base-uri}/graph-data-science-client/current/tutorials/graph-analytics-serverless-standalone/[non-Neo4j data source].
+
+[[aura-ds]]
+== AuraDS
+
+label:AuraDS-Professional[] label:AuraDS-Enterprise[]
+
+AuraDS is the fully managed version of the Graph Data Science library, where the <<aura-gds-plugin,Graph Analytics plugin>> is deployed by default.
+
+In an AuraDS instance, Graph Analytics is always on.
+The plugin shares compute and memory resources with the AuraDS server.
+
+AuraDS instances have the following features:
+
+* Upgrades and patches are automatically applied.
+* Can be seamlessly scaled up or down.
+* Can be paused to reduce costs.
+
+=== Plans
+
+AuraDS offers the AuraDS Professional and AuraDS Enterprise subscription plans.
+The full list of features for each plan is available on the link:https://neo4j.com/pricing/#graph-data-science[Neo4j Pricing page].
+
+=== Updates and upgrades
+
+AuraDS updates and upgrades are handled by the platform, and as such do not require user intervention.
+Security patches and new versions of GDS and Neo4j are installed within short time windows during which the affected instances are unavailable.
+
+The operations are non-destructive, so graph projections, models, and data present on an instance are not affected.
+No operation is applied until all the running GDS algorithms have completed.
+
+[IMPORTANT]
+====
+Graphs and models created or updated _after_ the instance update/upgrade process has started are *not guaranteed* to be restored upon restart.
+====
+
+[[comparison]]
+== Comparison
+
+[opts="header", cols="1s,1,1,1,1"]
+|===
+|
+|Graph Data Science +
+on-premisesfootnote:[Assuming use of the link:{neo4j-docs-base-uri}/graph-data-science/current/installation/installation-enterprise-edition/[GDS Enterprise Edition].]
+|Graph Analytics plugin +
+(AuraDB Pro)
+|Aura Graph Analytics +
+(AuraDB Pro, BC, VDC)
+|AuraDS
+
+|Maximum memory
+|No upper limit +
+(contract-dependent)
+|Up to 128 GB
+|Up to 128 GB (Pro, BC) or 512 GB (VDC)
+|Up to 384 GB
+
+|Resources for analytics
+|Shared with DB +
+(user-configured)
+|Shared with DB +
+(optimized for DB)
+|Dedicated
+|Shared with DB +
+(optimized for analytics)
+
+|Number of concurrent GDS sessions
+|-
+|-
+|Up to 10 (Pro, BC) or 100 (VDC)
+|-
+
+|Data sources
+|Same server
+|Same server
+|AuraDB (same tier); self-managed Neo4j DBMS; custom
+|Same server; custom
+
+|Restart behavior
+4+|
+
+>|_Downtime_
+|Short
+|No
+|No
+|Short
+
+>|_Projected graphs_
+|Must be backed up and restored manually
+|Not retained
+|Unaffected
+|Restored automaticallyfootnote:createdafter[Graphs and models created or updated _after_ the instance update/upgrade process has started are *not guaranteed* to be restored upon restart.]
+
+>|_Trained models_
+|Must be backed up and restored manually
+|Not retained
+|Unaffected
+|Restored automaticallyfootnote:createdafter[]
+
+|===
diff --git a/modules/ROOT/pages/import/file-provision.adoc b/modules/ROOT/pages/import/file-provision.adoc
new file mode 100644
index 000000000..8e70f2c6f
--- /dev/null
+++ b/modules/ROOT/pages/import/file-provision.adoc
@@ -0,0 +1,87 @@
+[[aura-file-provision]]
+= Data provision
+:description: This section describes how to provide files for import.
+
+The *Data sources* tab is where you can add new data sources and see sources you have already added.
+
+You start by connecting a data source, which can be a relational database or a cloud data warehouse, or you can stream local flat files.
+
+In essence, you provide the data to be imported in a supported format, and the Import service imports it into your instance.
+
+Import supports relational databases and flat files, i.e. files that contain data in a tabular format where each row represents a record and each column represents a field in that record.
+The most common format for flat files is CSV (comma-separated values), but Import also supports TSV (tab-separated values).
+
+== Connecting to remote data sources
+
+When you use the *New data source* button, you are presented with the following options for remote sources:
+
+* PostgreSQL
+* MySQL
+* SQL Server
+* Oracle
+* BigQuery
+* Databricks
+* Snowflake
+* AWS S3
+* Azure Blobs & Data Lake Storage
+* Google Cloud Storage
+
+Regardless of which one you select, you are required to provide roughly the same information to allow the Import service to load the tables for you from your remote source.
+
+.Example data source
+[.shadow]
+image::data-source-fields.png[]
+
+First, you need to give the data source a name to identify it.
+
+Second, you need to complete various fields with information from your data source.
+These are different depending on which source you are using.
+
+The *Host* field is the same for all sources: it is your database server's hostname or IP address, which can normally be found in your account details with the vendor in question.
+
+The *Port* is pre-populated for you and defines which network port is used to connect to your database server.
+
+*Database* or *Service* (for Oracle) is the name of the database that contains the tables you want to import to your Aura instance.
+
+The *Schema* of your tabular data is needed for Import to know how your tables relate to each other.
+Note that this field is not included for MySQL.
+
+Additionally, for cloud data warehouses (Snowflake), you can optionally provide both a *Warehouse* name and a *Role*.
+If no information is provided in these fields, the default values are used.
+
+Third, you need to provide user credentials for your data source.
+These are the username and password used to access the remote data source, *not* your Aura credentials.
+
+Once you have entered all the required information, the Import service can connect to your remote source, fetch the tables, and import them to your Aura instance.
+
+When you have added a data source, you need to create a data model before you can import data.
+See xref:import/modeling.adoc[] for more information.
+Your added data sources are listed on the *Data sources* tab and you can interact with them via the *[...]* menu.
+
+.Interact with data source
+[.shadow]
+image::data-sources-interaction.png[]
+
+== Streaming local files
+
+When you stream your local CSV files, the process can be more iterative and manual.
+
+Import requires all CSV files to have a header row and at least one row of data.
+The header row specifies how the data in the file should be interpreted and contains information for each field.
+Keep in mind that the column names must be unique, i.e. it is not possible to have two columns with the same name within the same file.
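+
+For example, a minimal products file could look like this (hypothetical column names):
+
+----
+productID,productName,unitPrice
+1,Chai,18.00
+2,Chang,19.00
+----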
+
+The CSV files are provided in the _Data source_ panel of Import.
+You can browse for them or drag and drop them into the panel.
+Once a file is added to the panel, you can preview the header and the first row of data in the file by expanding the file.
+
+[.shadow]
+image::files.png[]
+
+When you provide CSV files to Import, only a reference to your local files is kept.
+This is used to send the necessary commands to stream the data to your target database when you run the import.
+Your local files themselves are *not* uploaded anywhere and therefore, if you reload the page before running the import, the files are no longer available to the page and you need to provide them again.
+This is due to security features of your browser.
+
+
+Once the tables or files are in place, you need to specify how they should be organized in relation to each other; in other words, you need to *model your data*.
+See xref:import/modeling.adoc[] for more information.
diff --git a/modules/ROOT/pages/import/import.adoc b/modules/ROOT/pages/import/import.adoc
new file mode 100644
index 000000000..5ba4ae424
--- /dev/null
+++ b/modules/ROOT/pages/import/import.adoc
@@ -0,0 +1,74 @@
+[[import]]
+:description: This section describes how to do the actual import of data with Neo4j Import.
+= Run the import
+
+Once the files are provided, the data model is complete, and all elements are mapped, the import can be run.
+However, you can _preview_ your data at any time and make sure that everything is mapped the way you expect before you run the actual import.
+
+[[preview]]
+== Preview
+
+The preview button is located next to the _Run import_ button and, unlike running the import, the preview does *not* require an active connection.
+
+image::dropdown.png[width=300]
+
+When you run a preview, only a sample of the provided data is scanned.
+This means that the preview can differ from the final import in terms of connectedness, for example.
+Even so, the preview can be very useful to get an overview of the data, especially on smaller datasets.
+
+Since the preview does not actually import any data, it can be run iteratively until you are satisfied.
+
+You can preview **all** or **selected** elements in your data.
+If your model is particularly complex, it can be beneficial to preview only parts of the data.
+To use this feature, select the parts of your model that you want to preview and choose _Preview selected_ from the dropdown.
+
+The preview shows a sample of either all or selected data that is mapped _correctly_ in the model.
+Unlike the actual import, the preview can be run regardless of the completeness of the mapping.
+If any element is missing the green checkmark in the model, it will _not_ be included in the preview, but the preview can still be run.
+
+[[run-import]]
+== Run the import
+
+When you are happy with the model and the mapping is complete, the import can begin.
+Before starting the import, make sure that Import is connected to the database.
+This is done in the dropdown on the connection banner located in the top center of the UI.
+
+// Add something about the DB switcher here, when that is available.
+
+image::connection-dropdown.png[]
+
+The _Run import_ button shows the progress of the import when you press it.
+The import is done in batches and can be stopped at any time.
+
+When the import stops, whether because it is complete or because it was cancelled, a summary is displayed.
+The summary contains information about the imported nodes and relationships, including time elapsed, the file size, the number of properties etc.
+The results summary also allows you to see the Cypher statement used to load a particular file.
+It is not advisable to copy and paste these statements, but seeing them can provide valuable insight into how constraints are created and how load statements are constructed.
+
+For nodes, there are two statements, a _key statement_ and a _load statement_.
+The key statement is concerned with creating a constraint to ensure the uniqueness of the nodes.
+The load statement creates nodes for every item in the mapped file and adds the assigned properties from the data model.
+
+For relationships, there is only a load statement.
+It finds the start (source) and end (target) nodes and creates a relationship between them and sets the assigned (if any) properties to the relationship.
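+
+The generated statements are roughly of the following shape (a simplified sketch only, not the exact statements Import runs; the labels, properties, and file names are hypothetical):
+
+[source, cypher]
+----
+// Key statement (sketch): uniqueness constraint on the node ID property
+CREATE CONSTRAINT product_id_unique IF NOT EXISTS
+FOR (p:Product) REQUIRE p.productID IS UNIQUE;
+
+// Node load statement (sketch): one node per row, with mapped properties
+LOAD CSV WITH HEADERS FROM 'file:///products.csv' AS row
+MERGE (p:Product {productID: row.productID})
+SET p.productName = row.productName;
+
+// Relationship load statement (sketch): match source and target, then connect them
+LOAD CSV WITH HEADERS FROM 'file:///order-details.csv' AS row
+MATCH (o:Order {orderID: row.orderID})
+MATCH (p:Product {productID: row.productID})
+MERGE (o)-[r:CONTAINS]->(p)
+SET r.quantity = toInteger(row.quantity);
+----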
+
+// == Generate Cypher script
+
+// There may be situations where you want use the import logic elsewhere or where you need more complex transformations than Import allows.
+// Once you have added your files and mapped the data, instead of running the import, you can generate the corresponding Cypher script representing the model and mapping.
+
+// The script can be used in the _Query_ tab of Workspace, provided that the files are accessible to the DBMS, or on the command line via Cypher shell, for example.
+
+// The generated code contains comments that help you understand how the load statement works and what different parts it consists of.
+// This also helps you understand where you need to make changes to adapt it to where you intend to run it.
+
+// You can download the script with or without the files.
+
+// The _Generate Cypher script_ is available from the more menu.
+
+// image::generate-cypher.png[]
+
+
+
+
+//== Errors
\ No newline at end of file
diff --git a/modules/ROOT/pages/import/indexes-and-constraints.adoc b/modules/ROOT/pages/import/indexes-and-constraints.adoc
new file mode 100644
index 000000000..fd32c17cf
--- /dev/null
+++ b/modules/ROOT/pages/import/indexes-and-constraints.adoc
@@ -0,0 +1,30 @@
+[[indexes-and-constraints]]
+:description: This section describes how to use indexes and constraints in Import.
+= Indexes and constraints
+
+Import supports adding indexes to improve the read performance of queries and creating constraints to ensure the accuracy of data.
+They are found in the details panel; the tab is visible when a single node is selected in the data model panel.
+
+[.shadow]
+image::constraints-tab.png[]
+
+Once a node is mapped to a file and a property is selected to serve as its ID, both a constraint and an index are created automatically.
+
+== Constraints
+
+A uniqueness constraint is created on the node property selected as the node ID.
+It ensures that no two nodes are created with the same ID, and a corresponding index is created automatically to support the constraint.
+It is not possible to modify the uniqueness constraint or to add any additional constraints.
+For more information on constraints, see link:https://neo4j.com/docs/cypher-manual/current/constraints/#unique-node-property[Cypher Manual -> Constraints].
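+
+As an illustration only, such a constraint corresponds to a Cypher statement of the following form, here assuming a hypothetical `Order` node whose `orderID` property is the node ID (the statement Import actually generates may differ):
+
+[source, cypher]
+----
+// Uniqueness constraint on the property selected as node ID
+CREATE CONSTRAINT order_orderID IF NOT EXISTS
+FOR (o:Order) REQUIRE o.orderID IS UNIQUE;
+----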
+
+== Indexes
+
+As mentioned previously, an index is created automatically on the assigned ID property for a node to support the uniqueness constraint.
+This index cannot be modified in any way from this tab, but if you change which property to use as ID, both the constraint and corresponding index change accordingly.
+
+You can add more indexes with the `+` button and then select which property to index from the dropdown menu.
+If you know that you will regularly query a specific property, it is good practice to add an index to that property.
+For example, in the link:https://neo4j.com/docs/getting-started/appendix/tutorials/guide-import-relational-and-etl/[Northwind] dataset, if you know you are going to be looking for orders in a specific date range, it is advisable to add an index to the `orderDate` property.
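+
+As a hedged illustration, such an additional index corresponds to a Cypher statement like the following (the index name and label are assumptions):
+
+[source, cypher]
+----
+// Range index (the default index type in recent Neo4j versions) on the orderDate property
+CREATE INDEX order_orderDate IF NOT EXISTS
+FOR (o:Order) ON (o.orderDate);
+----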
+
+Regardless of which property you add the index to, the index type is Neo4j's default index, which is `range` for the latest Neo4j version and `btree` for Neo4j 4.x.
+For more information on indexes, see link:https://neo4j.com/docs/cypher-manual/current/indexes/[Cypher Manual -> Indexes].
\ No newline at end of file
diff --git a/modules/ROOT/pages/import/introduction.adoc b/modules/ROOT/pages/import/introduction.adoc
new file mode 100644
index 000000000..c7bd8c8fe
--- /dev/null
+++ b/modules/ROOT/pages/import/introduction.adoc
@@ -0,0 +1,32 @@
+= Import
+:description: This is an introduction to the Import data service.
+:page-aliases: auradb/importing/importing-data.adoc, aurads/importing-data/index.adoc
+
+The Import service contains a tool for importing data into your instance and is ideal for getting started quickly with testing and prototyping.
+It allows you to import data, without writing any code, from the following sources:
+
+* PostgreSQL
+* MySQL
+* SQL Server
+* Oracle
+* BigQuery
+* Databricks
+* Snowflake
+* AWS S3
+* Azure Blobs & Data Lake Storage
+* Google Cloud Storage
+* CSV files
+
+This service is also available as a standalone tool for importing data into self-managed Neo4j instances.
+The standalone importer is available at the following links, depending on whether you need support for unsecured connections:
+
+** link:https://data-importer.neo4j.io/[] -- Secure only
+** link:https://data-importer.graphapp.io/[] -- Both secure and unsecured
+
+
+For more information, see the link:https://neo4j.com/docs/data-importer/current/[Data Importer documentation].
+
+[NOTE]
+====
+Only CSV files can be imported in the standalone tool.
+====
diff --git a/modules/ROOT/pages/import/mapping.adoc b/modules/ROOT/pages/import/mapping.adoc
new file mode 100644
index 000000000..f4087981e
--- /dev/null
+++ b/modules/ROOT/pages/import/mapping.adoc
@@ -0,0 +1,120 @@
+[[mapping]]
+:description: This section describes how to map files to a data model.
+= Mapping
+
+Mapping is the process of associating a table or file with an element in your data model.
+This is what allows Import to construct the Cypher statements needed to load your data.
+
+When you generate a data model, the mapping is largely done for you.
+However, as you add elements to your model, whether you create a model manually or if you are adding to a generated model, the new elements need to be mapped to their corresponding tables or files.
+
+Your data source may contain data that is not relevant to your data model, and when you build your model, you can select what data to use.
+Only data that is mapped correctly to the elements in your model is imported, so it is important to get the mapping right.
+
+[NOTE]
+====
+If you need to change the mapping, it is possible to run the import again.
+Affected elements already imported are updated and **not** duplicated.
+====
+
+== Nodes
+
+To map a node to a file/table, the node needs to have a label, which you can type directly on the node or in the mapping details panel.
+After naming the label, you can select the file/table to map to the node.
+If you are streaming local files, you can add the file at any time before running the import, but it is convenient to do it at this stage.
+
+[.shadow]
+image::node-mapping.png[width=400]
+
+Additionally, the node needs to have at least one property and an ID.
+
+The properties are key-value pairs that describe the node.
+
+The ID is used to uniquely identify nodes and when connecting nodes to each other in relationships.
+If the same ID is seen more than once, the node is created only for the first occurrence observed in the file.
+For any later occurrences, the node's properties are updated, so the most recently read values are kept.
+As mentioned, the node ID is crucial when creating relationships, and this is explained in further detail in the section on mapping <<mapping-relationships,relationships>>.
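+
+Conceptually, this behavior matches a Cypher `MERGE` on the ID property, sketched here for a hypothetical `Customer` node loaded from a `customers.csv` file (all names are assumptions, not the statement Import generates):
+
+[source, cypher]
+----
+// The first row with a given customerID creates the node;
+// later rows with the same ID only overwrite its properties with the most recently read values.
+LOAD CSV WITH HEADERS FROM 'file:///customers.csv' AS row
+MERGE (c:Customer {customerID: row.customerID})
+ON CREATE SET c.companyName = row.companyName
+ON MATCH SET c.companyName = row.companyName;
+----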
+
+If you added the file already, you can choose to map properties from that file.
+Import derives the properties from the columns in the file and guesses the data type.
+With this option, you can select which properties to use.
+Once selected, you can rename the properties, change the data type, if needed, and select which property should serve as the node ID.
+
+By default, Import uses the property with `id` in its name as the ID, but if none of the columns in the mapped file meet this condition, or if more than one does, you have to manually select which property to use as the node ID.
+Whether you let Import select the ID or you do it manually, every node _needs_ to have an ID in order to complete the mapping.
+
+The property used as node ID is marked with a key icon.
+
+[.shadow]
+image::node-id.png[width=400]
+
+[[mapping-relationships]]
+== Relationships
+
+Much like how a node needs to have a label, a relationship needs to have a type.
+This can be typed directly on the relationship or in the details panel.
+The file to map the relationship to is selected in the same way as for nodes.
+Depending on your data, this file could be:
+
+* The same file as used for the nodes at both ends of the relationship.
+In this case, Import automatically maps the file and appropriate columns.
+Import can easily identify the file columns to use in the _From_ and _To_ mapping, as they have already been mapped as ID properties to the nodes at each end.
+
+* A file that is used to define the node at only one end of the relationship, but also contains a column that contains the ID of the other node.
+In this case, you need to select the file and specify the _From_ and _To_ mapping manually.
+This is similar to a table in a relational database that contains a foreign key linking to another table, but here that key is used to link to another node rather than a table.
+
+* A completely separate file that is used solely to define the relationship.
+In this case, you need to select the relevant file and then map the columns in the file that correspond to the _From_ and _To_ node ID properties.
+This is similar to a _link table_ in relational database terms.
+
+
+This part is crucial to ensuring the relationships link nodes as intended.
+It is defined in the _Node ID mapping_ section of the details panel.
+
+[.shadow]
+image::relationship-mapping.png[width=400]
+
+== File filtering
+
+When mapping a file, both to nodes and relationships, you can use a toggle to filter the file.
+This is useful when using aggregate node lists and relationship lists as source files.
+Aggregate node lists contain all the nodes in one file, and the nodes can be grouped by having the same value in a specific column.
+Aggregate relationship lists contain corresponding information about relationships in one file, and the relationships can be grouped together in the same fashion.
+File filtering allows you to select a column and an exact value to match; only the rows that match are used as a source for that element in your data model.
+
+[.shadow]
+image::file-filtering.png[width=400]
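+
+Conceptually, the filter corresponds to adding an equality check on the chosen column, sketched here for a hypothetical aggregate node list with a `type` column (all names are assumptions):
+
+[source, cypher]
+----
+// Only rows whose type column exactly matches 'Product' feed the Product node
+LOAD CSV WITH HEADERS FROM 'file:///nodes.csv' AS row
+WITH row WHERE row.type = 'Product'
+MERGE (p:Product {id: row.id});
+----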
+
+[[exclude-list]]
+== Node exclude list
+
+Sometimes a source file may contain a column where multiple rows have the same string as the value, such as `[empty]` or `null`.
+If this column is used as the node ID and you run the import, this results in the creation of "super nodes".
+Every row in the mapped file that has such a value ends up being connected to the same node, the "super node".
+To avoid this, you can specify strings that should cause Import to exclude the rows they appear in.
+By default, Import excludes any rows where the value of the node ID column is empty.
+
+The node exclude list is available from the more menu (`...`) in the data model panel, under _Settings_.
+
+[.shadow]
+image::node-exclude.png[width=400]
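+
+Conceptually, the exclude list corresponds to skipping rows whose ID column matches one of the listed strings, sketched here with assumed file, label, and property names:
+
+[source, cypher]
+----
+// Skip rows with an empty or placeholder ID so they do not all merge into one "super node"
+LOAD CSV WITH HEADERS FROM 'file:///orders.csv' AS row
+WITH row WHERE row.customerID IS NOT NULL AND NOT row.customerID IN ['', '[empty]', 'null']
+MERGE (c:Customer {customerID: row.customerID});
+----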
+
+== Complete the mapping
+
+If the mapping is not complete, i.e. if any element in the model is missing the green checkmark, the import can't be run.
+If you try, Import displays an error message and highlights which elements in the model are missing information, as well as which fields in the details panel need to be filled out.
+
+For nodes, the following information is required:
+
+* Label - to identify the type of a node
+* File - the source file for the node from which the properties are derived
+* Properties - at least one property needs to be selected and if more than one, one needs to be selected as the node ID
+
+For relationships:
+
+* Type - a name that describes the relationship it represents
+* File - the source file that contains information on which nodes are connected by the relationship
+* Node ID mapping - which nodes in the model are connected by the relationship; their labels, IDs and ID columns.
+
+If the mapping is not complete, you can run a preview of the import, but it does not contain incompletely mapped elements.
+
+Once every element in the model has a green checkmark to indicate complete mapping, the import can be run.
\ No newline at end of file
diff --git a/modules/ROOT/pages/import/modeling.adoc b/modules/ROOT/pages/import/modeling.adoc
new file mode 100644
index 000000000..02b57837c
--- /dev/null
+++ b/modules/ROOT/pages/import/modeling.adoc
@@ -0,0 +1,77 @@
+:description: This section introduces data modeling.
+= Data modeling
+
+The data model is the blueprint for the database.
+It defines how the data is organized and is the key to creating a graph from your data.
+The data model is what you map your tables or files to.
+It consists of nodes and relationships.
+Nodes need to have a _label_ and one or more _properties_.
+Relationships need to have a _type_ and can have one or more _properties_.
+
+You can add a new model and see your available models (if you have any) in the *Graph models* tab.
+If you add a new model, you can either *Connect to a data source* to select a source from your list (see xref:import/file-provision.adoc[] for information about data sources), or you can drag and drop (or browse for) local flat files.
+
+Once you have defined your data source, you have three options to create your model:
+
+* *Define manually* - sketch your model and map your data
+* *Generate from schema* - automatically define and map a model based on tables and constraints in your data
+* *Generate with AI* - if you have enabled *Generative AI assistance* (in the xref:visual-tour/index.adoc#org-settings[Organization settings]), your model and mapping are automatically defined using AI based on the available metadata in your data source, giving you a more complete model.
+
+When you have a model generated, with or without AI, always review it and make sure the model and mapping are defined as expected.
+The generated model is meant to be a starting point, and you add elements to it as needed, since a relational database often does not contain sufficient information for the Import service to generate a complete model.
+Depending on what metadata your data contains, you may get a more complete model if you generate with AI.
+
+If you are streaming local files, you can also upload a model, with or without data, in a _.json_ format via the [*...*] more menu.
+But if you want full control of how your data is modeled, you can create the model manually.
+
+The data model panel is located in the center of the screen.
+It has buttons for collapsing the files panel, adding a node, discarding elements, previewing, and running an import, as well as a more options menu.
+
+[.shadow]
+image::model-panel.png[]
+
+== Workflow
+
+The most efficient way to create your model is to complete the mapping of each element, i.e. select the source file, ID, and properties, before moving on to the next element.
+Being familiar with your files is essential to creating a good model.
+See the section on xref:import/mapping.adoc[Mapping] for more information.
+
+== Create a node
+
+To create a node, click the _Add Node_ button.
+The button is located in the top left corner of the data model panel.
+As mentioned previously, the node needs a label and one or more properties.
+The label can be typed directly on the node or in the _Label_ field in the details panel, to the right of the model panel.
+The conventional way of labeling is to use CamelCase, see link:https://neo4j.com/docs/cypher-manual/current/syntax/naming/#_recommendations[Cypher Manual -> Recommendations] for more information on labeling.
+
+In addition to a label, the node needs to be mapped to a file, which is done in the _File_ field, below the label field, in the details panel.
+The mapping is not necessary to create the node, nor is adding properties, but both need to be done before the import can be run.
+See xref:import/mapping.adoc[Mapping] for more information on mapping.
+
+== Create a relationship
+
+To create a relationship, it is necessary to have at least one node already.
+If you hover over a selected node, a grey circle with a green plus sign appears on top of the blue circle.
+
+[.shadow]
+image::node-relationship.png[]
+
+The plus sign can be dragged to an empty space on the canvas and once released, a new node is created with a relationship to the first node.
+However, if you already have two nodes, you can just drag the plus sign from one node to the other and a relationship is created.
+
+You can type directly on a selected relationship to specify the relationship type.
+This can be done in the details panel as well.
+The casing convention for relationship types is upper-case SNAKE_CASE, see link:https://neo4j.com/docs/cypher-manual/current/syntax/naming/#_recommendations[Cypher Manual -> Recommendations] for more information.
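+
+For example, a hypothetical pattern combining both conventions could look like this in Cypher (the `Employee` label and `REPORTS_TO` type are assumptions for illustration):
+
+[source, cypher]
+----
+// CamelCase label, SNAKE_CASE relationship type
+MATCH (e:Employee)-[:REPORTS_TO]->(m:Employee)
+RETURN e.name, m.name;
+----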
+
+The relationship needs to be mapped to a file and have at least one property, but again, this is not necessary to create the relationship.
+The section on xref:import/mapping.adoc[Mapping] covers this.
+
+A relationship always has a direction, and if needed, you can reverse the direction in your model with a button, as shown below.
+
+[.shadow]
+image::relationship.png[]
+
+== Deleting elements
+
+Elements can be deleted from the model by selecting them and either clicking the trash icon, or using the delete key on your keyboard.
+You can select all elements with kbd:[⌘+a] on Mac or kbd:[Ctrl+a] on Windows and delete them all at once.
diff --git a/modules/ROOT/pages/import/quick-start.adoc b/modules/ROOT/pages/import/quick-start.adoc
new file mode 100644
index 000000000..24b75f3a7
--- /dev/null
+++ b/modules/ROOT/pages/import/quick-start.adoc
@@ -0,0 +1,59 @@
+[[quick-start]]
+:description: This section gives an overview of the Import service.
+= Quick start
+
+The Import service UI consists of three tabs: *Data sources*, *Graph models*, and *Import jobs*.
+These reflect the three stages of importing data: providing the data, i.e., configuring a source to fetch the data from; modeling the data, i.e., defining how the data is organized; and finally, running the import.
+
+If you haven't previously imported any data, all three are empty; otherwise, sources, models, and import jobs are listed here.
+
+[.shadow]
+.Connect data source
+image::data-source.png[width=800]
+
+== Provide the data
+
+To get started you need to connect to a data source.
+Import supports PostgreSQL, MySQL, SQL Server, Oracle, BigQuery, Databricks, Snowflake, AWS S3, Azure Blobs & Data Lake Storage, Google Cloud Storage, as well as locally hosted flat files.
+
+[.shadow]
+.Supported data sources
+image::sources.png[width=500]
+
+For relational databases and cloud data warehouses, you need to give the data source a name, configure the data source, and add user credentials for the database account.
+The data source configuration is essentially the same for both relational databases and data warehouses; you specify a *host* for your database, a *port* to connect to, the name of the *database*/*service*, and a *schema* that contains your tables (except for MySQL data sources).
+See xref:import/file-provision.adoc[] for more information.
+
+If you want to stream local files, you can drag and drop them into the data source panel or browse for them.
+
+== Model and map the data
+
+When you have connected a data source, you have the option to have a model generated based on primary and foreign key constraints in the source database.
+The quickest way is to accept to have a model generated, but you can draw your own later, see xref:import/modeling.adoc[] for more information.
+
+If you use local files, you can upload a _.json_ file containing your model, if available, or draw your own.
+
+If you generate a model from your relational data the model is already mapped to your tables.
+This is indicated by a green checkmark in the model.
+If you upload a local model, this may or may not be mapped already (it depends on whether it was mapped when first exported or not).
+If any element is unmapped, the import will fail and you will be prompted to add the missing mapping information.
+For more information on this, see xref:import/mapping.adoc[].
+When all elements in the model are mapped, as indicated with the green checkmark, you can run the import.
+
+[.shadow]
+image::import-ready.png[width=400]
+
+== Run the import
+
+When you have connected a SQL data source, you need to provide credentials to the source and the destination (your Aura instance) in order to run the import.
+However, if you are streaming local files, you just need to make sure that they are available in the data source panel and re-provide them if they are not.
+
+If you have selected to go forward with a data source or an existing model and change your mind, click on *Graph models* to find the three tabs again.
+From here, you can go back to *Data sources* to select a different source, if you want.
+
+[.shadow]
+image::go-back.png[width=300]
+
+Connected data sources, models, and past import jobs are then listed under the respective tabs.
+
+For more detailed information on xref:import/file-provision.adoc[file provision], xref:import/modeling.adoc[data models], and xref:import/import.adoc[import], see the respective pages.
\ No newline at end of file
diff --git a/modules/ROOT/pages/import/visual-tour.adoc b/modules/ROOT/pages/import/visual-tour.adoc
new file mode 100644
index 000000000..196cb575e
--- /dev/null
+++ b/modules/ROOT/pages/import/visual-tour.adoc
@@ -0,0 +1,45 @@
+[[Overview]]
+:description: This section provides an overview of the Import user interface.
+= Visual tour
+
+Once you have connected to a data source, the Import interface is the same regardless of the source.
+It is divided into three parts: the data source panel where you provide the tables/files, the data model panel where you draw your model, and the details panel where you map your model to your tables.
+
+[.shadow]
+image::upx-import2.png[]
+
+== Data source panel
+
+The source tables for the import are placed here.
+If you are using local CSV files, you can either drag and drop them here or browse for them.
+Each table can be expanded using the dropdown option, to display the header and the first row of the table.
+The `...` menu allows you to delete the table.
+See xref:import/file-provision.adoc[] for more information about files and tables.
+
+== Data model panel
+
+The center of the UI is the canvas on which you create the data model.
+When you have a model, this panel is where you can see if there are errors in mapping.
+See xref:import/modeling.adoc[] for more information about the data model.
+
+== Add node
+
+This button adds a node to the data model, **not** to the database.
+See xref:import/modeling.adoc[] about how to create the data model.
+
+== Delete selection
+
+Select one or more elements on the canvas and then use this button to delete them.
+
+== More menu
+
+The `...` menu contains options to open a saved model (with or without data), to save your current model (with or without data), to adjust settings for the import, and to clear everything.
+For more information on the settings, see xref:import/mapping.adoc#exclude-list[Node exclude list].
+
+== Details panel
+
+The details panel contains two tabs, _Definition_ and _Constraints & Indexes_.
+The _Definition_ tab is where you give labels to nodes and relationships, map them to files from the data source panel, and specify their properties.
+See xref:import/mapping.adoc[] for more information.
+The _Constraints & Indexes_ tab allows you to see details about constraints and indexes and lets you add or modify the latter.
+See xref:import/indexes-and-constraints.adoc[] for more information.
\ No newline at end of file
diff --git a/modules/ROOT/pages/index.adoc b/modules/ROOT/pages/index.adoc
index 500b6fde9..a5867a3c4 100644
--- a/modules/ROOT/pages/index.adoc
+++ b/modules/ROOT/pages/index.adoc
@@ -1,23 +1,52 @@
[[aura]]
-= Neo4j Aura overview
-:description: This page introduces the Aura platform.
+= Neo4j Aura Documentation
+:description: This page introduces the new Aura console experience.
+:page-aliases: auradb/index.adoc
+:page-ogtitle: Neo4j Aura
-Neo4j Aura is a fast, scalable, always-on, fully automated graph platform offered as a cloud service.
+Neo4j Aura is a fully automated graph platform offered as a cloud service.
+It brings together the capabilities of several tools, services, and operations from the Neo4j catalog.
+To get started with Neo4j Aura, log in at link:https://console-preview.neo4j.io/account/profile[], or click "Get Started Free" at the top of the page.
-Aura includes AuraDB, the graph database as a service for developers building intelligent applications, and AuraDS, the graph data science as a service for data scientists building predictive models and analytics workflows.
+The Neo4j Aura console, or **console** for short, is the new UI experience for Neo4j Aura users.
+Use the console to import and interact with your data — from visualizing nodes and relationships to executing queries with the Cypher query language.
+You can monitor your instances and databases via metrics and logs to get insight into various aspects, such as performance, resource usage, and overall system health.
-== Neo4j AuraDB
-Neo4j AuraDB is the fully managed graph database as a service that helps build intelligent, context-driven applications faster with lightning-fast queries, real-time insights, built-in developer tools, data visualization, and integrations supported by the largest graph developer community.
+The Aura environment starts with an organization, which can contain multiple projects, each with multiple associated users.
+Projects, users, and billing can all be managed directly from the same console.
-For more information on AuraDB, see the xref:auradb/index.adoc[Neo4j AuraDB overview].
+If you have used Aura before, you will find the console familiar but with a host of new features.
+The classic Aura console is still available and remains so until all of its features have been integrated into the new console.
+
+See xref:new-console.adoc[New Neo4j Aura console] for a comparison between the new console and the classic experience.
+
+Aura includes AuraDB, a fully managed graph database, and AuraDS, a fully managed data science as a service solution for running graph analytics on Aura.
+The documentation is based on AuraDB unless specified otherwise.
+See xref:graph-analytics/index.adoc[] for more information about AuraDS and graph analytics.
+
+== Products and tiers
+
+Aura offers the following products and tiers:
+
+*Products:*
+
+** AuraDB
+** AuraDS
+** Aura Graph Analytics
+
+*Tiers:*
+
+** AuraDB Free
+** AuraDB Professional
+** AuraDB Business Critical
+** AuraDB Virtual Dedicated Cloud
+** AuraDS Professional
+** AuraDS Enterprise
+
+The full list of features available with each tier is available on the link:https://neo4j.com/pricing/[Neo4j Pricing page].
-== Neo4j AuraDS
-Neo4j AuraDS is the fully managed data science as a service solution for data scientists that unifies the machine learning (ML) surface and graph database into a single workspace, making it easy to uncover the connections in big data and answer business-critical questions.
-For more information on AuraDS, see the xref:aurads/index.adoc[Neo4j AuraDS overview].
(C) {copyright}
License: link:{common-license-page-uri}[Creative Commons 4.0]
-// Edit at: https://github.com/neo4j-graphacademy/courses/blob/main/asciidoc/courses/neo4j-fundamentals/promo.adoc
-include::https://raw.githubusercontent.com/neo4j-graphacademy/courses/main/asciidoc/courses/neo4j-fundamentals/promo.adoc[]
diff --git a/modules/ROOT/pages/platform/logging/download-logs.adoc b/modules/ROOT/pages/logging/download-logs.adoc
similarity index 98%
rename from modules/ROOT/pages/platform/logging/download-logs.adoc
rename to modules/ROOT/pages/logging/download-logs.adoc
index a78d14899..e8e4f4da9 100644
--- a/modules/ROOT/pages/platform/logging/download-logs.adoc
+++ b/modules/ROOT/pages/logging/download-logs.adoc
@@ -7,7 +7,7 @@ You can access logs from an Aura instance via the *Logs* tab.
To access the *Logs* tab:
-. Navigate to the https://console.neo4j.io/[Neo4j Aura Console] in your browser.
+. Navigate to the https://console.neo4j.io/[Neo4j Aura console] in your browser.
. Select the instance you want to export the logs from.
. Select the *Logs* tab.
diff --git a/modules/ROOT/pages/logging/log-downloads.adoc b/modules/ROOT/pages/logging/log-downloads.adoc
new file mode 100644
index 000000000..b4b859f67
--- /dev/null
+++ b/modules/ROOT/pages/logging/log-downloads.adoc
@@ -0,0 +1,88 @@
+[[aura-monitoring]]
+= Download logs
+:description: This page describes how to download logs.
+:page-aliases: platform/logging/download-logs.adoc
+:log-download-retention-days: 30
+:max-download-rows: 5 million
+:max-request-hours: 24
+:role-project-admin: Project Admin
+:role-project-member: Project Member
+
+label:AuraDB-Professional[]
+label:AuraDB-Virtual-Dedicated-Cloud[]
+
+Downloading logs requires a role of either _{role-project-admin}_ or _{role-project-member}_.
+
+You can download query logs and security logs.
+The logs include any filters or search terms applied when the logs were fetched.
+Downloading logs consists of two steps: **initiate the download** and **download the file**.
+
+== Request log
+
+You can request a log of up to {max-request-hours} hours of data and a maximum of {max-download-rows} rows.
+
+After fetching logs, use the *download* button in the top right corner of the *Summary* and *Details* table to initiate the download.
+
+* Select the *log type*: Summary (aggregated log statistics), Details (individual log events), or both.
+* Select a format: JSON or CSV.
+If you select CSV, you can optionally include CSV headers (enabled by default) and specify a field delimiter (default: comma).
+* Select *Confirm* to request the logs.
+
+Once the log is ready for download, it appears in the *Downloads archive*.
+
+== Download the log
+
+Download logs from the *Downloads archive*.
+When the status is Ready, use the Download icon to download the file.
+
+Logs are provided as a zipped file of your selected format (JSON or CSV).
+The file name follows this pattern: `<instance-id>-<timestamp>-<log-type>-logs-<table>.<format>.gz`, for example: `dd9ba752-1731586207476-query-logs-details.json.gz`.
+
+To delete a log file, use the delete icon in the Actions column.
+
+[NOTE]
+====
+Log files are automatically deleted from the downloads archive after {log-download-retention-days} days.
+====
+
+== Downloads archive
+
+To access the archive click the *Open downloads archive* icon in the top-right corner of the page.
+
+The archive displays a table of all the requested logs for the selected instance.
+It includes the following information about each download:
+
+.Downloads archive columns
+[cols="25,75v"]
+|===
+| Display Name | Description
+
+| Requested
+| When the download was requested and by which user.
+
+| Status
+| Current status of the download (e.g., Running, Completed, Failed).
+
+| Type
+| The type of logs being downloaded (Query or Security) and subtype (Summary or Details).
+
+| Time Period
+| The time range covered by the downloaded logs.
+
+| Rows
+| Number of log entries exported.
+
+| Format
+| File format of the download (JSON or CSV with additional CSV settings).
+
+| Filters
+| Applied filters for the log download.
+
+| Actions
+| _Download log_ icon button (Arrow Down Tray Icon) and _Delete log_ icon button (Trash Icon) for the log file.
+|===
+
+.Initiate logs download / open downloads archive icons
+[.shadow]
+image::logsicons.png[]
+
diff --git a/modules/ROOT/pages/logging/log-forwarding.adoc b/modules/ROOT/pages/logging/log-forwarding.adoc
new file mode 100644
index 000000000..4d88f1933
--- /dev/null
+++ b/modules/ROOT/pages/logging/log-forwarding.adoc
@@ -0,0 +1,73 @@
+[[aura-query-logs]]
+= Security log forwarding
+:page-aliases: platform/logging/log-forwarding.adoc
+
+label:AuraDB-Business-Critical[]
+label:AuraDB-Virtual-Dedicated-Cloud[]
+label:AuraDS-Enterprise[]
+
+With security log forwarding, you can stream security logs directly to a cloud project owned by your organization, in real time.
+
+[NOTE]
+====
+To access log forwarding, you need to have the _Project Admin_ role.
+====
+
+To access *Log forwarding*:
+
+. Navigate to *Settings* under the Project section in the sidebar.
+. Select *Log forwarding*.
+
+This displays a list of the currently configured log forwarding processes for the active project.
+Each configuration shows its scope (region or instance) and status (`forwarding`, `setting up`, or `paused`).
+
+For actions related to existing configurations, use the `...` button on the right side of the row to open a menu from which the following actions can be taken:
+
+* *View configuration* - Displays the complete details of the configuration.
+* *Edit* - Allows you to change the configuration.
+* *Delete* - Removes the configuration.
+
+If no log forwarding process is set up, a button to do so is displayed in the center of the page.
+
+== Set up log forwarding
+
+[NOTE]
+====
+Aura Database and Analytics services are business-critical for users.
+There is demand for more capabilities that enable access to logs and metrics, so that you can derive actionable insights using your choice of monitoring platform.
+
+Aura has a strong roadmap of observability sharing features including security logs, query logs, and other capabilities.
+Many of these logs can be of significant size, hence *a new consumption-based billing model including cloud egress costs* will be introduced in the future.
+
+Security is of paramount importance, and therefore the security logs are initially available for free.
+====
+
+The complete steps for setting up log forwarding depend on the chosen cloud provider.
+
+Exhaustive instructions are provided in the wizard that appears when you follow the steps below.
+
+. Navigate to the *Log forwarding* page as described above.
+. Use *Configure log forwarding* and select the scope for log forwarding.
+* label:AuraDB-Business-Critical[] A specific instance will have its logs forwarded.
+* label:AuraDB-Virtual-Dedicated-Cloud[] All instances in the selected region will have their logs forwarded.
+* label:AuraDS-Enterprise[] All instances in the selected region will have their logs forwarded.
+. Follow the instructions specific to your cloud provider.
+
+[NOTE]
+====
+Only one log forwarding configuration is permitted for each unique scope.
+====
+
+== Output destination
+
+Log forwarding can forward logs to the log service of the same cloud provider that the monitored instance is located in.
+
+Cross-region log forwarding is supported.
+
+If your instance is in:
+
+* *Google Cloud Platform* - Forward logs to Google Cloud Logging in your own GCP project.
+* *Amazon Web Services* - Forward logs to CloudWatch in your own AWS account.
+* *Azure* - Forward logs to a Log Analytics workspace in your own Azure subscription.
+
+Logs can be further forwarded into third party systems using the log routing capabilities provided by your cloud provider.
diff --git a/modules/ROOT/pages/logging/query-log-analyzer.adoc b/modules/ROOT/pages/logging/query-log-analyzer.adoc
new file mode 100644
index 000000000..a0d511043
--- /dev/null
+++ b/modules/ROOT/pages/logging/query-log-analyzer.adoc
@@ -0,0 +1,254 @@
+[[aura-monitoring]]
+= Query log analyzer
+:description: This page describes the query log analyzer.
+:page-aliases: platform/logging/query-log-analyzer.adoc
+:log-retention-days: 30
+:max-request-hours: 24
+
+label:AuraDB-Professional[]
+label:AuraDB-Business-Critical[]
+label:AuraDB-Virtual-Dedicated-Cloud[]
+
+Query log analyzer is a feature that provides a UI to review the queries executed on an Aura instance.
+
+You can access query logs from *Logs* in the left-hand navigation or from query-related metrics in the *Database* tab in *Metrics*.
+Use the more menu (*...*) on the metric to navigate to *Explore query logs*.
+
+To switch between instances, use the dropdown menu on the top left.
+
+Query log analyzer is split into three parts:
+
+* *Query timeline* - Timeline showing metrics for number of queries, failed queries and query latency.
+* *Summary table* - An aggregated view of query logs, giving a high level overview over the selected time period.
+* *Details table* - A detailed view showing individual query executions in the selected time period.
+
+[.shadow]
+image::query-log-analyzer.png[width=100%]
+
+To fetch logs, first select a time range in the Query timeline.
+Once a time range is selected, press the *Fetch logs* button.
+You can optionally add filters or search text, then press *Fetch*.
+
+A summary of query executions is returned, showing aggregations per query.
+To see the individual query executions, click the right arrow at the end of the line to show details for that query.
+The details pane shows individual executions.
+
+== Query timeline
+
+When viewing the query timeline, you can select from the following time intervals:
+
+* 30 minutes
+* Last hour
+* Last 2 hours
+* Last 6 hours
+* Last 24 hours
+* Last 3 days
+* Last week
+
+The query timeline can be collapsed by clicking on the header.
+
+[NOTE]
+====
+The query timeline may show activity from internal meta queries, which are filtered in the table.
+====
+
+=== Zoom
+
+To zoom in to a narrower time interval, select and drag inside the timeline to select your desired time interval.
+The data in the timeline automatically updates to match the increased resolution.
+To update the table, click the *Fetch logs* button.
+
+To reset zoom, double-click anywhere inside the timeline.
+
+=== Toggle data series
+
+To hide or show individual data series, select the corresponding data series in the legend below the timeline.
+
+
+[[fetch-logs]]
+== Fetch logs
+
+The *Fetch logs* button opens up a dialog where you can add filters and search before fetching the logs.
+The Query timeline determines the current time selection, which can be changed by closing the dialog and modifying the timeline.
+Once you have selected filters and search terms, click the *Go* button to fetch the logs.
+
+[NOTE]
+====
+Query logs are available for a period of {log-retention-days} days, and each request can be for up to {max-request-hours} hours of data.
+====
+
+[[filters]]
+=== Filters
+
+Use the filter button to load the available filters over the selected time period.
+Filters are available for the following fields:
+
+* Status
+* User
+* Driver
+* Application
+* Initiation type
+* Minimum duration (ms)
+
+[[search]]
+=== Search
+
+Use the search button to search for specific queries or errors.
+Search can be specified for the *Query text* and the *Error text*.
+The fields are case insensitive.
+
+== Log tables
+
+The log tables provide two different views of your query data:
+
+* The *Summary table* aggregates similar queries, showing statistics like average execution time and total count.
+This view helps identify patterns and potential performance issues across multiple executions of the same query.
+
+* The *Details table* shows individual query executions with their specific timestamps, users, and performance metrics.
+This granular view is useful for investigating specific incidents or understanding the context of individual query executions.
+
+=== Summary
+
+.Summary table columns
+[cols="25,25m,50v"]
+|===
+| Display Name | Field Name | Description
+
+| Status
+| severity
+| The status of the query execution.
+
+| Query
+| query
+| The full query text.
+
+| Count
+| executionCount
+| The number of times this query was executed.
+
+| From
+| fromTime
+| The start timestamp of the first query execution.
+
+| To
+| toTime
+| The end timestamp of the last query execution.
+
+| Total time spent (s)
+| totalTimeSpent
+| The total time spent executing all instances of this query, in seconds.
+
+| Avg time (ms)
+| avgExecutionTimeMs
+| The average execution time of the query, in milliseconds.
+
+| Min time (ms)
+| minExecutionTimeMs
+| The minimum execution time of the query, in milliseconds.
+
+| Max time (ms)
+| maxExecutionTimeMs
+| The maximum execution time of the query, in milliseconds.
+
+| Avg waiting (ms)
+| avgWaitingTimeMs
+| The average time spent waiting before query execution, in milliseconds.
+
+| Avg bytes
+| avgAllocatedBytes
+| The average number of bytes allocated per query execution.
+
+| Avg page hits
+| avgPageHits
+| The average number of page hits per query execution.
+
+| Avg page faults
+| avgPageFaults
+| The average number of page faults per query execution.
+
+| Actions
+| -
+| Contains an icon button (Arrow Right Circle) to view detailed executions of this specific query in the Details table. Use this button to filter the Details table to show only executions of the selected query.
+|===
+
+=== Details
+
+.Details table columns
+[cols="25,25m,50v"]
+|===
+| Display Name | Field Name | Description
+
+| Status
+| severity
+| The status of the query execution.
+
+| Query
+| query
+| The full query text.
+
+| End time
+| endTime
+| The timestamp when the query execution completed, including milliseconds.
+
+| Duration (ms)
+| executionTimeMs
+| The duration of the query execution in milliseconds.
+
+| Planning (ms)
+| planningTimeMs
+| The time spent planning the query execution in milliseconds.
+
+| Waiting (ms)
+| waitingTimeMs
+| The time spent waiting before query execution in milliseconds.
+
+| User
+| authenticatedUser
+| The user who executed the query.
+
+| Database
+| database
+| The database where the query was executed.
+
+| Driver
+| driver
+| The database driver used to execute the query.
+
+| Application
+| app
+| The application that initiated the query.
+
+| Initiation type
+| initiationType
+| The type of query initiation.
+
+| Alloc bytes
+| allocatedBytes
+| The number of bytes allocated during query execution.
+
+| Page hits
+| pageHits
+| The number of page hits during query execution.
+
+| Page faults
+| pageFaults
+| The number of page faults during query execution.
+|===
+
+== Table interactions
+
+=== Sort table
+
+By default, the table is sorted on *Count* for *Summary* and *Status* for *Details*.
+To sort by a column (such as Max Time ms) click on the column heading.
+
+=== Modify columns
+
+The columns in the table can be modified by clicking the button to the right of the column row.
+Columns can be enabled or disabled, or the order changed using the grid icon at the top right of the table.
+
+=== Expand query
+
+The table shows up to three rows of query text.
+To see the whole query if it is longer, press the *View more* button under the query text.
+
diff --git a/modules/ROOT/pages/logging/security-log-analyzer.adoc b/modules/ROOT/pages/logging/security-log-analyzer.adoc
new file mode 100644
index 000000000..e4ccfa6a1
--- /dev/null
+++ b/modules/ROOT/pages/logging/security-log-analyzer.adoc
@@ -0,0 +1,169 @@
+:log-retention-days: 30
+:max-request-hours: 24
+
+[[aura-monitoring]]
+= Security log analyzer
+
+label:AuraDB-Professional[]
+label:AuraDB-Business-Critical[]
+label:AuraDB-Virtual-Dedicated-Cloud[]
+
+Security log analyzer is a feature that provides a UI to review the security events on an Aura instance.
+
+You can access security logs from *Logs* in the left-hand navigation by selecting *Security* in the tabs at the top.
+
+To switch between instances, use the dropdown menu on the top left.
+
+Security log analyzer is split into two parts:
+
+* *Summary table* - An aggregated view of security logs, giving a high level overview over the selected time period.
+* *Details table* - A detailed view showing individual security events in the selected time period.
+
+Use the *Fetch logs* button to fetch logs and select a time range for the request.
+You may optionally select any filters or search text if required, then press *Fetch*.
+
+A summary of security events is returned, showing aggregations per event.
+To see the individual security events, click the right arrow at the end of the line to show details for that event.
+The details pane shows individual events.
+
+[[fetch-logs]]
+== Fetch logs
+
+You can select the time range for the security logs from the following intervals:
+
+* Last 30 minutes
+* Last hour
+* Last 2 hours
+* Last 6 hours
+* Last 24 hours
+* Custom time range (start date and end date)
+
+
+[NOTE]
+====
+Security logs are available for a period of {log-retention-days} days, and each request can be for up to {max-request-hours} hours of data.
+====
+
+[[filters]]
+=== Filters
+
+Use the filter button to load the available filters over the selected time period.
+Filters are available for the following fields:
+
+* Status
+* Executing User
+* Authenticated User
+* Driver
+
+[[search]]
+=== Search
+
+You can search for specific security events or messages using the search field.
+Search can be specified for the *Message text*.
+The field is case insensitive.
+
+== Log tables
+
+The log tables provide two different views of your security data:
+
+* The *Summary table* aggregates similar security events, showing statistics like total count and time range.
+The table will be grouped by status and executing user.
+This view helps identify patterns and potential security issues across multiple events.
+
+* The *Details table* shows individual security events with their specific timestamps, users, and other details.
+This granular view is useful for investigating specific incidents or understanding the context of individual security events.
+
+=== Summary
+
+.Summary table columns
+[cols="25,25m,50v"]
+|===
+| Display Name | Field Name | Description
+
+| Status
+| severity
+| The status of the security event.
+
+| Message
+| message
+| The log message.
+
+| Count
+| executionCount
+| The number of times this security event was found.
+
+| From
+| fromTime
+| The start timestamp of the first security event.
+
+| To
+| toTime
+| The end timestamp of the last security event.
+
+| Executing User
+| executingUser
+| The user who executed the security event.
+
+| Authenticated User
+| authenticatedUser
+| The user whose credentials were used to authenticate.
+
+| Actions
+| -
+| Contains an icon button (Arrow Right Circle) to view detailed executions of this specific security message in the Details table. Use this button to filter the Details table to show only executions of the selected security message.
+|===
+
+=== Details
+
+.Details table columns
+[cols="25,25m,50v"]
+|===
+| Display Name | Field Name | Description
+
+| Status
+| severity
+| The status of the security event.
+
+| Message
+| message
+| The log message.
+
+| Time
+| time
+| The timestamp when the log was created, including milliseconds.
+
+| Executing User
+| executingUser
+| The user who executed the security event.
+
+| Authenticated User
+| authenticatedUser
+| The user whose credentials were used to authenticate.
+
+| Database
+| database
+| The database for the security event (if applicable).
+
+| Driver
+| driver
+| The database driver used during the security event.
+
+|===
+
+== Table interactions
+
+=== Sort table
+
+By default, the table is sorted on *Count* for *Summary* and *Status* for *Details*.
+Use the column heading to sort by a column (such as Executing User).
+
+=== Modify columns
+
+The columns in the table can be modified by clicking the button to the right of the column row.
+Columns can be enabled or disabled, or the order changed using the grid icon at the top right of the table.
+
+=== Expand query
+
+The table shows up to three rows of message text.
+To see the whole message if it is longer, press the *View more* button under the message text.
+
diff --git a/modules/ROOT/pages/auradb/managing-databases/advanced-metrics.adoc b/modules/ROOT/pages/managing-instances/advanced-metrics.adoc
similarity index 98%
rename from modules/ROOT/pages/auradb/managing-databases/advanced-metrics.adoc
rename to modules/ROOT/pages/managing-instances/advanced-metrics.adoc
index 0e20b42c6..2d305beb3 100644
--- a/modules/ROOT/pages/auradb/managing-databases/advanced-metrics.adoc
+++ b/modules/ROOT/pages/managing-instances/advanced-metrics.adoc
@@ -8,7 +8,7 @@ Advanced metrics is a feature that enables access to a broad range of different
To access *Advanced metrics*:
-. Navigate to the https://console.neo4j.io/?product=aura-db[Neo4j Aura Console] in your browser.
+. Navigate to the https://console.neo4j.io/?product=aura-db[Neo4j Aura console] in your browser.
. Select the instance you want to access.
. Select the *Metrics* tab.
. Select the *Advanced metrics* button.
@@ -72,7 +72,7 @@ Most metrics will have several values for a given timestamp because of the follo
* Multiple database replicas
* Compressing several data points into one, depending on zoom level
-Aggregating functions are used to reconcile metrics having multiple data points and make the most sense of that particular metric.
+Aggregating functions are used to reconcile metrics having multiple data points and make the most sense of that particular metric.
To convey an even more detailed picture of the state of the system, several aggregations can be shown.
The possible aggregations are:
@@ -82,15 +82,15 @@ The possible aggregations are:
* *Average* - The average value of the metric across all cluster members.
* *Sum* - The sum of the metric across all cluster members.
-== Store size metrics
+== Store size metrics
=== Resources tab
-The chart on the _Resources_ tab shows the allocated store size metric for the selected database either as a percentage of the available storage assigned for the database or as absolute values.
+The chart on the _Resources_ tab shows the allocated store size metric for the selected database either as a percentage of the available storage assigned for the database or as absolute values.
=== Database tab
-The _Database_ tab provides a chart that shows the store size and the portion of the allocated space that the database is actively utilizing.
+The _Database_ tab provides a chart that shows the store size and the portion of the allocated space that the database is actively utilizing.
Both metrics are represented as percentages of the available storage assigned to the database.
These metrics may differ due to the way Neo4j allocates and reuses space.
diff --git a/modules/ROOT/pages/managing-instances/backup-restore-export.adoc b/modules/ROOT/pages/managing-instances/backup-restore-export.adoc
new file mode 100644
index 000000000..907353795
--- /dev/null
+++ b/modules/ROOT/pages/managing-instances/backup-restore-export.adoc
@@ -0,0 +1,209 @@
+[[aura-backup-restore-export]]
+= Backup, export, restore, and upload
+:description: This page describes how to back up, export, and restore your data from a snapshot, as well as how to use `neo4j-admin` to upload data.
+:page-aliases: auradb/managing-databases/backup-restore-export.adoc, aurads/managing-databases/backup-restore-export.adoc, aurads/managing-databases/importing-data/import-db, auradb/importing/import-database
+
+The data in your Aura instance can be backed up, exported, and restored using snapshots.
+
+A snapshot is a copy of the data in an instance at a specific point in time.
+
+The *Snapshots* tab within an Aura instance card shows a list of available snapshots.
+You can find the *Snapshots* tab via *Inspect* in the more menu (*...*) on the instance card.
+
+There are different kinds of snapshots taken at different intervals with different lifecycles depending on your tier.
+See <<snapshot-details>> for more information.
+
+
+Aura stores the data securely in encrypted cloud storage buckets.
+Backups are stored in the same Cloud Service Provider and region as the instance they are associated with.
+
+== Snapshot types
+
+=== Scheduled
+
+label:AuraDB-Professional[]
+label:AuraDB-Business-Critical[]
+label:AuraDB-Virtual-Dedicated-Cloud[]
+label:AuraDS-Professional[]
+label:AuraDS-Enterprise[]
+
+A *Scheduled* snapshot is a snapshot that is automatically triggered at a cadence depending on your tier.
+
+For AuraDB Professional, AuraDS Professional, and AuraDS Enterprise instances running the latest version, scheduled snapshots run automatically once a day; for Business Critical and Virtual Dedicated Cloud instances, they run once an hour.
+See <<snapshot-details>> further on for information about the frequency, restorability, and exportability of scheduled snapshots, as well as snapshots on instances running version 4.x.
+
+=== On Demand
+
+An *On Demand* snapshot is a snapshot that you manually trigger with the *Take snapshot* button.
+This type of snapshot is the only snapshot available for Free instances.
+
+== Snapshot actions
+
+image::snapshot-actions.png[width=400]
+
+[[export-create]]
+
+=== Export / Create
+
+From the more menu (...) next to an existing snapshot, you can:
+
+* *Export* - Download an AuraDB instance as a .backup file (latest version) or .dump file (version 4.x) to store a local copy and work on data offline.
+AuraDB instances with the Graph Analytics plugin are also exported as a .backup file.
+Download an AuraDS instance as a .tar file.
+
+* *Create instance from snapshot* - Create a new instance using the data from the snapshot.
+
+
+[NOTE]
+====
+The ability to Export or Create an instance from a scheduled Virtual Dedicated Cloud snapshot is limited to 14 days for 4.x instances.
+For latest version instances, this is limited to 60 days for full scheduled backups.
+Differential backups are not restorable/exportable and cannot be used to create new instances.
+
+Additionally, for Virtual Dedicated Cloud instances running Neo4j latest version, the ability to export or create an instance from a scheduled snapshot is limited to one (full) snapshot per day.
+
+Use the toggle **Show exportable only** on top of the list of snapshots to filter by whether a snapshot is exportable or not.
+====
+
+[[restore-snapshot]]
+=== Restore
+
+[CAUTION]
+====
+Restoring from a snapshot overwrites the data in your instance, replacing it with the data contained in the snapshot.
+====
+
+You can restore data in your instance to a previous snapshot.
+Use the arrow next to the (*...*) menu on the snapshot you want to restore from.
+This action overwrites the data currently in your instance, and you are asked to confirm that this is indeed desired.
+If not, you can create a new instance from the snapshot instead, as described in <<export-create>>.
+
+
+[[snapshot-details]]
+== Snapshot details
+
+[cols="^,^,^,^,^,^,^",options="header"]
+|===
+| Tier
+| Aura version
+2+| Frequency of snapshots
+2+| Scheduled snapshots
+| On-demand snapshot footnote:1[On-demand snapshots are restorable and exportable for the same period.]
+
+|
+|
+| *Full snapshot* footnote:2[The full snapshot captures the entire database, while differential snapshots record changes since the last full snapshot.]
+| *Differential snapshot*
+| *Restorable days*
+| *Exportable days*
+| *Restorable and exportable days*
+
+| AuraDB Free
+| 4, latest
+| N/A
+| N/A
+| N/A
+| N/A
+| N/A
+
+| AuraDB Professional
+| 4, latest
+| Daily
+| N/A
+| 7
+| 7
+| 7
+
+| AuraDB Business Critical
+| latest
+| Daily
+| Hourly
+| 30
+| 30 full footnote:3[The differential snapshot is not exportable.]
+| 30
+
+.2+| AuraDB Virtual Dedicated Cloud
+| 4
+| Every 6 hours footnote:4[One snapshot per day has a long retention period and remaining three a shorter period.]
+|
+| 60 (long), 7 (short)
+| 14 (long), 7 (short)
+| 90
+
+| latest
+| Daily
+| Hourly
+| 60
+| 60 full footnote:3[]
+| 90
+
+| AuraDS Professional
+| latest
+| Daily
+| N/A
+| 7
+| 7
+| 30
+
+| AuraDS Enterprise
+| latest
+| Daily
+| N/A
+| 14
+| 7
+| 90
+|===
+
+[[restore-backup]]
+== Restore from backup file
+
+From the *Restore from backup file* tab, next to the *Snapshots* tab on the instance card, drag and drop your _.backup_, _.dump_, or _.tar_ file or browse for it.
+This action also overwrites the data currently in your instance.
+If this is not desired, you can create a new instance from the snapshot instead, as described in <<export-create>>.
+
+[[neo4j-admin-upload]]
+== Upload an existing Neo4j database
+
+If you have a local copy of a Neo4j database and Neo4j installed locally, you can use that installation to upload the database to your Aura instance.
+
+[CAUTION]
+====
+This command does not work if you have a network access configuration set up that prevents public traffic to the region your instance is hosted in.
+See <<public-traffic,Public traffic>> below for more information.
+====
+
+`database upload` is a `neo4j-admin` command that you can run to upload the contents of a Neo4j database into an Aura instance, regardless of the database's size, as long as it fits your Aura instance.
+Keep in mind that the database you want to upload may run a different version of Neo4j than your Aura instance.
+Additionally, your Neo4j Aura instance must be accessible from the machine running `neo4j-admin`.
+Otherwise, the upload will fail with SSL errors.
+
+For details on how to use the `neo4j-admin database upload` command, along with a full list of options and version compatibility, see link:{neo4j-docs-base-uri}/operations-manual/current/tools/neo4j-admin/upload-to-aura/[Operations Manual → Upload to Neo4j Aura].
+
+[NOTE]
+====
+The `database upload` command, introduced in Neo4j 5, replaces the `push-to-cloud` command in Neo4j 4.4 and 4.3.
+If the database you want to upload is running an earlier version of Neo4j, please see link:https://neo4j.com/docs/operations-manual/4.4/tools/neo4j-admin/push-to-cloud/[the Neo4j Admin push-to-cloud documentation].
+====
+
+[CAUTION]
+====
+The `neo4j-admin push-to-cloud` command in Neo4j 4.4 and earlier is not compatible with instances encrypted with Customer Managed Keys.
+Use `neo4j-admin database upload` in Neo4j 5 to upload data to instances encrypted with Customer Managed Keys.
+
+For Neo4j 4.x instances in Azure encrypted with Customer Managed Keys, use the Import data service to load data, as `neo4j-admin database upload` is not supported.
+See xref:import/introduction.adoc[Import] for more information.
+====
+
+[[public-traffic]]
+=== Public traffic
+
+If you have created a network access configuration from the *Network Access* page, accessed through the sidebar menu of the Console, *Public traffic* must be enabled for the region your instance is hosted in before you can use the `database upload` command on that instance.
+
+To enable *Public traffic* on a network access configuration:
+
+. Select *Configure* next to the region that has Public traffic disabled.
+
+. Select *Next* until you reach step 4 of 4 in the resulting *Edit network access configuration* modal.
+
+. Clear the *Disable public traffic* checkbox and *Save*.
+
+You can now use the `database upload` command on the instances within that region.
+Once the command has completed, you can disable *Public traffic* again by following the same steps and re-selecting the *Disable public traffic* checkbox.
\ No newline at end of file
diff --git a/modules/ROOT/pages/managing-instances/custom-endpoints.adoc b/modules/ROOT/pages/managing-instances/custom-endpoints.adoc
new file mode 100644
index 000000000..a878d3022
--- /dev/null
+++ b/modules/ROOT/pages/managing-instances/custom-endpoints.adoc
@@ -0,0 +1,46 @@
+[[aura-custom-endpoints]]
+= Custom endpoints
+:description: This section describes custom endpoints.
+:page-aliases: auradb/managing-databases/custom-endpoints.adoc
+
+label:AuraDB-Professional[]
+label:AuraDB-Business-Critical[]
+label:AuraDB-Virtual-Dedicated-Cloud[]
+
+Custom endpoints can be used for database management and migration.
+
+If you are a user with the *Project Admin* role, you can create custom endpoints for your instances directly from the console.
+This allows you to establish a connection to a database using this endpoint instead of the ``.
+
+[NOTE]
+====
+Once a custom endpoint is created (and thus assigned) or re-assigned to an instance, no further changes can be made for up to three hours.
+If you re-assign an endpoint, you have the option to undo the re-assignment at any time without waiting.
+====
+
+[.shadow]
+image::endpoints.png[width=800]
+
+*Custom Endpoints* can be found in the Project Settings in the sidebar of the console.
+
+Use the *Create custom endpoint* button to define your endpoint.
+Choose an appropriate name for the endpoint (it can only contain lowercase letters, numbers, and hyphens) and select which instance to assign the endpoint to.
+Note that the name doesn't have to be unique; Aura appends a series of random characters to the name to ensure that the URL is unique, regardless of what you name the endpoint.
+
+*Authentication* to the custom endpoint is via the username / password credentials for the assigned database instance.
+
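+As an illustrative sketch, assuming `cypher-shell` is installed locally and using a placeholder for the endpoint hostname shown in the console, connecting through a custom endpoint could look like this:
+
+[source, shell]
+----
+# Connect through the custom endpoint instead of the instance's default connection URI.
+# <custom-endpoint-hostname> is a placeholder; the full URI is shown in the console.
+# Authentication uses the credentials of the instance the endpoint is assigned to.
+cypher-shell -a neo4j+s://<custom-endpoint-hostname> -u neo4j -p '<password>' "RETURN 1;"
+----
+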
+Once a custom endpoint has been assigned to an instance, it is listed along with the existing custom endpoints in the *Custom Endpoints* menu in the Project settings.
+Endpoints can be managed both from this menu and from the instance card.
+
+[.shadow]
+image::manage-endpoints.png[width=600]
+
+A custom endpoint can be reassigned to a different instance or deleted.
+If you reassign a custom endpoint, ensure that the instance security and networking configurations you need are also applied to the target instance.
+
+* *For CMEK-enabled instances* - make sure that both the currently assigned and the target instance use the same key.
+
+* *For Private link* - Custom endpoints can be established for private-link-enabled instances within the same region.
+If a custom endpoint is assigned to an instance with private traffic enabled (public traffic disabled), it cannot then be linked to an instance with public traffic enabled, and vice versa.
+These rules still apply if a custom endpoint was once assigned to a private-link-enabled instance that has since been destroyed.
+For more information, see xref:security/secure-connections.adoc#_custom_endpoints_with_private_link[Private links].
diff --git a/modules/ROOT/pages/managing-instances/cypher-version.adoc b/modules/ROOT/pages/managing-instances/cypher-version.adoc
new file mode 100644
index 000000000..d0c440f3d
--- /dev/null
+++ b/modules/ROOT/pages/managing-instances/cypher-version.adoc
@@ -0,0 +1,16 @@
+= Cypher version
+:description: This page describes how to set the default Cypher version for a database.
+
+Neo4j supports two Cypher versions, *Cypher 5* and *Cypher 25*.
+
+No new features are added to Cypher 5: it receives only performance and security updates, as well as bug fixes.
+This is the current default query language.
+
+All new features are added to Cypher 25, which will eventually become the default language for new instances.
+
+To make use of new features and functionality, you need to change the default language.
+This is managed per Aura instance and requires an `ADMIN` role.
+It is done using the `ALTER DATABASE` command and thus affects all queries executed against your database.
+However, you can also override the default language for individual queries.
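+
+As a minimal sketch, assuming you connect with `cypher-shell` (the statements can equally be run from any Cypher client connected to the instance), setting the default version and overriding it for a single query could look like this:
+
+[source, shell]
+----
+# Set the default Cypher version for the database (requires the ADMIN role).
+cypher-shell -a neo4j+s://<dbid>.databases.neo4j.io -u neo4j -p '<password>' \
+  "ALTER DATABASE neo4j SET DEFAULT LANGUAGE CYPHER 25"
+
+# Override the default for a single query by prefixing it with a version.
+cypher-shell -a neo4j+s://<dbid>.databases.neo4j.io -u neo4j -p '<password>' \
+  "CYPHER 5 MATCH (n) RETURN count(n)"
+----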
+
+For more information about Cypher versions and commands, see link:{neo4j-docs-base-uri}/cypher-manual/25/queries/select-version/[Cypher Manual -> Select Cypher version].
diff --git a/modules/ROOT/pages/managing-instances/develop.adoc b/modules/ROOT/pages/managing-instances/develop.adoc
new file mode 100644
index 000000000..d301d2b22
--- /dev/null
+++ b/modules/ROOT/pages/managing-instances/develop.adoc
@@ -0,0 +1,17 @@
+[[aura-develop]]
+= Develop
+:description: This page describes how to connect your instance to an application using Neo4j drivers.
+
+To connect your instance to another application using Neo4j drivers, select the **Connect** button on the instance card, then select **Drivers**.
+
+Drivers are available for the following programming languages:
+
+* Python
+* Java
+* JavaScript
+* .NET
+* Go
+
+[.shadow]
+.Configuring the driver
+image::develop.png[A screenshot of the drivers dialogue]
\ No newline at end of file
diff --git a/modules/ROOT/pages/managing-instances/instance-actions.adoc b/modules/ROOT/pages/managing-instances/instance-actions.adoc
new file mode 100644
index 000000000..b0b701dce
--- /dev/null
+++ b/modules/ROOT/pages/managing-instances/instance-actions.adoc
@@ -0,0 +1,242 @@
+[[instance-actions]]
+= Instance actions
+:description: This page describes the following instance actions - rename, reset, upgrade, resize, pause, resume, clone to a new database, clone to an existing database, or delete an instance.
+:page-aliases: auradb/managing-databases/database-actions.adoc, aurads/managing-instances/instance-actions.adoc
+
+Perform multiple instance actions directly from an instance card: pause/resume, develop, inspect an instance's details, clone to a new instance/clone to an existing instance, edit secondaries, delete, resize, and view resources/view all metrics.
+
+== Inspect an instance's details
+
+From the instance card, select the more menu (*...*), then select *instance details*.
+From the instance details view, you can rename an instance and view its details.
+See xref:managing-instances/instance-details.adoc[Instance details] for more information.
+
+[.shadow]
+.The more [...] menu
+image::instanceactions.png[]
+
+// == Reset an instance
+
+// label:AuraDB-Free[]
+// label:AuraDB-Professional[]
+
+// You can clear all data in an instance using the *Reset to blank* action.
+
+// To reset an instance:
+
+// . Select the more actions (*...*) button on the instance you want to reset.
+// . Select *Reset to blank* from the resulting menu.
+// . Select *Reset*.
+
+// == Upgrade an instance
+
+// === Upgrade from Free to Professional
+
+// You can upgrade an AuraDB Free instance to an AuraDB Professional instance using the *Upgrade to Professional* action.
+
+// Upgrading your instance clones your Free instance data to a new Professional instance, leaving your existing Free instance untouched.
+
+// To upgrade a Free instance:
+
+// . Select the ellipsis (*...*) button on the free instance you want to upgrade.
+// . Select *Upgrade to Professional* from the resulting menu.
+// . Set your desired settings for the new instance. For more information on AuraDB instance creation settings, see xref:auradb/getting-started/create-instance.adoc[].
+// . Tick the *I understand* checkbox and select *Upgrade Instance*.
+
+// === Upgrade from Professional to Business Critical
+
+// You can upgrade an AuraDB Professional instance to an AuraDB Business Critical instance using the *Upgrade to Business Critical* action.
+
+// Upgrading your instance clones your Professional instance data to a new Business Critical instance, leaving your existing Professional instance untouched.
+
+// To upgrade a Business Critical instance:
+
+// . Select the ellipsis (*...*) button on the free instance you want to upgrade.
+// . Select *Upgrade to Business Critical*.
+// . Set your desired settings for the new instance.
+// For more information on AuraDB instance creation settings, see xref:auradb/getting-started/create-instance.adoc[].
+// . Tick the *I understand* checkbox and select *Upgrade Instance*.
+
+== Resize an instance
+// label:AuraDB-Professional[]
+// label:AuraDB-Enterprise[]
+// label:AuraDB-Business-Critical[]
+
+Resizing an instance means changing the memory, CPU and storage size.
+Note that this option is *not available* for *Free instances*.
+
+You can change the size of an existing instance using the *Configure* button on the instance you want to resize.
+An instance remains available during the resize operation.
+
+== Adjust storage
+
+[NOTE]
+====
+Adjustable storage is available for AuraDB Professional, Business Critical, and Virtual Dedicated Cloud deployments on GCP running the latest Neo4j version, with pay-as-you-go billing transacted directly with Neo4j or prepaid billing.
+====
+
+Adjust the storage size of your instance at any time.
+Each AuraDB instance includes a standard storage allocation and configuration; additional storage is chargeable.
+For more information, refer to link:https://console-preview.neo4j.io/pricing[Aura Pricing].
+
+To select the storage size during instance creation, select the desired value in the *Storage (Adjustable)* column.
+
+To adjust an existing instance's storage, use the *Configure* button on the instance card and select a new value for the storage size.
+
+You may reduce your storage allocation, provided the new size is equal to or greater than your current usage.
+For example, if you are using 8GB of data, the minimum selectable option is 8GB.
+Selecting a size smaller than your current usage is not permitted.
+This storage size policy also applies to general instance downsizing.
+
+[.shadow]
+.Adjust storage size
+image::adjust_storage.png[adjust storage screenshot,540,380]
+
+== Pause an instance
+
+Pausing a Neo4j instance temporarily stops the database, which means:
+
+* No Access to Data: The data stored in the database becomes inaccessible.
+Any applications or users trying to connect to the instance won't be able to run queries or retrieve data until the instance is resumed.
+
+* No Processing: The database won't process any operations or transactions while it is paused.
+Any ongoing operations are halted.
+
+* Data is Preserved: Your data remains safe and unchanged while the instance is paused.
+When you resume the instance, you can pick up right where you left off.
+
+Pausing puts the database on hold without shutting it down completely, so you can restart it quickly when needed.
+
+// label:AuraDB-Professional[]
+// label:AuraDB-Enterprise[]
+// label:AuraDB-Business-Critical[]
+
+[NOTE]
+====
+You cannot manually pause an AuraDB Free instance; it is paused automatically after 72 hours of inactivity.
+====
+
+You can pause an instance when not needed and resume it at any time.
+Do so by using the *Pause* button on the instance card.
+After confirming, the instance begins pausing, and a play button replaces the pause button.
+
+When you pause an instance, Aura performs a backup operation behind the scenes.
+It can take a while to pause, depending on your instance size, because Aura is creating the backup.
+
+[NOTE]
+====
+Paused instances run at a discounted rate compared to standard consumption, as outlined in the confirmation window.
+You can pause an instance for up to 30 days, after which point Aura automatically resumes the instance.
+====
+
+=== Resume a paused instance
+
+Resume a paused instance with the *Play* button on the instance card.
+
+After confirming, the instance begins resuming, which may take a few minutes.
+Resuming an instance restores the database from the backup.
+
+[WARNING]
+====
+Aura Free instances do not automatically resume after 30 days.
+If an Aura Free instance remains paused for more than 30 days, Aura deletes the instance, and all information is lost.
+====
+
+== Clone an instance
+
+You can clone an existing instance to create a new instance with the same data.
+You can clone across regions, and from Neo4j version 4 to Neo4j latest version.
+
+There are two options to clone an instance:
+
+* Clone to a new instance
+* Clone to an existing instance
+
+You can access all the cloning options from the more menu (*...*) on the instance.
+
+[NOTE]
+====
+You cannot clone from a Neo4j latest version instance to a Neo4j version 4 instance.
+====
+
+=== Clone to a new instance
+
+. From the more menu (*...*) on the instance you want to clone, select *Clone to* and then *New instance* from the contextual menu.
+. Set your desired settings for the new database.
+For more information on AuraDB database creation, see xref:getting-started/create-instance.adoc[Create an instance].
+. Check the *I accept* box and select *Create*.
++
+[WARNING]
+====
+Make sure that the username and password are stored safely before continuing.
+Credentials cannot be recovered afterwards.
+====
+
+=== Clone to an existing instance
+
+When you clone an instance to an existing instance, the database connection URI stays the same, but the data is replaced with the data from the cloned instance.
+
+[WARNING]
+====
+Cloning into an existing instance will replace all existing data.
+If you want to keep the current data, take a snapshot and export it.
+====
+
+. From the more menu (*...*) on the instance you want to clone, select *Clone to* and then *Existing instance* from the contextual menu.
+. Select the existing AuraDB database to clone to from the dropdown menu.
++
+[NOTE]
+====
+Existing instances that are not large enough to clone into will not be available for selection.
+====
++
+. Check the *I understand the target instance will be overwritten* box and select *Clone*.
+
+== Delete an instance
+
+Delete an instance using the trashcan icon on the instance card.
+
+Type the exact name of the instance (as instructed) to confirm your decision, and select *Destroy*.
+
+[WARNING]
+====
+There is no way to recover data from a deleted Aura instance.
+====
+
+== Mark an instance as `production`
+
+label:AuraDB-Virtual-Dedicated-Cloud[]
+label:AuraDB-Business-Critical[]
+
+Admins can mark a database instance as `production` in AuraDB Virtual Dedicated Cloud and AuraDB Business Critical.
+
+When Neo4j updates Aura Database versions, instances marked as `production` are last to have the Aura Database version updated.
+Because updates are applied to these database instances after other database instances receive updates, you can monitor any potential impact on less critical instances first.
+
+After you mark the instance as `production`, the label is applied immediately, and all instance actions (such as pause or clone) are temporarily unavailable while the instance is being set to `production` in the Neo4j backend.
+
+Use the more menu (*...*) on the instance card to mark it as `production`.
+
+[.shadow]
+.Mark an instance as production
+image::mark-as-production.png[]
+
+[.shadow]
+.Instance marked as production
+image::marked-as-production.png[]
+
+== Upgrade AuraDB Professional to AuraDB Business Critical
+
+To upgrade your AuraDB Professional instance to AuraDB Business Critical, select the more menu (*…*) on an instance card, then select *Upgrade*.
+
+.Upgrade your AuraDB Professional instance to AuraDB Business Critical
+[.shadow]
+image::upgradeprotobc.png[]
+
+This upgrade does not change the original DBID, so your application connection strings remain valid.
+
+*GDS plugin removal:*
+If your AuraDB Professional instance uses the GDS plugin, it is removed during the upgrade.
+The GDS plugin is not supported in AuraDB Business Critical.
+
+*Minimum instance size:*
+If you try to upgrade a 1GB Professional instance, you need to resize it to at least 2GB of memory, because this is the minimum supported size in AuraDB Business Critical.
diff --git a/modules/ROOT/pages/managing-instances/instance-details.adoc b/modules/ROOT/pages/managing-instances/instance-details.adoc
new file mode 100644
index 000000000..354afb071
--- /dev/null
+++ b/modules/ROOT/pages/managing-instances/instance-details.adoc
@@ -0,0 +1,91 @@
+[[aura-instance-details]]
+= Instance details
+:description: This page describes the instance details.
+
+From the instance card, select the more menu *(…)*, then select *Inspect*.
+From here, you can see an overview of the instance details, the snapshots, the option to restore from a backup file, and the logs.
+
+== Overview
+
+In the Overview, apart from viewing the instance details, you can also rename your instance using the pen icon next to the instance name.
+
+[.shadow]
+image::inspect-details1.png[]
+
+
+.Instance details
+[cols="1,1"]
+|===
+| Detail | Description
+
+|ID
+|Every instance has an ID, which is a unique identifier.
+This means multiple instances can have the same instance name, because they are distinguished by their unique IDs.
+
+|Connection URI
+|Use this to connect to an instance
+
+|Query API URL
+|Use this to connect to an instance with Query API
+
+|Custom endpoint
+|Applicable if a custom endpoint has been configured for the instance.
+See xref:managing-instances/custom-endpoints.adoc[Custom endpoints] for more information.
+
+|Private URI
+|Applicable if you have a private link set up
+
+|Version
+|The version of the Aura database
+
+|Region
+|Where servers are located
+
+|Memory
+|The capacity of your instance
+
+|CPU
+|Aura provides database as a service through public cloud providers.
+It runs on container technology, which allows the AuraDB instance to allocate dedicated CPU resources.
+
+|Storage
+|Backups, automated and manual, are stored separately from your instance's allocated storage.
+These are saved in the cloud provider's storage bucket in the same region as your Aura instance.
+
+Options for adjustable storage are available for AuraDB pre-paid billing on GCP running the latest Neo4j version.
+See xref:managing-instances/instance-actions.adoc#_adjust_storage[Adjust storage] for more information.
+
+|Encryption key
+|Neo4j Managed Key encrypts your data
+
+|Vector Optimized
+|Whether vector optimization is enabled for the instance or not.
+See xref:managing-instances/vector-optimization.adoc[Vector optimization] for more information.
+
+|Graph Analytics
+|Whether the Graph Analytics Plugin is present or not.
+See xref:graph-analytics/index.adoc#aura-gds-plugin[Graph Analytics plugin] for more information.
+|===
+
+== Snapshots
+
+The data in your instance can be backed up, exported, and restored using snapshots.
+A snapshot is a copy of the data in an instance at a specific point in time.
+
+Neo4j regularly takes snapshots of your instance, and you can also take a snapshot on demand.
+These snapshots can be used to restore data to a different Neo4j instance.
+
+For more information, see xref:managing-instances/backup-restore-export.adoc[Backup, export, and restore].
+
+
+== Restore from backup file
+
+You can restore a database to your instance by dragging and dropping a file onto this tab.
+The file must be a _.dump_, _.backup_, or _.tar_ file.
+When you restore a database, all existing data is overwritten by your selected file.
+
+See xref:managing-instances/backup-restore-export.adoc#restore[Restore] for more information.
+
+// == Logs
+
+// From the _Logs_ tab, you can request Query logs for a specified time frame.
diff --git a/modules/ROOT/pages/managing-instances/instance-resources.adoc b/modules/ROOT/pages/managing-instances/instance-resources.adoc
new file mode 100644
index 000000000..114fd3089
--- /dev/null
+++ b/modules/ROOT/pages/managing-instances/instance-resources.adoc
@@ -0,0 +1,28 @@
+[[aura-instance-resources]]
+= Instance resources
+:description: This page describes the resource metrics available for an instance.
+
+This page describes metrics about the resources that are crucial for maintaining the performance and stability of your database instances.
+Monitoring these metrics helps you assess resource usage, anticipate potential issues, and make informed decisions about scaling your infrastructure.
+
+To view additional metrics, select *See all metrics* from the instance card, or *Metrics* from the left-hand panel.
+
+== CPU Usage
+
+CPU usage, in cores. CPU is used for planning and serving queries.
+If this metric is constantly spiking or at its limit, select `resize` on the instance card to increase the size of your instance.
+
+== Storage
+
+Amount of disk space reserved to store data, in gigabytes (GB).
+Ideally, the database should all fit into memory (page cache) for the best performance.
+Keep an eye on storage metrics to make sure you have enough storage for future growth.
+This metric is available in *Metrics > Resources > Storage*.
+Check this metric with page cache usage to see if the data is too large for the memory and consider increasing the size of your instance in this case.
+
+Storage is adjustable independently of memory for AuraDB pre-paid plans on GCP running the latest Neo4j version. See xref:managing-instances/instance-actions.adoc#_adjust_storage[Adjust storage] for more information.
+
+== Query Executions
+
+The total number of out-of-memory (OOM) errors for the instance.
+Consider increasing the size of the instance if any OOM errors occur.
diff --git a/modules/ROOT/pages/managing-instances/logs.adoc b/modules/ROOT/pages/managing-instances/logs.adoc
new file mode 100644
index 000000000..bbfb081a4
--- /dev/null
+++ b/modules/ROOT/pages/managing-instances/logs.adoc
@@ -0,0 +1,3 @@
+[[aura-Logs]]
+= Logs
+:description: This page describes instance logs.
\ No newline at end of file
diff --git a/modules/ROOT/pages/managing-instances/migration-readiness.adoc b/modules/ROOT/pages/managing-instances/migration-readiness.adoc
new file mode 100644
index 000000000..b740e68f6
--- /dev/null
+++ b/modules/ROOT/pages/managing-instances/migration-readiness.adoc
@@ -0,0 +1,257 @@
+[[aura-monitoring]]
+= Migration Readiness Report
+:description: This section describes the Migration Readiness Report for upgrading from Aura version 4 to the latest version.
+:page-aliases: migration-readiness.adoc
+
+[NOTE]
+====
+The Migration Readiness Report is only available for instances on Neo4j version 4.
+
+You can access the report for any version 4 instance via the link behind the "Migration recommended" label on the Instance and Console pages.
+====
+
+
+== Overview
+
+This tool advises how to prepare an AuraDB v4 instance for migration to the latest AuraDB version.
+Based on your recent usage history, it reports the application queries, drivers, and database objects that would prevent you from migrating to the latest AuraDB version.
+In addition, you can see link:https://neo4j.com/docs/aura/tutorials/upgrade/[Upgrade to Neo4j 5 within Aura] for details on each identified issue and how to address it.
+
+The main categories of issues that the Migration Readiness Report deals with are:
+
+* Cypher deprecations
+* Deprecated driver usage
+* Deprecated index types
+
+The report page has a section for each category and a chart at the top titled "Deprecations and query timeline."
+This document explains each section; the chart is vital to controlling the others and is detailed in the following section.
+Note that the report only highlights issues that need to be addressed to make your code and queries compatible with AuraDB latest.
+Working on these issues presents a good opportunity to learn more about the advantages of the new features in the latest version.
+
+[NOTE]
+====
+From January 2025, Neo4j Server adopted calendar versioning (CalVer).
+Earlier versions, such as Neo4j 4 and 5, used semantic versioning (SemVer).
+Neo4j Aura uses only the latest version.
+====
+
+After implementing the recommendations from the report, you can use the test and live migration functionalities to prepare and, finally, guide you through the actual migration.
+
+=== Control over the time window
+
+[.shadow]
+image::mrr-deprecation-query-timeline.png[]
+
+In the "Deprecations and query timeline" section, you will see a chart displaying the usage of deprecated Cypher features and constructs and a general measure of query load on that system (query rate).
+You can disable and enable the data series by clicking on the legend.
+This can be helpful if you want to focus on something specific temporarily.
+The time frame can be controlled in two ways:
+
+* Zoom in by clicking the chart area at the start time and dragging, while holding the mouse button down, to the end time you want to select.
+The chart then zooms into the desired time frame.
+* Use the time selector in the top right corner of the chart.
+The chart updates with the desired time frame.
+The maximum time frame is 7 days of history.
+
+To get back to the time range that was previously selected, double-click the chart area.
+Be aware that while the chart can display up to 7 days, the details for Cypher deprecations and deprecated driver usage can be retrieved for a maximum time range of 24 hours.
+
+== Cypher deprecations
+
+[.shadow]
+image::mrr-fetch-logs.png[width=250]
+
+After selecting a time frame of a maximum of 24 hours, use the button to fetch the deprecation logs in this section.
+Setting filters in the following popup window is optional but helpful if you want to see only specific entries.
+You can filter on:
+
+* Name of the deprecation
+* The user that executed the query
+* Driver that was used to execute the query
+* The application name that executed the query (if set)
+* The initiation type of the query
+* The query text or parts of it
+
+Use the button in the popup to fetch applicable data to populate the report's table.
+
+[.shadow]
+image::mrr-deprecation-table.png[]
+
+Each row in the table represents a query in the selected timeframe that must be changed to seamlessly migrate to the latest Aura version.
+You have to rewrite those queries to only use Cypher supported by the latest Aura version.
+
+All executions of the same query are aggregated into one row (see also the "Count" column).
+Use the magnifying glass at the start of each row to access a popup with more information about the query and suggestions on dealing with each issue.
+It also provides relevant links to the documentation for each deprecation.
+
+[.shadow]
+image::mrr-resolution-guide.png[width=600]
+
+The last column in the table of Cypher deprecations links to a view of this specific query in the Aura Query Log Analyzer tool, which can provide information on each execution of the selected query.
+The tool can view queries on all databases except the `system` database.
+
+[.shadow]
+image::mrr-show-query-log-button.png[width=400]
+
+== Deprecated driver usage
+
+[.shadow]
+image::mrr-fetch-driver-stats.png[width=400]
+
+After selecting a time frame of a maximum of 24 hours, use the button to fetch the driver statistics in this section.
+By default, the filters in the popup are set to show only driver usage with potential issues in any database, including the system database.
+You can change those freely to see all driver usage, for example.
+
+Use the button in the popup to fetch applicable data to populate the report's table.
+Depending on the type of client accessing the Neo4j database, links are provided in the column “Latest version” to help with the upgrade.
+
+[.shadow]
+image::mrr-driver-table.png[]
+
+Like the Cypher deprecations table, the last column links to a view of this specific driver's executed queries in the Aura Query Log tool.
+The tool can provide information on each query execution in which the selected driver was used.
+The tool can view queries on all databases except the `system` database.
+
+== Deprecated index types
+
+This section provides information on how to deal with deprecated indexes that may be used in version 4 but need to be handled before or while moving to the latest version.
+
+This part involves running a provided Cypher query on your database to identify the deprecated indexes, or constraints backed by deprecated indexes, and then deciding how to best deal with them.
+For each deprecated index you can decide to manually create a replacement index before the migration (pre-create) or have the migration process create it for you.
+Pre-creating indexes will speed up the migration process but requires additional disk space.
+Not pre-creating indexes will lead to a longer migration process and may result in the need to manually recreate indexes after the migration, as the automatically migrated indexes may not be the optimal type for your application.
+
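+The exact query is provided in the report itself; purely as an illustrative sketch (not the report's query), deprecated `BTREE` indexes could be listed with `cypher-shell` as follows:
+
+[source, shell]
+----
+# Illustrative only: list BTREE indexes, the index type that is deprecated in version 4
+# and removed in later versions. The report provides its own query for this purpose.
+cypher-shell -a neo4j+s://<dbid>.databases.neo4j.io -u neo4j -p '<password>' \
+  "SHOW INDEXES YIELD name, type, labelsOrTypes, properties WHERE type = 'BTREE'"
+----
+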
+Index replacement can either be done manually by following the documentation or using the UI, as shown below, provided in the Migration Readiness Report.
+
+[.shadow]
+image::mrr-index-replacement.png[]
+
+Indexes that are not supported in the latest version are shown in the table, and their replacements of type RANGE can be created for single indexes or all indexes at once.
+The Cypher query to create the replacement index(es) is shown as well.
+
+[NOTE]
+====
+Index replacements created using the UI will be of type RANGE.
+In cases where RANGE is not the best option, change the provided Cypher query accordingly (e.g. to create a TEXT or POINT index) and execute it manually.
+====
+
+== Testing and executing the migration
+
+After implementing the recommendations from the report, you can now test and run the migration.
+Only users with the permission to create and delete instances can access this functionality.
+It is highly recommended to run a test migration before attempting the live migration.
+
+It is also advisable to set up a custom endpoint before the migration to speed up the switch to the migrated instance in your application.
+For more information, see xref:managing-instances/custom-endpoints.adoc[Custom endpoints].
+
+[NOTE]
+====
+During the migration, the migration target instance may be shown with a few different statuses on the instance page, such as LOADING or OVERWRITING.
+Do not attempt to access the instance before the migration is safely finished.
+The progress of migration can be seen in the Migration Readiness Report of the original instance.
+====
+
+=== Run a test migration
+
+Use the *Run test migration* buttons at the top or bottom of the page and then follow the steps outlined in the dialog boxes.
+
+The steps of running a test migration are:
+
+. Carefully read and act upon the steps described in the "Read before test migration" dialog.
+Proceed only if you made the appropriate preparations (e.g. backups of your configurations).
+. Configure a target instance, as described in the next section.
+.. If you have selected a new instance to migrate to: Download the new credentials for that instance.
+. Wait for the migration to finish.
+. Follow all steps outlined in "Next steps before finalizing the test migration" at the top of the Migration Readiness Report page.
+This includes all your testing on the migrated instance.
+. Once you are done with testing, click the "Finalize test migration" button and complete the dialog to remove your test instance.
+
+You can repeat test migrations or run them in parallel as much as needed.
+Be aware that running those test instances incurs the same cost as running any other instance of that size.
+
+==== Configure target instance
+
+An instance can be migrated either to a new instance or to an existing instance that is already running the latest version of Aura and that fits the memory and storage configuration of the original instance.
+This means that if you select the second option, the instance you want to migrate to must have at least the same amount of memory and storage as the original one.
+
+Note that cloning into an existing instance overwrites all of its existing data and name.
+This action cannot be undone and may take longer than cloning to a new instance.
+If you still have data that you want to keep on the instance, it is advised to take a snapshot and download it before continuing.
+
+For both migration options you can also choose to include the migration of the store format to `block` storage.
+It is highly recommended to do this, as it enables you to make use of the newest features in Neo4j Aura as well as leverage its superior performance and scalability.
+More information on the `block` format can be found link:https://neo4j.com/docs/operations-manual/current/database-internals/store-formats/[here].
+
+[NOTE]
+====
+In the process of migrating to a test instance, the instance gets a new name, regardless of whether it is new or existing.
+The name starts with "[Testing]", followed by (most of) the original instance's name and a test counter in parentheses, e.g. "[Testing] original name (1)".
+====
+
+==== Testing the migrated instance
+
+Once you see the box below on the Migration Readiness Report, your migrated instance is ready for testing.
+Follow the steps described and test your instance to make sure your live migration will go smoothly.
+
+[.shadow]
+image::mrr-test-instance-ready.png[]
+
+==== Finalize test migration
+
+Once you are done with testing, use the "Finalize test migration" button.
+You will be asked to acknowledge the finalization since the *test instance is deleted* in the process.
+You can skip this step and keep the test instance, but this incurs a cost.
+Therefore, if you keep the test instance, don't forget to delete it manually when you are done, to minimize costs.
+
+=== Run the live migration
+
+Use the *Live migration* buttons at the top or bottom of the page and then follow the steps outlined in the dialog boxes.
+
+The steps of running the live migration are:
+
+. Carefully read and act upon the steps described in the "Read before live migration" dialog.
+Proceed only if you made the appropriate preparations (e.g. backups of your configurations).
+. Carefully read and act upon the step described in the "Writes made on the v4 instance during migration" dialog.
+Make sure that your application will not write to the original instance during the migration to prevent this data from being lost.
+. Configure a target instance, as described in the next section.
+.. If you have selected a new instance to migrate to: Download the new credentials for that instance.
+. Wait for the migration to finish.
+. Follow all steps outlined in "Next steps before finalizing the live migration" at the top of the Migration Readiness Report page.
+This includes all your testing on the migrated instance.
+. Once you are done with testing, click the "Finalize live migration" button and complete the dialog to remove your original version 4 instance.
+
+There can only be one live migration in progress at any time.
+Until you finalize the migration by removing the original instance, you can restart the process at any point by removing the migrated instance.
+
+==== Configure target instance
+
+An instance can be migrated either to a new instance or to an existing instance that is already running the latest version of Aura and that fits the memory and storage configuration of the original instance.
+This means that if you select the second option, the instance you want to migrate to must have at least the same amount of memory and storage as the original one.
+
+Note that cloning into an existing instance overwrites all of its existing data and name.
+This action cannot be undone and may take longer than cloning to a new instance.
+If you still have data that you want to keep on the instance, it is advised to take a snapshot and download it before continuing.
+
+For both migration options you can also choose to include the migration of the store format to `block` storage.
+It is highly recommended to do this, as it enables you to make use of the newest features in Neo4j Aura as well as leverage its superior performance and scalability.
+More information on the `block` format can be found link:https://neo4j.com/docs/operations-manual/current/database-internals/store-formats/[here].
+
+Regardless of which option you select, the name of the migration target instance is the same as that of the original instance.
+
+==== Testing the migrated instance
+
+Once you see the following box on the Migration Readiness Report your migrated instance is ready for testing.
+Follow the steps described and test your instance to make sure your application can work with it in your production system.
+
+[.shadow]
+image::mrr-live-migration-ready-for-test.png[]
+
+==== Finalize live migration
+
+Once you are done with testing, use the "Finalize live migration" button.
+You will be asked to acknowledge the finalization since *the original instance is permanently removed* in the process.
+Additionally, when the dialog is completed, you will no longer have access to the Migration readiness report.
+
+You can also postpone this step and keep the original instance, e.g. as a rollback option.
+Be mindful that this means you pay the running costs for both the migrated and the original instance.
+If you wish to remove the original instance later, you can revisit this step or remove it via the Aura console.
diff --git a/modules/ROOT/pages/auradb/managing-databases/monitoring.adoc b/modules/ROOT/pages/managing-instances/monitoring.adoc
similarity index 89%
rename from modules/ROOT/pages/auradb/managing-databases/monitoring.adoc
rename to modules/ROOT/pages/managing-instances/monitoring.adoc
index 27390cefb..b35ca4d82 100644
--- a/modules/ROOT/pages/auradb/managing-databases/monitoring.adoc
+++ b/modules/ROOT/pages/managing-instances/monitoring.adoc
@@ -1,11 +1,11 @@
[[aura-monitoring]]
-= Monitoring
+= All metrics
label:AuraDB-Professional[]
label:AuraDB-Enterprise[]
label:AuraDB-Business-Critical[]
-You can monitor the following metrics of an AuraDB instance from the *Metrics* tab:
+You can monitor the following metrics of an instance from the *Metrics* tab:
* *CPU Usage (%)* - The amount of CPU used by the instance as a percentage.
* *Storage Used (%)* - The amount of disk storage space used by the instance as a percentage.
@@ -29,6 +29,6 @@ When viewing metrics, you can select from the following time intervals:
To access the *Metrics* tab:
-. Navigate to the https://console.neo4j.io/?product=aura-db[Neo4j Aura Console] in your browser.
+. Navigate to the https://console.neo4j.io/?product=aura-db[Neo4j Aura console] in your browser.
. Select the instance you want to access.
. Select the *Metrics* tab.
\ No newline at end of file
diff --git a/modules/ROOT/pages/managing-instances/regions.adoc b/modules/ROOT/pages/managing-instances/regions.adoc
new file mode 100644
index 000000000..057fc2946
--- /dev/null
+++ b/modules/ROOT/pages/managing-instances/regions.adoc
@@ -0,0 +1,276 @@
+[[regions]]
+= Regions
+:description: This page lists all regions Aura supports deployment in, sorted by tier.
+:page-aliases: auradb/managing-databases/regions.adoc
+
+label:AuraDB-Professional[]
+label:AuraDS-Professional[]
+label:AuraDB-Business-Critical[]
+label:AuraDS-Enterprise[]
+label:AuraDB-Virtual-Dedicated-Cloud[]
+
+Aura supports the following regions based on your tier:
+
+== Amazon Web Services (AWS)
+
+[.tabbed-example]
+====
+
+[.include-with-AuraDB-Professional]
+======
+** `ap-south-1`
+** `ap-southeast-1`
+** `eu-west-1`
+** `eu-west-3`
+** `sa-east-1`
+** `us-east-1`
+** `us-west-2`
+======
+
+[.include-with-AuraDS-Professional]
+======
+** `ap-south-1`
+** `ap-southeast-1`
+** `eu-west-1`
+** `eu-west-3`
+** `sa-east-1`
+** `us-east-1`
+** `us-west-2`
+======
+
+[.include-with-AuraDB-Business-Critical]
+======
+** `ap-south-1`
+** `ap-southeast-1`
+** `eu-west-1`
+** `eu-west-3`
+** `sa-east-1`
+** `us-east-1`
+** `us-east-2`
+** `us-west-2`
+======
+
+[.include-with-AuraDS-Enterprise]
+======
+** `ap-southeast-1`
+** `ap-southeast-2`
+** `eu-central-1`
+** `eu-west-1`
+** `sa-east-1`
+** `us-east-1`
+** `us-east-2`
+======
+
+[.include-with-AuraDB-Virtual-Dedicated-Cloud]
+======
+** `ap-south-1`
+** `ap-southeast-1`
+** `ap-southeast-2`
+** `ap-northeast-1`
+** `ap-northeast-2`
+** `ap-northeast-3`
+** `ca-central-1`
+** `eu-central-1`
+** `eu-north-1`
+** `eu-south-1`
+** `eu-west-1`
+** `eu-west-2`
+** `eu-west-3`
+** `il-central-1`
+** `sa-east-1`
+** `us-east-1`
+** `us-east-2`
+** `us-west-2`
+======
+====
+
+== Microsoft Azure
+
+[.tabbed-example]
+====
+
+[.include-with-AuraDB-Professional]
+======
+** `brazilsouth`
+** `centralindia`
+** `eastus`
+** `francecentral`
+** `koreacentral`
+** `uksouth`
+** `westus3`
+======
+
+[.include-with-AuraDS-Professional]
+======
+** `brazilsouth`
+** `centralindia`
+** `eastus`
+** `francecentral`
+** `koreacentral`
+** `uksouth`
+** `westus3`
+======
+
+[.include-with-AuraDB-Business-Critical]
+======
+** `brazilsouth`
+** `centralindia`
+** `eastus`
+** `francecentral`
+** `koreacentral`
+** `uksouth`
+** `westus3`
+======
+
+[.include-with-AuraDS-Enterprise]
+======
+** `australiaeast`
+** `brazilsouth`
+** `centralus`
+** `eastus`
+** `eastus2`
+** `francecentral`
+** `germanywestcentral`
+** `koreacentral`
+** `northeurope`
+** `norwayeast`
+** `southcentralus`
+** `southeastasia`
+** `uksouth`
+** `westeurope`
+** `westus3`
+======
+
+[.include-with-AuraDB-Virtual-Dedicated-Cloud]
+======
+** `australiaeast`
+** `brazilsouth`
+** `canadacentral`
+** `centralindia`
+** `eastus`
+** `eastus2`
+** `francecentral`
+** `germanywestcentral`
+** `koreacentral`
+** `northeurope`
+** `norwayeast`
+** `southafricanorth`
+** `switzerlandnorth`
+** `uaenorth`
+** `uksouth`
+** `westus3`
+======
+====
+
+== Google Cloud Platform (GCP)
+
+[.tabbed-example]
+====
+
+[.include-with-AuraDB-Professional]
+======
+** `asia-east1`
+** `asia-east2`
+** `asia-south1`
+** `asia-southeast1`
+** `australia-southeast1`
+** `europe-west1`
+** `europe-west2`
+** `europe-west3`
+** `us-central1`
+** `us-east1`
+** `us-west1`
+======
+
+[.include-with-AuraDS-Professional]
+======
+** `asia-east1`
+** `asia-east2`
+** `asia-south1`
+** `asia-southeast1`
+** `australia-southeast1`
+** `europe-west1`
+** `europe-west2`
+** `europe-west3`
+** `us-central1`
+** `us-east1`
+** `us-west1`
+======
+
+[.include-with-AuraDB-Business-Critical]
+======
+** `asia-east1`
+** `asia-east2`
+** `asia-south1`
+** `asia-southeast1`
+** `australia-southeast1`
+** `europe-west1`
+** `europe-west2`
+** `europe-west3`
+** `us-central1`
+** `us-east1`
+** `us-west1`
+======
+
+[.include-with-AuraDS-Enterprise]
+======
+** `asia-east1`
+** `asia-south1`
+** `asia-south2`
+** `australia-southeast1`
+** `australia-southeast2`
+** `europe-southwest1`
+** `europe-west1`
+** `europe-west2`
+** `europe-west4`
+** `northamerica-northeast1`
+** `southamerica-east1`
+** `us-central1`
+** `us-east1`
+** `us-east4`
+** `us-south1`
+** `us-west1`
+** `us-west2`
+======
+
+[.include-with-AuraDB-Virtual-Dedicated-Cloud]
+======
+** `asia-east1`
+** `asia-east2`
+** `asia-northeast1`
+** `asia-northeast2`
+** `asia-northeast3`
+** `asia-south1`
+** `asia-south2`
+** `asia-southeast1`
+** `asia-southeast2`
+** `australia-southeast1`
+** `australia-southeast2`
+** `europe-central2`
+** `europe-north1`
+** `europe-southwest1`
+** `europe-west1`
+** `europe-west2`
+** `europe-west3`
+** `europe-west4`
+** `europe-west6`
+** `europe-west8`
+** `europe-west9`
+** `europe-west12`
+** `northamerica-northeast1`
+** `northamerica-northeast2`
+** `us-central1`
+** `us-east1`
+** `us-east4`
+** `us-east5`
+** `us-west1`
+** `us-west2`
+** `us-west3`
+** `us-west4`
+** `us-south1`
+** `southamerica-east1`
+** `southamerica-west1`
+** `me-central1`
+** `me-west1`
+======
+====
\ No newline at end of file
diff --git a/modules/ROOT/pages/managing-instances/secondaries.adoc b/modules/ROOT/pages/managing-instances/secondaries.adoc
new file mode 100644
index 000000000..b26526b60
--- /dev/null
+++ b/modules/ROOT/pages/managing-instances/secondaries.adoc
@@ -0,0 +1,40 @@
+[[auradb-secondaries]]
+= Secondaries
+:description: This section introduces secondaries for scaling out read workloads.
+:page-aliases: auradb/managing-databases/secondaries.adoc
+
+label:AuraDB-Business-Critical[]
+label:AuraDB-Virtual-Dedicated-Cloud[]
+
+A secondary is a read-only copy of your Aura database.
+Secondaries help you scale the read query workload your AuraDB instance is able to serve, by spreading the load evenly across multiple copies of the data.
+This increases the maximum read query throughput of a database while preventing bottlenecks.
+
+To ensure high availability, secondaries are distributed across availability zones.
+They are, however, only available within the same cloud region as the primary Aura instance.
+
+Up to 15 secondaries can be added per AuraDB instance, which increases the read capacity to handle read-heavy workloads significantly.
+Secondaries can be added, managed, and removed through the Aura console or the Aura API.
+Currently, they are static and do not support elastic or auto-scaling behavior.
+
+The secondary count is retained when the database is paused and resumed.
+For example, if your database has three secondaries and you pause it, it will resume with three secondaries.
+
+[NOTE]
+====
+Secondaries can take some time to become operational after they are created, and there may be delays when the system is busy.
+Causal consistency is maintained among your secondaries through the use of bookmarks, which also ensure that returned data is correct and up to date.
+However, if the database is under heavy load, queries that use bookmarks may also experience delays while secondaries are being added.
+See link:https://neo4j.com/docs/operations-manual/current/clustering/introduction/#causal-consistency-explained[Operations Manual -> Causal consistency] for more information.
+====
+
+== Edit secondary count using the console
+
+Once the feature is enabled for your project, you can see the secondary count set to zero on an instance card.
+To edit the number of secondaries, use the *More* menu (three dots) on the card.
+
+image::secondary-count-console.png[width=400]
+
+== Edit secondary count using the Aura API
+
+Use the `/instances/\{instanceId}` endpoint to edit the number of secondaries.
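+
+As a hypothetical sketch only, using the `https://api.neo4j.io/v1` base URL and a request body property name that is an assumption (check the link:https://neo4j.com/docs/aura/platform/api/specification/[Aura API Specification^] for the exact field), the call could look like this:
+
+[source, shell]
+----
+# Hypothetical sketch: update the secondary count for an instance via the Aura API.
+# AURA_TOKEN is an OAuth token obtained with your Aura API credentials.
+# The property name "secondaries_count" is an assumption; consult the API specification.
+curl -s -X PATCH "https://api.neo4j.io/v1/instances/<instanceId>" \
+  -H "Authorization: Bearer $AURA_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"secondaries_count": 3}'
+----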
\ No newline at end of file
diff --git a/modules/ROOT/pages/managing-instances/vector-optimization.adoc b/modules/ROOT/pages/managing-instances/vector-optimization.adoc
new file mode 100644
index 000000000..1bb2e337d
--- /dev/null
+++ b/modules/ROOT/pages/managing-instances/vector-optimization.adoc
@@ -0,0 +1,76 @@
+[[aura-vector-optimization]]
+= Vector optimization
+:description: This section describes vector optimization for Neo4j Aura instances.
+
+label:AuraDB-Professional[]
+label:AuraDB-Business-Critical[]
+label:AuraDB-Virtual-Dedicated-Cloud[]
+
+Vector optimization reserves memory for vector indexes, enhancing performance for vector-based operations.
+It is available for AuraDB instances with more than 4GB of memory and across all supported cloud providers and regions.
+
+This configuration re-allocates memory from the graph database to the vector index.
+If this has an impact on your application, consider resizing to a larger Aura instance.
+
+To enable vector optimization during instance creation, select *Instance details* > *Additional settings* > *Vector-optimized configuration*.
+
+It can also be changed with the link:https://neo4j.com/docs/aura/platform/api/specification/#/instances/patch-instance-id[Aura API].
+
+To enable vector optimization on existing instances, from the instance card, use the *Configure* button to access *Configure instance* and find the toggle called *Vector-optimized configuration*.
+You can view the current vector configuration status in the instance details, from the (*...*) menu on the instance card.
+
+If you lower the instance size below 4GB, vector optimization is disabled automatically.
+
+If you clone your instance to a new instance, the new instance inherits the vector optimization settings of the original instance.
+But if you clone to an existing instance, its vector optimization setting remains unchanged.
+
+To learn more about how to use vector indexes, see link:https://neo4j.com/docs/cypher-manual/current/indexes/semantic-indexes/vector-indexes/[Cypher Manual -> Vector indexes].
+
+
+== Instance sizing guide
+
+The vector-optimized configuration is intended to allow an Aura instance's available storage to be completely filled while still providing consistent vector search performance.
+The table below shows the theoretical maximum GiB of vectors for each instance size, and the equivalent number of 768-dimension float-32 vectors.
+
+[cols="1,1,2"]
+|===
+|Aura Instance Size
+|GiB vectors
+|Million vectors (768 dimensions)
+
+|4GB
+|2.8
+|0.9
+
+|8GB
+|5.6
+|1.8
+
+|16GB
+|11.2
+|3.6
+
+|32GB
+|22.4
+|7.3
+
+|64GB
+|44.9
+|14.6
+
+|128GB
+|89.8
+|29.2
+
+|256GB
+|179.6
+|58.4
+
+|512GB
+|359.3
+|116.9
+
+|===
+
+The GiB of vectors is limited by the available storage.
+As larger stores become available, you can increase the vector capacity for these instances.
\ No newline at end of file
diff --git a/modules/ROOT/pages/metrics/metrics-integration/examples.adoc b/modules/ROOT/pages/metrics/metrics-integration/examples.adoc
new file mode 100644
index 000000000..b694b8e9a
--- /dev/null
+++ b/modules/ROOT/pages/metrics/metrics-integration/examples.adoc
@@ -0,0 +1,116 @@
+[aura-cmi-integration-examples]
+= Integration examples
+:description: This page provides Aura metrics integration examples for various APM platforms.
+:page-aliases: platform/metrics-integration.adoc#aura-cmi-example-using-prometheus
+:table-caption!:
+
+label:AuraDB-Virtual-Dedicated-Cloud[]
+label:AuraDS-Enterprise[]
+label:AuraDB-Business-Critical[]
+
+Aura metrics integration examples for various APM platforms.
+
+.Example using Prometheus
+[aura-cmi-example-using-prometheus%collapsible]
+====
+
+.Install Prometheus
+
+- One way is to get a tarball from link:https://prometheus.io/docs/prometheus/latest/installation/[^]
+
+.Configure Prometheus
+
+- To monitor one or more instances, add a section to the Prometheus configuration file `prometheus.yml`.
+
+- Copy the job configuration template provided for the project endpoint or the instance endpoint, as shown.
+
+image::cmi_prometheus_job_config.png[]
+
+- Replace the placeholders `` and `` with corresponding values created in the API credentials section.
+
+- For details, see link:https://prometheus.io/docs/prometheus/latest/configuration/configuration/[Prometheus configuration reference^].
+
+.Start Prometheus
+
+- Use the config updated with credentials to start the Prometheus server.
+
+[source, shell]
+----
+./prometheus --config.file=prometheus.yml
+----
+
+.Test that metrics are fetched
+
+- Check if the metrics endpoints are being successfully connected as targets in Prometheus' UI:
+
+image::cmi_prometheus_targets.png[]
+
+- Check if any of the Aura metrics are showing up by querying using PromQL and plot the basic graphs:
+
+image::cmi_prometheus_jobs_example.png[]
+
+.Use Grafana
+
+- Install and configure Grafana, adding the endpoint of the Prometheus instance configured in the previous step as a data source.
+You can create visualizations, dashboards, and alarms based on Neo4j metrics.
+
+.Usage
+The following is an example of gaining more insights into your Aura instance CPU usage for capacity planning:
+
+- Example PromQL query to plot
+[source, promql]
+----
+max by(availability_zone) (neo4j_aura_cpu_usage{instance_mode="PRIMARY"}) / sum by(availability_zone) (neo4j_aura_cpu_limit{instance_mode="PRIMARY"})
+----
+
+.Chart shows CPU usage of primaries by availability zone
+image::cmi_primaries_az_plot.png["Primaries by availability zone"]
+====
+
+.Example using Datadog
+[aura-cmi-example-using-datadog%collapsible]
+====
+
+.Get a Datadog account, link:https://www.datadoghq.com/[^]
+
+.Install a Datadog agent as described in Datadog documentation
+
+.Configure an endpoint with token authentication
+
+- Edit `/etc/datadog-agent/conf.d/openmetrics.d/conf.yaml` as follows:
+
+[NOTE]
+====
+Replace the placeholders ``, `` and `` with corresponding values from the previous steps.
+====
+
+.`/etc/datadog-agent/conf.d/openmetrics.d/conf.yaml`
+[source, yaml]
+----
+init_config:
+instances:
+ - openmetrics_endpoint:
+ timeout: 30
+ metrics:
+ - neo4j_.*
+ auth_token:
+ reader:
+ type: oauth
+ url: https://api.neo4j.io/oauth/token
+ client_id:
+ client_secret:
+ writer:
+ type: header
+ name: Authorization
+ value: "Bearer "
+----
+
+For details, see link:https://docs.datadoghq.com/agent/?tab=Linux[Datadog Agent documentation^] and link:https://github.com/DataDog/datadog-agent/blob/main/pkg/config/config_template.yaml[configuration reference^].
+
+.Test that metrics are fetched
+
+* `sudo systemctl restart datadog-agent`
+* Watch `/var/log/datadog/*` to see if fetching metrics happens or if there are warnings regarding parsing the configuration.
+* Check in Datadog metric explorer to see if metrics appear (after a couple of minutes).
+
+====
diff --git a/modules/ROOT/pages/metrics/metrics-integration/introduction.adoc b/modules/ROOT/pages/metrics/metrics-integration/introduction.adoc
new file mode 100644
index 000000000..fb78a9ae6
--- /dev/null
+++ b/modules/ROOT/pages/metrics/metrics-integration/introduction.adoc
@@ -0,0 +1,214 @@
+[aura-customer-metrics-integration]
+= Customer Metrics Integration (CMI)
+:description: This page describes the Customer Metrics Integration for Neo4j Aura.
+:page-aliases: platform/metrics-integration.adoc
+:table-caption!:
+
+label:AuraDB-Virtual-Dedicated-Cloud[]
+label:AuraDS-Enterprise[]
+label:AuraDB-Business-Critical[]
+
+An application performance monitoring (APM) system can be configured to fetch metrics of Aura instances of the following types:
+
+* AuraDB Virtual Dedicated Cloud
+* AuraDS Enterprise
+* AuraDB Business Critical
+
+This gives users access to their Neo4j Aura instance metric data for monitoring purposes.
+
+Analyzing the metrics data allows users to:
+
+* Optimize their Neo4j workload
+* Adjust Aura instance sizing
+* Set up notifications
+
+[NOTE]
+====
+Aura Database and Analytics services are business-critical for users.
+There are requests to introduce more capabilities enabling access to logs and metrics to derive actionable insights using your choice of monitoring platform.
+
+Aura has a strong roadmap of observability sharing features including security logs, query logs, and other capabilities.
+Customer metrics integration requires transmitting a significant amount of data, hence *a new consumption-based billing model including cloud egress costs* will be introduced in the future.
+
+Observability is of paramount importance, and therefore the customer metrics integration is initially available for free.
+====
+
+[aura-cmi-process-overview]
+== Integration Process overview
+
+image::cmi_process_overview.png[]
+
+Detailed Aura CMI setup steps and APM integration examples are described in the next chapters.
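+
+For a quick manual check before configuring an APM tool, the following minimal sketch (assuming `curl` and `jq` are available, and using placeholders for the API credentials and for the metrics endpoint URL shown in the console) obtains a token and fetches the metrics:
+
+[source, shell]
+----
+# 1. Obtain a token using your Aura API client ID and secret.
+TOKEN=$(curl -s -u "<client_id>:<client_secret>" -X POST \
+  -d "grant_type=client_credentials" https://api.neo4j.io/oauth/token | jq -r .access_token)
+
+# 2. Fetch metrics from the project or instance metrics endpoint shown in the console.
+curl -s -H "Authorization: Bearer $TOKEN" "<metrics_endpoint_url>"
+----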
+
+[aura-cmi-security]
+== Security
+
+Metrics for a Neo4j Aura instance are only returned if all the following are true:
+
+* The `Authorization` header of the metrics request contains a valid token.
+* The token was issued for an Aura user with the _Metrics Reader_ role.
+* The project has instances of type `Enterprise (Virtual Dedicated Cloud)` or `Business Critical`.
+* The specified instance belongs to the specified project.
+
+pass:[]
+[IMPORTANT]
+====
+The legacy term `Enterprise` is still used within the codebase and API.
+However, in the Aura console and documentation, the AuraDB Enterprise project type is now known as AuraDB Virtual Dedicated Cloud.
+====
+pass:[]
+
+[aura-cmi-revoke-access-to-metrics]
+.Revoke access to metrics
+
+To revoke a user's access to metrics of a specific project, remove the user from that project in `Project > Users`.
+After that, the user still exists, but their connection to the project is removed.
+
+[NOTE]
+====
+The revocation described takes effect after the authorization caches expire, which takes approximately 5 minutes.
+It results in HTTP 401 being returned, along with the message `User doesn't have access to Metrics resources.`
+However, if you remove only the Aura API credentials used to retrieve metrics, the revocation takes effect only after the tokens issued with these credentials expire, as no new token can be issued anymore.
+The currently used token expiration time is 1 hour.
+====
+
+== Metric details
+
+[aura-cmi-metric-labels]
+.Metric labels
+
+Depending on the metric, the following labels are applied:
+
+* `aggregation`: the aggregation used to calculate the metric value, set on every metric.
+Since the Neo4j instance is deployed as a Neo4j cluster, aggregations are performed to combine values from all relevant cluster nodes.
+The following aggregations are used: `MIN`, `MAX`, `AVG` and `SUM`.
+* `instance_id`: the Aura instance ID the metric is reported for, set on every metric.
+* `database`: the name of the Neo4j database the metric is reported for.
+Set to `neo4j` by default.
+
+.Example
+
+[source, shell]
+----
+# HELP neo4j_database_count_node The total number of nodes in the database.
+# TYPE neo4j_database_count_node gauge
+neo4j_database_count_node{aggregation="MAX",database="neo4j",instance_id="78e7c3e0"} 778114.000000 1711462853000
+----
+
+[aura-cmi-looking-up-metric-name]
+.Looking up metric name
+
+For the full list of available metrics, see the xref:./reference.adoc[Reference] page.
+
+[aura-cmi-metric-scrape-interval]
+== Metric scrape interval
+
+The recommended scrape interval for metrics is between 30 seconds and 2 minutes, depending on requirements.
+The metrics endpoint caches metrics for 30 seconds.
+
+[aura-cmi-metric-scrape-timeout]
+== Metric scrape timeout
+
+The recommended scrape timeout for metrics is 30 seconds.
+
+Examples of setting the scrape timeout for different APM systems (see also the configuration sketch below):
+
+* Prometheus: `scrape_timeout: 30s` (see link:https://prometheus.io/docs/prometheus/latest/configuration/configuration/#:~:text=scrape%20interval.%0A%5B-,scrape_timeout%3A%20%3Cduration%3E,-%7C%20default%20%3D%20%3Cglobal_config.scrape_timeout[Prometheus configuration reference^])
+* Datadog: `timeout: 30` (see link:https://github.com/DataDog/integrations-core/blob/824902a298e54e4af8d5e6e080e7131e3f1e98b0/openmetrics/datadog_checks/openmetrics/data/conf.yaml.example#L35C7-L35C14[Datadog Agent configuration^])
+
+[IMPORTANT]
+====
+Setting the scrape timeout to a lower value increases the risk of request timeouts, which can lead to less accurate or less up-to-date metric data.
+====
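+
+As an illustration, the following is a minimal Prometheus scrape job, written as a shell heredoc, that applies both the recommended scrape interval and scrape timeout; the endpoint path, target host, and credentials are placeholders, and the actual job configuration template can be copied from the Aura console.
+
+[source, shell]
+----
+cat > prometheus.yml <<'EOF'
+global:
+  scrape_interval: 60s   # within the recommended 30s-2m range
+  scrape_timeout: 30s    # recommended scrape timeout
+scrape_configs:
+  - job_name: aura-metrics
+    scheme: https
+    metrics_path: /api/v1/YOUR_PROJECT_ID/metrics   # placeholder, copy the real path from the console
+    static_configs:
+      - targets: ['customer-metrics-api.neo4j.io']
+    oauth2:
+      client_id: YOUR_CLIENT_ID
+      client_secret: YOUR_CLIENT_SECRET
+      token_url: https://api.neo4j.io/oauth/token
+EOF
+----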
+
+[aura-cmi-metrics-granularity]
+== Metric granularity
+
+The metrics returned by the integration endpoint provide the labels: `aggregation`, `instance_id`, and `database`.
+
+An Aura instance typically runs on multiple servers, operating in different modes, to achieve availability and workload scalability.
+These servers are deployed across different Cloud Provider availability zones in the user-selected region.
+
+Metrics Integration supports a more granular view of the Aura instance metrics with additional data points and labels for availability zone and instance mode combinations.
+This view can be enabled on demand.
+
+
+See xref:./process.adoc#cmi-endpoint-config[Metrics endpoint configuration] for details on how to change metric granularity.
+
+Note that there may be a delay in more granular metrics being available when a new Aura instance is created.
+This is because of the way 'availability zone' data is collected.
+
+.Example metric data points
+[source]
+----
+neo4j_aura_cpu_usage{aggregation="MAX",instance_id="a59d71ae",availability_zone="eu-west-1a",instance_mode="PRIMARY"} 0.025457 1724245310000
+neo4j_aura_cpu_usage{aggregation="MAX",instance_id="a59d71ae",availability_zone="eu-west-1b",instance_mode="PRIMARY"} 0.047088 1724245310000
+neo4j_aura_cpu_usage{aggregation="MAX",instance_id="a59d71ae",availability_zone="eu-west-1c",instance_mode="PRIMARY"} 0.021874 1724245310000
+----
+
+.Additional metric labels
+* `availability_zone` - The user-selected Cloud Provider availability zone.
+* `instance_mode` - `PRIMARY` or `SECONDARY`, based on the user-selected workload requirements for reads and writes.
+An instance has a minimum of three primaries.
+
+[aura-cmi-programmatic-support]
+== Programmatic support
+
+[aura-cmi-api-for-metrics-integration]
+.Aura API for Metrics Integration
+
+* The Aura API supports fetching metrics integration endpoints (see the example below) using:
+ ** endpoint `+/tenants/{tenantId}/metrics-integration+` (for project metrics)
+ ** JSON property `metrics_integration_url` as part of `+/instances/{instanceId}+` response (for instance metrics)
+* Reference: link:https://neo4j.com/docs/aura/platform/api/specification/[Aura API Specification^]
+
+[NOTE]
+====
+_Project_ replaces _Tenant_ in the Aura Console UI and documentation.
+However, in the API, `tenant` remains the nomenclature.
+====
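+
+As a sketch, these endpoints can be called with curl, reusing a bearer token obtained as in the Security example above; the `v1` base path follows the Aura API specification linked above, and the project and instance IDs are placeholders.
+
+[source, shell]
+----
+# Project-level metrics integration endpoint (the API still uses the tenant nomenclature).
+curl -s -H "Authorization: Bearer $TOKEN" \
+  "https://api.neo4j.io/v1/tenants/YOUR_PROJECT_ID/metrics-integration"
+
+# Instance details; the response includes the metrics_integration_url property.
+curl -s -H "Authorization: Bearer $TOKEN" \
+  "https://api.neo4j.io/v1/instances/YOUR_INSTANCE_ID"
+----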
+
+[aura-cmi-cli-for-metrics-integration]
+.Aura CLI for Metrics Integration
+
+* The Aura CLI has a subcommand to fetch the project metrics endpoint:
++
+[source]
+----
+aura projects get-metrics-integration --tenant-id
+
+# example output
+{
+ endpoint: "https://customer-metrics-api.neo4j.io/api/v1//metrics"
+}
+
+# extract endpoint
+aura projects get-metrics-integration --tenant-id | jq '.endpoint'
+----
+
+* For the instance metrics endpoint, the Aura CLI `instances get` command JSON output includes the property `metrics_integration_url`:
++
+[source]
+----
+aura instances get --instance-id
+
+# example output
+{
+ "id": "id",
+ "name": "Production",
+ "status": "running",
+ "tenant_id": "YOUR_PROJECT_ID",
+ "cloud_provider": "gcp",
+ "connection_url": "YOUR_CONNECTION_URL",
+ "metrics_integration_url": "https://customer-metrics-api.neo4j.io/api/v1///metrics",
+ "region": "europe-west1",
+ "type": "enterprise-db",
+ "memory": "8GB",
+ "storage": "16GB"
+ }
+
+# extract endpoint
+aura instances get --instance-id | jq '.metrics_integration_url'
+----
+
+* Reference: link:https://neo4j.com/labs/aura-cli/1.0/cheatsheet/[Aura CLI cheatsheet^]
diff --git a/modules/ROOT/pages/metrics/metrics-integration/process.adoc b/modules/ROOT/pages/metrics/metrics-integration/process.adoc
new file mode 100644
index 000000000..f3a995eb2
--- /dev/null
+++ b/modules/ROOT/pages/metrics/metrics-integration/process.adoc
@@ -0,0 +1,95 @@
+[aura-customer-metrics-process]
+= Metrics integration process
+:description: This page describes the metrics integration process for Neo4j Aura.
+:page-aliases: platform/metrics-integration.adoc#aura-cmi-steps
+:table-caption!:
+
+label:AuraDB-Virtual-Dedicated-Cloud[]
+label:AuraDS-Enterprise[]
+label:AuraDB-Business-Critical[]
+
+Metrics integration is a two-step process:
+
+- Aura CMI setup
+- Customer APM integration
+
+[aura-cmi-setup]
+== Aura CMI setup
+
+. Log in to Aura as a project admin.
+. Make sure there is a dedicated Aura user to use for fetching metrics.
+You can either:
+ ** Create a new user:
+ ... Invite a new user, selecting _Metrics Reader_ as a role in `Project > Users`.
+ ... Follow the invitation link and log in to Neo4j Aura.
+ ... Confirm the project membership.
+ ** Or find an existing user in `Project > Users` and change their role to _Metrics Reader_.
+. Ensure you are logged in to Aura as the user selected in the previous step.
+In `Account > API Keys`, create new Aura API credentials.
+Save the client secret.
+
+[NOTE]
+====
+Capabilities of users with the role _Metrics Reader_ are limited to fetching the metrics and getting a read-only view of the project.
+====
+
+[aura-cmi-apm-integration]
+== Customer APM integration
+
+. Optionally, configure the endpoints you are interested in, as described in <<cmi-endpoint-config,Metrics endpoint configuration>>, before setting up the APM system.
+. To set up an APM system to fetch Aura metrics, use the `[...]` menu in the table view and copy either the endpoint URL or the Prometheus job configuration template (if configuring Prometheus).
++
+[.shadow]
+image::cmi_apm_config_input.png[]
++
+. Use the OAuth2 type of authentication, specifying the Client ID and Client Secret created during the Aura CMI setup.
+For examples using Prometheus and Datadog, see xref:./examples.adoc[Examples].
+. Once metrics start flowing to the APM system, the status of each endpoint can be viewed in the Metrics integration table. __(See <<cmi-endpoint-config,Metrics endpoint configuration>> below for an example of the status table.)__
+
+[[cmi-endpoint-config]]
+== Metrics endpoint configuration
+
+[NOTE]
+====
+To change the metrics endpoint configuration, you need to have the _Project Admin_ role in the project.
+====
+
+Aura Metrics Integration setup offers two types of metrics endpoints to expose Aura instance metrics:
+
+- **Project metrics endpoint** - A single endpoint with a project-level configuration that allows selecting which instances' metrics to include.
+- **Instance metrics endpoint** - An individual endpoint for each instance in the Project, with a corresponding endpoint configuration.
+
+[.shadow]
+image::cmi_status_table.png[]
+
+Use the __Settings__ button for an endpoint in the table to configure that endpoint.
+
+[NOTE]
+====
+**_Selecting endpoints_**
+
+Selecting an endpoint is based on individual use cases.
+
+In general, if there is a need to monitor the performance of a specific Aura instance in your project, it is better to use the instance endpoint.
+
+If your goal is to monitor a subset of instances in the Project and you don't want to configure an APM system for each endpoint, then configure the Project endpoint and select the instances you are interested in.
+Selecting only the relevant instances helps to improve the latency of the metrics endpoint and reduces the amount of data sent to the APM system.
+====
+
+* Project endpoint configuration:
++
+[.shadow]
+image::cmi_project_config.png[width=400]
++
+. `Selecting instances to include in project endpoint` - A project endpoint-specific setting that allows users to select the instances this endpoint should report metrics for.
+. `Include new instances` - This setting enables scraping for newly created instances without manually updating the Project endpoint configuration.
+. `Metrics granularity` - This setting allows scraping more granular metrics (`Comprehensive`) for the instance(s) of this endpoint.
+Comprehensive metric values are labeled with cloud provider availability zone and Neo4j node type (primary/secondary) for more detailed observability.
+
+* Instance endpoint configuration allows configuring the `Metrics granularity` for the corresponding instance.
++
+[.shadow]
+image::cmi_instance_config.png[width=400]
+
+Endpoint configuration changes take effect on the metrics endpoints only **__5 minutes__** after they are submitted from the UI, due to Metrics integration service caching.
diff --git a/modules/ROOT/pages/platform/metrics-integration.adoc b/modules/ROOT/pages/metrics/metrics-integration/reference.adoc
similarity index 52%
rename from modules/ROOT/pages/platform/metrics-integration.adoc
rename to modules/ROOT/pages/metrics/metrics-integration/reference.adoc
index fa40726d1..81f49ba40 100644
--- a/modules/ROOT/pages/platform/metrics-integration.adoc
+++ b/modules/ROOT/pages/metrics/metrics-integration/reference.adoc
@@ -1,243 +1,15 @@
-[aura-customer-metrics-integration]
-= Customer Metrics Integration (CMI)
+[aura-cmi-reference]
+= Reference
+:description: This page lists the available metrics in the metric integration for Neo4j Aura.
+:page-aliases: platform/metrics-integration.adoc#aura-cmi-metric_definitions
:table-caption!:
-label:AuraDB-Enterprise[] label:AuraDS-Enterprise[]
-
-An application performance monitoring system can be configured to fetch metrics of Neo4j Aura Enterprise instances.
-This gives users access to their Neo4j Aura instance metric data for monitoring purposes.
-Analyzing the metrics data allows users to:
-
-* Optimize their Neo4j load
-* Adjust Aura instance sizing
-* Set up notifications
-
-[aura-cmi-process-overview]
-== Process overview
-
-image::process.png[]
-
-[aura-cmi-steps]
-== Detailed steps
-
-. Log in to Aura as tenant admin.
-. Make sure there is a dedicated Aura user to use for fetching metrics.
-You can either:
- ** Create a new user:
- ... In https://console.neo4j.io/#user-management["User Management"^] of Neo4j Aura, invite a new user, selecting "Metrics Integration Reader" as a role.
-image:invite_user.png[]
- ... Follow the invitation link and log in to Neo4j Aura.
- ... Confirm the tenant membership.
- ** Or you can find an existing user in https://console.neo4j.io/#user-management["User Management"^] and change its role to "Metrics Integration Reader" +
-+
-[NOTE]
-====
-Capabilities of users with the role "Metrics Integration Reader" are limited to fetching the metrics and getting a read-only view of the tenant.
-====
-. Ensure you are logged in to Aura as the user selected in the previous step.
-In https://console.neo4j.io/#account["Account Details"^], create new Aura API credentials.
-Save client secret. +
-image:api_credentials.png[]
-. Configure the APM system to fetch metrics from the URL(s) or configuration templates shown in https://console.neo4j.io/#metrics-integration["Metrics Integration"^] of Neo4j Aura. Use `oauth2` type of authentication specifying the Client ID and Client Secret created in the previous step. See examples for <<_example_using_prometheus,Prometheus and Grafana>> and <<_example_using_datadog,Datadog>> below.
-. Use the APM system to create visualizations, dashboards, and alarms based on Neo4j metrics.
-
-[aura-cmi-security]
-== Security
-
-Metrics for a Neo4j Aura instance are only returned if all the following are true:
-
-* `Authorization` header of the metrics request contains a valid token.
-* The token was issued for an Aura user with "Metrics Integration Reader" role.
-* Tenant is of type `enterprise`.
-* The specified instance belongs to the specified tenant.
-
-[aura-cmi-revoke-access-to-metrics]
-.Revoke access to metrics
-
-To revoke a user's access to metrics of a specific tenant, remove the user from that tenant in https://console.neo4j.io/#user-management["User Management"^].
-After that, the user still exists but its connection to the tenant is removed.
-
-[NOTE]
-====
-The revocation described takes effect after the authorization caches expire, which takes approximately 5 minutes.
-It results in HTTP 401 being returned, along with the message `User doesn't have access to Metrics resources.`
-However, if you remove only the Aura API credentials used to retrieve metrics, the revocation will take effect only after the tokens issued with these credentials expire, as no new token can be issued anymore. Currently used token expiration time is 1 hour.
-====
-
-[aura-cmi-metric-labels]
-.Metric labels
-
-Depending on the metric, the following labels are applied:
-
-* `aggregation`: the aggregation used to calculate the metric value, set on every metric.
-Since the Neo4j instance is deployed as a Neo4j cluster, aggregations are performed to combine values from all relevant cluster nodes.
-The following aggregations are used: `MIN`, `MAX`, `AVG` and `SUM`.
-* `instance_id`: the Aura instance ID the metric is reported for, set on every metric.
-* `database`: the name of the Neo4j database the metric is reported for.
-Set to `neo4j` by default.
-
-.Example
-
-[source, shell]
-----
-# HELP neo4j_database_count_node The total number of nodes in the database.
-# TYPE neo4j_database_count_node gauge
-neo4j_database_count_node{aggregation="MAX",database="neo4j",instance_id="78e7c3e0"} 778114.000000 1711462853000
-----
-
-[aura-cmi-looking-up-metric-name]
-.Looking up metric name in Neo4j Aura Advanced Metrics
-
-In Neo4j Aura Advanced Metrics, it is possible to find out the metric name that corresponds to the chart, by using the chart menu item "Metrics Integration" as shown.
-
-image::advanced_metrics.png[]
-
-[aura-cmi-metric-scrape-interval]
-== Metric scrape interval
-
-Recommended scrape interval for metrics is in the range of 30 seconds up to 2 minutes, depending on requirements. The metrics endpoint caches metrics for 30 seconds.
-
-[aura-cmi-example-using-prometheus]
-== Example using Prometheus
-
-.Install Prometheus
-
-One way is to get a tarball from link:https://prometheus.io/docs/prometheus/latest/installation/[^]
-
-.Configure Prometheus
-
-To monitor one or more instances, add a section to the Prometheus configuration file `prometheus.yml`.
-
-Copy the configuration section proposed in link:https://console.neo4j.io/#metrics-integration[Metrics Integration^], as shown.
-
-[NOTE]
-====
-Replace the placeholders `` and `` with corresponding values created in the previous step.
-====
-
-image::metrics_integration.png[]
-
-For details, see https://prometheus.io/docs/prometheus/latest/configuration/configuration/[Prometheus configuration reference^].
-
-.Start Prometheus
-
-[source, shell]
-----
-./prometheus --config.file=prometheus.yml
-----
-
-.Test that metrics are fetched
-
-Open http://localhost:9090 and enter a metric name or expression in the search field (ex. `neo4j_aura_cpu_usage`).
-
-.Use Grafana
-
-Install and configure Grafana, adding the endpoint of the Prometheus instance configured in the previous step as a data source.
-You can create visualizations, dashboards, and alarms based on Neo4j metrics.
-
-[aura-cmi-example-using-datadog]
-== Example using Datadog
-
-.Get a Datadog account, link:https://www.datadoghq.com/[^]
-
-.Install a Datadog agent as described in Datadog documentation
-
-.Configure an endpoint with token authentication
-
-Edit `/etc/datadog-agent/conf.d/openmetrics.d/conf.yaml` as follows:
-
-[NOTE]
-====
-Replace the placeholders ``, `` and `` with corresponding values from the previous steps.
-====
-
-./etc/datadog-agent/conf.d/openmetrics.d/conf.yaml
-
-[source, yaml]
-----
-init_config:
-instances:
- - openmetrics_endpoint:
- metrics:
- - neo4j_.*
- auth_token:
- reader:
- type: oauth
- url: https://api.neo4j.io/oauth/token
- client_id:
- client_secret:
- writer:
- type: header
- name: Authorization
- value: "Bearer "
-----
-
-For details, see link:https://docs.datadoghq.com/agent/?tab=Linux[Datadog Agent documentation^] and link:https://github.com/DataDog/datadog-agent/blob/main/pkg/config/config_template.yaml[configuration reference^].
-
-.Test that metrics are fetched
-
-* `sudo systemctl restart datadog-agent`
-* Watch `/var/log/datadog/*` to see if fetching metrics happens or if there are warnings regarding parsing the config.
-* Check in Datadog metric explorer to see if metrics appear (after a couple of minutes).
-
-[aura-cmi-programmatic-support]
-== Programmatic support
-
-[aura-cmi-api-for-metrics-integration]
-.Aura API for Metrics Integration
-
-* Aura API supports fetching metrics integration endpoints using:
- ** endpoint `+/tenants/{tenantId}/metrics-integration+` (for tenant metrics)
- ** JSON property `metrics_integration_url` as part of `+/instances/{instanceId}+` response (for instance metrics)
-* Reference: link:https://neo4j.com/docs/aura/platform/api/specification/[Aura API Specification^]
-
-[aura-cmi-cli-for-metrics-integration]
-.Aura CLI for Metrics Integration
-
-* Aura CLI has a subcommand for `tenants` command to fetch tenant metrics endpoint:
-+
-[source]
-----
-aura tenants get-metrics-integration --tenant-id
-
-# example output
-{
- endpoint: "https://customer-metrics-api.neo4j.io/api/v1//metrics"
-}
-
-# extract endpoint
-aura tenants get-metrics-integration --tenant-id | jq '.endpoint'
-----
-
-* For instance metrics endpoint, Aura CLI `instances get` command JSON output includes a new property `metrics_integration_url`:
-+
-[source]
-----
-aura instances get --instance-id
-
-# example output
-{
- "id": "id",
- "name": "Production",
- "status": "running",
- "tenant_id": "YOUR_TENANT_ID",
- "cloud_provider": "gcp",
- "connection_url": "YOUR_CONNECTION_URL",
- "metrics_integration_url": "https://customer-metrics-api.neo4j.io/api/v1///metrics",
- "region": "europe-west1",
- "type": "enterprise-db",
- "memory": "8GB",
- "storage": "16GB"
- }
-
-# extract endpoint
-aura instances get --instance-id | jq '.metrics_integration_url'
-----
-
-* Reference: link:https://neo4j.com/labs/aura-cli/1.0/cheatsheet/[Aura CLI cheetsheet^]
+label:AuraDB-Virtual-Dedicated-Cloud[]
+label:AuraDS-Enterprise[]
+label:AuraDB-Business-Critical[]
[aura-cmi-metric_definitions]
-== Metric Definitions
+== Metric definitions
[caption=]
.Out of Memory Errors
@@ -250,7 +22,7 @@ m| `neo4j_aura_out_of_memory_errors_total`
Consider increasing the size of the instance if any OOM errors.
| Metric type
| _Counter_
-| Default aggregation
+| Aggregation method
m| SUM
|===
@@ -263,7 +35,7 @@ m| neo4j_aura_cpu_limit
| The total CPU cores assigned to the instance nodes.
| Metric type
| _Gauge_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -273,11 +45,12 @@ m| MAX
| Metric name
m| neo4j_aura_cpu_usage
| Description
-| CPU usage (cores). CPU is used for planning and serving queries.
+| CPU usage (cores).
+CPU is used for planning and serving queries.
If this metric is constantly spiking or at its limits, consider increasing the size of your instance.
| Metric type
| _Gauge_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -290,7 +63,7 @@ m| neo4j_aura_storage_limit
| The total disk storage assigned to the instance.
| Metric type
| _Gauge_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -311,10 +84,26 @@ If the workload of Neo4j and performance of queries indicates that more heap spa
This helps avoid unwanted pauses for garbage collection.
| Metric type
| _Gauge_
-| Default aggregation
+| Aggregation method
m| MAX
|===
+.Page Cache Hit Ratio (per minute)
+[frame="topbot", stripes=odd, grid="cols", cols="<1,<4"]
+|===
+| Metric name
+m| neo4j_dbms_page_cache_hit_ratio_per_minute
+| Description
+| The percentage of times data required during query execution was found in memory vs needing to be read from disk.
+Ideally the whole graph should fit into memory, and this should consistently be between 98% and 100%.
+If this value is consistently or significantly under 100%, check the page cache usage ratio to see if the graph is too large to fit into memory.
+A high amount of insert or update activity on a graph can also cause this value to change.
+| Metric type
+| _Gauge_
+| Aggregation method
+m| AVG
+|===
+
.Page Cache Usage Ratio
[frame="topbot", stripes=odd, grid="cols", cols="<1,<4"]
|===
@@ -325,7 +114,7 @@ m| neo4j_dbms_page_cache_usage_ratio
If this is close to or at 100%, then it is likely that the hit ratio will start dropping, and you should consider increasing the size of your instance so that more memory is available for the page cache.
| Metric type
| _Gauge_
-| Default aggregation
+| Aggregation method
m| MIN
|===
@@ -339,7 +128,7 @@ m| neo4j_dbms_bolt_connections_running
This is a set of snapshots over time and may appear to spike if workloads are all completed quickly.
| Metric type
| _Gauge_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -352,7 +141,7 @@ m| neo4j_dbms_bolt_connections_idle
| The total number of Bolt connections that are connected to the Aura database but not currently executing Cypher or returning results.
| Metric type
| _Gauge_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -367,7 +156,7 @@ This includes both properly and abnormally ended connections.
This value may drop if background maintenance is performed by Aura.
| Metric type
| _Counter_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -382,7 +171,7 @@ This includes both successful and failed connections.
This value may drop if background maintenance is performed by Aura.
| Metric type
| _Counter_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -397,7 +186,7 @@ Young garbage collections typically complete quickly, and the Aura instance wait
High values indicate that the instance is running low on memory for the workload and you should consider increasing the size of your instance.
| Metric type
| _Counter_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -412,7 +201,7 @@ Old garbage collections can take time to complete, and the Aura instance waits w
High values indicate that there are long-running processes or queries that could be optimized, or that your instance is running low on CPU or memory for the workload and you should consider reviewing these metrics and possibly increasing the size of your instance.
| Metric type
| _Counter_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -427,7 +216,7 @@ If this spikes or is increasing, check that the queries executed are using param
This value may drop if background maintenance is performed by Aura.
| Metric type
| _Counter_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -440,7 +229,7 @@ m| neo4j_database_transaction_active_read
| The number of currently active read transactions.
| Metric type
| _Gauge_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -453,7 +242,7 @@ m| neo4j_database_transaction_active_write
| The number of active write transactions.
| Metric type
| _Gauge_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -467,7 +256,7 @@ m| neo4j_database_transaction_committed_total
This value may drop if background maintenance is performed by Aura.
| Metric type
| _Counter_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -481,7 +270,7 @@ m| neo4j_database_transaction_peak_concurrent_total
This value may drop if background maintenance is performed by Aura.
| Metric type
| _Counter_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -495,7 +284,7 @@ m| neo4j_database_transaction_rollbacks_total
This value may drop if background maintenance is performed by Aura.
| Metric type
| _Counter_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -509,7 +298,7 @@ m| neo4j_database_check_point_events_total
This value may drop if background maintenance is performed by Aura.
| Metric type
| _Counter_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -523,7 +312,7 @@ m| neo4j_database_check_point_total_time_total
This value may drop if background maintenance is performed by Aura.
| Metric type
| _Counter_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -538,7 +327,7 @@ Checkpoints should typically take several seconds to several minutes.
Values over 30 minutes warrant investigation.
| Metric type
| _Gauge_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -551,7 +340,7 @@ m| neo4j_database_count_relationship
| The total number of relationships in the database.
| Metric type
| _Gauge_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -564,7 +353,7 @@ m| neo4j_database_count_node
| The total number of nodes in the database.
| Metric type
| _Gauge_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -580,7 +369,7 @@ Keep an eye on this metric to make sure you have enough storage for today and fo
Check this metric with page cache usage to see if the data is too large for the memory and consider increasing the size of your instance in this case.
| Metric type
| _Gauge_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -595,7 +384,7 @@ A spike can mean your workload is exceeding the instance's available memory, and
Consider increasing the size of your instance to improve performance if this metric remains high.
| Metric type
| _Counter_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -608,7 +397,7 @@ m| neo4j_db_query_execution_success_total
| The total number of successful queries executed on this database.
| Metric type
| _Counter_
-| Default aggregation
+| Aggregation method
m| SUM
|===
@@ -621,7 +410,7 @@ m| neo4j_db_query_execution_failure_total
| The total number of failed queries executed on this database.
| Metric type
| _Counter_
-| Default aggregation
+| Aggregation method
m| SUM
|===
@@ -631,10 +420,10 @@ m| SUM
| Metric name
m| neo4j_db_query_execution_internal_latency_q99
| Description
-| The query execution time in milliseconds where 99% of queries executed faster than the reported time.
+| label:new[Introduced in 5.0] The query execution time in milliseconds where 99% of queries executed faster than the reported time.
| Metric type
| _Gauge_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -644,10 +433,10 @@ m| MAX
| Metric name
m| neo4j_db_query_execution_internal_latency_q75
| Description
-| The query execution time in milliseconds where 75% of queries executed faster than the reported time.
+| label:new[Introduced in 5.0] The query execution time in milliseconds where 75% of queries executed faster than the reported time.
| Metric type
| _Gauge_
-| Default aggregation
+| Aggregation method
m| MAX
|===
@@ -657,11 +446,42 @@ m| MAX
| Metric name
m| neo4j_db_query_execution_internal_latency_q50
| Description
-| The query execution time in milliseconds where 50% of queries executed faster than the reported time.
+| label:new[Introduced in 5.0] The query execution time in milliseconds where 50% of queries executed faster than the reported time.
This also corresponds to the median of the query execution time.
| Metric type
| _Gauge_
-| Default aggregation
+| Aggregation method
+m| MAX
+|===
+
+.Last Committed Transaction ID
+[frame="topbot", stripes=odd, grid="cols", cols="<1,<4"]
+|===
+| Metric name
+m| neo4j_database_transaction_last_committed_tx_id_total
+| Description
+| The ID of the last committed transaction.
+Track this for primary cluster members of your Aura instance.
+It should show overlapping, ever-increasing lines. If one of the lines levels off or falls behind, that cluster member is no longer replicating data, and action is needed to rectify the situation.
+| Metric type
+| _Counter_
+| Aggregation method
m| MAX
|===
+
+.Cluster Leader (only included if xref:metrics/metrics-integration/introduction.adoc#_metric_granularity[high granularity] is turned on)
+[frame="topbot", stripes=odd, grid="cols", cols="<1,<4"]
+|===
+| Metric name
+m| neo4j_cluster_raft_is_leader
+| Description
+| Is this server the leader? Track this for each raft member in the cluster.
+It reports 0 if it is not the leader and 1 if it is the leader.
+The sum of all of these should always be 1.
+However, there are transient periods in which the sum can be more than 1 because more than one member thinks it is the leader.
+Action may be needed if the metric shows 0 for more than 30 seconds.
+| Metric type
+| _Gauge_
+| Aggregation method
+m| MAX
+|===
diff --git a/modules/ROOT/pages/metrics/metrics-integration/status.adoc b/modules/ROOT/pages/metrics/metrics-integration/status.adoc
new file mode 100644
index 000000000..6db6f6af8
--- /dev/null
+++ b/modules/ROOT/pages/metrics/metrics-integration/status.adoc
@@ -0,0 +1,29 @@
+= Metrics endpoint status
+:description: This page lists metrics endpoint status in the metric integration in Neo4j Aura.
+:table-caption!:
+
+label:AuraDB-Virtual-Dedicated-Cloud[]
+label:AuraDS-Enterprise[]
+label:AuraDB-Business-Critical[]
+
+Once the endpoints are configured and the APM integration is set up, the APM system starts scraping the metrics.
+The Metrics integration service records some usage statistics for scraped endpoints.
+These statistics are presented in the Metrics integration status table:
+
+image::cmi_status_table.png[]
+
+== Status details
+
+. `Type` - The type of endpoint scraped (Project or Instance).
+. `Endpoint target` - The name of the entity (Project or Instance) being scraped.
+. `Status` - Indicates the endpoint usage and behavior. An endpoint status can be one of the following:
+ * `Not in use` - The endpoint is not used.
+ * `Error` - An error occurred while scraping the endpoint.
++
+image::cmi_error_status.png[]
++
+Hovering over the `Error` status shows the type of error encountered during the scrape.
+ * `In use` - The endpoint was most recently scraped without any issues.
+. `Last active` - The timestamp of when the endpoint was last scraped.
+. `Metrics count` - The number of metrics scraped in the last 24 hours.
+. `Data volume` - The amount of metrics data, in bytes, scraped in the last 24 hours.
+. `Request rate` - The rate of requests to the metrics endpoint per hour.
diff --git a/modules/ROOT/pages/metrics/view-metrics.adoc b/modules/ROOT/pages/metrics/view-metrics.adoc
new file mode 100644
index 000000000..1f57c7cba
--- /dev/null
+++ b/modules/ROOT/pages/metrics/view-metrics.adoc
@@ -0,0 +1,114 @@
+[[view-metrics]]
+= View metrics
+:description: This page describes metrics in the Neo4j Aura console.
+:page-aliases: auradb/managing-databases/monitoring.adoc, auradb/managing-databases/advanced-metrics.adoc, aurads/managing-databases/monitoring.adoc, aurads/managing-databases/advanced-metrics.adoc
+
+You can get a view of instance resources from the instance card.
+
+You can also monitor the metrics of an instance from the *Metrics* tab.
+From the tabs at the top, you can navigate between the resources, instance, and database metrics.
+
+The metrics are laid out across three tabs according to their category:
+
+* *Resources* - Overall system resources, such as CPU, RAM and disk usage.
+* *Instance* - Information about the Neo4j instance(s) running the database.
+* *Database* - Metrics concerning the database itself, such as usage statistics and entity counts.
+
+image::metrics.png[]
+
+Select the info icon to see information about a particular metric. You can also expand the graph.
+
+image::moreinfo.png[]
+
+When viewing metrics, you can select from the following time intervals:
+
+* 6 hours
+* 24 hours
+* 3 days
+* 7 days
+* 30 days
+
+== Chart interactions
+
+[NOTE]
+====
+Memory and storage charts can be toggled between absolute and relative values using the *%* toggle.
+====
+
+=== Toggle data series
+
+To hide or show individual data series, select the corresponding data series in the legend below the chart.
+
+=== Zoom
+
+To zoom in to a narrower time interval, click and drag inside any chart to select the desired interval.
+The data automatically updates to match the increased resolution.
+
+To reset zoom, double-click anywhere inside the chart or use the option in the context menu.
+
+=== Expand
+
+Any chart can be expanded to take up all the available screen space by clicking the *expand* button (shown as two opposing arrows).
+To exit this mode, click the *x* button on the expanded view.
+
+=== Context menu
+
+To access the chart context menu, select the *...* button on any chart.
+
+* *More info* - Selecting *More info* brings up an explanation of the particular metric.
+For some metrics it also provides hints about possible actions to take if that metric falls outside the expected range.
+
+* *Reset zoom* - If the zoom level has been altered by selecting and dragging across a chart, *Reset zoom* resets the zoom back to the selected interval.
+
+== Aggregations
+
+Most metrics have several values for a given timestamp for the following reasons:
+
+* Multiple database replicas
+* Compressing several data points into one, depending on zoom level
+
+Aggregation functions are used to reconcile multiple data points into the value that makes the most sense for that particular metric.
+To convey an even more detailed picture of the state of the system, several aggregations can be shown.
+
+The possible aggregations are:
+
+* *Min* - The minimum value of the metric across all cluster members.
+* *Max* - The maximum value of the metric across all cluster members.
+* *Average* - The average value of the metric across all cluster members.
+* *Sum* - The sum of the metric across all cluster members.
+
+== Detail view
+
+label:AuraDB-Business-Critical[]
+label:AuraDB-Virtual-Dedicated-Cloud[]
+
+An Aura instance can run on multiple servers to achieve availability and workload scalability.
+These servers are deployed across different Cloud Provider availability zones in the user-selected region.
+
+Detail view shows distinct data series for availability zone and instance mode combinations.
+This is presented as an alternative to the aggregations described above.
+
+Detail view can be enabled with the toggle under the time interval selector.
+
+[NOTE]
+====
+Metrics in the Detail view for a new Aura instance may take time to appear because of the way 'availability zone' data is collected.
+====
+
+== Store size metrics
+
+=== Resources tab
+
+The chart on the _Resources_ tab shows the allocated store size metric for the selected database, either as a percentage of the available storage assigned to the database or as an absolute value.
+
+=== Database tab
+
+The _Database_ tab provides a chart that shows the store size and the portion of the allocated space that the database is actively utilizing.
+Both metrics are represented as percentages of the available storage assigned to the database.
+
+These metrics may differ due to the way Neo4j allocates and reuses space.
+Once allocated, space is never automatically de-allocated.
+Thus, reducing the data (nodes, relationships, properties) stored in the database does not reduce the top-line store size metric.
+However, Neo4j will reuse this 'available' space before allocating more from the system.
+The amount of allocated space that is 'available' is reported by the database, and Advanced metrics uses this metric to derive the used space by subtracting it from the allocated store size.
+This information can help you understand how close your database is to exceeding the assigned storage size.
\ No newline at end of file
diff --git a/modules/ROOT/pages/microsoft-fabric.adoc b/modules/ROOT/pages/microsoft-fabric.adoc
new file mode 100644
index 000000000..e133f6650
--- /dev/null
+++ b/modules/ROOT/pages/microsoft-fabric.adoc
@@ -0,0 +1,133 @@
+
+[[microsoft-fabric]]
+= Workload for Microsoft Fabric
+:description: This page describes how to use Aura on Microsoft Fabric.
+
+label:Aura-DB-Professional[]
+
+[IMPORTANT]
+====
+Workload for Microsoft Fabric is a Public Preview and is not intended for production use.
+====
+
+== Overview
+
+The public preview of the Workload for Microsoft Fabric enables developers, data scientists, and analysts to transform their OneLake data into a graph database in Aura and perform graph analytics within the Microsoft Fabric console.
+
+The workload allows you to do a number of things, including, but not limited to, the following:
+
+* Try AuraDB Professional for free with a 14-day free trial.
+* Transform the data in a OneLake Lakehouse into a graph in AuraDB, using generative AI assistance for schema inference.
+* Transform data sources accessible via OneLake Shortcuts including but not limited to those in AWS S3, Azure Data Lake Storage and Google Cloud Storage.
+* Query the graph in the Fabric console.
+* Explore and run graph algorithms in the Fabric console.
+* Build applications using the Neo4j .Net driver.
+* Use collaborative Notebooks in the Fabric console that run PySpark with the Neo4j Python Driver or GDS Client.
+
+Requirements:
+
+* A public endpoint for Microsoft Fabric.
+* Microsoft Fabric Capacity, or Free Trial.
+* An AuraDB Professional instance or Free Trial.
+* Single Sign-On login with Entra ID (see xref:security/single-sign-on.adoc#_microsoft_entra_id_sso[Microsoft Entra ID SSO] for more information).
+* Tool authentication enabled for the database.
+//(see xref:security/tool-auth.adoc[Tool authentication] for more information).
+* Generative AI assistance enabled.
+
+The following Azure regions are supported.
+If your Fabric capacity is outside of one of these regions, then you need to select your preferred region from this list.
+
+** `brazilsouth`
+** `centralindia`
+** `eastus`
+** `francecentral`
+** `koreacentral`
+** `uksouth`
+** `westus3`
+
+**Limitations**
+
+* The graph is created by generative AI assistance and cannot be edited before it is imported into Aura.
+* Results of graph analytics are not returned to OneLake.
+* Graphs can only be created from a single Lakehouse.
+* Creating a new item will create a new database in Aura.
+* AuraDB Professional free trial is limited to one per organization.
+
+**Microsoft Certification**
+
+This workload has been certified by Microsoft. Refer to the link:{neo4j-docs-base-uri}/reference/neo4j-for-microsoft-attestation[Microsoft Attestation Certification Checklist] for details.
+
+== Configure the workload
+
+If the workload is not available in your workspace, contact your administrator to add it.
+The administrator needs to perform the following steps:
+
+. Select *Workloads* in the Fabric console
+. Select the Neo4j AuraDB workload
+. Add the workload
+. Either select capacities from the list or select *All capacities*
+. Add the workload
+
+== Create a graph data set
+
+Once the workload has been enabled in your workspace, follow these steps to create a graph data set:
+
+. Select `...`
+. Select *Neo4j Graph Dataset (preview)*
+. Provide a name for the dataset
+. *Optional*: Select *Start trial now* to start an AuraDB Professional 14-day trial if you don't have a paid Aura plan already.
+If your Fabric capacity is outside one of the supported Aura regions, then you need to select your preferred region from the list.
+. Select the Lakehouse you want to use
+. Select one or more tables in the Lakehouse
+. Finally, select *Transform into graph data set* to create a new AuraDB graph database within an existing project.
+
+Keep in mind that you can only start one free trial per organization.
+
+[NOTE]
+====
+If the AuraDB administrator disables Generative AI assistance in Aura, then the *Transform* button will be greyed out.
+See xref:visual-tour/index.adoc#org-settings[Organization settings] for more information.
+====
+
+== Exploring data sets
+
+Once the transformation is complete, you can explore the graph with the *Explore* button on the Fabric ribbon.
+Explore in Fabric offers the same functionality as Explore in the Aura console.
+
+For more information about Explore, see xref:explore/introduction.adoc[Explore].
+
+== Query data sets
+
+You can also query the graph, once the transformation is complete, with the *Query* button on the Fabric ribbon.
+Query in Fabric works in the same way as Query in the Aura console.
+
+For more information about Query, see xref:query/introduction.adoc[Query].
+
+== Administration
+
+Administration is available via the Aura console and not the Microsoft Fabric console.
+Refer to xref:visual-tour/index.adoc[Visual tour of the console] for more information about the Aura console.
+
+== Upgrading your trial
+
+At any time during your 14-day trial or up to 14 days after your trial expires, you can upgrade to a paid plan using one of the following options:
+
+=== Credit Card
+
+. Open your trial graph dataset.
+. Select *Update* at the top right corner of your screen.
+. Select *Request Upgrade* on the popup dialog.
+. Navigate to the provided billing URL and log in to the Aura console using *Continue with Microsoft* with the same account you use to log in to Fabric.
+. Add your payment method on the billing page; this transitions your account to a paid account.
+
+=== Azure Marketplace
+
+. Open the link:https://azuremarketplace.microsoft.com/en-us/marketplace/apps/neo4j.neo4j_aura_professional[Neo4j AuraDB Professional (pay-as-you-go)] product page on Azure Marketplace.
+. Use the button *Get It Now* to purchase the plan.
+. At the end of the process, you will have a new Aura project that is linked to Azure Marketplace.
+When creating new graph dataset instances, you will be presented with a list of available projects, where you can select this new project and use the workload without any trial limitations.
+
+[NOTE]
+====
+Upgrading via Azure Marketplace will result in a new Organization and Project being created in Aura, which means that you will need to recreate your graph dataset item from scratch.
+====
\ No newline at end of file
diff --git a/modules/ROOT/pages/new-console.adoc b/modules/ROOT/pages/new-console.adoc
new file mode 100644
index 000000000..4a1fc5bc8
--- /dev/null
+++ b/modules/ROOT/pages/new-console.adoc
@@ -0,0 +1,90 @@
+= New Neo4j Aura console
+:description: This page highlights the main differences between the new and classic consoles.
+
+The new console is an evolution of the classic Aura console, updated to enhance the experience with Neo4j Aura.
+It integrates the features and functionalities of various tools, services, and operations from the Neo4j catalog.
+
+The key features, structural changes, and improvements compared to the classic console are outlined here.
+
+== What's new
+
+=== Key features and improvements
+
+** *Vector-optimized configurations* - Optimized for AI workloads, enabling more efficient use of graph data.
+See xref:managing-instances/instance-details.adoc#aura-vector-optimization[Vector optimization] for more information.
+
+** *Graph Analytics plugin* - Allows you to experiment with graph algorithms within the console.
+See xref:graph-analytics/index.adoc#aura-gds-plugin[Graph Analytics plugin] for more information.
+
+** *Secondaries* - Scale the read query workload of your AuraDB instance while maintaining high availability.
+See xref:managing-instances/secondaries.adoc[Secondaries] for more information.
+
+** *Database import/restore* - Upload and restore databases from multiple data sources, including PostgreSQL, MySQL, SQL Server, and Oracle, as well as local _.CSV_ files.
+See xref:import/introduction.adoc[Import] for more information.
+
+** *Billing* - The interface has been redesigned to be easier to navigate and more transparent.
+
+=== Navigation updates
+
+** *More streamlined access to instance details* - Connection status, instance configuration, metrics, and more are available from the instance card.
+See xref:managing-instances/instance-details.adoc[Instance details] for more information.
+
+** *A unified navigation bar* - Allows you to quickly switch between tools such as Query and Explore, as well as Settings and User management.
+See xref:visual-tour/index.adoc[Visual tour] for more information about the UI.
+
+=== Topology
+
+*Organization*
+
+** *Centralized management* of settings and roles, supporting improved scaling and governance.
+
+** *Organization owner* - A new role that allows shared administrative responsibility.
+
+** *OAuth (SSO)* - Configurable single sign-on for secure and simplified authentication.
+
+*Project* is the evolution of *Tenant*.
+
+** *Projects* allow you to organize multiple instances under a single project to standardize configurations and streamline access control.
+
+** You can assign *project-specific roles* to manage permissions effectively.
+See xref:user-management.adoc[User management] for more information.
+
+*Instance*
+
+** *Improved instance management* - You can now resize instances, monitor monthly usage, and manage custom endpoints directly from the instance card.
+
+** *Rolling updates* - Updates are applied without downtime with optional deferred updates for production environments.
+
+=== Integrated tools
+
+** *Query and Explore* are tools for querying and visualizing your data and are now integrated into the console.
+You can navigate between instances directly from the tools.
+
+=== Assisted querying
+
+** *Cypher reference* - Available directly in the Query tool.
+
+** *Co-pilots* - Allow you to use natural language to query the database.
+Co-pilots are available in both the Query and Explore tools.
+
+=== Connectivity
+
+** *Auto-connect* - Establish connections to your database automatically.
+
+** *Database user connections* - Support for connecting using database credentials.
+
+** *Remote connections* - Allows you to connect from remote sources.
+
+** *Cloud metadata storage* - Centralized repository for queries, visualization scenes, and configurations.
+
+=== Monitoring and logs
+
+** *Metrics dashboard* - Visualize performance metrics for your databases.
+
+** *Logs explorer* - Allows you to view detailed logs to troubleshoot and analyze database activity.
+
+** *Metrics scraping* - You can configure instance metrics for integration with external monitoring systems.
+
+=== Educational tools
+
+** *Learning resources in one place* - Interactive guides, sample datasets, and directions to documentation, Developer center, and Graph Academy.
diff --git a/modules/ROOT/pages/platform/create-account.adoc b/modules/ROOT/pages/platform/create-account.adoc
deleted file mode 100644
index 8ef34f5a3..000000000
--- a/modules/ROOT/pages/platform/create-account.adoc
+++ /dev/null
@@ -1,14 +0,0 @@
-[[aura-create-account]]
-= Creating an account
-:description: This page describes how to create a Neo4j Aura account.
-
-To access Neo4j Aura, you need to have an Aura account.
-
-To create an Aura account:
-
-. Navigate to the https://console.neo4j.io/[Neo4j Aura Console] in your browser.
-. Enter an email address and password and select *Register*, or select *Continue with Google* to use a Google account.
-If entering an email address and password, follow these additional steps:
-.. Verify your email address.
-.. Select *Go to the dashboard* from the Aura Console.
-. Select *I agree* once you have read the Terms of Service and Privacy Policy.
\ No newline at end of file
diff --git a/modules/ROOT/pages/platform/logging/log-forwarding.adoc b/modules/ROOT/pages/platform/logging/log-forwarding.adoc
deleted file mode 100644
index b2e4f99b5..000000000
--- a/modules/ROOT/pages/platform/logging/log-forwarding.adoc
+++ /dev/null
@@ -1,54 +0,0 @@
-[[aura-query-logs]]
-= Security log forwarding
-
-label:AuraDB-Enterprise[]
-label:AuraDS-Enterprise[]
-
-With security log forwarding, you can stream security logs directly to a cloud project owned by your organization, in real time.
-
-To access *Log forwarding*:
-
-. Navigate to the https://console.neo4j.io/[Neo4j Aura Console] in your browser.
-. Select *Log forwarding* from the sidebar menu.
-
-This will display a list of currently configured log forwarding processes for the active tenant.
-
-If no log forwarding process is set up, a button to do so is displayed in the center of the page.
-
-[NOTE]
-====
-A log forwarding process is scoped to a specific product and region combination, and limited to one for each.
-====
-
-== Set up log forwarding
-
-[NOTE]
-====
-Aura Database and Analytics services are business critical for our users. We have requests to introduce more capabilities enabling access to logs and metrics to derive actionable insights using your choice of monitoring platform.
-
-We have a strong roadmap of observability sharing features including security logs, query logs and other capabilities. Many of these logs can be of significant size hence *we will introduce in the future a new consumption based billing model including cloud egress costs*.
-
-We believe security is of paramount importance hence we have decided to make security logs available for you initially at no extra charge.
-====
-
-The complete steps for setting up log forwarding depends on the chosen cloud provider.
-
-Exhaustive instructions are provided in the wizard which appears by following the steps below.
-
-. Navigate to the *Log forwarding* page as described above.
-. Click *Create new log forwarding process*.
-. Follow the instructions specific to your cloud provider.
-
-== Output destination
-
-Log forwarding can forward logs to the log service of the same cloud provider as the monitored instance is located in.
-
-Cross-region log forwarding is supported.
-
-If your instance is in:
-
-* *Google Cloud Platform* - Forward logs to Google Cloud Logging in your own GCP project.
-* *Amazon Web Services* - Forward logs to CloudWatch in your own AWS account.
-* *Azure* - Forward logs to a Log Analytics workspace in your own Azure subscription.
-
-Logs can be further forwarded into third party systems using the log routing capabilities provided by your cloud provider.
diff --git a/modules/ROOT/pages/platform/logging/query-log-analyzer.adoc b/modules/ROOT/pages/platform/logging/query-log-analyzer.adoc
deleted file mode 100644
index 0259dcd9f..000000000
--- a/modules/ROOT/pages/platform/logging/query-log-analyzer.adoc
+++ /dev/null
@@ -1,103 +0,0 @@
-[[aura-monitoring]]
-= Query log analyzer
-
-label:AuraDB-Professional[]
-label:AuraDB-Enterprise[]
-
-Query log analyzer is a feature that provides a UI to review the queries executed on an Aura instance.
-
-To access *Query log analyzer*:
-
-. Navigate to the https://console.neo4j.io/?product=aura-db[Neo4j Aura Console] in your browser.
-. Select the instance you want to access.
-. Select the *Logs* tab.
-. Select the *Query log analyzer* button.
-
-Query log analyzer is split up in three parts:
-
-* *Query timeline* - Timeline showing metrics for number of queries, failed queries and query latency.
-* *Summary table* - An aggregated view of query logs, giving a high level overview over the selected time period.
-* *Details table* - A detailed view showing individual query executions in the selected time period.
-
-To fetch logs, first choose a time range in the Query timeline.
-With a time selection done, press the *Fetch logs* button.
-You may optionally choose any filters or search text if required, then press *Go*.
-
-A summary of query executions is returned, showing aggregations per query.
-To see the individual query executions, click the right arrow at the end of the line to show details for that query.
-The details pane shows individual executions.
-
-== Timeline interactions
-
-When viewing the query timeline, you can select from the following time intervals:
-
-* 30 minutes
-* Last hour
-* Last 2 hours
-* Last 6 hours
-* Last 24 hours
-* Last 3 days
-* Last week
-
-The query timeline can be collapsed by clicking on the header.
-
-=== Zoom
-
-To zoom in to a narrower time interval, select and drag inside the timeline to select your desired time interval.
-The data in the timeline will automatically update to match the increased resolution.
-To update the table, click the *Fetch logs* button.
-
-To reset zoom, double-click anywhere inside the timeline.
-
-=== Toggle data series
-
-To hide or show individual data series, select the corresponding data series in the legend below the timeline.
-
-
-== Fetch logs
-
-The *Fetch logs* button will open up a dialog where you can add filters and search before fetching the logs.
-The Query timeline determines the current time selection, which can be changed by closing the dialog and modifing the timeline.
-To fetch the logs after selection of filters and search is done, click the *Go* button.
-
-=== Filters
-
-To filter, click the filter button.
-This will load the available filters over the selected time period.
-Filters are available for the following fields:
-
-* Status
-* User
-* Driver
-* Application
-* Initiation type
-
-=== Search
-
-To search, click the search button.
-Search can be specified for the *Query text* and the *Error text*.
-The fields are case insensitive and allows you to find specific queries or error that are interesting.
-
-
-== Table interactions
-
-=== Sort table
-
-By default the table will be sorted on *Count* for *Summary* and *Status* for *Details*.
-To sort by a column (such as Max Time ms) click on the column heading.
-
-=== Modify columns
-
-The columns in the table can be modified by clicking the button to the right of the column row.
-Columns can be enabled or disabled, or the order changed using the grid icon at the top right of the table.
-
-=== Expand query
-
-In the table three rows of query text will be shown.
-To see the whole query if the query is longer, press the *View more* button under the query text.
-
-== Limitations
-
-* Query logs are available for a period of 7 days, and each request can be for up to 24 hours of data.
-* The query timeline may show activity from internal meta queries, which are filtered in the table.
-
diff --git a/modules/ROOT/pages/platform/security/encryption.adoc b/modules/ROOT/pages/platform/security/encryption.adoc
deleted file mode 100644
index 5ff7aed22..000000000
--- a/modules/ROOT/pages/platform/security/encryption.adoc
+++ /dev/null
@@ -1,181 +0,0 @@
-[[aura-reference-security]]
-= Encryption
-:description: Aura is encrypted using intra-cluster encryption, and is CMK compatible.
-
-All data stored in Neo4j Aura is encrypted using intra-cluster encryption between the various nodes comprising your instance and encrypted at rest using the underlying cloud provider's encryption mechanism.
-
-By default, each cloud provider encrypts all backup buckets (including the objects stored inside) using either link:https://cloud.google.com/storage/docs/encryption/default-keys[Google-managed encryption], link:https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingServerSideEncryption.html[AWS SSE-S3 encryption], or link:https://learn.microsoft.com/en-us/azure/storage/common/storage-service-encryption[Azure Storage encryption].
-
-== Customer Managed Keys
-
-label:AuraDB-Enterprise[]
-label:AuraDS-Enterprise[]
-
-[NOTE]
-====
-This feature has been released as a public GA for AuraDB Enterprise and AuraDS Enterprise for AWS managed keys.
-GCP’s Cloud Key Management and Azure’s Key Vault are in development.
-====
-
-A Customer Managed Key (CMK) gives you more control over key operations than the standard Neo4j encryption.
-These are created and managed using a supported cloud key management service (KMS).
-Externally, Customer Managed Keys are also known as Customer Managed Encryption Keys (CMEK).
-
-When using a Customer Managed Key, all data at rest is encrypted with the key.
-Customer Managed Keys are supported for v4.x and v5.x instances.
-
-When using Customer Managed Keys, you give Aura permission to encrypt and decrypt using the key, but Aura has no access to the key’s material.
-Aura has no control over the availability of your externally managed key in the KMS.
-If you lose keys that are managed outside of Aura, Aura can’t recover your data.
-
-[WARNING]
-====
-The loss of a Customer Managed Key, through deletion, disabling, or expiration, renders all data encrypted with that key unrecoverable.
-Neo4j cannot administer database instances when keys are disabled, deleted, or permissions revoked.
-====
-
-=== Delete a key
-
-If a Customer Managed Key is being used to encrypt one or more Aura instances in the console, it can't be deleted.
-If you need to delete the key, first delete the Aura database instances encrypted with the key, then delete the key.
-
-=== Key rotation
-
-In your KMS platform, you can either configure automatic rotation for the Customer Managed Key, or you can perform a manual rotation.
-
-Although automatic rotation is not enforced by Aura, it is best practice to rotate keys regularly.
-Manual key rotation is **not** recommended.
-
-=== Regions
-
-There is a limit of one key for AuraDB and one key for AuraDS per region.
-Depending on the KMS, there may be a delay between disabling a key, and when it can no longer be used to encrypt and decrypt data.
-
-=== Import an existing database
-
-You can upload a database to instances encrypted with Customer Managed Keys in Neo4j 5 directly from the console or by using `neo4j-admin database upload`.
-If the database is larger than 4 GB, you have to use `neo4j-admin database upload`.
-Note that the `neo4j-admin push-to-cloud` command in Neo4j v4.4 and earlier is **not** supported for instances encrypted with Customer Managed Keys.
-For more information see the xref:auradb/importing/import-database.adoc#_neo4j_admin_database_upload[Neo4j Admin `database upload`] documentation.
-
-=== Clone an instance protected by CMK
-
-To clone an instance protected by a Customer Managed Key, the key must be valid and available to Aura.
-The cloned instance, by default, uses the available Customer Managed Key for that region and product.
-
-You can override this behavior by selecting the Neo4j Managed Key when cloning the database.
-If there is no valid CMK for the destination region and product, the Neo4j Managed Key is used to encrypt the cloned instance.
-
-== AWS keys
-
-=== Create an AWS key
-
-. Create a key in the AWS KMS making sure the region matches your Aura database instance.
-Copy the generated ARN.
-You need it in the next step.
-. Go to *security settings* in the Aura Console, add a *Customer Managed Key* and copy the JSON code that is generated in the Aura Console when you add a key.
-. In the AWS KMS, edit the key policy to include the JSON code.
-
-=== Edit the AWS key policy
-
-After you have initially created a key in the AWS KMS, you can edit the key policy.
-In the AWS key policy, "Statement" is an array that consists of one or more objects.
-Each object in the array describes a security identifier (SID).
-The objects in the AWS code array are comma-separated, e.g. `{[{'a'}, {'b'}, {'c'}]}`
-
-Add a comma after the curly brace in the final SID, and then paste the JSON code that was generated in the Aura Console, e.g. `{[{'a'}, {'b'}, {'c'}, _add code here_ ]}`
-
-=== AWS regions
-
-When creating a Customer Managed Key in the AWS KMS, you can create a single-region key in a single AWS region, or create a multi-region key that you can replicate into multiple AWS regions.
-Aura only supports AWS Customer Managed Keys that reside in the same region as the instance.
-
-[CAUTION]
-====
-In Aura, you can use AWS single-region keys, multi-region keys, or replica keys as long as the key resides in the same region as the Aura instance.
-====
-
-=== AWS automatic key rotation
-
-Aura supports automatic key rotation via the AWS KMS.
-To enable automatic key rotation in the AWS KMS, tick the *Key rotation* checkbox after initially creating a key, to automatically rotate the key once a year.
-
-== Azure keys
-
-=== Create an Azure key vault
-
-Create a Key Vault in the Azure portal ensuring the region matches your Aura database instance region.
-Move through the tabs to enable the following:
-
-* Purge protection
-* Azure role-based access control
-* Azure Disk Encryption for volume encryption
-* Allow access from all networks
-
-=== Create a key
-
-. When preparing to create a key, if needed, grant a role assignment:
-.. Inside the key vault, go to *Access Control (IAM)* and *add role assignment*.
-.. In the *Role* tab, select *Key Vault Administrator*.
-.. In the *Member* tab, select *User, group, or service principal*.
-.. *Select members* and select yourself or the relevant person, then *Review + Assign*.
-
-. Create a key in the Azure Key Vault.
-. After the key is created, click into key version and copy the *Key Identifier*, you need it in the next step.
-. Go to *security settings* in the Aura Console and add a *Customer Managed Key*.
-. Follow the instructions in the Aura Console for the next sections.
-
-=== Create a service principal
-
-In the Azure Entra ID tenant where your key is located, create a service principal linked to the Neo4j CMK Application with the *Neo4j CMK Application ID* displayed in the Aura Console.
-
-One way to do this is by clicking the terminal icon at the top of the Azure portal, to open the Azure Cloud Shell.
-
-Using Azure CLI, the command is:
-
-[source,bash]
-----
-az ad sp create --id Neo4jCMKApplicationID
-----
-For more information about the Azure CLI, see link:https://learn.microsoft.com/en-us/cli/azure/ad/sp?view=azure-cli-latest#az-ad-sp-create[az ad sp documentation].
-
-=== Grant key permissions
-
-. To add role assignment to the Azure key, inside the key, go to *Access control (IAM)* and add *role assignment*.
-. In the *Role* tab, select *Key Vault Crypto Officer*.
-. In the *Member* tab, select *User, group, or service principal*.
-. *Select members* and paste the *Neo4j CMK Application name* that is displayed in the Aura Console.
-. The *Neo4j CMK Application* should appear, select this application then *Review + Assign*.
-
-== GCP keys
-
-=== Create a key ring
-
-. Go to *Key Management* in the Google Cloud console.
-. Create a *key ring*.
-. The key ring *Location type* should be set to *Region.*
-. Make sure the region matches your Aura database instance region.
-. Select *Create* and you are automatically taken to the key creation page.
-
-=== Create a key
-
-. Create a key in the Google Console.
-You can use default settings for the options, but setting a key rotation period is recommended.
-. Select *Create* and you are brought to the key ring, with your key listed.
-. Click *More* (three dots) and *Copy resource name*, you need it in the next step.
-For more information, see link:https://cloud.google.com/kms/docs/getting-resource-ids[Google Cloud docs]
-. Go to *security settings* in the Aura Console and add a *Customer Managed Key*.
-Paste the *resource name* into the *Encryption Key Resource Name* field.
-. After you select *Add Key* in the Aura Console, three *service accounts* are displayed in the Aura Console.
-You will need these in the next steps.
-
-=== Grant key permissions
-
-. Go to the Google Cloud console, click into the key and go to *Permissions* then *Grant Access*.
-. In *Add principals* paste the three service accounts from the Aura Console.
-. In *Assign roles* assign both *Cloud KMS CryptoKey Encrypter/Decrypter* and *Cloud KMS Viewer* roles to all three service accounts.
-
-
-
-
-
diff --git a/modules/ROOT/pages/platform/security/secure-connections.adoc b/modules/ROOT/pages/platform/security/secure-connections.adoc
deleted file mode 100644
index 00a1b1206..000000000
--- a/modules/ROOT/pages/platform/security/secure-connections.adoc
+++ /dev/null
@@ -1,227 +0,0 @@
-[[aura-reference-security]]
-= Secure connections
-:description: VPC boundaries enable you to operate within an isolated section of the service.
-
-== VPC isolation
-
-label:AuraDB-Enterprise[]
-label:AuraDS-Enterprise[]
-
-AuraDB Enterprise and AuraDS Enterprise run in a dedicated cloud Account (AWS), Subscription (Azure) or Project (GCP) to achieve complete isolation for your deployment.
-
-Additional VPC boundaries enable you to operate within an isolated section of the service, where your processing, networking, and storage are further protected.
-
-The Aura Console runs in a separate VPC, separate from the rest of Aura.
-
-== Network access
-
-An Aura instance can be publicly available, completely private, or both.
-To configure this, you need to be authorized to access the part of the infrastructure that runs and handles these instances as well as the networking used to establish secure connections between the database and the application's VPC.
-This includes the ability to connect over the cloud provider's private link and private endpoint.
-
-If your Aura instances are public, traffic to them is allowed to traverse the public internet and they are accessible with the correct username and password.
-
-For your instance to be completely private, turn public traffic off, use the cloud provider's network, and create a private endpoint inside your VPC, which gives you a private connection to Aura.
-The only way to connect to your database is from inside your network (your VPC in your AWS/Azure/GCP account) using an internal IP address you choose and DNS records you create.
-
-To select network access settings go to *Aura Console* > *Security* > *Network Access*.
-
-== Private endpoints
-
-Private endpoints are network interfaces inside your own VPC, which can only be accessed within your private network.
-The cloud provider connects them over their network to Neo4j Aura.
-By design they are not exposed to the public internet, ensuring that critical services are accessible only through private, secure networks.
-
-A single private link connection applies to all instances in a region.
-So if you've set one up for `us-east-1` then those network connections will apply to all instances in that region.
-You can set up a second private link connection to applications that are hosted in a second region i.e. `us-west-1` but still housed inside the same Aura tenant.
-
-=== AWS private endpoints
-
-label:AuraDB-Enterprise[]
-label:AuraDS-Enterprise[]
-
-AuraDB Enterprise supports private endpoints on AWS using https://aws.amazon.com/privatelink[AWS PrivateLink].
-
-Once activated, you can create an endpoint in your VPC that connects to Aura.
-
-For a step-by-step guide, see the link:https://neo4j.com/blog/neo4j-aws-privatelink-configuration/[How to Configure Neo4j Aura With AWS PrivateLink] blog article.
-
-image::privatelink.png[title="VPC connectivity with AWS PrivateLink"]
-
-All applications running Neo4j workloads inside the VPC are routed directly to your isolated environment in Aura without traversing the public internet.
-You can then disable public traffic, ensuring all traffic to the instance remains private to your VPC.
-
-[NOTE]
-====
-* PrivateLink applies to all instances in the region.
-* When activated, a *Private Connection* label, shield icon, and dedicated *Private URI* will appear on any instance tile using PrivateLink in the Aura Console.
-* If you disable public traffic, you must use a dedicated VPN to connect to your instance via Browser or Bloom.
-* Connections using private endpoints are one-way. Aura VPCs can't initiate connections back to your VPCs.
-* In AWS region us-east-1, we do not support the Availability Zone with ID use1-az3 for private endpoints.
-====
-
-==== Browser and Bloom access over private endpoints
-
-To connect to your instance via Browser or Bloom, you must use a dedicated VPN. This is because when you disable public access to your instance, this applies to all connections, including those from your computer when using Browser or Bloom.
-
-Without private endpoints, you access Browser and Bloom over the internet:
-
-image::privatelink_01_before_enabling.png[title="Architecture overview before enabling private endpoints"]
-
-When you have enabled private endpoints **and** disabled public internet access, you can no longer connect Browser or Bloom to your instances over the internet:
-
-image::privatelink_02_enabled_private_traffic_only.png[title="Architecture overview with private endpoints enabled and public traffic disabled"]
-
-To continue accessing Browser and Bloom, you can configure a VPN (Virtual Private Network) in your VPC and connect to Browser and Bloom over the VPN.
-
-[NOTE]
-====
-To access Bloom and Browser over a VPN, you must ensure that:
-
-* The VPN server uses the https://docs.aws.amazon.com/vpc/latest/userguide/vpc-dns.html#AmazonDNS[VPC's DNS servers].
-* You use the *Private URI* shown on the instance tile and in the instance details. It will be different from the *Connection URI* you used before.
-====
-
-image::privatelink_03_browser_bloom_over_vpn.png[title="Accessing Browser and Bloom over a VPN"]
-
-==== Enabling private endpoints
-
-To enable private endpoints using AWS PrivateLink:
-
-. Select *Network Access* from the sidebar menu of the Console.
-. Select *New network access configuration* and follow the setup instructions.
-
-You will need an AWS account with permissions to create, modify, describe and delete endpoints.
-Please see the https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html[AWS Documentation] for more information.
-
-=== GCP private endpoints
-
-label:AuraDB-Enterprise[]
-label:AuraDS-Enterprise[]
-
-Aura Enterprise supports private endpoints on GCP using https://cloud.google.com/vpc/docs/private-service-connect[GCP Private Service Connect].
-
-Once activated, you can create an endpoint in your VPC that connects to Aura.
-
-image::privateserviceconnect.png[title="VPC connectivity with GCP Private Service Connect"]
-
-All applications running Neo4j workloads inside the VPC are routed directly to your isolated environment in Aura without traversing the public internet.
-You can then disable public traffic, ensuring all traffic to the instance remains private to your VPC.
-
-[NOTE]
-====
-* Private Service Connect applies to all instances in the region.
-* When activated, a *Private Connection* label, shield icon, and dedicated *Private URI* will appear on any instance tile using Private Service Connect in the Aura Console.
-* If you disable public traffic, you must use a dedicated VPN to connect to your instance via Browser or Bloom.
-* Connections using private endpoints are one-way. Aura VPCs can't initiate connections back to your VPCs.
-====
-
-==== Browser and Bloom access over private endpoints
-
-To connect to your instance via Browser or Bloom, you must use a dedicated VPN. This is because when you disable public access to your instance, this applies to all connections, including those from your computer when using Browser or Bloom.
-
-Without private endpoints, you access Browser and Bloom over the internet:
-
-image::privateserviceconnect_01_before_enabling.png[title="Architecture overview before enabling private endpoints"]
-
-When you have enabled private endpoints and disabled public internet access, you can no longer connect Browser or Bloom to your instances over the internet:
-
-image::privateserviceconnect_02_enabled_private_traffic_only.png[title="Architecture overview with private endpoints enabled and public traffic disabled"]
-
-To continue accessing Browser and Bloom, you can configure a https://cloud.google.com/network-connectivity/docs/vpn/concepts/overview[GCP Cloud VPN] (Virtual Private Network) in your VPC and connect to Browser and Bloom over the VPN.
-
-[NOTE]
-====
-To access Bloom and Browser over a VPN, you must ensure that:
-
-* You have set up link:https://cloud.google.com/dns/docs/zones/manage-response-policies[GCP Response Policy Zone], or an equivalent DNS service, inside of the VPC.
-* You use the *Private URI* shown on the instance tile and in the instance details. It will be different from the *Connection URI* you used before.
-====
-
-image::privateserviceconnect_03_browser_bloom_over_vpn.png[title="Accessing Browser and Bloom over a VPN"]
-
-==== Enabling private endpoints
-
-To enable private endpoints using GCP Private Service Connect:
-
-. Select *Network Access* from the sidebar menu of the Console.
-. Select *New network access configuration* and follow the setup instructions.
-
-Please see the https://cloud.google.com/vpc/docs/configure-private-service-connect-services[GCP Documentation] for required roles and permissions.
-
-=== Azure private endpoints
-
-label:AuraDB-Enterprise[]
-label:AuraDS-Enterprise[]
-
-Aura Enterprise supports private endpoints on Azure using https://azure.microsoft.com/en-us/products/private-link/#overview[Azure Private Link].
-
-Once activated, you can create an endpoint in your Virtual Network (VNet) that connects to Aura.
-
-image::azure_privatelink.png[title="VNet connectivity with Azure Private Link"]
-
-All applications running Neo4j workloads inside the VNet are routed directly to your isolated environment in Aura without traversing the public internet.
-You can then disable public traffic, ensuring all traffic to the instance remains private to your VNet.
-
-[NOTE]
-====
-* Private Link applies to all instances in the region.
-* When activated, a *Private Connection* label, shield icon, and dedicated *Private URI* will appear on any instance tile using Private Link in the Aura Console.
-* If you disable public traffic, you must use a dedicated VPN to connect to your instance via Browser or Bloom.
-* Connections using private endpoints are one-way. Aura VNets can't initiate connections back to your VNets.
-====
-
-==== Browser and Bloom access over private endpoints
-
-To connect to your instance via Browser or Bloom, you must use a dedicated VPN. This is because when you disable public access to your instance, this applies to all connections, including those from your computer when using Browser or Bloom.
-
-Without private endpoints, you access Browser and Bloom over the internet:
-
-image::azure_privatelink_01_before_enabling.png[title="Architecture overview before enabling private endpoints"]
-
-When you have enabled private endpoints and disabled public internet access, you can no longer connect Browser or Bloom to your instances over the internet:
-
-image::azure_privatelink_02_enabled_private_traffic_only.png[title="Architecture overview with private endpoints enabled and public traffic disabled"]
-
-To continue accessing Browser and Bloom, you can configure a VPN (Virtual Private Network) in your VNet and connect to Browser and Bloom over the VPN.
-
-[NOTE]
-====
-To access Bloom and Browser over a VPN, you must ensure that:
-
-* You have set up https://learn.microsoft.com/en-us/azure/dns/private-dns-overview[Azure Private DNS], or an equivalent DNS service, inside the VNet.
-* You use the *Private URI* shown on the instance tile and in the instance details. It will be different from the *Connection URI* you used before.
-====
-
-image::azure_privatelink_03_browser_bloom_over_vpn.png[title="Accessing Browser and Bloom over a VPN"]
-
-==== Enabling private endpoints
-
-To enable private endpoints using Azure Private Link:
-
-. Select *Network Access* from the sidebar menu of the Console.
-. Select *New network access configuration* and follow the setup instructions.
-
-Please see the link:https://learn.microsoft.com/en-us/azure/private-link/rbac-permissions#private-endpoint[Azure Documentation] for required roles and permissions.
-
-== Supported TLS cipher suites
-
-For additional security, client communications are carried via TLS v1.2 and TLS v1.3.
-
-AuraDB has a restricted list of cipher suites accepted during the TLS handshake, and does not accept all of the available cipher suites.
-The following list conforms to safety recommendations from IANA and the OpenSSL and GnuTLS libraries.
-
-TLS v1.3:
-
-* `TLS_CHACHA20_POLY1305_SHA256 (RFC8446)`
-* `TLS_AES_128_GCM_SHA256 (RFC8446)`
-* `TLS_AES_256_GCM_SHA384 (RFC8446)`
-
-TLS v1.2:
-
-* `TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 (RFC5288)`
-* `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 (RFC5289)`
-* `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 (RFC5289)`
-* `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 (RFC7905)`
-* `TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 (RFC5288)`
diff --git a/modules/ROOT/pages/platform/security/single-sign-on.adoc b/modules/ROOT/pages/platform/security/single-sign-on.adoc
deleted file mode 100644
index d6c31e200..000000000
--- a/modules/ROOT/pages/platform/security/single-sign-on.adoc
+++ /dev/null
@@ -1,54 +0,0 @@
-[[aura-reference-security]]
-= Single Sign-On (SSO)
-:description: SSO allows you to log in to the Aura Console using your company IdP credentials.
-
-label:AuraDB-Enterprise[]
-label:AuraDS-Enterprise[]
-label:AuraDB-Business-Critical[]
-
-Aura Enterprise and Aura Business Critical support Single Sign-On (SSO) at both the Console level and for accessing Workspace, Bloom and Browser clients directly at the instance level.
-
-[NOTE]
-====
-Accessing Aura with SSO requires:
-
-* Authorization Code Flow with PKCE.
-* A publicly accessible Identity Provider (IdP) server.
-====
-
-== Console SSO
-
-Console SSO allows you to log in to the Aura Console using company IdP credentials and grants link:{neo4j-docs-base-uri}/cypher-manual/current/administration/access-control/built-in-roles#access-control-built-in-roles-public[Public Access privileges] to all instances in the tenant.
-
-The following OpenID connect (OIDC) certified Identity Providers (IdPs) are currently supported for Console-level Authentication with Aura Enterprise and Aura Business Critical:
-
-* Microsoft Entra ID
-* Okta
-
-To enable Console SSO on your tenant(s), please link:https://support.neo4j.com/[raise a support ticket] including the following information:
-
-. The _Tenant ID_ of the tenant(s) you want to use SSO. See xref:platform/user-management.adoc#_tenants[Tenants] for more information on how to find your __Tenant ID__.
-. The name of your IdP.
-
-== Instance SSO
-
-Instance SSO allows you to directly map groups of users (as defined in your IdP) to DBMS RBAC roles when launching Workspace, Bloom and Browser clients from an Aura instance.
-
-The following OIDC certified IdPs are currently supported for instance-level Authentication:
-
-* Microsoft Entra ID
-* Okta
-* Keycloak
-* Google Authentication
-
-To add SSO for Workspace, Bloom, and Browser to your Aura Enterprise instances, please https://support.neo4j.com/[raise a support ticket] including the following information:
-
-. The *Connection URI* of the instance(s) you want to use SSO.
-. Whether or not you want Workspace, Bloom, Browser, or a combination of them enabled.
-. The name of your IdP.
-
-[NOTE]
-====
-If you have to specify an application type when configuring your client, Neo4j is a Single-page application.
-For more information on configuring your client, see link:{neo4j-docs-base-uri}/operations-manual/current/tutorial/tutorial-sso-configuration/[Neo4j Single Sign-On (SSO) Configuration].
-====
diff --git a/modules/ROOT/pages/platform/user-management.adoc b/modules/ROOT/pages/platform/user-management.adoc
deleted file mode 100644
index 3da6117cc..000000000
--- a/modules/ROOT/pages/platform/user-management.adoc
+++ /dev/null
@@ -1,105 +0,0 @@
-[[aura-user-management]]
-= User management
-:description: This page describes how to manage users in Neo4j Aura.
-
-User management is a feature within Aura that allows you to invite users and set their roles within an isolated environment.
-
-== Tenants
-
-Tenants are the primary mechanism for granting users access to an Aura environment.
-
-The tenant you're currently viewing is displayed in the header of the Console.
-You can select the tenant name to open the tenant dropdown menu, allowing you to view all the tenants that you have access to and switch between them.
-
-Additionally, you can perform the following actions from the tenant dropdown menu:
-
-* Copy the _Tenant ID_ of any tenant in the list by selecting the clipboard icon that appears when you hover over the tenant.
-* Edit the name of the tenant you are currently viewing by selecting the pencil icon next to the tenant. This action requires you to be an _Admin_ of the tenant.
-
-== Users
-
-Each tenant can have multiple users with individual accounts allowing access to the same environment.
-
-The users with access to a tenant can be viewed and managed from the **User Management** page.
-You can access the **User Management** page by selecting **User Management** from the sidebar menu of the Console.
-
-=== Roles
-
-Users within a tenant can be assigned one of the following roles:
-
-* _Admin_
-* _Member_
-* _Viewer_
-
-:check-mark: icon:check[]
-
-.Roles
-[opts="header",cols="3,1,1,1"]
-|===
-| Capability | Admin | Member | Viewer
-| View users and their roles | {check-mark} | {check-mark} | {check-mark}
-| View and open instances | {check-mark} | {check-mark} | {check-mark}
-| Access the Neo4j Customer Support Portal | {check-mark} | {check-mark} | {check-mark}
-| Perform all actions on instances footnote:[Actions include creating, deleting, pausing, resuming, and editing instances.] | {check-mark} | {check-mark} |
-| Clone data to new and existing instances | {check-mark} | {check-mark} |
-| Take on-demand snapshots | {check-mark} | {check-mark} |
-| Restore from snapshots | {check-mark} | {check-mark} |
-| Edit the tenant name | {check-mark} | |
-| Invite new users to the tenant | {check-mark} | |
-| Edit existing users' roles | {check-mark} | |
-| Delete existing users from the tenant | {check-mark} | |
-| View and edit billing information | {check-mark} | |
-|===
-
-[NOTE]
-====
-Each tenant must have at least one _Admin_, but it is also possible for tenants to have multiple _Admins_.
-====
-
-=== Inviting users
-
-As an _Admin_, to invite a new user:
-
-. Select **Invite user** from the **User Management** page.
-. Enter the **Email** address of the person you want to invite.
-. Select the user's **Role**.
-. Select **Invite**.
-
-The new user will appear within the list of users on the **User Management** page with the _Pending invite_ **Status** until they accept the invite.
-
-An email will be sent to the user with a link to accept the invite.
-
-=== Editing users
-
-As an _Admin_, to edit an existing user's role:
-
-. Select the pencil icon next to the user's name from the **User Management** page.
-. Select the user's new **Role**.
-. Select **Save changes**.
-
-=== Deleting users
-
-As an _Admin_, to delete an existing user:
-
-. Select the trash can icon next to the user's name from the **User Management** page.
-. Select **Delete**.
-
-[NOTE]
-====
-It is also possible to delete a user whose **Status** is _Pending invite_.
-
-Select the trash can icon next to the user's name, and then select **Revoke**.
-====
-
-=== Accepting an invite
-
-When invited to a tenant, you will receive an email with a link to accept the invite.
-This link will direct you to the Aura Console, where a **Tenant invitation** modal will appear.
-You can select the tenant(s) you have been invited to and choose to accept or decline the invite(s).
-
-You can also close the **Tenant invitation** modal without accepting or declining the invite(s) and later manually re-open the modal by selecting the **Pending invites** envelope icon in the Console header.
-
-[TIP]
-====
-User management within the Aura Console does not replace built-in roles or fine-grained RBAC at the database level.
-====
diff --git a/modules/ROOT/pages/query/command-reference.adoc b/modules/ROOT/pages/query/command-reference.adoc
new file mode 100644
index 000000000..5767eb045
--- /dev/null
+++ b/modules/ROOT/pages/query/command-reference.adoc
@@ -0,0 +1,77 @@
+:description: This section lists all the Query commands.
+= Command reference
+
+The editor in Query understands a few client-side commands, which begin with a colon (`:`).
+
+
+[[query-commands]]
+== Query commands
+
+
+.List of commands
+[options="header",cols="3,7"]
+|===
+| Command
+| Description
+
+m| :access-mode read/write
+a| Set the access mode to read or write.
+
+m| :clear
+a| Remove all frames from the stream.
+
+m| :connect
+a| Connect to a Neo4j instance.
+If already connected, this returns the instance you are connected to.
+
+m| :disconnect
+a| Disconnect the currently open connection to an instance.
+
+m| :history
+a| Open the history drawer of your executed commands.
+
+m| +:param +
+a|
+Set one parameter. +
+The key-value can be specified as `+x => 1+`.
+
+m| :params
+a| Show all parameters.
+See xref:query/operations.adoc#query-parameters[Query parameters] for more information.
+
+m| +:params clear+
+a| Remove all parameters.
+
+m| +:params {}+
+a|
+Remove all parameters and then set the parameters to the specified key-value pairs. +
+The key-value pairs can be specified as `+{x: 1, y: 2}+`.
+
+// [NOTE]
+// ====
+// Integers will be set as numbers in this form.
+// ====
+
+m| :server connect
+a| Connect to an instance.
+If already connected, this returns the instance you are connected to.
+
+m| :server disconnect
+a| Disconnect the currently open connection to Neo4j.
+
+m| :style
+a| Show the current style configuration.
+
+m| :style reset
+a| Reset the style to the default styling.
+
+m| :sysinfo
+a| Show information about _Store Size_, _Id Allocation_, _Page Cache_, _Transactions_, and _Databases_.
+
+m| :welcome
+a| Launches the entry page with interactive guides.
+This is the initial command that is automatically executed every time you connect to an instance.
+|===
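+
+For example, the following client-side command sets a single parameter (a minimal illustration of the `:param` entry in the table above; the name and value are arbitrary):
+
+[source, query command, role=noheader]
+----
+:param x => 1
+----
+
+Running `:params` afterwards lists all currently set parameters and their values.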
+
+
+
diff --git a/modules/ROOT/pages/query/introduction.adoc b/modules/ROOT/pages/query/introduction.adoc
new file mode 100644
index 000000000..e3e73c6f1
--- /dev/null
+++ b/modules/ROOT/pages/query/introduction.adoc
@@ -0,0 +1,14 @@
+[[query-introduction]]
+= Query
+:description: This section introduces the Query tool for querying data.
+:page-aliases: auradb/getting-started/query-database.adoc
+
+Query is a tool that allows you to write and execute Cypher queries and visualize the results.
+It is a way to interact with the graph with the main focus on:
+
+* Writing and running graph queries with Cypher.
+* Exportable results of queries.
+* Graph visualization of query results containing nodes and relationships.
+
+A similar experience is available with Neo4j Browser.
+See the link:https://neo4j.com/docs/browser-manual/current/[Neo4j Browser] documentation for more information.
\ No newline at end of file
diff --git a/modules/ROOT/pages/query/operations.adoc b/modules/ROOT/pages/query/operations.adoc
new file mode 100644
index 000000000..51e9af599
--- /dev/null
+++ b/modules/ROOT/pages/query/operations.adoc
@@ -0,0 +1,341 @@
+:description: This section describes the basic operations in Query.
+[[query-operations]]
+= Query operations
+
+
+[[result-frames]]
+== Result frames
+
+There are a variety of ways to view data in Query.
+All queries that you run in the Cypher editor populate a reusable result frame.
+Query results are rendered as:
+
+* Visual graph -- graph result frame.
+* Table -- table result frame.
+* Metadata -- RAW result frame.
+* Execution plan -- plan result frame (only available for `PROFILE` and `EXPLAIN` queries).
+
+You can switch between them in the top left corner of the result frame.
+
+
+[[graph-result-frame]]
+=== Graph result frame
+
+The graph visualization functionality is designed to display a node-graph representation of the underlying data stored in the database in response to a given Cypher query.
+The visual representation of the graph is useful for determining areas of interest or assessing the current state and structure of the data.
+Graph view results can be downloaded as _PNG_.
+
+[.shadow]
+.Graph result frame
+image::graph-result-frame.png[]
+
+[NOTE]
+====
+A squiggly line anywhere in your query indicates a warning.
+This is most commonly caused by a query attempting to match a pattern not present in the graph.
+Hover over the underlined segment to see the explanation.
+====
+
+Tips when using the *Graph* view:
+
+* Use the controls in the bottom right corner of the frame to zoom in and out of the visualization.
+Additionally, you can zoom using trackpad zoom gestures or a mouse wheel in combination with a modifier key.
+(If you are in full-screen view, the modifier key is not needed to zoom.)
+On Mac, use `⌘ + scroll` and on Windows and Linux, use `Ctrl + scroll` to trigger zoom.
+You can also use the _Fit to screen_ button to fit all query results into the view.
+* Expand the Cypher editor area with the image:expand.svg[width=3%] icon next to the play button in the editor.
+* Select a node or a relationship to view its properties.
+The nodes already have sensible captions assigned by the Query tool, which auto-selects a property from the property list to use as a caption.
+To change the look of your graph, see xref:query/operations.adoc#styling[].
+* Right-click a node to *expand* (see its neighbors), *dismiss* it from the visualization, or *unpin* it.
+If you double-click a node, you automatically expand it, and if you double-click it again, you undo the expansion.
+If you right-click a relationship, you can dismiss it.
+* Right-clicking an empty spot allows you to show *all* relationships between elements in the result frame or to undo your last action with the visualization.
+* If you cannot see the whole graph or the results display too close together, you can adjust by moving the visual view and dragging nodes to rearrange them.
+
+The graph result frame also reports the query time, including the actual query execution time, latency, and deserialization costs.
+
+
+[[table-result-frame]]
+=== Table result frame
+
+The *Table* result view displays the result in a table format.
+It also reports the query time, including the actual query execution time, latency, and deserialization costs.
+
+[.shadow]
+.Table format
+image::table.png[]
+
+
+[[raw-result-frame]]
+=== RAW result frame
+
+The *RAW* result view displays the submitted request, the Neo4j Server version and address, and the response.
+It also reports the query time, including the actual query execution time, latency, and deserialization costs.
+
+[.shadow]
+.RAW format
+image::raw.png[]
+
+=== Plan view
+
+The *Plan* view is available for `EXPLAIN` and `PROFILE` queries and shows the execution plan for the query.
+For such queries, you can also toggle to view the results in a RAW format.
+
+[.shadow]
+.Plan view
+image::plan-view.png[]
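+
+For example, prefixing a query with `PROFILE` (or `EXPLAIN`) produces a plan result frame.
+This is a minimal sketch that assumes a `Product` label, as used in the parameter examples later on this page:
+
+[source, cypher, role=noplay]
+----
+// Profile the query to see the executed plan with row counts per operator.
+PROFILE
+MATCH (p:Product)
+RETURN count(p)
+----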
+
+[[styling]]
+== Styling
+
+You can customize your graph query results directly in the result frame based on node labels and relationship types.
+
+[.shadow]
+.Query styling
+image::query-styling.png[width=800]
+
+If you select a node label in the *Results overview*, there are several styling options available:
+
+* Color -- set the color for nodes of the selected label.
+* Size -- set the size for nodes of the selected label.
+* Caption -- set what should be displayed as the caption for nodes of the selected label.
+
+[.shadow]
+image::node-styling.png[width=400]
+
+If you select a relationship type in the *Results overview*, there are several styling options available:
+
+* Color -- set the color for relationships of the selected type.
+* Line width -- set the line width for relationships of the selected type.
+* Caption -- set what should be displayed as the caption for relationships of the selected type.
+
+[.shadow]
+image::relationship-styling.png[width=350]
+
+For nodes with multiple labels, you can select which label should take priority.
+Use the arrows to get a list of available labels in your graph and order them as you like.
+Nodes with multiple labels are then styled according to the first label in the list.
+
+[.shadow]
+image::prioritize.png[width=600]
+
+[[query-parameters]]
+== Query parameters
+
+Query supports querying based on parameters.
+It allows the Cypher query planner to reuse your queries instead of parsing and building new execution plans.
+
+Parameters can be used for:
+
+* literals and expressions
+* node and relationship IDs
+* properties, when referenced *dynamically* (for more information, see link:https://neo4j.com/docs/cypher-manual/current/clauses/where/#filter-on-dynamic-property[Filter on dynamically-computed node property]).
+* node labels and relationship types, when referenced *dynamically* (for more information, see link:https://neo4j.com/docs/cypher-manual/current/clauses/match/#dynamic-match[MATCH using dynamic node labels and relationship types]).
+
+Parameters cannot be used for the following constructs, as these form part of the query structure that is compiled into a query plan:
+
+* Property keys; `MATCH (n) WHERE n.$param = 'something'` is invalid.
+* Relationship types; `MATCH (n)-[:$param]->(m)` is invalid.
+* Node labels; `MATCH (n:$param)` is invalid.
+
+Parameters may consist of letters and numbers and any combination of these but cannot start with a number or a currency symbol.
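+
+To illustrate the distinction, the following sketch assumes a `Product` node with a `productName` property, matching the examples further down on this page.
+The first query passes the parameter as a property value, the second references the property key dynamically, and the commented-out pattern shows an invalid static use:
+
+[source, cypher, role=noplay]
+----
+// Valid: the parameter supplies a property value.
+MATCH (p:Product) WHERE p.productName = $name RETURN p
+
+// Valid: the property key is referenced dynamically.
+MATCH (p:Product) WHERE p[$propertyKey] = $name RETURN p
+
+// Invalid: a parameter cannot replace a static node label.
+// MATCH (n:$param) RETURN n
+----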
+
+[TIP]
+====
+For more details on the Cypher parameters, see link:https://neo4j.com/docs/cypher-manual/current/syntax/parameters/[Cypher Manual -> Parameters^].
+====
+
+
+[[set-params]]
+=== Set query parameters
+
+You can set a parameter to be sent with your queries via the *Parameters drawer* (*{}*) or by using the `:param` command.
+
+
+==== Parameter drawer
+
+The Parameter drawer provides inputs directly from the UI for most of the property types in Neo4j.
+
+.Parameter drawer
+[.shadow]
+image::param-drawer.png[]
+
+For other property types, such as link:https://neo4j.com/docs/cypher-manual/current/values-and-types/spatial/#spatial-values-point-type[Point] and setting link:https://neo4j.com/docs/cypher-manual/current/values-and-types/property-structural-constructed/#constructed-types[constructed types], the parameter drawer has a special `evaluated` option.
+This option allows you to express a parameter type and have it evaluated by the server as Cypher.
+Give the parameter a name, select `evaluated` as the type, enter the value, and use the play button to evaluate the parameter.
+This process is much like using the `:param` command, as described in the following section.
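+
+For instance, a spatial value can be set as an `evaluated` parameter; the equivalent client-side command looks like this (the parameter name and coordinates are illustrative):
+
+[source, query command, role=noheader]
+----
+:param location => point({latitude: 55.67, longitude: 12.57})
+----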
+
+==== `:param` command
+
+The `+:param name => 'Example'+` command defines a parameter named `name`, which will be sent along with your queries.
+The right hand side of `=>` is sent to the server and evaluated as Cypher with an implicit `RETURN` in front.
+This gives better type safety since some types (especially numbers) in JavaScript are hard to match with Neo4j's type system.
+To see the list of all currently set query parameters and their values, use the `:params` command.
+For more information on how to use the commands, see `:help param` and `:help params`.
+
+
+// [NOTE]
+// ====
+// If you are using a multi-database DBMS, parameters cannot be declared when using the `system` database.
+// Switch to a different database and declare, then switch back to the `system` database and use them.
+// ====
+
+
+.Set a parameter as an integer
+====
+[source, query command, role=noheader]
+----
+:param x => 1
+----
+====
+
+
+.Set a parameter as a float
+====
+[source, query command, role=noheader]
+----
+:param x => 1.0
+----
+====
+
+
+.Set a parameter as a string
+====
+[source, query command, role=noheader]
+----
+:param x => "Example"
+----
+====
+
+
+.Set a parameter as an object
+=====
+
+. Map
++
+[source, query command, role=noheader]
+----
+:param obj1 => ({props: {productName: "Chai", productID:1}})
+----
++
+[source, parameter, role=nocopy]
+.The obj1 parameter
+----
+$obj1 = {"props": {"productName": "Chai", "productID": 1}}
+----
++
+[NOTE]
+====
+Maps like `{x: 1, y: 2}` must be wrapped in parentheses `({x: 1, y: 2})`.
+====
++
+. List
++
+[source, query command, role=noheader]
+----
+:param obj2 => [1, 2, 3, 4]
+----
++
+[source, parameter, role=nocopy]
+.The obj2 parameter
+----
+$obj2 = [1, 2, 3, 4]
+----
+
+=====
+
+
+.Cypher query example with a parameter
+=====
+
+[source, query command, role=noheader]
+----
+:param name => 'Chai';
+----
+
+[source, cypher, role=noplay]
+----
+MATCH (p:Product)
+WHERE p.productName = $name
+RETURN p
+----
+
+[NOTE]
+====
+You need to run the `:param` command separately from the `MATCH` query.
+====
+
+=====
+
+[NOTE]
+====
+Any parameter value enclosed in single (') or double (") quotes is considered a `string`.
+If the intention is to use _JSON_ objects as a parameter, you may use the APOC function `apoc.convert.fromJsonList` to achieve this.
+See link:{neo4j-docs-base-uri}/apoc/current/overview/apoc.convert/apoc.convert.fromJsonList/[APOC Core documentation -> Procedures and functions] for more information.
+====
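+
+As a sketch of that approach, the JSON string below is illustrative; the right-hand side is evaluated as Cypher, so the APOC function converts it into a list of maps:
+
+[source, query command, role=noheader]
+----
+:param items => apoc.convert.fromJsonList('[{"name": "Chai"}, {"name": "Chang"}]')
+----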
+
+[[clear-params]]
+=== Clear parameters
+
+You can clear all currently set parameters from Query by running:
+
+
+[source, query command, role=noheader]
+----
+:params clear
+----
+
+
+=== Set several parameters
+
+You can set several parameters with the `:params` command; this also clears all currently set parameters.
+
+
+// [NOTE]
+// ====
+// Integers are set to float with this style.
+// ====
+
+
+.Set several parameters
+====
+[source, query command, role=noheader]
+----
+:params {x: 1, y: 2.0, z: 'abc', d: null, e: true, f: false}
+----
+
+[source, parameter, role=noheader]
+----
+$x = 1.0
+$y = 2.0
+$z = "abc"
+$d = null
+$e = true
+$f = false
+----
+====
+
+=== Parameter assistance
+
+If you run a query using parameters without first declaring them all, Query returns a parameter-missing error and lists the missing parameter(s).
+You can click the provided template to populate the editor with the command for setting parameters and all you have to do is enter the value(s) for the missing parameter(s).
+Since the result frame is reusable, once you have set your parameter(s), you can run the same Cypher query again without having to re-enter it.
+
+.Parameter assistance
+[.shadow]
+image::param-assist.png[]
+
+
+=== Duration for the query parameters
+
+Parameters are not automatically saved when you refresh or close Query, nor if you switch instances.
+
+If you wish to retain your parameters across sessions, you can use the *Local storage* toggle in the Query Settings, as shown:
+
+.Save parameters across sessions
+[.shadow]
+image::param-settings.png[width=400]
+
+
+You can also save a `:params` command to your Saved Cypher.
\ No newline at end of file
diff --git a/modules/ROOT/pages/query/visual-tour.adoc b/modules/ROOT/pages/query/visual-tour.adoc
new file mode 100644
index 000000000..4c7951a9f
--- /dev/null
+++ b/modules/ROOT/pages/query/visual-tour.adoc
@@ -0,0 +1,218 @@
+[[query-overview]]
+= Query overview
+:description: This section describes how to use the Query tool.
+
+[.shadow]
+image::query-ui.png[width=800]
+
+== Connection bar
+
+If you are **not** connected to an instance, the *Connection dropdown* shows grey for *No instance connected* but lets you select an instance to connect to.
+Once you have selected and entered your credentials, if needed, the status shows green for *Connected* and you are ready to start querying.
+This dropdown is also where you can switch instances or disconnect.
+
+[.shadow]
+image::query-connected-dropdown.png[width=300]
+
+The database selector shows which database (and the Cypher version for that database) you are connected to.
+Additionally, the connection bar also contains information about the user.
+
+== Sidebar
+
+The sidebar contains a set of drawers to set up the environment for graph management and explore your data.
+
+=== Database information
+
+This drawer contains information about the database you are connected to.
+It gives you an overview of the *node labels* and *relationship types*, as well as which *property keys* exist in the database.
+If you select one, you see a sample of the selected element as a graph or table.
+
+[.shadow]
+image::database-drawer.png[width=400]
+
+Additionally, the drawer contains node and relationship counts, displayed in parentheses.
+
+=== Saved Cypher
+
+The Saved Cypher drawer is where you keep your bookmarked queries and commands.
+
+[.shadow]
+image::saved-cypher-drawer.png[width=400]
+
+From here, you can organize your saved Cypher, download or upload it, or delete it if needed.
+To run a saved query, click it to populate the Cypher editor and use the play button to execute it.
+
+To save a query, use the bookmark icon in the Cypher editor.
+
+[.shadow]
+image::save-cypher.png[width=800]
+
+
+=== Query history
+
+This drawer contains a list of your previously run queries, for your reference.
+Queries are kept here until you delete them and are not limited to the current instance.
+
+=== Cypher Reference
+
+The Cypher reference is an embedded version of the link:https://neo4j.com/docs/cypher-cheat-sheet/5/aura-dbe/auradb-free[Cypher Cheat Sheet].
+It allows you to search for Cypher-related terms such as queries, patterns, and clauses, directly in Query.
+It is divided into sections and subsections for easy navigation and each subsection can be expanded and collapsed.
+
+[.shadow]
+.Cypher reference
+image::cypher-reference.png[width=300]
+
+You can select an example to run it in the editor or copy it to the clipboard.
+Note that some examples require data you may not have in your database, so they may not be runnable or yield the desired results; they are there to illustrate the syntax and functionality of Cypher.
+
+Every subsection is also a link to the link:https://neo4j.com/docs/cypher-manual/current/[Cypher Manual] for more details.
+
+=== Parameters
+
+The parameter drawer allows you to set parameters to use in your queries.
+This can also be accomplished by using the `:param` command. For more information about using parameters, see xref:query/operations.adoc#query-parameters[Query parameters].
+
+[.shadow]
+.Parameter drawer
+image::param-drawer.png[width=400]
+
+== Cypher editor
+
+The Cypher editor is the primary interface for entering and running Cypher queries and commands.
+The editor can be instantiated several times, which allows you to edit the query inside the result frame and rerun it.
+It can hold multiple lines for long queries or commands.
+
+=== Syntax highlighting
+
+* A smart highlight of matching pairs around the current position of the cursor, for example, matching brackets, braces, and parentheses.
+* Matching pairs are auto-closed.
+* A smart highlight of identical words on a word click.
+* Words, such as attributes, anonymous names, and values, are highlighted in different colors.
+* Any punctuation, such as parentheses and commas, has a slightly different color than text.
+* Warnings are displayed with a red squiggly line that shows the message when you hover over it.
+
+.Useful shortcuts
+[cols="3,2,2",options="header"]
+|===
+| Description
+| Keyboard shortcut (Mac OS)
+| Keyboard shortcut (Windows and Linux)
+
+| Select highlighted identical words one by one.
+| *command + D*
+| *Ctrl + D*
+
+| Select all highlighted identical words.
+| *command + shift + L*
+| *Ctrl + shift + L*
+
+| Move a query line up and down.
+| *ALT + arrow*
+| *ALT + arrow*
+
+| Delete a query line.
+| *command + shift + K*
+| *Ctrl + shift + K*
+
+| Add multiple cursors, if you want to add several lines at the same time.
+| *command + ALT + arrow*
+| *Ctrl + ALT + arrow*
+
+| Search and replace.
+| *command + F*
+| *Ctrl + F*
+
+| Run a query.
+| *command + enter*
+| *Ctrl + Enter*
+|===
+
+
+== Query settings
+
+The settings menu is located to the right of the Cypher editor and contains various adjustable settings for the tool.
+
+[.shadow]
+.Query settings
+image::query-settings.png[]
+
+The first part contains settings that have to do with the *performance* of the tool.
+The *Visualization node limit* controls the number of nodes returned by an *initial* query.
+If your query's results exceed this limit, you can still add more elements to the graph visualization.
+Setting any of the limits too high may degrade performance.
+
+The *local storage* section allows you to save your parameters to use across sessions.
+
+The next part relates to the *Cypher editor*.
+You can control whether your queries can write to the database or not with the *Access mode* setting.
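+
+The access mode can also be switched from the Cypher editor with the client-side command listed in the command reference, for example to allow only read queries:
+
+[source, query command, role=noheader]
+----
+:access-mode read
+----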
+
+// [NOTE]
+// ====
+// This setting should mainly be used for load balancing/routing in a clustered environment and is *not* intended to replace RBAC in terms of restricting access.
+// ====
+
+// To learn more about custom load balancing, see the link:https://neo4j.com/docs/create-applications/[Neo4j Drivers documentation], specifically the section on *Run your own transactions/Request routing*, available for the link:https://neo4j.com/docs/javascript-manual/current/transactions/#_request_routing[JavaScript Driver], the link:https://neo4j.com/docs/java-manual/current/transactions/#_request_routing[Java Driver], the link:https://neo4j.com/docs/python-manual/current/transactions/#_request_routing[Python Driver], and the link:https://neo4j.com/docs/go-manual/current/transactions/#_request_routing[Go Driver].
+
+The toggle for errors and warnings is used to enable hints to help you find errors in your Cypher queries.
+
+
+== Reusable result frames
+
+The reusable result frames in Query allow you to edit the query of an existing result directly in the result frame and rerun it to update the result in situ.
+You can also use _Cmd/Ctrl + click_ to send it back to the main editor and re-run it from there.
+
+Query supports different result frame views:
+
+* Graph -- Display the result as nodes and relationships and allow xref:query/operations.adoc#styling[styling] to be configured.
+* Table -- Display the result as JSON formatted rows.
+* RAW -- Display the submitted request, the Neo4j Server version and address, and the response.
+* Plan -- Display the execution plan for the query, with or without execution.
+
+== Stream
+
+A stream is a scrolling series of result frames.
+
+[.shadow]
+.Stream
+image::stream.png[]
+
+A reusable result frame is created for each command execution, added to the top of the stream to create a scrollable collection in reverse chronological order.
+You can expand and collapse the result frames using the *Collapse* icon.
+To remove all the frames from the stream, use the `:clear` command.
+Clearing the stream does **not** clear the history; that is done from the **Query history** drawer, as mentioned previously.
+
+== Query co-pilot
+
+The co-pilot is a feature that helps you write Cypher queries.
+It allows you to use natural language to ask the database a question, in other words, to *query* the database.
+You enter your question and the co-pilot suggests a Cypher query based on your input.
+You can edit the query and run it or rephrase your question to generate a new query.
+
+When you write a question, it is automatically augmented with the current database schema, which provides the LLM with the right context.
+Being familiar with the database schema helps you write questions that are more likely to generate accurate Cypher queries.
+
+[TIP]
+====
+If you need a reminder of what your data model looks like, you can run `CALL db.schema.visualization()` for a visual representation of your database.
+====
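+
+For example, run the following in the Cypher editor to get a graph of the node labels and relationship types in your database:
+
+[source, cypher, role=noplay]
+----
+CALL db.schema.visualization()
+----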
+
+The co-pilot is available from the Cypher editor once you have enabled it in the Org settings.
+See xref:visual-tour/index.adoc#org-settings[Organization settings] for more information.
+
+[.shadow]
+.Query co-pilot
+image::query-copilot.png[width=800]
+
+The co-pilot is not guaranteed to always generate accurate Cypher queries.
+It is recommended to review the generated query before running it, especially if the query is meant to write to the database.
+
+Some known limitations include:
+
+* Incorrect relationship direction.
+* Using deprecated Cypher syntax.
+* Tendency to not return relationships.
+
+
+
+
diff --git a/modules/ROOT/pages/security/encryption.adoc b/modules/ROOT/pages/security/encryption.adoc
new file mode 100644
index 000000000..7d705085c
--- /dev/null
+++ b/modules/ROOT/pages/security/encryption.adoc
@@ -0,0 +1,224 @@
+[[aura-reference-security]]
+= Encryption
+:description: Aura is encrypted using intra-cluster encryption, and is CMK compatible.
+:page-aliases: platform/security/encryption.adoc
+
+All data stored in Neo4j Aura is encrypted using intra-cluster encryption between the various nodes comprising your instance and encrypted at rest using the underlying cloud provider's encryption mechanism.
+
+Aura always requires encrypted connections and ensures that clients validate server certificates when establishing a connection.
+This means that network traffic flowing to and from Neo4j Aura is always encrypted.
+
+By default, each cloud provider encrypts all backup buckets (including the objects stored inside) using either link:https://cloud.google.com/storage/docs/encryption/default-keys[Google-managed encryption], link:https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingServerSideEncryption.html[AWS SSE-S3 encryption], or link:https://learn.microsoft.com/en-us/azure/storage/common/storage-service-encryption[Azure Storage encryption].
+
+To protect data at rest, Aura uses encrypted data storage capabilities offered by the cloud providers.
+Whether customers choose to host in AWS, Azure, or GCP, each object store provides server-side encrypted buckets for data at rest encryption.
+By default, AWS, Azure, and GCP encrypt all backup buckets (including the objects stored inside) with AWS SSE-S3 encryption, Azure Storage Encryption (SSE), or Google-managed encryption.
+This ensures all your data stored in any one of these cloud providers uses 256-bit Advanced Encryption Standard (AES).
+
+In addition to Aura’s default encryption for data at rest, Customer Managed Keys enable security-conscious enterprises to manage encryption keys through their Cloud Service Provider's Key Management Services (KMS) on Aura, granting control over data protection and access management, including the ability to revoke access from Neo4j.
+This allows adherence to strict security policies alongside Aura's default enterprise-grade security measures.
+
+== Customer Managed Keys
+
+label:AuraDB-Virtual-Dedicated-Cloud[]
+label:AuraDS-Enterprise[]
+
+A Customer Managed Key (CMK) gives you more control over key operations than the standard Neo4j encryption.
+These are created and managed using a supported cloud key management service (KMS).
+Externally, Customer Managed Keys are also known as Customer Managed Encryption Keys (CMEK).
+
+When using a Customer Managed Key, all data at rest is encrypted with the key.
+Customer Managed Keys are supported for v4.x instances and instances running the latest version.
+
+It is not possible to add a Customer Managed Key to an existing Neo4j Aura instance.
+The encryption key must be selected during instance creation.
+To change an encryption key, clone the Aura instance and select a different encryption key.
+
+When using Customer Managed Keys, you give Aura permission to encrypt and decrypt using the key, but Aura has no access to the key’s material.
+Aura has no control over the availability of your externally managed key in the KMS.
+
+[WARNING]
+====
+The loss of a Customer Managed Key makes all data encrypted with that key inaccessible.
+Neo4j is unable to manage database instances if the key is disabled, deleted, expired, or if permissions are revoked.
+====
+
+=== Key rotation
+
+In your KMS platform, you can either configure automatic rotation for the Customer Managed Key, or you can perform a manual rotation.
+
+Although automatic rotation is not enforced by Aura, it is best practice to rotate keys regularly.
+Manual key rotation is **not** recommended.
+
+=== Import an existing database
+
+You can upload a database to instances encrypted with Customer Managed Keys in Neo4j 5 directly from the console or by using `neo4j-admin database upload`.
+If the database is larger than 4 GB, you have to use `neo4j-admin database upload`.
+Note that the `neo4j-admin push-to-cloud` command in Neo4j v4.4 and earlier is **not** supported for instances encrypted with Customer Managed Keys.
+For more information see the link:https://neo4j.com/docs/aura/classic/auradb/importing/import-database/#_neo4j_admin_database_upload[Neo4j Admin `database upload`] documentation.
+
+=== Clone an instance protected by CMK
+
+To clone an instance protected by a Customer Managed Key, the key must be valid and available to Aura.
+If the same CMK does not exist in the destination region and product, the cloned instance must be encrypted with an available CMK for that region and product.
+
+It is best practice to use the same Customer Managed Key as the instance it’s being cloned from.
+You can override this and use another Customer Managed Key, but you cannot use the Neo4j Managed Key.
+
+When cloning an instance that is encrypted with a Customer Managed Key, specific restrictions apply when using the API.
+Below are the details and possible errors that you may encounter depending on the cloning method and key configurations.
+
+.Summary of cloning restrictions
+|===
+| Cloning method | Destination key | Result
+
+| **Console & API** | Same CMK as source instance | Cloning allowed.
+| **Console** | Different CMK than source instance | Cloning allowed. Warning message shown.
+| **Console** | Neo4j Managed Key | Cloning blocked. Error message shown.
+| **API** | Different CMK than source instance | Cloning blocked using API but allowed in console. Error message shown.
+| **API** | Neo4j Managed Key | Cloning blocked. Error message shown.
+|===
+
+=== Remove a CMK from Aura
+
+A Customer Managed Key that is used within Aura to encrypt one or more Aura database instances cannot be removed from Aura.
+If you no longer need the Customer Managed Key to encrypt Aura databases, first delete the Aura database instances that are encrypted with the key, and then remove the key from Aura.
+Keep in mind that this process only breaks the link between the key and Aura - it does not delete the actual key from the Cloud KMS.
+
+== AWS keys
+
+=== Create an AWS key
+
+[IMPORTANT]
+====
+Aura requires a symmetric key stored in AWS KMS.
+Asymmetric keys are not supported.
+====
+
+. Create a *symmetric key* in the AWS KMS making sure the region matches your Aura database instance.
+Copy the generated ARN.
+You need it in the next step.
+. Go to *security settings* in the Aura Console, add a *Customer Managed Key*, and copy the JSON code that is generated when you add the key.
+. In the AWS KMS, edit the key policy to include the JSON code.
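+
+If you prefer the command line to the AWS console, the following is a minimal sketch using the AWS CLI; the region and description are assumptions and must match your own setup:
+
+[source,bash]
+----
+# Assumption: the AWS CLI is installed and authenticated, and us-east-1 is the Aura instance region.
+# Create a symmetric encrypt/decrypt key and print its ARN (needed in the Aura Console).
+aws kms create-key \
+  --region us-east-1 \
+  --description "Neo4j Aura CMK" \
+  --key-spec SYMMETRIC_DEFAULT \
+  --key-usage ENCRYPT_DECRYPT \
+  --query KeyMetadata.Arn \
+  --output text
+----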
+
+=== Edit the AWS key policy
+
+After you have initially created a key in the AWS KMS, you can edit the key policy.
+In the AWS key policy, "Statement" is an array that consists of one or more objects.
+Each object in the array is a policy statement, identified by a statement ID (SID).
+The objects in the array are comma-separated, for example `{[{'a'}, {'b'}, {'c'}]}`.
+
+Add a comma after the closing curly brace of the final statement, and then paste the JSON code that was generated in the Aura Console (for example `{[{'a'}, {'b'}, {'c'}, _add code here_ ]}`).
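+
+If you manage the key policy from the command line instead of the AWS console, a hedged sketch of the same edit looks like this; `<key-arn>` is a placeholder for the ARN of your key:
+
+[source,bash]
+----
+# Download the current key policy (the built-in policy name is "default").
+aws kms get-key-policy \
+  --key-id <key-arn> \
+  --policy-name default \
+  --query Policy \
+  --output text > key-policy.json
+
+# Edit key-policy.json: add a comma after the last statement and paste the
+# JSON code generated by the Aura Console into the "Statement" array.
+
+# Upload the updated policy back to AWS KMS.
+aws kms put-key-policy \
+  --key-id <key-arn> \
+  --policy-name default \
+  --policy file://key-policy.json
+----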
+
+=== AWS regions
+
+Aura supports AWS Customer Managed Keys that reside in the same region as the instance.
+When creating a Customer Managed Key in the AWS KMS, you can create a single-region key, or create a multi-region key.
+
+Single-region keys reside in only one AWS region, which must be the same region as your Aura instance.
+
+Multi-region keys have a primary region, but they can be replicated to other regions, so a replica can reside in the same region as your Aura instance.
+The replicas share the same key ID as the primary key but have different Amazon Resource Names (ARNs).
+
+=== AWS automatic key rotation
+
+Aura supports automatic key rotation via the AWS KMS.
+To enable automatic key rotation in the AWS KMS, tick the *Key rotation* checkbox after creating the key; the key is then rotated automatically once a year.
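+
+The same setting can be applied from the AWS CLI; this sketch assumes `<key-arn>` is the ARN of your Customer Managed Key:
+
+[source,bash]
+----
+# Turn on yearly automatic rotation for the key.
+aws kms enable-key-rotation --key-id <key-arn>
+
+# Confirm that rotation is enabled.
+aws kms get-key-rotation-status --key-id <key-arn>
+----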
+
+== Azure keys
+
+=== Create an Azure key vault
+
+Create a Key Vault in the Azure portal, ensuring the region matches your Aura database instance region.
+Move through the tabs to enable the following:
+
+* Purge protection
+* Azure role-based access control
+* Azure Disk Encryption for volume encryption
+* When setting up the key vault, in *Networking* you can choose one of:
+** *Allow public access from all networks*
+** *Allow public access from specific virtual networks and IP addresses* (requires checking *Allow trusted Microsoft services to bypass this firewall*)
+** *Disable public access* (requires checking *Allow trusted Microsoft services to bypass this firewall*)
+** If you need to edit the public access setting after setting up the key vault, go to *Networking > Firewalls and Virtual Networks*; the checkbox to *Allow trusted Microsoft services to bypass this firewall* is in the *Exception* section below the public access options.
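+
+For reference, a minimal Azure CLI sketch of the same key vault setup could look as follows; the vault name, resource group, and region are placeholders, and the networking options from the list above are configured separately:
+
+[source,bash]
+----
+# Assumption: placeholder names; the location must match the Aura instance region.
+az keyvault create \
+  --name <vault-name> \
+  --resource-group <resource-group> \
+  --location <aura-instance-region> \
+  --enable-purge-protection true \
+  --enable-rbac-authorization true
+----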
+
+=== Create an Azure key
+
+. Before creating a key, grant yourself a role assignment if needed:
+.. Inside the key vault, go to *Access Control (IAM)* and *add role assignment*.
+.. In the *Role* tab, select *Key Vault Administrator*.
+.. In the *Member* tab, select *User, group, or service principal*.
+.. From *Select members*, add yourself or the relevant person, then *Review + Assign*.
+
+[IMPORTANT]
+====
+Aura requires an Azure RSA key, size 2048, 3072 or 4096.
+====
+
+. Create a key in the Azure Key Vault.
+. After the key is created, click into the key version and copy the *Key Identifier*; you need it in the next step.
+. Go to *security settings* in the Aura Console and add a *Customer Managed Key*.
+. Follow the instructions in the Aura Console for the next sections.
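+
+If you prefer the Azure CLI, a minimal sketch for creating the key and retrieving its *Key Identifier* is shown below; the vault and key names are placeholders, and RSA 2048 is chosen as one of the supported sizes:
+
+[source,bash]
+----
+# Create an RSA key in the vault (2048, 3072, or 4096 are accepted by Aura).
+az keyvault key create \
+  --vault-name <vault-name> \
+  --name <key-name> \
+  --kty RSA \
+  --size 2048
+
+# Print the Key Identifier to paste into the Aura Console.
+az keyvault key show \
+  --vault-name <vault-name> \
+  --name <key-name> \
+  --query key.kid \
+  --output tsv
+----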
+
+=== Create a service principal
+
+In the Microsoft Entra ID tenant where your key is located, create a service principal linked to the Neo4j CMK Application, using the *Neo4j CMK Application ID* displayed in the Aura Console.
+
+One way to do this is by clicking the terminal icon at the top of the Azure portal, to open the Azure Cloud Shell.
+
+Using Azure CLI, the command is:
+
+[source,bash]
+----
+az ad sp create --id Neo4jCMKApplicationID
+----
+For more information about the Azure CLI, see link:https://learn.microsoft.com/en-us/cli/azure/ad/sp?view=azure-cli-latest#az-ad-sp-create[`az ad sp` documentation].
+
+=== Grant key permissions
+
+. To add role assignment to the Azure key, inside the key, go to *Access control (IAM)* and add *role assignment*.
+. In the *Role* tab, select *Key Vault Crypto Officer*.
+. In the *Member* tab, select *User, group, or service principal*.
+. In *Select members*, paste the *Neo4j CMK Application name* that is displayed in the Aura Console.
+. The *Neo4j CMK Application* should appear; select this application, then *Review + Assign*.
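+
+The same role assignment can also be made with the Azure CLI; this is a sketch with placeholder values, assuming RBAC is enabled on the vault and that `<neo4j-cmk-application-id>` is the *Neo4j CMK Application ID* shown in the Aura Console:
+
+[source,bash]
+----
+# Look up the key vault's resource ID.
+VAULT_ID=$(az keyvault show \
+  --name <vault-name> \
+  --resource-group <resource-group> \
+  --query id \
+  --output tsv)
+
+# Grant the Neo4j CMK service principal the Key Vault Crypto Officer role on the key.
+az role assignment create \
+  --role "Key Vault Crypto Officer" \
+  --assignee <neo4j-cmk-application-id> \
+  --scope "$VAULT_ID/keys/<key-name>"
+----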
+
+=== Azure key rotation
+
+If you immediately disable the old key version after the Azure key is rotated, the connection status in Aura changes from "Ready" to "Pending".
+This happens because Azure Storage checks for key updates once every 24 hours, as outlined in link:https://learn.microsoft.com/en-gb/azure/storage/common/customer-managed-keys-configure-new-account?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json&tabs=azure-portal#configure-encryption-for-automatic-updating-of-key-versions[Microsoft Azure documentation].
+If a key is rotated and the old version is disabled before this time passes, services relying on the key in Neo4j Aura lose access.
+To avoid this, wait at least 24 hours after rotating a key before disabling the old version, to allow the change to take effect in Azure.
+Disabling the old version too early results in Aura losing access to the key.
+
+== GCP keys
+
+=== Create a key ring
+
+. Go to *Key Management* in the Google Cloud console.
+. Create a *key ring*.
+. Set the key ring *Location type* to *Region*.
+. Make sure the region matches your Aura database instance region.
+. Select *Create* and you are automatically taken to the key creation page.
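+
+If you work from the command line, the same key ring can be created with `gcloud`; the names are placeholders and the location must match your Aura instance region:
+
+[source,bash]
+----
+# Create a regional key ring in the same region as the Aura instance.
+gcloud kms keyrings create <key-ring-name> --location <aura-instance-region>
+----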
+
+=== Create a GCP key
+
+[IMPORTANT]
+====
+Aura requires a symmetric encrypt/decrypt key stored in GCP.
+Asymmetric keys are not supported.
+====
+
+. Create a *symmetric encrypt/decrypt* key in the Google Console.
+You can use default settings for the options, but setting a key rotation period is recommended.
+. Select *Create* and you are brought to the key ring, with your key listed.
+. Click *More* (three dots) and *Copy resource name*; you need it in the next step.
+For more information, see the link:https://cloud.google.com/kms/docs/getting-resource-ids[Google Cloud docs].
+. Go to *security settings* in the Aura Console and add a *Customer Managed Key*.
+Paste the *resource name* into the *Encryption Key Resource Name* field.
+. After you select *Add Key* in the Aura Console, three *service accounts* are displayed in the Aura Console.
+You will need these in the next steps.
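+
+A minimal `gcloud` sketch of the same steps, with placeholder names, looks like this:
+
+[source,bash]
+----
+# Create a symmetric encrypt/decrypt key (add --rotation-period and
+# --next-rotation-time to schedule automatic rotation).
+gcloud kms keys create <key-name> \
+  --keyring <key-ring-name> \
+  --location <aura-instance-region> \
+  --purpose encryption
+
+# Print the key's resource name to paste into the Aura Console.
+gcloud kms keys describe <key-name> \
+  --keyring <key-ring-name> \
+  --location <aura-instance-region> \
+  --format 'value(name)'
+----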
+
+=== Grant key permissions
+
+. Go to the Google Cloud console, click into the key, and go to *Permissions*, then *Grant Access*.
+. In *Add principals*, paste the three service accounts from the Aura Console.
+. In *Assign roles*, assign both the *Cloud KMS CryptoKey Encrypter/Decrypter* and *Cloud KMS Viewer* roles to all three service accounts.
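+
+The same grants can be scripted with `gcloud`; this sketch uses placeholder values, and `<service-account>` stands for each of the three service accounts shown in the Aura Console:
+
+[source,bash]
+----
+# Repeat for each of the three service accounts displayed in the Aura Console.
+for ROLE in roles/cloudkms.cryptoKeyEncrypterDecrypter roles/cloudkms.viewer; do
+  gcloud kms keys add-iam-policy-binding <key-name> \
+    --keyring <key-ring-name> \
+    --location <aura-instance-region> \
+    --member "serviceAccount:<service-account>" \
+    --role "$ROLE"
+done
+----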
diff --git a/modules/ROOT/pages/security/ip-filtering.adoc b/modules/ROOT/pages/security/ip-filtering.adoc
new file mode 100644
index 000000000..2e0622ac2
--- /dev/null
+++ b/modules/ROOT/pages/security/ip-filtering.adoc
@@ -0,0 +1,96 @@
+= IP filtering
+:description: Control access to networks or systems by allowing or blocking traffic based on specified IP addresses.
+
+label:AuraDB-Business-Critical[]
+label:AuraDB-Virtual-Dedicated-Cloud[]
+
+IP filtering is a way to restrict access to your Aura instances over the public internet.
+Allowing only trusted IP addresses or CIDR ranges helps secure your environment without requiring private network configurations.
+
+* AuraDB Business Critical supports up to 20 allowed IP ranges.
+* AuraDB Virtual Dedicated Cloud supports up to 100 allowed IP ranges.
+* Contact support if you need to increase the limit.
+
+== Required roles
+
+To create or edit IP filters, users must have one of the following roles:
+
+* `Organization Owner`
+* `Organization Admin`
+
+== Add a new filter
+
+. In the Aura console, go to *Organization Settings > Security > IP Filtering*.
+. Add the name and description of the IP filter.
+Adding a description makes the filter easier to manage later, because descriptions are displayed in the Allow List once the filter is created.
+. Select where to apply the filter:
+.. If you apply a filter to an Organization, it applies to all instances in the organization.
+.. If you apply a filter to a Project, it applies to all instances in the project.
+.. If you apply a filter to an Instance, it applies to that individual instance.
+. Select allowed IP addresses:
+.. *All*: No filtering is applied, all IPs are allowed.
+.. *Specific range of IP addresses*: Add addresses or CIDR ranges.
+Ranges of IP addresses require CIDR notation, e.g. `46.15.1.0` with a subnet mask of `255.255.255.240` is written as `46.15.1.0/28`, which includes all hosts from `46.15.1.1` through `46.15.1.14`.
+You can use online subnet calculators to help determine the CIDR, or verify a range locally as shown after this list.
+. Once all the required information is provided, selecting *Create* enforces the IP filter.
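+
+As a quick local check of what a range covers, you can use the Python standard library from a shell; this is an optional verification step, not an Aura requirement, and assumes `python3` is available:
+
+[source,bash]
+----
+# Print the CIDR form and the first and last usable host of the example range above.
+python3 -c "
+import ipaddress
+net = ipaddress.ip_network('46.15.1.0/255.255.255.240')
+hosts = list(net.hosts())
+print(net.with_prefixlen, hosts[0], '-', hosts[-1])
+"
+----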
+
+.Add a filter
+[.shadow]
+image::ip-filtering.png[]
+
+== Edit a filter
+
+IP filters apply only to new network connections.
+Existing connections are not affected if they are no longer in the allow list after an IP filter is edited or deleted.
+Only new connections are compared to the updated allow list.
+
+To edit an existing filter, use the *[...]* more menu, then select *Inspect*.
+
+.Edit a filter
+[.shadow]
+image::edit-a-filter.png[]
+
+== Scope and inheritance
+
+IP filters allow or deny a connection to an instance.
+Each instance can only have one IP filter.
+Applying filters at broader levels (organization or project level) helps admins enforce access control across multiple instances without configuring each one individually.
+Filters set at the organization or project level are inherited by all existing instances and newly created instances within that scope.
+
+New instances created in an organization or project automatically inherit the IP filter applied at that level.
+
+== IP filtering and GDS Sessions
+
+xref:graph-analytics/index.adoc[Graph Analytics] is an on-demand ephemeral compute environment for running GDS workloads.
+Each compute unit is called a GDS Session.
+
+When a GDS Session uses an Aura instance as its data source, the IP filter set on that Aura instance applies to the GDS Session.
+GDS Sessions connecting to non-Aura or self-managed instances are out of scope.
+
+== Benefits and use cases
+
+IP filtering is helpful if user credentials are compromised because access is restricted to traffic originating from approved IP addresses.
+
+It’s a great fit when you want to:
+
+* Quickly secure public instances without cloud configuration - useful when an instance is not managing sensitive production data.
+* Limit access to trusted networks such as office locations or partner data centers.
+* Enforce corporate or regulatory access boundaries with minimal setup.
+* Apply access controls to dev or test environments where private endpoints are unnecessary.
+* Block access from geographic regions outside an area of operation.
+* Standardize access policies across an organization or project without having to manage each instance individually.
+
+== IP filtering vs. Private Endpoints
+
+IP filtering is a simpler solution for restricting network access when you don’t yet need to implement a Private Endpoint.
+Filters work on public endpoints and can be used if you later configure a Private Endpoint, as long as public traffic is still enabled.
+If you want to restrict traffic more securely to only private cloud networks, set up a Private Endpoint.
+Private Endpoints provide secure access through your VPC, while IP filtering works on public endpoints.
+
+With IP filtering:
+
+* No VPN or Private Endpoint setup is required.
+* Users only need to add their local IP address to the Allow List to access Aura from tools such as Query, Explore, and Neo4j Desktop.
+
+== Using `neo4j-admin database upload` with IP filtering
+
+To use `neo4j-admin database upload`, add the specific link:https://support.neo4j.com/s/article/360050504254-What-are-the-public-IP-addresses-to-provision-in-a-firewall-configuration-to-allow-Aura-use[Control Plane Egress] addresses to your IP filter allow list.
+This ensures the temporary connection needed to push data is permitted.
+
diff --git a/modules/ROOT/pages/security/mfa.adoc b/modules/ROOT/pages/security/mfa.adoc
new file mode 100644
index 000000000..7887fcd25
--- /dev/null
+++ b/modules/ROOT/pages/security/mfa.adoc
@@ -0,0 +1,37 @@
+= Multi-Factor Authentication
+
+Multi-Factor Authentication (MFA) adds an extra layer of security to an Aura account log-in by requiring a verification code in addition to username and password.
+The verification code is generated using any authenticator app, such as Google Authenticator.
+
+There are two scenarios for MFA:
+
+* Individual users can enable MFA for their own accounts via account settings.
+* Organization administrators can enable MFA for an entire organization, in which case all members must go through the MFA setup.
+
+[IMPORTANT]
+====
+Setting up MFA requires logging in using email and password, not using SSO or Google Sign-In.
+====
+
+== Enable individual MFA
+
+. Log in to the Aura Console using email/password.
+. Go to *Account > Settings > Preferences > Security*.
+. Enable *Multi-Factor Authentication (MFA)*.
+. Follow the MFA setup steps.
+
+== Enable MFA for an organization
+
+`Organization Owners` or `Organization Admins` can require all members of an organization to set up MFA from the organization security settings.
+Each organization member is then prompted to complete the setup with an authenticator app of their choice.
+
+To set up organization-wide MFA:
+
+. Go to *Organization Settings > Security & Networking > App MFA (Multi-Factor Authentication)*.
+. Enable *Multi-Factor Authentication (MFA)*.
+
+After MFA is enabled at the organization level, all members will see the following message in the console:
+
+[quote]
+"_Your organization has required Multi Factor Authentication (MFA).
+Clicking enable MFA below will log you out and you will then need to login using your email and password to configure MFA._"
\ No newline at end of file
diff --git a/modules/ROOT/pages/security/secure-connections.adoc b/modules/ROOT/pages/security/secure-connections.adoc
new file mode 100644
index 000000000..921e4bc94
--- /dev/null
+++ b/modules/ROOT/pages/security/secure-connections.adoc
@@ -0,0 +1,342 @@
+[[aura-reference-security]]
+= Secure connections
+:description: VPC boundaries enable you to operate within an isolated section of the service.
+:page-aliases: platform/security/secure-connections.adoc
+
+== VPC isolation
+
+label:AuraDB-Virtual-Dedicated-Cloud[]
+label:AuraDS-Enterprise[]
+
+AuraDB Virtual Dedicated Cloud and AuraDS Enterprise run in a dedicated AWS cloud account, Azure subscription, or GCP project to achieve complete isolation for your deployment.
+Additional Virtual Private Cloud (VPC) boundaries enable you to operate within an isolated section of the service, where your processing, networking, and storage are further protected.
+The Aura console resides in a separate VPC, isolated from the rest of the Aura services.
+
+== Network access
+
+An Aura instance can be publicly available, completely private, or allow both public and private access.
+
+If public traffic is enabled, your Aura instances are public: traffic to them is allowed to traverse the public internet, and they are accessible with the correct username and password.
+
+To make your instance completely private, you need to disable public traffic, use the cloud provider's network, and create a private endpoint inside your VPC, which gives you a private connection to Aura.
+The only way to connect to your database is from inside your network (your VPC in your AWS/Azure/GCP account) using an internal IP address you choose and DNS records you create.
+
+To configure network access, you need to be authorized to access the part of the infrastructure that runs and handles these instances as well as the networking used to establish secure connections between the database and the application's VPC.
+This includes the ability to connect over the cloud provider's private link and private endpoint.
+
+To configure settings for network access to your instance, go to *Aura console* > *Project settings* > *Security & Networking* > *Private endpoints* > *New network access configuration*.
+
+From there, you can either set up a new network access configuration, or edit current configuration settings.
+
+The Aura console provides a step-by-step configuration guide to:
+
+. Choose your Aura instance details
+. Create an endpoint
+. Accept endpoint connection requests and enable private DNS in the cloud provider's console
+. *Disable public traffic (optional)*
+Before disabling public traffic, it is highly recommended to test connectivity through the private endpoint.
+
+You can return to Step 4 at any time to disable public traffic, even if you’ve already completed the network access configuration and initially allowed public traffic.
+To do this, click through the steps in the network access configuration guide until you reach Step 4, where there is the option to disable public traffic.
+Disabling public traffic does not take effect immediately.
+You can monitor the status change in the console to confirm when the process is complete.
+
+To continue accessing Query and Explore, you can configure a VPN in your VPC and connect to these tools over the VPN.
+
+== Tool access
+
+When public traffic is disabled, Query and Explore are not accessible via the public internet.
+To continue accessing these tools, xref:getting-started/connect-instance.adoc#_connection_method[connect via HTTPS (port 443)].
+This is helpful when network security blocks Bolt (port 7687), for example when a private link is set up on the database with public traffic disabled.
+Alternatively, you can set up a VPN (Virtual Private Network) in your VPC and connect to Query and Explore over the VPN.
+
+== Private endpoints
+
+Private endpoints are network interfaces inside your own VPC, which can only be accessed within your private network.
+The cloud provider connects them over their network to Neo4j Aura.
+By design they are not exposed to the public internet, ensuring that critical services are accessible only through private, secure networks.
+
+A single private link connection applies to all instances in a region.
+If you set one up for `us-east-1`, it applies to all instances in that region.
+You can set up a second private link connection for applications that are hosted in a second region (for example `us-west-1`) but still housed inside the same Aura project.
+
+* When activated, a Private Connection label, shield icon, and dedicated Private URI will appear on any instance tile using a private endpoint in the Aura console.
+* Connections using private endpoints are one-way. Aura VPCs can’t initiate connections back to your VPCs.
+
+=== AWS private endpoints
+
+label:AuraDB-Virtual-Dedicated-Cloud[]
+label:AuraDS-Enterprise[]
+
+Refer to link:https://aws.amazon.com/privatelink[AWS PrivateLink] documentation for IAM requirements.
+
+[NOTE]
+====
+In AWS region `us-east-1`, Neo4j does not support the Availability Zone with ID `use1-az3` for private endpoints.
+====
+
+All applications running Neo4j workloads inside the VPC are routed directly to your isolated environment in Aura without traversing the public internet.
+You can then disable public traffic, ensuring all traffic to the instance remains private to your VPC.
+
+.VPC connectivity with AWS PrivateLink
+image::privatelink.png["VPC connectivity with AWS PrivateLink"]
+
+Without private endpoints, you access the tools Query and Explore over the internet:
+
+.Architecture overview before enabling private endpoints
+image::privatelink_01_before_enabling.png["Architecture overview before enabling private endpoints"]
+
+When you have enabled private endpoints and disabled public internet access, you can no longer connect Query and Explore to your instances over the internet.
+To continue accessing the tools, you can set up a VPN.
+
+To access Query and Explore over a VPN, you must ensure that:
+
+* The VPN server uses the https://docs.aws.amazon.com/vpc/latest/userguide/vpc-dns.html#AmazonDNS[VPC's DNS servers].
+* You use the *Private URI* shown on the instance tile and in the instance details.
+It is different from the *Connection URI* you used before.
+
+.Accessing tools over a VPN
+image::privatelink_03_browser_bloom_over_vpn.png["Accessing tools over a VPN"]
+
+=== GCP private endpoints
+
+label:AuraDB-Virtual-Dedicated-Cloud[]
+label:AuraDS-Enterprise[]
+
+Refer to https://cloud.google.com/vpc/docs/private-service-connect[GCP Private Service Connect] documentation for required permissions.
+
+All applications running Neo4j workloads inside the VPC are routed directly to your isolated environment in Aura without traversing the public internet.
+You can then disable public traffic, ensuring all traffic to the instance remains private to your VPC.
+
+.VPC connectivity with GCP Private Service Connect
+image::privateserviceconnect.png["VPC connectivity with GCP Private Service Connect"]
+
+Without private endpoints, you access the tools Query and Explore over the internet:
+
+.Architecture overview before enabling private endpoints
+image::privateserviceconnect_01_before_enabling.png["Architecture overview before enabling private endpoints"]
+
+When you have enabled private endpoints and disabled public internet access, you can no longer connect Query and Explore to your instances over the internet.
+To continue accessing the tools, you can set up a VPN.
+
+To access Query and Explore over a VPN, you must ensure that:
+
+* You have set up link:https://cloud.google.com/dns/docs/zones/manage-response-policies[GCP Response Policy Zone], or an equivalent DNS service, inside of the VPC.
+* You use the *Private URI* shown on the instance tile and in the instance details.
+It is different from the *Connection URI* you used before.
+
+.Accessing tools over a VPN
+image::privateserviceconnect_03_browser_bloom_over_vpn.png["Accessing tools over a VPN"]
+
+=== Azure private endpoints
+
+label:AuraDB-Virtual-Dedicated-Cloud[]
+label:AuraDS-Enterprise[]
+
+Refer to link:https://azure.microsoft.com/en-us/products/private-link/#overview[Azure Private Link] documentation to create an endpoint in your Virtual Network (VNet) that connects to Aura.
+
+All applications running Neo4j workloads inside the VPC are routed directly to your isolated environment in Aura without traversing the public internet.
+You can then disable public traffic, ensuring all traffic to the instance remains private to your VPC.
+
+.VNet connectivity with Azure Private Link
+image::azure_privatelink.png["VNet connectivity with Azure Private Link"]
+
+Without private endpoints, you access the tools Query and Explore over the internet:
+
+.Architecture overview before enabling private endpoints
+image::azure_privatelink_01_before_enabling.png["Architecture overview before enabling private endpoints"]
+
+When you have enabled private endpoints and disabled public internet access, you can no longer connect Query and Explore to your instances over the internet.
+To continue accessing the tools, you can set up a VPN.
+
+To access Query and Explore over a VPN, you must ensure that:
+
+* You have set up https://learn.microsoft.com/en-us/azure/dns/private-dns-overview[Azure Private DNS], or an equivalent DNS service, inside of the VNet.
+* You use the *Private URI* shown on the instance tile and in the instance details.
+It will be different from the *Connection URI* you used before.
+
+.Accessing tools over a VPN
+image::azure_privatelink_03_browser_bloom_over_vpn.png["Accessing tools over a VPN"]
+
+==== Enable Azure Private Endpoints for Aura
+
+. To enable private endpoints using Azure Private Link:
+.. From the sidebar menu in the Aura console, select *Security > Network Access > Network Access*.
+.. Select *New network access configuration* and follow the setup instructions.
+
+. Configure Network Access in the Aura console
+.. Select your product from the available options.
+.. Select the appropriate region for your deployment. (Azure Private Link applies to all instances in the region.)
+.. Enter the *Target Azure Subscription IDs*.
+.. Select *Enable Private Link*.
+
+. Obtain a Private Link service name
+.. After enabling Private Link, you receive a Private Link service name in the Aura console.
+.. Copy this service name; you need it in the next step.
+
+. Create Private Link endpoint in the Azure portal
+.. Log in to your Azure portal.
+.. Navigate to your cloud VPC and create a new Private Link endpoint.
+.. Use the Private Link service name obtained in step three for the configuration.
+
+. Accept Endpoint in Aura console
+.. Return to the Aura Console.
+.. Check for the newly created Private Link endpoint.
+.. Accept the endpoint to complete the connection process.
+.. *At this point, it is highly recommended to test connectivity through the private endpoint.*
+
+. Disable public traffic
+.. Before disabling public traffic, test all your application connectivity with Private Link to ensure everything is functioning correctly.
+.. Once verified, you can disable public traffic by toggling off the public access option.
+.. Note: If needed, you can postpone disabling public traffic.
+
+. Monitor Private Link status
+.. You can monitor the status of your Private Link configuration in the Aura Console.
+.. Ensure that all services are running as expected and troubleshoot any issues if necessary.
+
+Please see the link:https://learn.microsoft.com/en-us/azure/private-link/rbac-permissions#private-endpoint[Azure Documentation] for required roles and permissions.
+
+== Private links
+
+label:AuraDB-Virtual-Dedicated-Cloud[]
+
+This private link section is cloud-agnostic and therefore applicable to all clouds.
+A private link provides secure network connectivity between your application and AuraDB without exposing traffic to the public internet.
+
+The term “private link” refers to:
+
+* Private Service Connect on Google Cloud Platform (GCP)
+* PrivateLink on AWS
+* Private Link on Azure
+
+The following steps explain the process of establishing a private link connection to securely connect your application to an AuraDB Virtual Dedicated Cloud environment.
+
+[NOTE]
+======
+The dbid `abcd1234` and orch-id `0000` are used in this example.
+These values are different in your AuraDB Virtual Dedicated Cloud environment.
+======
+
+. The application initializes a driver connection to `neo4j+s://abcd1234.production-orch-0000.neo4j.io`.
+. The network layer then queries the DNS server to resolve the fully qualified domain name (FQDN), in this case `abcd1234.production-orch-0000.neo4j.io`, to its corresponding IP address.
+. The Cloud Virtual Network private DNS is queried, and it resolves the FQDN to `10.10.10.10`, based on the wildcard DNS A record created: `*.production-orch-0000.neo4j.io -> 10.10.10.10`.
+. The application's connection is directed to `10.10.10.10`, which is the private link endpoint.
+From there, the private link endpoint forwards the network connection to the private ingress through the private link.
+. The private ingress extracts the dbid from the FQDN and directs the connection to the appropriate Aura instance (dbid: `abcd1234`).
+. The Aura instance responds by sending the Neo4j cluster routing table back to the application, which contains information about the instances and their roles.
+. Based on the type of transaction (read or write), the driver selects an appropriate instance from the routing table; this is how transactions are directed to the appropriate instances.
+. As before, the Cloud Virtual Network private DNS is queried and resolves the FQDN to `10.10.10.10`. The application's connection is sent to the private link endpoint (`10.10.10.10`), which forwards the network connection to the private ingress through the private link.
+The private ingress then directs the connection to the Aura instance with dbid: `abcd1234`.
+. Finally, the write transaction is received and executed within the Aura instance with dbid: `abcd1234`.
+
+.Available instances and their roles
+[cols="1,1"]
+|===
+|abcd1234.production-orch-0000.neo4j.io
+|role: write
+
+|abcd1234.production-orch-0000.neo4j.io
+|role: read
+
+|abcd1234.production-orch-0000.neo4j.io
+|role: read
+|===
+
+
+=== Custom endpoints with private link
+
+In addition to the `*.production-orch-<orch-id>.neo4j.io` DNS records configured for your private link databases, you must add further records for a Custom Endpoint assigned to a Private Link database to work.
+When configuring a custom endpoint with a URI like `my-endpoint-abcdef-123456.endpoints.neo4j.io`, you must add the following DNS records for the custom endpoint to function properly:
+
+[source]
+----
+my-endpoint-abcdef-123456.endpoints.neo4j.io IN A <private-endpoint-IP>
+a-my-endpoint-abcdef-123456.endpoints.neo4j.io IN A <private-endpoint-IP>
+b-my-endpoint-abcdef-123456.endpoints.neo4j.io IN A <private-endpoint-IP>
+c-my-endpoint-abcdef-123456.endpoints.neo4j.io IN A <private-endpoint-IP>
+d-my-endpoint-abcdef-123456.endpoints.neo4j.io IN A <private-endpoint-IP>
+----
+
+Here `<private-endpoint-IP>` is a placeholder for the internal IP address of your private link endpoint (for example `10.10.10.10` in the flow described above).
+
+*Alternative wildcard approach*
+
+Instead of adding individual records for a custom endpoint, it is possible to use a wildcard:
+
+[source]
+----
+*.endpoints.neo4j.io IN A <private-endpoint-IP>
+----
+
+This would automatically cover any custom endpoint created for that region.
+Note that, like the individual records, this wildcard record must be added in addition to the `*.production-orch-<orch-id>.neo4j.io` DNS records mentioned above.
+
+[IMPORTANT]
+====
+If you have regions with different private link endpoints, but have linked those endpoints to one client VPC, then the wildcard record directs all traffic for custom endpoints to only one region: whichever is associated with the IP address used in the DNS records.
+This breaks routing for custom endpoints located in the other regions; therefore, if you do not have a simple private link setup, it is recommended to use the individual custom endpoint records rather than the wildcard.
+====
+
+
+== Test connectivity through the private endpoint
+
+Use the `nslookup` command to confirm whether the Fully Qualified Domain Names (FQDNs) of your Aura instances are directed to the IP address of the private link endpoint (usually an internal IP address, such as one in the `10.0.0.0/8` range).
+
+[source,bash]
+----
+nslookup <dbid>.production-orch-<orch-id>.neo4j.io
+----
+
+Use cURL from a VM instance or a container located in the related VPC network:
+
+[source,bash]
+----
+curl https://<dbid>.production-orch-<orch-id>.neo4j.io
+----
+
+Use `nc` commands on one of your VM instances or containers located in the related VPC network, and make sure you get a successful response for all commands:
+
+[source,bash]
+----
+nc -vz <dbid>.production-orch-<orch-id>.neo4j.io 443
+nc -vz <dbid>.production-orch-<orch-id>.neo4j.io 7687
+nc -vz <dbid>.production-orch-<orch-id>.neo4j.io 7474
+# if you are using AuraDS
+nc -vz <dbid>.production-orch-<orch-id>.neo4j.io 8491
+----
+
+On Windows, you can get https://nmap.org/download.html[Netcat] or use PowerShell:
+
+[source,powershell]
+----
+Test-NetConnection <dbid>.production-orch-<orch-id>.neo4j.io -Port 7687
+Test-NetConnection <dbid>.production-orch-<orch-id>.neo4j.io -Port 7474
+Test-NetConnection <dbid>.production-orch-<orch-id>.neo4j.io -Port 443
+# if you are using AuraDS
+Test-NetConnection <dbid>.production-orch-<orch-id>.neo4j.io -Port 8491
+----
+
+== Supported TLS cipher suites
+
+For additional security, client communications are carried via TLS v1.2 and TLS v1.3.
+
+AuraDB has a restricted list of cipher suites accepted during the TLS handshake, and does not accept all of the available cipher suites.
+The following list conforms to safety recommendations from IANA and the OpenSSL and GnuTLS libraries.
+
+TLS v1.3:
+
+* `TLS_CHACHA20_POLY1305_SHA256 (RFC8446)`
+* `TLS_AES_128_GCM_SHA256 (RFC8446)`
+* `TLS_AES_256_GCM_SHA384 (RFC8446)`
+
+TLS v1.2:
+
+* `TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 (RFC5288)`
+* `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 (RFC5289)`
+* `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 (RFC5289)`
+* `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 (RFC7905)`
+* `TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 (RFC5288)`
+
diff --git a/modules/ROOT/pages/security/single-sign-on.adoc b/modules/ROOT/pages/security/single-sign-on.adoc
new file mode 100644
index 000000000..1cc362da4
--- /dev/null
+++ b/modules/ROOT/pages/security/single-sign-on.adoc
@@ -0,0 +1,269 @@
+[[aura-reference-security]]
+= Single Sign-On (SSO)
+:description: SSO allows users to log in to the Aura Console using their company IdP credentials.
+:page-aliases: platform/security/single-sign-on.adoc
+
+label:AuraDB-Virtual-Dedicated-Cloud[]
+label:AuraDB-Business-Critical[]
+label:AuraDS-Enterprise[]
+
+[NOTE]
+====
+If you're planning to use Single Sign-On (SSO), keep in mind that SSO does not retroactively apply to existing instances.
+It only applies to instances created after SSO is configured.
+Therefore, plan accordingly.
+====
+
+Single Sign-On (SSO) enables organization owners and organization admins to use their organization’s identity provider (IdP) to authenticate users so they can access the Aura console and Aura instances.
+
+Aura supports SSO authentication and authorization using https://learn.microsoft.com/en-us/entra/identity-platform/v2-protocols-oidc[Microsoft Entra] and link:https://developer.okta.com/docs/guides/oin-sso-overview/[Okta] as IdPs, implementing the OpenID Connect (OIDC) protocol.
+
+As the service provider, Neo4j Aura redirects authentication requests to the configured IdP using the OpenID Connect (OIDC) protocol.
+Aura also supports authenticating with Google as the identity provider.
+When a user attempts to log in, Aura generates a redirect URL with authentication parameters and sends the user to the IdP for authentication.
+After successful authentication, the IdP redirects the user back to Aura with a secure token, allowing Aura to establish an authenticated session.
+
+== Required roles
+
+The person setting up SSO needs an `Organization Owner` or `Organization Admin` role.
+For information on how to assign roles, see xref:user-management.adoc[User management].
+
+== SSO levels
+
+* *Use as a log in for the Organization:* Log in with SSO to the Aura console.
+This allows org admins to control how users log in when they access the organization.
+
+* *Use as login method for instances within Projects in this Org:* SSO login is applied on the instances and impacts new database instances created within that project.
+It ensures users logging in with SSO have access to the database instances within the project.
+It does not give access to edit the project settings (for example, the project name or network access), or the instance settings (for example, renaming, pausing, or resuming an instance).
+Whether a user can access, view, or modify data within the instances themselves depends on RBAC.
+For this level, role mapping may be used to grant users different levels of access based on a role in their IdP.
+
+== Log-in methods
+
+Log-in methods are different for each SSO level.
+Administrators can configure a combination of one or more of the log-in methods.
+
+*Supported log-in methods at the organization level:*
+
+* Email/password
+* Okta
+* Microsoft Entra ID
+* Google SSO (not Google Workspace SSO)
+
+At the organization level, admins can disable email/password and Google SSO if at least one custom SSO provider is configured.
+
+*Supported log-in methods at the project level:*
+
+* User/password
+* Okta
+* Microsoft Entra ID
+
+At the project level, admins cannot disable user/password.
+
+== Setup requirements
+
+Accessing Aura with SSO requires the following:
+
+* The Authorization Code Flow, an OAuth2 authentication method that involves redirecting users to a publicly accessible IdP server for login.
+* Either a Discovery URI, or a combination of Issuer, Authorization Endpoint, Token Endpoint, and JWKS URI, to create an SSO configuration.
+
+== Create a new SSO configuration
+
+. Go to xref:visual-tour/index.adoc#_organizations[Organization] > *Security* > *Single Sign-On* to set up a new SSO configuration.
+
+. The checkboxes *Use as a log in for the Organization* and *Use as login method for instances within Projects in this Org* define whether SSO applies at the organization level only, at the project level only, or both.
+
+. The required basic SSO configuration information can be retrieved from the IdP.
+Entering the Discovery URI pre-fills the fields below.
+If the Discovery URI is not known, these fields can be completed manually.
+
+[IMPORTANT]
+====
+If you want users to authenticate with SSO for both the Aura Console and your database instances, make sure to select both of the following checkboxes during setup:
+
+- *Use as login for the Organization*
+- *Use as login method for instances within Projects in this Org*
+====
+
+.SSO configuration
+[.shadow]
+image::sso.png[A screenshot of the SSO configuration,640,480]
+
+== Log-in link
+
+After setting up SSO, the `Organization sso login` link can be found in the organization summary page in the Aura console.
+
+== Role mapping
+
+Role mapping links a user’s identity from an identity provider (such as Okta or Microsoft Entra ID) to a specific Aura role, based on attributes passed during authentication, like group or department.
+
+When SSO is enabled at the project level, Aura uses role mapping to determine what access a user should have based on their IdP group membership.
+
+This access then applies to *all newly created* instances the user has access to.
+
+=== Example role mapping
+
+`aws-neo4j-sso=editor`
+
+In this case:
+
+`aws-neo4j-sso` is a group name from the groups claim in the token and `editor` is the instance role assigned to any user in that group.
+
+== Instance-level SSO support
+
+Customer support can assist with:
+
+* Adding SSO just for a specific instance
+* Updating SSO settings on existing instances
+* Role mapping specific IdP groups to a single database instance, instead of all instances, which is the default behavior.
+* Creating link:https://auth0.com/docs/secure/tokens/json-web-tokens/create-custom-claims[custom claims] beyond `groups`
+
+=== Ticket template
+
+To request assistance, contact Support and raise a ticket using the following template:
+
+[source]
+----
+Instance ID (Aura-only): [Insert]
+Project ID (Aura-only): [Insert]
+IdP name: [e.g., Okta, Azure]
+SSO update for existing instance? Yes / No
+Role mappings? Yes / No
+If yes: [Insert details]
+Custom claim? Yes / No
+If yes: [Insert details]
+----
+
+Instance ID (also known as DBID) can be found at xref:managing-instances/instance-details.adoc[instance details].
+Project ID can be found at xref:visual-tour/index.adoc#_settings[project settings].
+
+== Microsoft Entra ID SSO
+
+. In the *Azure Portal*, go to *App Registrations* and then *New Registration*.
+
+. Add a name for the new app registration and select *Register*.
+Skip redirect URIs for now.
+
+. On the app overview page, take note of the Application (client) ID.
+
+. Select the *Client Credentials* link to open the client credentials page.
+
+. Create a new secret and *copy the Value field*; it won’t be visible after you leave the page.
+
+. Go back to the *App Overview* page, open *App Endpoints*, and take note of the *OpenID Connect metadata document URI*.
+
+. Under *Authentication* in the left-hand navigation, set up the redirect URIs:
+
+.. Add a new Web platform.
+.. Enter `https://login.neo4j.com/login/callback` as the redirect URI.
+
+. In the Aura console, go to xref:visual-tour/index.adoc#_organizations[Organization] > *Security* > *Single Sign On* > *New configuration*.
+
+. Select how you want the SSO configuration to be applied in Aura:
+
+.. *Use as a log in method for the organization* applies to organization-level logins (which acts as a login to the Aura console).
+
+.. *Use as a login method for instances within Projects in this Org* applies to the project-level and you can select specific projects within the organization (where login is on the instance).
+
+.. Or, select both.
+
+. For IdP Type select *Microsoft Entra ID*.
+
+. For Client ID enter the *Application (client) ID* from the Azure app.
+
+. For Client Secret enter the client secret value (not secret id) from the secret you created in the Azure app.
+
+. For Discovery URI enter the *OpenID Connect metadata document URI*.
+
+. Configure any additional settings as needed:
+
+.. For organization-level SSO, no additional settings needed.
+
+.. For project-level SSO, enter role mappings if applicable.
+
+. Select *Create*.
+
+. Select the additional log in methods:
+
+.. For *Organization-level testing* it is recommended to keep the Email/password or Google log-in method enabled, so that if SSO fails, you can still access the Aura console and adjust the configuration.
+
+.. For *Project-level testing* the user/password login is always available on the instance, so if SSO isn't working, the instance is still accessible.
+
+=== Token request scopes
+
+When requesting the token from Azure, the scopes Aura sends are:
+
+* `openid`: access to a unique identifier for the user.
+
+* `profile`: access to basic profile information.
+
+* `email`: access to the user's email address.
+
+This will result in Azure asking for consent to display details related to these scopes.
+For more information, see link:https://auth0.com/docs/get-started/apis/scopes/openid-connect-scopes#standard-claims[OpenID Connect Scopes].
+
+== Okta SSO
+
+. In the *Okta admin portal* go to *Applications* and then *Create App Integration*.
+
+. For *Sign-in method* select *OIDC - OpenID Connect*.
+
+. For *Application type* select *Web Application*.
+
+. Select *Next*.
+
+. For *Grant type* select *Authorization Code*.
+
+. For *Sign-in redirect URIs*, add `https://login.neo4j.com/login/callback` as the redirect URI.
+
+. Save.
+
+. In the Aura console, go to xref:visual-tour/index.adoc#_organizations[Organization] > *Security* > *Single Sign On* > *New configuration*.
+
+. Select how you want the SSO configuration to be applied in Aura:
+
+.. *Use as a log in method for the organization* applies to organization-level logins (which acts as a login to the Aura console).
+
+.. *Use as a login method for instances within Projects in this Org* applies to the project-level and you can select specific projects within the organization (where login is on the instance).
+
+.. Or, select both.
+
+. For IdP Type select *Okta*.
+
+. For Client ID enter the Okta *Client ID*.
+
+. For Client Secret enter the *Client Secret*.
+
+. Select discovery method:
+
+.. For Discovery URI, take the domain from your Okta portal, which should be something like `https://dev-123-admin.okta.com/`, and add `.well-known/openid-configuration`.
+The final URL should look similar to `https://dev-123-admin.okta.com/.well-known/openid-configuration`; you can verify it with cURL, as shown after this list.
+
+.. Alternatively, you can select *Manual Configuration* and enter the values separately, including Issuer, Authorization Endpoint, Token Endpoint and JWKS URI.
+
+. Configure any additional settings as needed:
+
+.. For organization-level SSO, no additional settings needed.
+
+.. For project-level SSO, enter role mappings if applicable.
+
+. Select *Create*.
+
+. Select the additional log in methods:
+
+.. For *Organization-level testing* it is recommended to keep the Email/password or Google log-in method enabled, so that if SSO fails, you can still access the Aura console and adjust the configuration.
+
+.. For *Project-level testing* the user/password login is always available on the instance, so if SSO isn't working, the instance is still accessible.
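+
+Before creating the configuration, you can check that the Discovery URI from the steps above is reachable and returns a valid OpenID configuration document; the domain below is the example one used above, so replace it with your own Okta domain:
+
+[source,bash]
+----
+# Should return a JSON document containing issuer, authorization_endpoint,
+# token_endpoint, and jwks_uri.
+curl -s https://dev-123-admin.okta.com/.well-known/openid-configuration
+----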
+
+== FAQ
+
+*Can users get roles added to them in the Aura console via SSO and a group-to-role mapping?*
+
+No. Users must be granted the role on the organization via Aura console invites and access management, as with any other organization.
+
+*Why am I unable to connect to the instance after completing the SSO login, and why is the connection showing as unconnected?*
+
+Ensure that the email field is provided on your user in Microsoft Entra ID.
+If it already is, contact support for further assistance.
+
diff --git a/modules/ROOT/pages/security/tool-auth.adoc b/modules/ROOT/pages/security/tool-auth.adoc
new file mode 100644
index 000000000..42a06e4f7
--- /dev/null
+++ b/modules/ROOT/pages/security/tool-auth.adoc
@@ -0,0 +1,29 @@
+= Tool authentication with Aura user
+:description: This section describes the seamless tool authentication functionality in AuraDB.
+
+label:AuraDB-Free[]
+label:AuraDB-Professional[]
+
+Organization admins can allow their users to seamlessly and securely connect to instances using their Aura account credentials.
+When enabled, users connect to an instance via Query or Explore with a predefined database role matching their console role (see xref:user-management.adoc#roles[User management - Roles] for more information about roles and privileges).
+
+If this setting is disabled, all users are required to connect to graph tools with a database username and password.
+
+[NOTE]
+====
+Tool authentication with Aura user is enabled by default on all new organizations created after May 29th 2025.
+However, this does *not* apply to Virtual Dedicated Cloud.
+====
+
+This feature can be enabled and configured from the Organization settings, available by selecting the organization name in the dropdown menu.
+
+Organization admins control the scope of seamless tool authentication via Aura user roles.
+You can enable or disable access via the checkboxes on *individual instance level*, for an *entire project*, and set *the default for new instances within a project*.
+
+You can select which projects and instances users can connect seamlessly to and which they should be required to use username and password to connect to.
+
+To prevent unauthorized access and to give Project admins full access control, the authentication is used in conjunction with predefined roles that have varying levels of access to the database.
+This means that Project admins assign roles to users that grant them seamless connection to the project and its instances, as well as certain privileges on the databases there.
+
+[.shadow]
+image::tool-authentication.png[]
\ No newline at end of file
diff --git a/modules/ROOT/pages/tutorials/bi.adoc b/modules/ROOT/pages/tutorials/bi.adoc
index bf8f28109..617584931 100644
--- a/modules/ROOT/pages/tutorials/bi.adoc
+++ b/modules/ROOT/pages/tutorials/bi.adoc
@@ -1,19 +1,22 @@
-= Using the BI Connector
+= Using the Neo4j BI Connector
+:page-aliases:platform/tutorials/bi.adoc
In this tutorial we use the Neo4j Connector for BI to read graph data from an Aura instance using some common <<_using_command_line_sql_clients,SQL clients>> and <<_using_bi_tools,BI tools>>.
[CAUTION]
====
-This tutorial includes instructions on the usage of third-party software, which may be subject to changes beyond our control. In case of doubt, please refer to the third-party software documentation.
+This tutorial includes instructions on the usage of third-party software, which may be subject to changes beyond our control.
+In case of doubt, refer to the third-party software documentation.
====
== Downloading the connector
-Download the connector from the https://neo4j.com/download-center/#integrations[Download Center]. Depending on the SQL client or BI tool it will be used with, you will need either the JDBC or the ODBC connector; see the usage examples for further details.
+Download the connector from the https://neo4j.com/download-center/#integrations[Download Center].
+Depending on the SQL client or BI tool it will be used with, you will need either the JDBC or the ODBC connector; see the usage examples for further details.
== Preparing example data
-Before trying the connector with any of the listed tools, some data needs to be loaded on Aura.
+Before trying the connector with any of the listed tools, some data needs to be loaded on Aura.
This can be achieved by running the following Cypher query in the Neo4j Browser:
[source, cypher, subs=attributes+, role=noplay]
@@ -44,19 +47,26 @@ Refer to the link:https://help.tableau.com/current/pro/desktop/en-us/examples_ot
After downloading the JDBC Neo4j Connector for BI from the https://neo4j.com/download-center/#integrations[Download Center]:
-- Close any running instances of Tableau Desktop.
-- Copy the Neo4j driver to the appropriate Tableau drivers folder (e.g. `C:\Program Files\Tableau\Drivers` on Windows, or `~/Library/Tableau/Drivers` on macOS).
-- Start Tableau and search for the `Other Databases (JDBC)` option.
-- Insert the Aura URL as `jdbc:neo4j://xxxxxxxx.databases.neo4j.io?SSL=true`, leave the SQL dialect as `SQL92`, and complete the relevant credentials.
+. Close any running instances of Tableau Desktop.
+. Copy the Neo4j driver JAR file into the appropriate Tableau `Drivers` folder.
+* Use `C:\Program Files\Tableau\Drivers` on Windows.
+* Use `~/Library/Tableau/Drivers` on macOS.
+If the folder is not visible, select `Go -> Go to Folder` in Finder to open the folder manually.
-If the connection fails with a `Generic JDBC connection error`, you can do one of the following:
-
-* Download the SSL.com CA root certificate from link:https://www.ssl.com/how-to/install-ssl-com-ca-root-certificates/[ssl.com] and install it as explained in the link:https://help.tableau.com/current/pro/desktop/en-us/jdbc_ssl_config.htm[Tableau documentation], then restart Tableau and repeat the previous steps (recommended option).
-* Add `&sslTrustStrategy=TRUST_ALL_CERTIFICATES` to the connection string (after `SSL=true`) and try to connect again. **This option requires caution and should not be used in a production environment**.
+. Start Tableau and search for the `Other Databases (JDBC)` option.
+. Insert the Aura URL as `jdbc:neo4j://xxxxxxxx.databases.neo4j.io?SSL=true`, leave the SQL dialect as `SQL92`, and complete the relevant credentials.
After the connection is established, you can select the `neo4j` database and the `Node` schema to find the `Person` table.
You can then explore the table to find the example data.
+==== Troubleshooting
+
+If the connection fails with a `Generic JDBC connection error`, check if you installed the Neo4j driver in the correct location and then:
+
+* Download the `SSL.com` root certificates as explained on link:https://www.ssl.com/how-to/install-ssl-com-ca-root-certificates/[ssl.com] and install them as shown in the link:https://help.tableau.com/current/pro/desktop/en-us/jdbc_ssl_config.htm[Tableau documentation], then restart Tableau and repeat the previous steps (recommended option).
+* Add `&sslTrustStrategy=TRUST_ALL_CERTIFICATES` to the connection string (after `SSL=true`) and try to connect again.
+**This option requires caution and should not be used in a production environment**.
+
=== Power BI
[NOTE]
@@ -68,112 +78,11 @@ Refer to the link:https://docs.microsoft.com/en-us/power-bi/connect-data/desktop
After downloading and installing the ODBC Neo4j Connector for BI from the https://neo4j.com/download-center/#integrations[Download Center]:
-- Open Power BI Desktop.
-- Search for `ODBC` in the *Get data from another source* panel.
-- Select `Simba Neo4j` in the *DSN dropdown* menu.
-- Insert the connection string `Host=xxxxxxxx.databases.neo4j.io;SSL=1` in the *Advanced options* section.
-- Insert your username and password.
+. Open Power BI Desktop.
+. Search for `ODBC` in the *Get data from another source* panel.
+. Select `Simba Neo4j` in the *DSN dropdown* menu.
+. Insert the connection string `Host=xxxxxxxx.databases.neo4j.io;SSL=1` in the *Advanced options* section.
+. Insert your username and password.
Once connected, open sequentially `ODBC` -> `neo4j` -> `Node` -> `Person` in the *Navigator* window to see a preview of the table.
-== Using command-line SQL clients
-
-In order to run SQL queries, we need a SQL client that can use a custom driver.
-Common JDBC-based command-line SQL clients include <<_sqlline>> and <<_jdbcsql>>.
-
-[TIP]
-====
-When connecting with a JDBC driver, the `neo4j+s` URI scheme must be changed into `neo4j` and the `SSL=true` parameter must be added to the URL.
-====
-
-=== sqlline
-
-https://github.com/julianhyde/sqlline[`sqlline`^] is a command-line tool for issuing SQL queries to relational databases via JDBC.
-To clone and build it, run the following:
-
-[source, shell, subs=attributes+]
-----
-$ git clone https://github.com/julianhyde/sqlline
-$ cd sqlline
-$ ./mvnw package
-----
-
-We now need to make the BI connector driver available to `sqllite`.
-This can be done by extracting the `Neo4jJDBC42.jar` file from the downloaded _JDBC BI connector_ into the `sqlline/target` folder.
-
-The `sqlline` client can now be run as follows:
-
-[source, shell, subs=attributes+]
-----
-$ ./bin/sqlline -d com.simba.neo4j.neo4j.jdbc42.Driver
-----
-
-From the client prompt, it is possible to connect to the Aura instance by supplying the username and password when prompted to do so:
-
-[source, shell, subs=attributes+]
-----
-sqlline> !connect jdbc:neo4j://xxxxxxxx.databases.neo4j.io?SSL=true
-----
-
-When the connection is established, a list of tables can be obtained with the `!tables` command:
-
-[source, shell, subs=attributes+]
-----
-jdbc:neo4j://xxxxxxxx.databases.neo4j.io> !tables
-----
-
-----
-+-----------+--------------+---------------------+------------+---------+----------+------------+-----------+--------+
-| TABLE_CAT | TABLE_SCHEM | TABLE_NAME | TABLE_TYPE | REMARKS | TYPE_CAT | TYPE_SCHEM | TYPE_NAME | SELF_R |
-+-----------+--------------+---------------------+------------+---------+----------+------------+-----------+--------+
-| neo4j | Node | Person | TABLE | | | | | |
-| neo4j | Relationship | Person_KNOWS_Person | TABLE | | | | | |
-+-----------+--------------+---------------------+------------+---------+----------+------------+-----------+--------+
-----
-
-It is also possible to run SQL queries:
-
-[source, shell, subs=attributes+]
-----
-jdbc:neo4j://xxxxxxxx.databases.neo4j.io> SELECT * FROM Person;
-----
-
-----
-+----------+-----+------+---------+
-| _NodeId_ | age | name | surname |
-+----------+-----+------+---------+
-| 0 | 42 | John | Doe |
-| 1 | 40 | Jane | Doe |
-+----------+-----+------+---------+
-----
-
-=== jdbcsql
-
-http://jdbcsql.sourceforge.net/[jdbcsql^] is a command-line tool that can be used to connect to a DBMS via a JDBC driver.
-
-After downloading the `jdbcsql-1.0.zip` file from https://sourceforge.net/projects/jdbcsql/files/[SourceForge^], extract it into the `jdbcsql` folder; then, copy the `Neo4jJDBC42.jar` file from the downloaded _JDBC BI Connector_ into `jdbcsql` and make the following changes:
-
-1. Add the following lines to `JDBCConfig.properties`
-+
-----
-# neo4j settings
-neo4j_driver = com.simba.neo4j.neo4j.jdbc42.Driver
-neo4j_url = jdbc:neo4j://host?SSL=true
-----
-
-2. Add `Neo4jJDBC42.jar` to `Rsrc-Class-Path` line in `META-INF/MANIFEST.MF`
-
-Now run the following command (replacing `xxxxxxxx.databases.neo4j.io` with the Aura connection URI, and `yyyyyyyy` with the actual password):
-
-[source, shell, subs=attributes+]
-----
-$ java org.eclipse.jdt.internal.jarinjarloader.JarRsrcLoader -m neo4j -h xxxxxxxx.databases.neo4j.io -d neo4j -U neo4j -P yyyyyyyy 'SELECT * FROM Person'
-----
-
-The result of the query is:
-
-----
-"_NodeId_" age name surname
-0 42 John Doe
-1 40 Jane Doe
-----
\ No newline at end of file
diff --git a/modules/ROOT/pages/tutorials/create-auradb-instance-from-terminal.adoc b/modules/ROOT/pages/tutorials/create-auradb-instance-from-terminal.adoc
index 80f85994b..29eb69e0c 100644
--- a/modules/ROOT/pages/tutorials/create-auradb-instance-from-terminal.adoc
+++ b/modules/ROOT/pages/tutorials/create-auradb-instance-from-terminal.adoc
@@ -1,8 +1,9 @@
[[create-auradb-instance-in-terminal]]
= Create an AuraDB instance in the terminal
-:description: This tutorial describes using the terminal to create an instance in the Aura Console.
+:description: This tutorial describes using the terminal to create an instance in the Aura Console.
+:page-aliases:platform/tutorials/create-auradb-instance-from-terminal.adoc
-This tutorial describes using the terminal to create an instance in the Aura Console.
+This tutorial describes using the terminal to create an instance in the Aura Console.
== Preparation
@@ -10,14 +11,14 @@ This tutorial describes using the terminal to create an instance in the Aura Con
* Log in to the Aura Console.
* Click your email address in the top right corner and select *Account details*.
-* In the *API credentials* section, select *Create*.
+* In the *API credentials* section, select *Create*.
Enter a descriptive name and save the generated Client ID and Client Secret.
=== cURL
* Install cURL via your terminal
* For macOS with Homebrew: use `brew install curl`.
-* Install cURL.
-See link:https://curl.se/dlwiz/[curl download wizard] for more information.
+* Install cURL.
+See link:https://curl.se/[curl] for more information.
* Check cURL is available: Type `curl -V` in the terminal
== Obtain a bearer token
@@ -27,7 +28,7 @@ See link:https://curl.se/dlwiz/[curl download wizard] for more information.
Bearer tokens are valid for one hour.
====
-In the terminal paste the snippet, replacing `YOUR_CLIENT_ID` and `YOUR_CLIENT_SECRET` with the values generated by the Aura Console.
+In the terminal paste the snippet, replacing `YOUR_CLIENT_ID` and `YOUR_CLIENT_SECRET` with the values generated by the Aura Console.
Keep the `:` between the values.
[source, cURL]
@@ -46,14 +47,14 @@ It looks similar to this example:
"access_token":"eyJ1c3IiOiJkNzI2MzE1My03MWZmLTUxMjQtOWVjYy1lOGFlM2FjNjNjZWUiLCJpc3MiOiJodHRwczovL2F1cmEtYXBpLmV1LmF1dGgwLmNvbS8iLCJzdWIiOiJFSDdsRTgwbEhWQVVkbDVHUUpEY0M1VDdxZ3BNTnpqVkBjbGllbnRzIiwiYXVkIjoiaHR0cHM6Ly9jb25zb2xlLm5lbzRqLmlvIiwiaWF0IjoxNzAyOTgzODQzLCJleHAiOjE3MDI5ODc0NDMsImF6cCI6IkVIN2xFODBsSFZBVWRsNUdRSkRjQzVUN3FncE1OempWIiwiZ3R5IjoiY2xpZW50LWNyZWRlbnRpYWxzIn0eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6ImFKbWhtUTlYeExsQmFLdHNuZnJIcCJ9..jkpatG4SCRnxwTPzfEcSJk3Yyd0u_NMH8epNqmSBMUlp_JvvqbKpNdkPIE6vx5hLRgVCVKovxl4KY9yzEkr7R5s4YU3s2K25eNB1q1y3yQ_-9N0e6eOhmjIrsWHMd_rl2NuGIHo6pHihumuJlEg-U2ELkWyu8Iz3zQxjycVnPHzlbu7sbtwVJdU7UzgO12jgDLA1T4mUqvxdAAdnoXO57SwczYoYKY2YL61CMTn-xdQ6MFS8A3vwpGQbRirwVVxvEmoIPCLlQwHeEC4_modJ4cifmjt6ChJb1sxsRpFvdNHm0vNcLjy-96e88D50AMgjvS4VQCmVKA7kUgt7t5IpKg","expires_in":3600,"token_type":"Bearer"
----
-== Obtain the tenant ID
+== Obtain the project ID
-Use cURL to obtain the tenant ID with your token.
+Use cURL to obtain the project ID with your token.
Replace `YOUR_BEARER_TOKEN` with your token.
[source, cURL]
----
-curl --location 'https://api.neo4j.io/v1/tenants' --header 'Accept: application/json' --header 'Authorization: Bearer YOUR_BEARER_TOKEN'
+curl --location 'https://api.neo4j.io/v1/projects' --header 'Accept: application/json' --header 'Authorization: Bearer YOUR_BEARER_TOKEN'
----
This returns something similar to:
@@ -63,25 +64,37 @@ This returns something similar to:
{"data":[{"id":"6e6bbbe2-5678-5f8a-1234-b1f62f08b98f","name":"team1"},{"id":"ad69ee24-1234-5678-af02-ff8d3cc23611","name":"team2"}]}
----
-In the example response above, two tenants are returned.
-If you’re a member of multiple tenants, select the one you wish to use.
+In the example response above, two projects are returned.
+If you're a member of multiple projects, select the one you wish to use.
+
+[NOTE]
+====
+_Project_ replaces _Tenant_ in the console UI and documentation.
+However, in the API, `tenant` remains the nomenclature.
+====
== Configure an AuraDB instance
=== Configure the instance values
-Use the bearer token and Tenant ID to create the Aura instance.
+Use the bearer token and Project ID to create the Aura instance.
Replace `YOUR_BEARER_TOKEN` with your token.
-Replace `YOUR_TENANT_ID` with your tenant ID.
+Replace `YOUR_PROJECT_ID` with your project ID.
-The following values are customizable `version`, `region`, `memory`, `name`, `type`, `tenantid`, and `cloud provider`.
+The following values are customizable: `version`, `region`, `memory`, `name`, `type`, `tenant_id`, and `cloud_provider`.
[source, cURL]
----
-curl --location 'https://api.neo4j.io/v1/instances' --header 'Content-Type: application/json' --header 'Accept: application/json' --header 'Authorization: Bearer YOUR_BEARER_TOKEN' --data ' { "version": "5", "region": "europe-west1", "memory": "8GB", "name": "instance01", "type": "enterprise-db", "tenant_id": "YOUR_TENANT_ID", "cloud_provider": "gcp" }'
+curl --location 'https://api.neo4j.io/v1/instances' --header 'Content-Type: application/json' --header 'Accept: application/json' --header 'Authorization: Bearer YOUR_BEARER_TOKEN' --data ' { "version": "5", "region": "europe-west1", "memory": "8GB", "name": "instance01", "type": "enterprise-db", "tenant_id": "YOUR_PROJECT_ID", "cloud_provider": "gcp" }'
----
-See xref:platform/api/overview.adoc[Aura API documentation] for more details.
+See xref:api/overview.adoc[Aura API documentation] for more details.
+
+[CAUTION]
+====
+The legacy term `Enterprise` is still used within the codebase and API.
+However, in the Aura console and documentation, the AuraDB Enterprise instance type is now known as AuraDB Virtual Dedicated Cloud.
+====
At this point, an Aura instance is provisioned in the Aura Console.
Optionally, use this code in the terminal to check the status:
@@ -98,4 +111,4 @@ curl --location 'https://api.neo4j.io/v1/instances/YOUR_INSTANCE_ID' --header 'A
curl --location 'https://api.neo4j.io/v1/instances/YOUR_INSTANCE_ID' --header 'Accept: application/json' --header 'Authorization: Bearer YOUR_BEARER_TOKEN'
----
-If the value of `status` shows `running`, you can start using the new Aura instance.
\ No newline at end of file
+If the value of `status` shows `running`, you can start using the new Aura instance.
diff --git a/modules/ROOT/pages/tutorials/migration-free.adoc b/modules/ROOT/pages/tutorials/migration-free.adoc
new file mode 100644
index 000000000..6f04add00
--- /dev/null
+++ b/modules/ROOT/pages/tutorials/migration-free.adoc
@@ -0,0 +1,89 @@
+= Migrating your AuraDB Free instance to another AuraDB tier
+:description: This section describes migrating your Neo4j AuraDB Free Instance to another AuraDB tier.
+:page-aliases:platform/tutorials/migration-free.adoc
+
+== AuraDB Professional or AuraDB Virtual Dedicated Cloud
+
+Upgrading your tier to AuraDB Professional or AuraDB Virtual Dedicated Cloud gives you access to additional resources and functionalities to support production workloads and applications with demanding storage and processing needs.
+
+*Migration options*
+
+* Upgrade to AuraDB Professional
+* Clone to new (Works for AuraDB Professional and AuraDS Professional)
+* Manual process
+
+== Upgrade to AuraDB Professional
+
+You can upgrade an instance to the Professional tier directly from the console.
+
+On the instance card, click the button that says *Upgrade*.
+
+Verify that the cloud provider and region are correct, select the instance size you need, add any extra details, and select *Create*.
+
+== Clone (Works for AuraDB Professional and AuraDS)
+
+The other way is to _clone_ your existing instance to the Professional tier.
+
+* Click the three dots (*...*) on an instance card.
+* Select either *Clone to new instance* or *Clone to existing instance* (the existing instance's current content will be overwritten).
+
+== Manual process
+
+In your existing instance:
+
+. (Optional but recommended) Capture existing index and constraint definitions:
+.. Run the following Cypher statement:
++
+[source,cypher]
+----
+SHOW CONSTRAINTS YIELD createStatement
+----
++
+Save the result to a file to use later in the process.
+.. Run the following Cypher statement:
++
+[source,cypher]
+----
+SHOW INDEXES YIELD createStatement
+----
++
+Save the result to a file to use later in the process.
+
+. (Optional but recommended) Drop the indexes and constraints.
+.. Run the following Cypher statement to generate the commands to drop existing constraints:
++
+[source,cypher]
+----
+SHOW CONSTRAINTS YIELD name
+RETURN 'DROP CONSTRAINT ' + name + ';'
+----
+.. Execute the generated commands to drop existing constraints.
+.. Run the following Cypher statement to generate the commands to drop existing indexes:
++
+[source,cypher]
+----
+SHOW INDEXES YIELD name
+RETURN 'DROP INDEX ' + name + ';'
+----
+.. Execute the generated commands to drop existing indexes.
++
+For more information about indexes and constraints, see link:{neo4j-docs-base-uri}/cypher-manual/current/indexes/[Cypher Manual -> Indexes] and link:{neo4j-docs-base-uri}/cypher-manual/current/constraints/[Cypher Manual -> Constraints].
++
+. In the console of your existing instance (AuraDB Free), do the following:
+
+.. In the Aura Console, select the AuraDB instance.
+.. Go to the *Snapshots* tab.
+.. Click the three dots (*...*) next to the daily automatic snapshot and select *Export*.
+.. Save the dump file locally (preserve the `.backup` extension).
++
+. Then create a new AuraDB instance in AuraDB Professional or AuraDB Virtual Dedicated Cloud with the right resource sizing.
+From your new instance, do the following:
+
+.. Go to the instance card
+.. Select *Backup & Restore*
+.. Upload your .backup file
++
+. In the newly created AuraDB Professional or AuraDB Virtual Dedicated Cloud instance:
++
+(Optional) Once the AuraDB instance is loaded and started, recreate the indexes and constraints using the definitions captured earlier in the process.
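+
+As a minimal sketch of this last step, assuming a hypothetical `Person` label with `id` and `name` properties, re-running the statements saved from `SHOW CONSTRAINTS`/`SHOW INDEXES` might look like the following (your saved `createStatement` values will differ):
+
+[source,cypher]
+----
+// Example statements captured earlier with SHOW CONSTRAINTS / SHOW INDEXES YIELD createStatement
+CREATE CONSTRAINT person_id_unique FOR (n:Person) REQUIRE n.id IS UNIQUE;
+CREATE RANGE INDEX person_name_index FOR (n:Person) ON (n.name);
+----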
\ No newline at end of file
diff --git a/modules/ROOT/pages/tutorials/migration.adoc b/modules/ROOT/pages/tutorials/migration.adoc
index ab75c3553..a69b4070a 100644
--- a/modules/ROOT/pages/tutorials/migration.adoc
+++ b/modules/ROOT/pages/tutorials/migration.adoc
@@ -13,9 +13,9 @@ If your local Neo4j version is older than 4.3, you need to upgrade to at least N
== Preparation
-=== Migrating to Neo4j 5
+=== Migrating to Neo4j latest
-If you are migrating from self-managed Neo4j 4.3 or 4.4 to Neo4j 5 on Aura, carefully read the xref:tutorials/upgrade.adoc#_preparation[Preparation section in the Upgrade tutorial] to ensure you are well prepared for the migration.
+If you are migrating from self-managed Neo4j 4.3 or 4.4 to Neo4j latest on Aura, carefully read the xref:tutorials/upgrade.adoc#_preparation[Preparation section in the Upgrade tutorial] to ensure you are well prepared for the migration.
=== Aura instance size
@@ -25,6 +25,7 @@ The Aura RAM-to-storage ratio is 1:2, which means, for example, that a 32 GB Aur
=== APOC compatibility
+Aura supports most, but not all, of the functions and procedures in the link:{neo4j-docs-base-uri}/apoc/current[APOC Core] library.
If you are using any APOC procedures and functions, make sure they are all available in Aura by checking the link:https://neo4j.com/docs/aura/platform/apoc/[APOC support page].
== Creating and uploading a database dump
@@ -40,7 +41,7 @@ The following admin commands must be invoked with the same user as your self-man
This guarantees that Neo4j has full rights to start and work with the database files you use.
. Stop your self-managed Neo4j database.
-If you are running an Enterprise Edition, you can stop only the database you want to dump using the command `STOP DATABASE {database}` in Cypher Shell or Browser.
+Stop the database you want to dump, using the command `STOP DATABASE {database}` in Cypher Shell or Browser.
. Ensure the target directory to store the database dumps (for instance `{dump-folder}`) exists.
@@ -71,7 +72,7 @@ bin/neo4j-admin database dump {database} --to-path={dump-folder}
====
+
-. Depending on your self-managed Neo4j version, upload the database dump (e.g., `{database}`) to your Aura instance using one of the following options:
+. Depending on your self-managed Neo4j version, upload the database dump (e.g., `{database}`) to your Aura instance (the Aura instance does *not* need to be stopped) using one of the following options:
+
[.tabbed-example]
@@ -97,4 +98,4 @@ bin/neo4j-admin database upload {database} --from-path={dump-folder} --to-uri={a
----
=====
====
-+
\ No newline at end of file
++
diff --git a/modules/ROOT/pages/tutorials/performance-improvements.adoc b/modules/ROOT/pages/tutorials/performance-improvements.adoc
index 1d5a975f0..fd0eac8e3 100644
--- a/modules/ROOT/pages/tutorials/performance-improvements.adoc
+++ b/modules/ROOT/pages/tutorials/performance-improvements.adoc
@@ -1,5 +1,6 @@
[[aura-performance]]
= Improving Cypher performance
+:page-aliases:platform/tutorials/performance-improvements.adoc
This page covers a number of steps you can take to improve the Cypher performance of your workload.
@@ -51,7 +52,7 @@ Once in the cache, the subsequent execution time will improve.
Furthermore, always use parameters instead of literal values to benefit from the cache.
====
-Read more about link:{neo4j-docs-base-uri}/cypher-manual/current/execution-plans/[execution plans] and see this detailed guide for the steps on link:https://support.neo4j.com/s/article/4404022359443-Performance-tuning-with-Neo4j-AuraDB instead[how to capture the execution plans]
+Read more about link:{neo4j-docs-base-uri}/cypher-manual/current/execution-plans/[execution plans] and see a detailed guide for the steps on link:https://support.neo4j.com/s/article/4404022359443-Performance-tuning-with-Neo4j-AuraDB[how to capture the execution plans].
To best interpret the output of your execution plan, it is recommended that you get familiar with the terms used on it.
See link:{neo4j-docs-base-uri}/cypher-manual/current/execution-plans/operator-summary/[this summary of execution plan operators] for more information.
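+
+As a minimal, hypothetical illustration of the parameter advice above, both statements below return the same data, but the parameterized form is the one that benefits from the query cache:
+
+[source, cypher]
+----
+// Literal value: plan reuse across different names is not guaranteed
+MATCH (p:Person {name: 'Alice'}) RETURN p;
+
+// Parameterized equivalent: the cached plan can be reused for any value of $name
+MATCH (p:Person {name: $name}) RETURN p;
+----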
@@ -61,7 +62,7 @@ See link:{neo4j-docs-base-uri}/cypher-manual/current/execution-plans/operator-su
As your data volume grows, it is important to define constraints and indexes in order to achieve the best performance for your queries.
For that, the runtime engine will need to evaluate the cost associated with a query and, to get the best estimations, it will rely on already existing indexes.
This will likely show whether an index is missing from the execution plan and which one is it.
-Though in some circunstances it might look like an index is not available or possible, it may also make sense to reconsider the model and create an intermediate node or another relationship type just to leverage it.
+Though in some circumstances it might look like an index is not available or possible, it may also make sense to reconsider the model and create an intermediate node or another relationship type just to leverage it.
Read more about link:{neo4j-docs-base-uri}/cypher-manual/current/query-tuning/indexes/[the use of indexes] for a more comprehensive explanation.
@@ -95,14 +96,14 @@ To review what is running at any given time (this makes particular sense if you
== Runtime engine and Cypher version
The execution plan should show you the runtime that is selected for the execution of your query.
-Usually, the planner makes the right decision, but it may be worth checking at times if the other runtimes do not perform better.
+Usually, the planner makes the right decision, but it may be worth checking at times if any other runtime performs better.
Read more about link:{neo4j-docs-base-uri}/cypher-manual/current/query-tuning/#cypher-runtime[query tuning] on Cypher runtime.
To invoke the use of a given runtime forcibly, prepend your Cypher statement with:
-* `CYPHER runtime=pipelined` for pipelined runtime
-* `CYPHER runtime=slotted` for slotted runtime
-* `CYPHER runtime=interpreted` for interpreted runtime
+* `CYPHER runtime=pipelined` for `pipelined` runtime
+* `CYPHER runtime=slotted` for `slotted` runtime
+* `CYPHER runtime=interpreted` for `interpreted` runtime
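+
+For example, a hypothetical query forced onto the `slotted` runtime (any Cypher statement can follow the prefix):
+
+[source, cypher]
+----
+CYPHER runtime=slotted
+MATCH (p:Person)-[:KNOWS]->(friend)
+RETURN p.name, count(friend) AS friends
+----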
If you have a Cypher pattern that is not performing without error, it could as well be running on a prior Cypher version.
You can control the version used to interpret your queries by using these link:{neo4j-docs-base-uri}/cypher-manual/current/query-tuning/#cypher-version[Cypher query options].
diff --git a/modules/ROOT/pages/tutorials/spark.adoc b/modules/ROOT/pages/tutorials/spark.adoc
index 280dcf5b7..8bd911e2d 100644
--- a/modules/ROOT/pages/tutorials/spark.adoc
+++ b/modules/ROOT/pages/tutorials/spark.adoc
@@ -1,5 +1,6 @@
-= Using the Apache Spark Connector
+= Using the Neo4j Connector for Apache Spark
:product: Aura
+:page-aliases:platform/tutorials/spark.adoc
This tutorial shows how to use the Neo4j Connector for Apache Spark to write to and read data from an Aura instance.
@@ -20,7 +21,7 @@ _Example: Neo4j Connector 5.1.0, built for Spark 3.x with Scala 2.12._
$ spark-3.4.1-bin-hadoop3/bin/spark-shell --jars neo4j-connector-apache-spark_2.12-5.1.0_for_spark_3.jar
----
-== Running code in Spark
+== Running code in Apache Spark
[TIP]
====
@@ -87,4 +88,5 @@ val data = spark.read.format("org.neo4j.spark.DataSource")
data.show()
----
-For further information on how to use the connector, read the link:{neo4j-docs-base-uri}/spark/[Neo4j Spark Connector docs].
\ No newline at end of file
+For further information on how to use the connector, read the link:{neo4j-docs-base-uri}/spark/[Neo4j Spark Connector docs].
+
diff --git a/modules/ROOT/pages/tutorials/troubleshooting.adoc b/modules/ROOT/pages/tutorials/troubleshooting.adoc
index 3a8e9ad77..e5b5da0f4 100644
--- a/modules/ROOT/pages/tutorials/troubleshooting.adoc
+++ b/modules/ROOT/pages/tutorials/troubleshooting.adoc
@@ -1,10 +1,11 @@
[[aura-troubleshooting]]
= Troubleshooting
:description: Troubleshooting information that can help you diagnose and correct problems.
+:page-aliases:platform/tutorials/troubleshooting.adoc
This page provides possible solutions to several common issues you may encounter when using Neo4j Aura.
-Regardless of the issue, viewing the link:/docs/aura/platform/logging/[Aura query log] is always recommended to monitor processes and verify any problems.
+Regardless of the issue, viewing the xref:platform/logging/download-logs.adoc[Aura query log] is always recommended to monitor processes and verify any problems.
== Query performance
@@ -20,11 +21,11 @@ Currently using 275.1 MiB. dbms.memory.transaction.global_max_size threshold rea
----
The `org.neo4j.memory.MemoryLimitExceededException` configuration acts as a safeguard, limiting the quantity of memory allocated to all transactions while preserving the regular operations of the Aura instance.
-Similarly, the property `dbms.memory.transaction.global_max_size` also aims to protect the Aura Instance from experiencing any OOMs (OutOfMemory exceptions) and increase resiliency.
+Similarly, the property `dbms.memory.transaction.global_max_size` aims to protect the Aura instance from OOM (out of memory) exceptions and increase resiliency.
It is enabled in Aura and cannot be disabled.
However, the measured heap usage of all transactions is only an estimate and may differ from the actual number.
-The estimation algorithm relies on a conservative approach, which can lead to overestimations of memory usage.
+The estimation algorithm relies on a conservative approach, which can lead to overestimation of memory usage.
In such cases, all contributing objects' identities are unknown and cannot be assumed to be shared.
Solution::
@@ -34,7 +35,7 @@ Solution::
We recommend handling this error in your application code, as it may be intermittent.
====
-Overestimations are most likely to happen when using `UNWIND` on long lists or when expanding a variable length or shortest path pattern.
+Overestimation is most likely to happen when using `UNWIND` on long lists or when expanding a variable length or shortest path pattern.
The many relationships shared between the computed result paths could be the cause of a lack of precision in the estimation algorithm.
To avoid this scenario, try running the same query without using a sorting operation like `ORDER BY` or `DISTINCT`.
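+
+As a hypothetical sketch, the first query below combines a variable-length expansion with sorting operations and is the kind of pattern whose memory use tends to be overestimated; the second drops them as a troubleshooting step:
+
+[source, cypher]
+----
+// Pattern prone to memory overestimation: variable-length paths plus DISTINCT and ORDER BY
+MATCH (a:Person)-[:KNOWS*1..4]->(b:Person)
+RETURN DISTINCT b.name AS name
+ORDER BY name;
+
+// The same data access without the sorting operations
+MATCH (a:Person)-[:KNOWS*1..4]->(b:Person)
+RETURN b.name AS name;
+----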
@@ -47,11 +48,11 @@ Keep in mind that the query can succeed regardless.
+
* Rework the relevant query to optimize it.
** Use `EXPLAIN` or `PROFILE` to review the plans (see more about link:https://neo4j.com/docs/cypher-manual/current/query-tuning/[query tuning]).
-** Use `PROFILE` in cypher-shell to check the overall memory footprint of a query.
+** Use `PROFILE` in the Cypher Shell to check the overall memory footprint of a query.
The output will include memory consumption information, the query's result, if any, and the execution plan.
In the following example, the memory consumed is 11,080 Bytes:
+
-image::planSummary.png[]
+image::planSummary.png["Plan summary"]
* Increase the instance size of your Aura deployment to get more resources.
* Reduce the concurrency of queries heavy on resources to get a better chance of success.
@@ -66,7 +67,8 @@ See link:https://neo4j.com/docs/operations-manual/current/performance/memory-con
== Neo4j Admin database upload errors
-The `database upload` command was introduced in Neo4j Admin version 5, replacing the `push-to-cloud` command that was present in Neo4j Admin version 4.4 and earlier. The following solutions are relevant to both commands.
+The `database upload` command was introduced in Neo4j Admin version 5, replacing the `push-to-cloud` command that was present in Neo4j Admin version 4.4 and earlier.
+The following solutions are relevant to both commands.
=== `LegacyIndexes`
@@ -83,7 +85,8 @@ Solution::
To resolve the issue, follow these steps:
-. Make sure you are at least on Neo4j version 4.4 or later. See more information about link:https://neo4j.com/docs/upgrade-migration-guide/current/[upgrade and migration].
+. Make sure you are at least on Neo4j version 4.4 or later.
+See more information about link:https://neo4j.com/docs/upgrade-migration-guide/current/[upgrade and migration].
. In your local graph, use the following commands to get a list of the indexes and their types.
This will also provide the sequential list of commands to drop and then recreate the indexes:
+
@@ -97,7 +100,7 @@ RETURN command + " " + description
+
. In Neo4j Browser, select the "Enable multi statement query editor" option under the browser settings.
. Take the list of commands from the 2nd step and copy them in one list of multiple queries into Browser and run those queries.
-. After the indexes are recreated, attempt the `database upload` command again.
+. After the indexes are recreated, try the `database upload` command again.
=== `InconsistentData`
@@ -113,7 +116,8 @@ You may get this error if the store you are uploading is in a Neo4j version that
Solution::
-. link:https://neo4j.com/docs/upgrade-migration-guide/current/[Upgrade your database]. Make sure you are on Neo4j 4.4 or later.
+. link:https://neo4j.com/docs/upgrade-migration-guide/current/[Upgrade your database].
+Make sure you are on Neo4j 4.4 or later.
. If you encounter problems upgrading, please raise a ticket with our link:https://support.neo4j.com[Customer Support] team.
=== `LogicalRestrictions`
@@ -168,3 +172,5 @@ let session = driver.session(....)
====
Rapid session creation can exceed the database's maximum concurrent connection limit, resulting in the “Session Expired” error when creating more sessions.
====
+
+
diff --git a/modules/ROOT/pages/tutorials/upgrade.adoc b/modules/ROOT/pages/tutorials/upgrade.adoc
index 3af0bf702..1cb661f3b 100644
--- a/modules/ROOT/pages/tutorials/upgrade.adoc
+++ b/modules/ROOT/pages/tutorials/upgrade.adoc
@@ -1,11 +1,12 @@
-= Upgrade to Neo4j 5 within Aura
-:description: This tutorial describes how to upgrade an Aura instance running Neo4j version 4 to Neo4j version 5.
+= Migrate a version 4 instance to the latest version
+:description: This tutorial describes how to migrate an Aura instance running Neo4j version 4 to Neo4j latest.
-This tutorial describes how to upgrade an Aura instance running Neo4j version 4 to Neo4j version 5.
+This tutorial describes how to migrate an Aura instance running Neo4j 4 to the latest version of Neo4j.
-[CAUTION]
+[NOTE]
====
-New AuraDS and AuraDB Free instances use Neo4j 5 as standard, while all others give the option to choose between Neo4j 4 and 5 during creation.
+* New instances are created with the latest version of Neo4j.
+* The Migration Readiness Report in the Aura console helps Neo4j Aura 4 users prepare for migration.
====
== Prepare for the upgrade
@@ -17,111 +18,53 @@ For a smooth migration:
. Check the breaking changes for each driver you use, for example in the link:https://neo4j.com/docs/api/python-driver/5.0/breaking_changes.html#breaking-changes[Python driver] and in the link:https://github.com/neo4j/graph-data-science-client/blob/main/changelog.md[GDS client].
. Make sure you switch to the latest version of the driver in line with the version of the Neo4j database.
-This can be done before upgrading the version of Neo4j that you are using with Aura, as 5.x drivers are backward compatible.
+This can be done before upgrading the version of Neo4j that you are using with Aura, as drivers for the latest Neo4j version are backward compatible.
-The link:https://neo4j.com/docs/upgrade-migration-guide/current/version-5/migration/breaking-changes/[Update and migration guide] contains all information and lists all the breaking changes.
+The link:https://neo4j.com/docs/upgrade-migration-guide/current/version-5/migration/breaking-changes/[Upgrade and migration guide] contains all the relevant information and lists all the breaking changes.
=== Indexes
-In Neo4j 5, BTREE indexes are replaced by RANGE, POINT, and TEXT indexes.
-Before migrating a database, in Neo4j 4, you should create a matching RANGE, POINT, or TEXT index for each BTREE index (or index-backed constraint).
-You can run `SHOW INDEXES` on your Neo4j 4 database to display its indexes.
+In Neo4j 5, BTREE indexes were replaced by RANGE, POINT, and TEXT indexes.
+These indexes are not automatically created during migration, so you must manually create an equivalent index for each BTREE index in Neo4j 4.
-In most cases, RANGE indexes can replace BTREE.
-However, there might be occasions when a different index type is more suitable, such as:
+The `SHOW INDEXES` command lists all database indexes.
-* Use POINT indexes if the property value type is `point` and `distance` or `bounding box` queries are used for the property.
-* Use TEXT indexes if the property value type is `text` and the values can be larger than 8Kb.
-* Use TEXT indexes if the property value type is `text` and `CONTAINS` and `ENDS WITH` are used in queries for the property.
+If your database is running Neo4j 4.4 or later, you can create these new index types before migrating.
+Read more in the link:{neo4j-docs-base-uri}/cypher-manual/4.4/indexes-for-search-performance/#indexes-future-indexes[Cypher Manual -> Future indexes].
+
+RANGE indexes can replace most BTREE indexes.
+
+TEXT indexes are more appropriate for queries evaluating `STRING` predicates involving the `CONTAINS` or `ENDS WITH` operators.
+For more information, see the link:{neo4j-docs-base-uri}/cypher-manual/current/indexes/search-performance-indexes/managing-indexes/#create-text-index[Cypher Manual -> Create text indexes].
+
+POINT indexes should be used for queries evaluating `POINT` values.
+For more information, see the link:{neo4j-docs-base-uri}/cypher-manual/current/indexes/search-performance-indexes/managing-indexes/#create-point-index[Cypher Manual -> Create point indexes].
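+
+As a minimal sketch (the `Label`, `prop1`, and `prop2` names below are placeholders), equivalent TEXT and POINT indexes can be created as follows:
+
+.Create a text index and a point index
+[source, Cypher, role="noplay"]
+----
+CREATE TEXT INDEX text_index_name FOR (n:Label) ON (n.prop1);
+CREATE POINT INDEX point_index_name FOR (n:Label) ON (n.prop2);
+----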
After creating the new index, the old index should be dropped.
-The following example shows how to create a new RANGE index and drop an existing `index_name` index:
+.Create a range index and drop an existing BTREE index
[source, Cypher, role="noplay"]
----
CREATE RANGE INDEX range_index_name FOR (n:Label) ON (n.prop1);
DROP INDEX index_name;
----
-The following example instead shows how to create a constraint backed by a RANGE index:
+Property uniqueness and key constraints are backed by indexes.
+For more information, see the link:{neo4j-docs-base-uri}/cypher-manual/current/constraints/managing-constraints/#constraints-and-backing-indexes[Cypher Manual -> Constraints and backing indexes].
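+
+As a brief, hedged illustration (with placeholder names), creating a property uniqueness constraint also creates its backing range index:
+
+.Create a property uniqueness constraint with a backing index
+[source, Cypher, role="noplay"]
+----
+CREATE CONSTRAINT constraint_name FOR (n:Label) REQUIRE n.prop1 IS UNIQUE;
+----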
-[source, Cypher, role="noplay"]
-----
-CREATE CONSTRAINT constraint_with_provider FOR (n:Label) REQUIRE (n.prop1) IS UNIQUE OPTIONS {indexProvider: 'range-1.0'}
-----
-
-For more information about creating indexes, see link:https://neo4j.com/docs/cypher-manual/current/indexes-for-search-performance/#administration-indexes-examples[Cypher Manual -> Creating indexes].
+For information about indexes and query performance, see the link:{neo4j-docs-base-uri}/cypher-manual/current/indexes/search-performance-indexes/using-indexes/[Cypher Manual -> The impact of indexes on query performance].
=== Cypher updates
-Neo4j 5 introduces some changes to the Cypher syntax and error handling.
+All changes to Cypher, including feature additions, deprecations, and removals, are tied to specific Neo4j versions.
+For more information, see the link:{neo4j-docs-base-uri}/cypher-manual/current/deprecations-additions-removals-compatibility[Cypher Manual -> Removals, deprecations, additions and extensions].
-==== Cypher syntax
-
-All changes in the Cypher language syntax are detailed in link:https://neo4j.com/docs/cypher-manual/5/deprecations-additions-removals-compatibility[Cypher Manual -> Removals, deprecations, additions and extensions].
Thoroughly review this section in the version you are moving to and make the necessary changes in your code.
-Here is a short list of the main changes introduced in Neo4j 5:
-
-[cols="1a,1a", options="header"]
-|===
-|*Deprecated feature*
-|*Details*
-
-|[source, Cypher, role="noplay"]
-----
-MATCH (n)-[r:REL]->(m) SET n=r
-----
-|Use the `properties()` function instead to get the map of properties of nodes/relationships that can then be used in a `SET` clause:
-
-[source, Cypher, role="noplay"]
-----
-MATCH (n)-[r:REL]->(m) SET n=properties(r)
-----
-
-|[source, Cypher, role="noplay"]
-----
-MATCH (a), (b), allShortestPaths((a)-[r]->(b)) RETURN b
-
-MATCH (a), (b), shortestPath((a)-[r]->(b)) RETURN b
-----
-|`shortestPath` and `allShortestPaths` without link:https://neo4j.com/docs/cypher-manual/5/syntax/patterns/#cypher-pattern-varlength[variable-length relationship] are deprecated. Instead, use a `MATCH` with a `LIMIT` of 1 or:
-[source, Cypher, role="noplay"]
-----
-MATCH (a), (b), shortestPath((a)-[r*1..1]->(b)) RETURN b
-----
-
-|[source, Cypher, role="noplay"]
-----
-CREATE DATABASE databaseName.withDot ...
-----
-|Creating a database with unescaped dots in the name has been deprecated, instead escape the database name:
-[source, Cypher, role="noplay"]
-----
-CREATE DATABASE `databaseName.withDot` ...
-----
-|===
-
-==== Error/Warning/Info handling in Cypher
-
-Many semantic errors that Cypher finds are reported as `Neo.ClientError.Statement.SyntaxError` even though they are semantic and not syntax errors.
-In Neo4j 5, the metadata returned by Cypher queries is improved.
-
-* The severity of some of the Warning codes is moved to Info:
-
-** `SubqueryVariableShadowingWarning` -> `SubqueryVariableShadowing`
-** `NoApplicableIndexWarning` -> `NoApplicableIndex`
-** `CartesianProductWarning` -> `CartesianProduct`
-** `DynamicPropertyWarning` -> `DynamicProperty`
-** `EagerOperatorWarning` -> `EagerOperator`
-** `ExhustiveShortestPathWarning` -> `ExhaustiveShortestPath`
-** `UnboundedVariableLengthPatternWarning` -> `UnboundedVariableLengthPattern`
-** `ExperimentalFeature` -> `RuntimeExperimental`
-
=== APOC
+Aura supports most, but not all, of the functions and procedures in the link:{neo4j-docs-base-uri}/apoc/current[APOC Core] library.
All APOC procedures and functions available in Aura are listed in the link:https://neo4j.com/docs/aura/platform/apoc/[APOC Core library].
-See the link:https://neo4j.com/docs/apoc/5/[APOC documentation] for further details.
=== Procedures
@@ -150,24 +93,24 @@ Some procedures have been replaced by commands:
| `dbms.scheduler.profile` | -
|===
-Refer to the link:https://neo4j.com/docs/upgrade-migration-guide/current/version-5/migration/breaking-changes/#_removals[Update and migration guide] for a full list of removals and deprecations.
+Refer to the link:https://neo4j.com/docs/upgrade-migration-guide/current/version-5/migration/breaking-changes/#_removals[Upgrade and migration guide] for a full list of removals and deprecations.
=== Neo4j Connectors
-If you are using a Neo4j Connector for link:https://github.com/neo4j-contrib/neo4j-spark-connector/releases/[Apache Spark] or link:https://github.com/neo4j-contrib/neo4j-streams/releases[Apache Kafka], make sure its version is compatible with Neo4j 5.
+If you are using a Neo4j Connector for link:https://github.com/neo4j-contrib/neo4j-spark-connector/releases/[Apache Spark] or link:https://github.com/neo4j-contrib/neo4j-streams/releases[Apache Kafka], make sure its version is compatible with the latest Neo4j version.
-The Neo4j BI Connectors available on the link:https://neo4j.com/download-center/#integrations[Download center] are compatible with Neo4j 5.
+The Neo4j BI Connectors available on the link:https://neo4j.com/deployment-center/#integrations[Deployment center] are compatible with the latest Neo4j version.
== Perform the upgrade
-Once you have prepared your Neo4j 4 Aura instance, you are ready to migrate the instance to a new or existing Neo4j 5 instance.
+Once you have prepared your Neo4j 4 Aura instance, you are ready to migrate it to a new or existing instance running the latest Neo4j version.
=== Clone
-If you have an existing Neo4j 5 instance, you can use the *Clone To Existing* instance action on your Neo4j 4 xref:auradb/managing-databases/database-actions.adoc#_clone_to_an_existing_auradb_instance[AuraDB] or xref:aurads/managing-instances/instance-actions#_clone_to_an_existing_aurads_instance[AuraDS] instance.
+If you have an existing instance running the latest Neo4j version, you can use the *Clone To Existing* instance action on your link:https://neo4j.com/docs/aura/managing-instances/instance-actions/#_clone_to_an_existing_instance[Neo4j 4 AuraDB] instance.
-If you do not have an existing Neo4j 5 instance, you can use the *Clone To New* instance action on your Neo4j 4 xref:auradb/managing-databases/database-actions.adoc#_clone_to_a_new_auradb_instance[AuraDB] or xref:aurads/managing-instances/instance-actions#_clone_to_a_new_aurads_instance[AuraDS] instance.
+If you do not have an existing instance running the latest Neo4j version, you can use the *Clone To New* instance action on your link:https://neo4j.com/docs/aura/managing-instances/instance-actions/#_clone_to_a_new_instance[Neo4j 4 AuraDB] instance.
-=== Export and Import
+=== Export and import
-Alternatively, you can *Export* a snapshot dump file from your Neo4j 4 xref:auradb/managing-databases/backup-restore-export#_backup_and_export[AuraDB] or xref:aurads/managing-instances/backup-restore-export#_backup_and_export[AuraDS] instance, create a new Neo4j 5 instance manually, and then import the dump file into your new Neo4j 5 xref:auradb/importing/import-database#_import_database[AuraDB] or xref:aurads/importing-data/import-db#_import_database[AuraDS] instance.
\ No newline at end of file
+Alternatively, you can *Export* a snapshot dump file from your Neo4j 4 xref:auradb/managing-databases/backup-restore-export#_backup_and_export[AuraDB] instance, manually create a new instance running the latest Neo4j version, and then link:https://neo4j.com/docs/aura/managing-instances/backup-restore-export/#restore-backup[import the dump file] into it.
diff --git a/modules/ROOT/pages/user-management.adoc b/modules/ROOT/pages/user-management.adoc
new file mode 100644
index 000000000..d389cbb69
--- /dev/null
+++ b/modules/ROOT/pages/user-management.adoc
@@ -0,0 +1,450 @@
+[[aura-user-management]]
+= User management
+:description: This page describes how to manage users in Neo4j Aura.
+:page-aliases: platform/user-management.adoc
+
+User management is a feature within Aura that allows admins to invite users and set their roles within an isolated environment.
+
+You can view and manage roles from *Users* pages, accessible via the console sidebar.
+An organization has one *Users* page for managing organization-level roles.
+Each project has a *Users* page for managing project-specific roles.
+
+[NOTE]
+====
+Users can only be invited on the project-level.
+Regardless of which project-role is specified in the invitation, the user is added to the organization as an `ORG_MEMBER` by default.
+The organization-level role cannot be changed until the user accepts their invitation.
+====
+
+== Organization-level roles
+
+Roles at the organization level determine what administrative capabilities a user has across all projects within the organization.
+
+The following roles are available at the org level:
+
+* `ORG_OWNER`
+* `ORG_ADMIN`
+* `ORG_MEMBER`
+
+:check-mark: icon:check[]
+.Roles and organization capabilities
+[opts="header",cols="3,1,1,1"]
+|===
+| Capability
+| Owner
+| Admin
+| Member
+
+| List org
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| List org projects
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| Update org
+| {check-mark}
+| {check-mark}
+|
+
+| Invite users to projects
+| {check-mark}
+| {check-mark}
+|
+
+| List existing organization settings
+| {check-mark}
+| {check-mark}
+|
+
+| Add organization settings
+| {check-mark}
+| {check-mark}
+|
+
+| List organization settings on project-level
+| {check-mark}
+| {check-mark}
+|
+
+| Update organization settings on project-level
+| {check-mark}
+| {check-mark}
+|
+
+| Delete organization settings on project-level
+| {check-mark}
+| {check-mark}
+|
+
+| Invite non-owner users to org
+| {check-mark}
+| {check-mark}
+|
+
+| List users
+| {check-mark}
+| {check-mark}
+|
+
+| List roles
+| {check-mark}
+| {check-mark}
+|
+
+| List members of a project
+| {check-mark}
+| {check-mark} footnote:[An admin can only list members of projects the admin is also a member of.]
+|
+
+// | Add customer information for a trial within org
+// | {check-mark}
+// | {check-mark}
+// |
+
+// | List customer information for a trial within org
+// | {check-mark}
+// | {check-mark}
+// |
+
+// | List seamless login for org
+// | {check-mark}
+// | {check-mark}
+// |
+
+// | Update seamless login for org
+// | {check-mark}
+// | {check-mark}
+// |
+
+| Invite owners to org
+| {check-mark}
+|
+|
+
+| Add owner
+| {check-mark}
+|
+|
+
+| Delete owners
+| {check-mark}
+|
+|
+
+| Transfer projects to and from the org
+| {check-mark} footnote:[An owner needs permission for both the source and destination orgs.]
+|
+|
+|===
+
+[[roles]]
+== Project-level roles
+
+Users within a project can be assigned one of the following roles:
+
+* `PROJECT_VIEWER`
+* `METRICS_READER`
+* `PROJECT_MEMBER`
+* `PROJECT_ADMIN`
+
+[NOTE]
+====
+Each project must have at least one Project Admin and can have more than one.
+====
+
+=== Metrics reader role
+
+The `METRICS_READER` role can be assigned to any user or service account.
+It has the same permissions as the `PROJECT_VIEWER` role, but with some extra permissions specifically for reading metrics via an API endpoint.
+The role allows access to metrics for all instances in a project.
+Accessing metric endpoints requires xref:api/authentication.adoc[Aura API Credentials], and the `METRICS_READER` role enables the creation of these credentials.
+
+The `METRICS_READER` role can view and open instances in the console; however, logging in to the instance is required to interact with it, with access to Explore and Query defined by the instance’s RBAC settings.
+
+:check-mark: icon:check[]
+
+.Roles and console capabilities
+[opts="header",cols="3,1,1,1,1"]
+|===
+| Capability
+| Project Viewer
+| Metrics Reader
+| Member
+| Admin
+
+| View users and their roles
+| {check-mark}
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| View and open instances
+| {check-mark}
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| Access the Neo4j Customer Support Portal
+| {check-mark}
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| Perform all actions on instances footnote:[Actions include creating, deleting, pausing, resuming, and editing instances.]
+|
+|
+| {check-mark}
+| {check-mark}
+
+| Clone data to new and existing instances
+|
+|
+| {check-mark}
+| {check-mark}
+
+| Take on-demand snapshots
+|
+|
+| {check-mark}
+| {check-mark}
+
+| Restore from snapshots
+|
+|
+| {check-mark}
+| {check-mark}
+
+| Edit the project name
+|
+|
+|
+| {check-mark}
+
+| Invite new users to the project
+|
+|
+|
+| {check-mark}
+
+| Edit existing users' roles
+|
+|
+|
+| {check-mark}
+
+| Delete existing users from the project
+|
+|
+|
+| {check-mark}
+
+| View and edit billing information
+|
+|
+|
+| {check-mark}
+|===
+
+=== Predefined roles
+
+Users within a project can access instances seamlessly with their console role if xref:security/tool-auth.adoc[Tool authentication with Aura user] is enabled.
+
+When enabled, a user connects seamlessly with a predefined database role that matches their console role, i.e. their project-level role.
+Predefined roles are *immutable* and apply to all Free, Professional, and Business Critical instances.
+The predefined roles are assigned the following privileges on the instance level:
+
+.Predefined roles and database privileges
+[options="header", cols="3,^,^,^,^,^"]
+|===
+| Privilege
+| Viewer
+| Member
+3+| Admin
+
+|
+|
+|
+| Free
+| Professional
+| Business Critical
+
+| Access to database
+| {check-mark}
+| {check-mark}
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| Start and stop database
+|
+|
+|
+|
+| {check-mark}
+
+| List constraints
+| {check-mark}
+| {check-mark}
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| Create constraints
+|
+|
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| Delete constraints
+|
+|
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| List indexes
+| {check-mark}
+| {check-mark}
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| Create indexes
+|
+|
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| Delete indexes
+|
+|
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| Find nodes and relationships and read their properties
+| {check-mark}
+| {check-mark}
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| Load external data in queries
+|
+| {check-mark}
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| Write to the graph
+|
+| {check-mark}
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| Execute procedures and functions
+| {check-mark}
+| {check-mark}
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| Name management for node labels, relationship types, and property names.
+|
+| {check-mark}
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| List and end transactions for specified users on the database.
+|
+|
+| {check-mark}
+| {check-mark}
+| {check-mark}
+
+| List, create, delete, and modify users.
+|
+|
+|
+| {check-mark}
+| {check-mark}
+
+| Assign roles
+|
+|
+|
+| {check-mark}
+| {check-mark}
+
+| Remove roles
+|
+|
+|
+| {check-mark}
+| {check-mark}
+
+| Create roles
+|
+|
+|
+|
+| {check-mark}
+
+| Delete roles
+|
+|
+|
+|
+| {check-mark}
+
+| Rename roles
+|
+|
+|
+|
+| {check-mark}
+
+| List roles
+|
+|
+|
+| {check-mark}
+| {check-mark}
+
+| Privilege management footnote:[This includes to list, grant, and revoke privileges.]
+|
+|
+|
+|
+| {check-mark}
+|===
+
+The predefined roles take the following format: `console_<role>_<suffix>`, for example `console_member_73b84556` or `console_admin_pro_73b84556`.
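+
+As a quick, hedged check (a sketch, not a required step), you can run the following in Query or Cypher Shell after connecting; the `roles` column lists the database roles assigned to your user and should include the matching `console_...` role:
+
+[source, cypher]
+----
+SHOW CURRENT USER
+----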
+
+[TIP]
+====
+User management within the Aura console does not replace built-in roles or fine-grained RBAC at the database level.
+====
+
+=== Invite users
+
+* As an _Admin_, go to *Users* from within a project, and select *Invite users*.
+You need to provide an email address for the new user and decide which project-level role to assign them.
+* The invited user receives an email with a link to accept the invitation; their status is *Pending* until they accept.
+Note that on accepting the invitation, the user automatically gets the `ORG_MEMBER` role in the organization the project is part of.
+If needed, you can edit the organization-level role after the invitation is accepted.
+
+.Grant users access to a project
+image::inviteusers.png[]
+
+=== Edit users and roles
+
+From the *Users* page, as an _Admin_, you can delete users or edit their roles using the more menu (*...*) next to the user's name.
diff --git a/modules/ROOT/pages/visual-tour/index.adoc b/modules/ROOT/pages/visual-tour/index.adoc
new file mode 100644
index 000000000..a9108d9f0
--- /dev/null
+++ b/modules/ROOT/pages/visual-tour/index.adoc
@@ -0,0 +1,203 @@
+[[visual-overview]]
+= Visual tour of the console
+:description: This section introduces the console UI.
+:gds-sessions-page: {neo4j-docs-base-uri}/graph-data-science/current/aura-graph-analytics/
+
+== Structure
+
+The hierarchy of the console consists of organizations, projects, and instances:
+
+* *Organization:* The highest level, representing the overall team or company.
+* *Projects:* Folders that organize your instances.
+* *Instances:* Contain the databases.
+
+=== Switch organizations and projects
+
+The organization or project you're currently viewing is always displayed in the header of the console.
+
+To switch to a different organization or project, click the name in the header.
+This opens a dropdown menu where you can see all the organizations and projects you have access to and select the one you want to switch to.
+
+[.shadow]
+.Breadcrumb navigation / org and project switcher
+image::breadcrumbs.png[Breadcrumb navigation]
+
+== Organizations
+
+Organizations are a layer in the Aura account hierarchy that sits above projects.
+This allows you to organize and manage multiple projects under a single organization.
+Management at this level allows `Organization owners` and `Organization admins` to roll out settings to all projects and instances within an organization.
+
+To access organization-level management, click the organization name at the top of the page.
+
+.Organization navigation
+[.shadow]
+image::organizationnav.png[]
+
+From there, use the left-hand panel to access:
+
+* xref:visual-tour/index.adoc#_projects[Projects]: View projects within the organization.
+* Users: View users and their xref:user-management.adoc#_organization_level_roles[organization-level roles].
+* Security: Configure organization-level features such as SSO, IP filtering, and tool authentication with Aura user.
+* xref:visual-tour/index.adoc#org-settings[Settings]: Where you can manage an organization's general settings.
+
+[[org-settings]]
+=== Organization settings
+
+You can edit the name of the organization, find your organization ID, and enable Generative AI and Aura Graph Analytics.
+
+.Organization settings
+[.shadow]
+image::organizationsettings.png[]
+
+=== Generative AI assistance
+
+When enabled, Generative AI assistance is available in a number of places, including the xref:query/visual-tour.adoc#copilot[Query copilot] and xref:explore/explore-visual-tour/search-bar.adoc#copilot[Explore copilot].
+These features are always identified with a ✨ and your use is subject to the link:{neo4j-docs-base-uri}/reference/license/#_genai_outputs[GenAI outputs] disclaimer.
+
+// TO-DO: When section exists for Import GenAI feature, add link to it.
+
+[[graph-analytics-org-settings]]
+=== Aura Graph Analytics settings
+
+From this page you can enable or disable link:{gds-sessions-page}[Aura Graph Analytics], as well as configure the following settings:
+
+* The maximum available memory for each session
+* The maximum number of concurrent sessions within the organization
+
+== Data Services
+
+This section contains the data services you have access to.
+To access your data, navigate to a project and xref:getting-started/connect-instance.adoc[connect to an instance].
+
+=== Instances
+
+An instance in Aura is an environment of the Neo4j database, managed and run in the cloud.
+A project can contain one or more instances.
+In the instance section, you can view and select which instance you want to connect to.
+
+By expanding an instance card, you can explore various options, such as viewing metrics, taking snapshots, or pausing the instance.
+You can also connect the instance to an application.
+
+=== Import
+
+If your instance doesn't contain any data, the Import service allows you to import CSV files to your database.
+This service lets you create your data model and map it to your files.
+See xref:import/introduction.adoc[What is Import?] for more information about this service.
+
+[[graph-analytics-page]]
+=== Graph Analytics
+
+This page lists the link:{gds-sessions-page}[Aura Graph Analytics] sessions running within a project (if any).
+
+The list shows session details and a **Delete** button for each session.
+Session details include:
+
+* The Neo4j database instance containing the data projected in the session
+* The memory allocated to the session (configurable in the <<graph-analytics-org-settings,Aura Graph Analytics settings>> page)
+* The user who created the session
+* The time remaining until the session's configured time limit
+
+=== Data APIs
+
+Create a GraphQL API to use the power of GraphQL with AuraDB.
+For information on GraphQL Data APIs, see the link:https://neo4j.com/docs/graphql/7/aura-graphql/[GraphQL docs].
+
+== Tools
+
+The tools allow you to interact with your data and therefore require an active connection to an instance.
+Once connected to an instance, you can use both the Explore and the Query tools.
+
+[.shadow]
+.Tools on the left side panel
+image::leftsidepanel.png[]
+
+=== Explore
+
+Explore helps you visualize and interact with datasets without using any code.
+
+* *Visual Exploration:* See your data as a graph, with nodes and relationships between them, making it easier to understand and analyze complex data connections.
+
+* *Data Insights:* By interacting with the graph, uncover patterns, trends, and insights that aren't easily visible in traditional tabular formats.
+Explore is designed to make working with graph data more intuitive and insightful by providing a visual and interactive way to analyze and manage your data.
+
+See xref:explore/introduction.adoc[What is Explore?] for more information.
+
+=== Dashboards
+
+Neo4j dashboards let you compose different visualizations such as tables and graphs in tabbed pages to have relevant data at a glance.
+
+=== Query
+
+Query is a helpful tool to interact with your data using Cypher, the graph query language.
+
+* *Cypher Editor:* Where you write Cypher queries and get instant feedback on syntax errors and other helpful advice.
+* *Result frames:* Where query results are displayed as a graph, table, or RAW.
+* *Query History:* A feature that shows previously run queries.
+
+See xref:query/introduction.adoc[What is Query?] for more information.
+
+== Operations
+
+=== Metrics
+
+Metrics help you monitor and analyze your database's performance and usage.
+Some metrics are available directly on the instance card, and you can find the full range in **Metrics**.
+See xref:metrics/view-metrics.adoc[Metrics] for more information.
+
+=== Logs
+
+Track and review system activities and events.
+Logs provide insights into database operations, errors, and other critical events, helping you monitor performance and troubleshoot issues.
+
+Review queries with the xref:logging/query-log-analyzer.adoc[Query Log Analyzer] and view security events using the xref:logging/security-log-analyzer.adoc[Security Log Analyzer].
+
+== Projects
+
+An organization can contain one or more projects.
+A project is a grouping for one or more instances.
+Access, permissions, and billing are managed at the project level.
+
+There's a summary of each project, including the number of instances and members associated with it.
+Opening a project takes you inside that project, where you can view existing instances and create new ones as needed.
+
+[.shadow]
+.Project view
+image::project.png[]
+
+=== Users
+
+Users are associated with a project and can have various roles and permissions.
+New users can be invited from the *Users* page.
+From there, you can manage accounts, permissions, and control access levels to ensure secure and appropriate instance use.
+Individuals can have access to a project for administrative work, or to the instances for data work — you can also assign more specific permissions.
+See xref:user-management.adoc[User management] for more information.
+
+=== Billing
+
+View and export real-time credit consumption reports by instance or session, add payment info, and track usage with filtering options.
+See xref:billing.adoc[Billing] for more information.
+
+// === Roles
+
+// image::roles1.png[]
+// image::roles2.png[]
+
+// Roles define the permissions and responsibilities of users within your console.
+// Roles manage what actions users can perform and what data they can access, ensuring proper control and organization.
+
+=== Project settings
+
+The project settings allow you to change your project name.
+If you need to reference or share your project, you can copy your project ID.
+
+.Project settings
+[.shadow]
+image::projectsettings.png[]
+
+// Configure options to customize and optimize your console.
+// This includes adjusting performance settings, configuring alerts, and managing system preferences to suit your needs.
+
+== Learning
+
+Access educational tools and learning resources in one place, including interactive guides, sample datasets, directions to documentation, Developer center, and Graph Academy.
diff --git a/modules/ROOT/partials/EAP.adoc b/modules/ROOT/partials/EAP.adoc
new file mode 100644
index 000000000..afc2e674d
--- /dev/null
+++ b/modules/ROOT/partials/EAP.adoc
@@ -0,0 +1,4 @@
+[NOTE]
+====
+This feature is currently in Early Access and not available by default.
+====
\ No newline at end of file
diff --git a/modules/ROOT/partials/apoc-procedures.adoc b/modules/ROOT/partials/apoc-procedures.adoc
index 873bc2925..45473edd8 100644
--- a/modules/ROOT/partials/apoc-procedures.adoc
+++ b/modules/ROOT/partials/apoc-procedures.adoc
@@ -3,48 +3,48 @@
== apoc
-[.procedures, opts=header, cols='5a,1a', separator=¦]
+[.procedures, opts=header, cols='5a,2a', separator=¦]
|===
-¦ Qualified Name ¦ Type
-¦ link:https://neo4j.com/docs/apoc/5/overview/apoc/apoc.case[apoc.case icon:book[] ^] +
+¦ Qualified Name ¦ Type and language details
+¦ link:https://neo4j.com/docs/apoc/current/overview/apoc/apoc.case[apoc.case icon:book[] ^] +
For each pair of conditional and read-only queries in the given `LIST`, this procedure will run the first query for which the conditional is evaluated to true. If none of the conditionals are true, the `ELSE` query will run instead.
-¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc/apoc.help[apoc.help icon:book[] ^] +
+¦ label:procedure[] label:deprecated[Deprecated in Cypher 25]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc/apoc.help[apoc.help icon:book[] ^] +
Returns descriptions of the available APOC procedures and functions. If a keyword is provided, it will return only those procedures and functions that have the keyword in their name.
-¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc/apoc.version[apoc.version icon:book[] ^] +
+¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc/apoc.version[apoc.version icon:book[] ^] +
Returns the APOC version currently installed.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc/apoc.when[apoc.when icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc/apoc.when[apoc.when icon:book[] ^] +
This procedure will run the read-only `ifQuery` if the conditional has evaluated to true, otherwise the `elseQuery` will run.
-¦ label:procedure[]
+¦ label:procedure[] label:deprecated[Deprecated in Cypher 25]
|===
== apoc.agg
-[.procedures, opts=header, cols='5a,1a', separator=¦]
+[.procedures, opts=header, cols='5a,2a', separator=¦]
|===
-¦ Qualified Name ¦ Type
-¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.agg/apoc.agg.first[apoc.agg.first icon:book[] ^] +
+¦ Qualified Name ¦ Type and language details
+¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.agg/apoc.agg.first[apoc.agg.first icon:book[] ^] +
Returns the first value from the given collection.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.agg/apoc.agg.graph[apoc.agg.graph icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.agg/apoc.agg.graph[apoc.agg.graph icon:book[] ^] +
Returns all distinct `NODE` and `RELATIONSHIP` values collected into a `MAP` with the keys `nodes` and `relationships`.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.agg/apoc.agg.last[apoc.agg.last icon:book[] ^] +
+¦ label:function[] label:deprecated[Deprecated in Cypher 25]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.agg/apoc.agg.last[apoc.agg.last icon:book[] ^] +
Returns the last value from the given collection.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.agg/apoc.agg.maxItems[apoc.agg.maxItems icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.agg/apoc.agg.maxItems[apoc.agg.maxItems icon:book[] ^] +
Returns a `MAP` `{items: LIST, value: ANY}` where the `value` key is the maximum value present, and `items` represent all items with the same value. The size of the list of items can be limited to a given max size.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.agg/apoc.agg.median[apoc.agg.median icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.agg/apoc.agg.median[apoc.agg.median icon:book[] ^] +
Returns the mathematical median for all non-null `INTEGER` and `FLOAT` values.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.agg/apoc.agg.minItems[apoc.agg.minItems icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.agg/apoc.agg.minItems[apoc.agg.minItems icon:book[] ^] +
Returns a `MAP` `{items: LIST, value: ANY}` where the `value` key is the minimum value present, and `items` represent all items with the same value. The size of the list of items can be limited to a given max size.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.agg/apoc.agg.nth[apoc.agg.nth icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.agg/apoc.agg.nth[apoc.agg.nth icon:book[] ^] +
Returns the nth value in the given collection (to fetch the last item of an unknown length collection, -1 can be used).
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.agg/apoc.agg.percentiles[apoc.agg.percentiles icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.agg/apoc.agg.percentiles[apoc.agg.percentiles icon:book[] ^] +
Returns the given percentiles over the range of numerical values in the given collection.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.agg/apoc.agg.product[apoc.agg.product icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.agg/apoc.agg.product[apoc.agg.product icon:book[] ^] +
Returns the product of all non-null `INTEGER` and `FLOAT` values in the collection.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.agg/apoc.agg.slice[apoc.agg.slice icon:book[] ^] +
+¦ label:function[] label:deprecated[Deprecated in Cypher 25]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.agg/apoc.agg.slice[apoc.agg.slice icon:book[] ^] +
Returns a subset of non-null values from the given collection (the collection is considered to be zero-indexed).
To specify the range from start until the end of the collection, the length should be set to -1.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.agg/apoc.agg.statistics[apoc.agg.statistics icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.agg/apoc.agg.statistics[apoc.agg.statistics icon:book[] ^] +
Returns the following statistics on the `INTEGER` and `FLOAT` values in the given collection: percentiles, min, minNonZero, max, total, mean, stdev.
¦ label:function[]
|===
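+
+// Illustrative sketch only (not part of the generated reference); the `Person` label and `age` property are placeholder assumptions.
+For example, several of these aggregation functions can be combined in a single `RETURN` clause:
+
+[source, cypher]
+----
+MATCH (p:Person)
+RETURN apoc.agg.statistics(p.age)              AS ageStats,
+       apoc.agg.percentiles(p.age, [0.5, 0.9]) AS agePercentiles,
+       apoc.agg.product(p.age)                 AS ageProduct;
+----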
@@ -52,20 +52,20 @@ Returns the following statistics on the `INTEGER` and `FLOAT` values in the give
== apoc.algo
-[.procedures, opts=header, cols='5a,1a', separator=¦]
+[.procedures, opts=header, cols='5a,2a', separator=¦]
|===
-¦ Qualified Name ¦ Type
-¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.algo/apoc.algo.aStar[apoc.algo.aStar icon:book[] ^] +
+¦ Qualified Name ¦ Type and language details
+¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.algo/apoc.algo.aStar[apoc.algo.aStar icon:book[] ^] +
Runs the A* search algorithm to find the optimal path between two `NODE` values, using the given `RELATIONSHIP` property name for the cost function.
-¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.algo/apoc.algo.aStarConfig[apoc.algo.aStarConfig icon:book[] ^] +
+¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.algo/apoc.algo.aStarConfig[apoc.algo.aStarConfig icon:book[] ^] +
Runs the A* search algorithm to find the optimal path between two `NODE` values, using the given `RELATIONSHIP` property name for the cost function.
This procedure looks for weight, latitude and longitude properties in the config.
-¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.algo/apoc.algo.allSimplePaths[apoc.algo.allSimplePaths icon:book[] ^] +
+¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.algo/apoc.algo.allSimplePaths[apoc.algo.allSimplePaths icon:book[] ^] +
Runs a search algorithm to find all of the simple paths between the given `RELATIONSHIP` values, up to a max depth described by `maxNodes`.
The returned paths will not contain loops.
-¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.algo/apoc.algo.cover[apoc.algo.cover icon:book[] ^] +
+¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.algo/apoc.algo.cover[apoc.algo.cover icon:book[] ^] +
Returns all `RELATIONSHIP` values connecting the given set of `NODE` values.
-¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.algo/apoc.algo.dijkstra[apoc.algo.dijkstra icon:book[] ^] +
+¦ label:procedure[] label:deprecated[Deprecated in Cypher 25]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.algo/apoc.algo.dijkstra[apoc.algo.dijkstra icon:book[] ^] +
Runs Dijkstra's algorithm using the given `RELATIONSHIP` property as the cost function.
¦ label:procedure[]
|===
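+
+// Illustrative sketch only (not part of the generated reference); the `Station` nodes, `CONNECTS_TO` relationship type, and `distance` property are placeholder assumptions.
+For example, `apoc.algo.dijkstra` finds a cheapest path using a relationship property as the cost function:
+
+[source, cypher]
+----
+MATCH (a:Station {name: 'A'}), (b:Station {name: 'B'})
+CALL apoc.algo.dijkstra(a, b, 'CONNECTS_TO', 'distance')
+YIELD path, weight
+RETURN path, weight;
+----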
@@ -73,13 +73,13 @@ Runs Dijkstra's algorithm using the given `RELATIONSHIP` property as the cost fu
== apoc.any
-[.procedures, opts=header, cols='5a,1a', separator=¦]
+[.procedures, opts=header, cols='5a,2a', separator=¦]
|===
-¦ Qualified Name ¦ Type
-¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.any/apoc.any.properties[apoc.any.properties icon:book[] ^] +
+¦ Qualified Name ¦ Type and language details
+¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.any/apoc.any.properties[apoc.any.properties icon:book[] ^] +
Returns all properties of the given object.
The object can be a virtual `NODE`, a real `NODE`, a virtual `RELATIONSHIP`, a real `RELATIONSHIP`, or a `MAP`.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.any/apoc.any.property[apoc.any.property icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.any/apoc.any.property[apoc.any.property icon:book[] ^] +
Returns the property for the given key from an object.
The object can be a virtual `NODE`, a real `NODE`, a virtual `RELATIONSHIP`, a real `RELATIONSHIP`, or a `MAP`.
¦ label:function[]
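+
+// Illustrative sketch only (not part of the generated reference); the `Person` label and `name` key are placeholder assumptions.
+For example, both functions accept real nodes as well as virtual ones:
+
+[source, cypher]
+----
+MATCH (p:Person)
+RETURN apoc.any.properties(p)       AS allProps,
+       apoc.any.property(p, 'name') AS name
+LIMIT 1;
+----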
@@ -88,25 +88,25 @@ The object can be a virtual `NODE`, a real `NODE`, a virtual `RELATIONSHIP`, a r
== apoc.atomic
-[.procedures, opts=header, cols='5a,1a', separator=¦]
+[.procedures, opts=header, cols='5a,2a', separator=¦]
|===
-¦ Qualified Name ¦ Type
-¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.atomic/apoc.atomic.add[apoc.atomic.add icon:book[] ^] +
+¦ Qualified Name ¦ Type and language details
+¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.atomic/apoc.atomic.add[apoc.atomic.add icon:book[] ^] +
Sets the given property to the sum of itself and the given `INTEGER` or `FLOAT` value.
The procedure then sets the property to the returned sum.
-¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.atomic/apoc.atomic.concat[apoc.atomic.concat icon:book[] ^] +
+¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.atomic/apoc.atomic.concat[apoc.atomic.concat icon:book[] ^] +
Sets the given property to the concatenation of itself and the `STRING` value.
The procedure then sets the property to the returned `STRING`.
-¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.atomic/apoc.atomic.insert[apoc.atomic.insert icon:book[] ^] +
+¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.atomic/apoc.atomic.insert[apoc.atomic.insert icon:book[] ^] +
Inserts a value at position into the `LIST` value of a property.
The procedure then sets the result back on the property.
-¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.atomic/apoc.atomic.remove[apoc.atomic.remove icon:book[] ^] +
+¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.atomic/apoc.atomic.remove[apoc.atomic.remove icon:book[] ^] +
Removes the element at position from the `LIST` value of a property.
The procedure then sets the property to the resulting `LIST` value.
-¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.atomic/apoc.atomic.subtract[apoc.atomic.subtract icon:book[] ^] +
+¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.atomic/apoc.atomic.subtract[apoc.atomic.subtract icon:book[] ^] +
Sets the property of a value to itself minus the given `INTEGER` or `FLOAT` value.
The procedure then sets the property to the returned sum.
-¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.atomic/apoc.atomic.update[apoc.atomic.update icon:book[] ^] +
+¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.atomic/apoc.atomic.update[apoc.atomic.update icon:book[] ^] +
Updates the value of a property with a Cypher operation.
¦ label:procedure[]
|===
@@ -114,10 +114,10 @@ Updates the value of a property with a Cypher operation.
== apoc.bitwise
-[.procedures, opts=header, cols='5a,1a', separator=¦]
+[.procedures, opts=header, cols='5a,2a', separator=¦]
|===
-¦ Qualified Name ¦ Type
-¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.bitwise/apoc.bitwise.op[apoc.bitwise.op icon:book[] ^] +
+¦ Qualified Name ¦ Type and language details
+¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.bitwise/apoc.bitwise.op[apoc.bitwise.op icon:book[] ^] +
Returns the result of the bitwise operation.
¦ label:function[]
|===
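+
+// Illustrative sketch only (not part of the generated reference); the operands are arbitrary.
+For example, `apoc.bitwise.op` applies a bitwise operator to two integers:
+
+[source, cypher]
+----
+RETURN apoc.bitwise.op(60, '&', 13) AS result; // returns 12
+----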
@@ -125,222 +125,228 @@ Returns the result of the bitwise operation
== apoc.coll
-[.procedures, opts=header, cols='5a,1a', separator=¦]
+[.procedures, opts=header, cols='5a,2a', separator=¦]
|===
-¦ Qualified Name ¦ Type
-¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.avg[apoc.coll.avg icon:book[] ^] +
+¦ Qualified Name ¦ Type and language details
+¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.avg[apoc.coll.avg icon:book[] ^] +
Returns the average of the numbers in the `LIST`.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.combinations[apoc.coll.combinations icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.combinations[apoc.coll.combinations icon:book[] ^] +
Returns a collection of all combinations of `LIST` elements between the selection size `minSelect` and `maxSelect` (default: `minSelect`).
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.contains[apoc.coll.contains icon:book[] ^] +
-Returns whether or not the given value exists in the given collection (using a HashSet).
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.containsAll[apoc.coll.containsAll icon:book[] ^] +
-Returns whether or not all of the given values exist in the given collection (using a HashSet).
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.containsAllSorted[apoc.coll.containsAllSorted icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.contains[apoc.coll.contains icon:book[] ^] +
+Returns whether or not the given value exists in the given collection.
+¦ label:function[] label:deprecated[Deprecated in Cypher 25]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.containsAll[apoc.coll.containsAll icon:book[] ^] +
+Returns whether or not all of the given values exist in the given collection.
+¦ label:function[] label:deprecated[Deprecated in Cypher 25]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.containsAllSorted[apoc.coll.containsAllSorted icon:book[] ^] +
Returns whether or not all of the given values in the second `LIST` exist in an already sorted collection (using a binary search).
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.containsDuplicates[apoc.coll.containsDuplicates icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.containsDuplicates[apoc.coll.containsDuplicates icon:book[] ^] +
Returns true if a collection contains duplicate elements.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.containsSorted[apoc.coll.containsSorted icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.containsSorted[apoc.coll.containsSorted icon:book[] ^] +
Returns whether or not the given value exists in an already sorted collection (using a binary search).
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.different[apoc.coll.different icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.different[apoc.coll.different icon:book[] ^] +
Returns true if all the values in the given `LIST` are unique.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.disjunction[apoc.coll.disjunction icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.disjunction[apoc.coll.disjunction icon:book[] ^] +
Returns the disjunct set from two `LIST` values.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.dropDuplicateNeighbors[apoc.coll.dropDuplicateNeighbors icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.dropDuplicateNeighbors[apoc.coll.dropDuplicateNeighbors icon:book[] ^] +
Removes duplicate consecutive objects in the `LIST`.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.duplicates[apoc.coll.duplicates icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.duplicates[apoc.coll.duplicates icon:book[] ^] +
Returns a `LIST` of duplicate items in the collection.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.duplicatesWithCount[apoc.coll.duplicatesWithCount icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.duplicatesWithCount[apoc.coll.duplicatesWithCount icon:book[] ^] +
Returns a `LIST` of duplicate items in the collection and their count, keyed by `item` and `count`.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.elements[apoc.coll.elements icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.elements[apoc.coll.elements icon:book[] ^] +
Deconstructs a `LIST` into identifiers indicating their specific type.
-¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.fill[apoc.coll.fill icon:book[] ^] +
+¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.fill[apoc.coll.fill icon:book[] ^] +
Returns a `LIST` with the given count of items.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.flatten[apoc.coll.flatten icon:book[] ^] +
+¦ label:function[] label:deprecated[Deprecated in Cypher 25]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.flatten[apoc.coll.flatten icon:book[] ^] +
Flattens the given `LIST` (to flatten nested `LIST` values, set recursive to true).
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.frequencies[apoc.coll.frequencies icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.frequencies[apoc.coll.frequencies icon:book[] ^] +
Returns a `LIST` of frequencies of the items in the collection, keyed by `item` and `count`.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.frequenciesAsMap[apoc.coll.frequenciesAsMap icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.frequenciesAsMap[apoc.coll.frequenciesAsMap icon:book[] ^] +
Returns a `MAP` of frequencies of the items in the collection, keyed by `item` and `count`.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.indexOf[apoc.coll.indexOf icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.indexOf[apoc.coll.indexOf icon:book[] ^] +
Returns the index for the first occurrence of the specified value in the `LIST`.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.insert[apoc.coll.insert icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.insert[apoc.coll.insert icon:book[] ^] +
Inserts a value into the specified index in the `LIST`.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.insertAll[apoc.coll.insertAll icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.insertAll[apoc.coll.insertAll icon:book[] ^] +
Inserts all of the values into the `LIST`, starting at the specified index.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.intersection[apoc.coll.intersection icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.intersection[apoc.coll.intersection icon:book[] ^] +
Returns the distinct intersection of two `LIST` values.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.isEqualCollection[apoc.coll.isEqualCollection icon:book[] ^] +
-Returns true if the two collections contain the same elements with the same cardinality in any order (using a HashMap).
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.max[apoc.coll.max icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.isEqualCollection[apoc.coll.isEqualCollection icon:book[] ^] +
+Returns true if the two collections contain the same elements with the same cardinality in any order.
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.max[apoc.coll.max icon:book[] ^] +
Returns the maximum of all values in the given `LIST`.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.min[apoc.coll.min icon:book[] ^] +
+¦ label:function[] label:deprecated[Deprecated in Cypher 25]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.min[apoc.coll.min icon:book[] ^] +
Returns the minimum of all values in the given `LIST`.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.occurrences[apoc.coll.occurrences icon:book[] ^] +
+¦ label:function[] label:deprecated[Deprecated in Cypher 25]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.occurrences[apoc.coll.occurrences icon:book[] ^] +
Returns the count of the given item in the collection.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.pairs[apoc.coll.pairs icon:book[] ^] +
+¦ label:function[] label:deprecated[Deprecated in Cypher 25]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.pairs[apoc.coll.pairs icon:book[] ^] +
Returns a `LIST` of adjacent elements in the `LIST` ([1,2],[2,3],[3,null]).
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.pairsMin[apoc.coll.pairsMin icon:book[] ^] +
+¦ label:function[] label:deprecated[Deprecated in Cypher 25]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.pairsMin[apoc.coll.pairsMin icon:book[] ^] +
Returns `LIST` values of adjacent elements in the `LIST` ([1,2],[2,3]), skipping the final element.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.partition[apoc.coll.partition icon:book[] ^] +
+¦ label:function[] label:deprecated[Deprecated in Cypher 25]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.partition[apoc.coll.partition icon:book[] ^] +
Partitions the original `LIST` into a new `LIST` of the given batch size.
The final `LIST` may be smaller than the given batch size.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.partition[apoc.coll.partition icon:book[] ^] +
+¦ label:function[] label:deprecated[Deprecated in Cypher 25]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.partition[apoc.coll.partition icon:book[] ^] +
Partitions the original `LIST` into a new `LIST` of the given batch size.
The final `LIST` may be smaller than the given batch size.
-¦ label:procedure[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.randomItem[apoc.coll.randomItem icon:book[] ^] +
+¦ label:procedure[] label:deprecated[Deprecated in Cypher 25]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.randomItem[apoc.coll.randomItem icon:book[] ^] +
Returns a random item from the `LIST<ANY>`, or null on `LIST<NOTHING>` or `LIST<NULL>`.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.randomItems[apoc.coll.randomItems icon:book[] ^] +
+¦ label:function[] label:deprecated[Deprecated in Cypher 25]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.randomItems[apoc.coll.randomItems icon:book[] ^] +
Returns a `LIST` of `itemCount` random items from the original `LIST` (optionally allowing elements in the original `LIST` to be selected more than once).
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.remove[apoc.coll.remove icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.remove[apoc.coll.remove icon:book[] ^] +
Removes a range of values from the `LIST`, beginning at position index for the given length of values.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.removeAll[apoc.coll.removeAll icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.removeAll[apoc.coll.removeAll icon:book[] ^] +
Returns the first `LIST` with all elements also present in the second `LIST` removed.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.runningTotal[apoc.coll.runningTotal icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.runningTotal[apoc.coll.runningTotal icon:book[] ^] +
Returns an accumulative `LIST`.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.set[apoc.coll.set icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.set[apoc.coll.set icon:book[] ^] +
Sets the element at the given index to the new value.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.shuffle[apoc.coll.shuffle icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.shuffle[apoc.coll.shuffle icon:book[] ^] +
Returns the `LIST` shuffled.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.sort[apoc.coll.sort icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.sort[apoc.coll.sort icon:book[] ^] +
Sorts the given `LIST` into ascending order.
-¦ label:function[]¦ link:https://neo4j.com/docs/apoc/5/overview/apoc.coll/apoc.coll.sortMaps[apoc.coll.sortMaps icon:book[] ^] +
+¦ label:function[]¦ link:https://neo4j.com/docs/apoc/current/overview/apoc.coll/apoc.coll.sortMaps[apoc.coll.sortMaps icon:book[] ^] +
Sorts the given `LIST