diff --git a/modules/install/pages/upgrade.adoc b/modules/install/pages/upgrade.adoc index d790f326db..e55f00ab5f 100644 --- a/modules/install/pages/upgrade.adoc +++ b/modules/install/pages/upgrade.adoc @@ -23,6 +23,31 @@ See xref:install:upgrade-procedure-selection.adoc#swap-rebalance[Swap Rebalance] Before upgrading, consider the following version compatibility concerns. +[#8-0-storage-backend] +=== New Default Storage Backend in Couchbase Server Version 8.0 + +After you have fully upgraded a cluster to Couchbase Server 8.0.x and later, the default storage backend for buckets is Magma with 128 vBuckets. +Previous versions of Couchbase Server used Couchstore with 1024 vBuckets as the default storage backend. + +This new default results in two behavior changes from previous versions: + +* If you create a bucket and do not specify the storage backend, your bucket will use the Magma storage backend instead of the Couchstore backend. +* If you specify Magma as the storage backend but do not set the new `numVBuckets` parameter, the bucket will have 128 vBuckets instead of the prior default of 1024 vBuckets. +Magma buckets with 128 vBuckets is a new feature in Couchbase Server 8.0 and later. + +These behavior changes could cause issues if you rely on the prior behavior, especially if you use deployment scripts. +If you have deployment scripts that create buckets, review them to determine if you need to make changes. + +For example, suppose your deployment script does not specify the storage backend when it creates a bucket that you intend to use with the xref:views/views-mapreduce-intro.adoc[] feature. +On versions prior to Couchbase Server 8.0, your script created a Couchstore bucket with 1024 vBuckets. +In version 8.0, due to the change in the default backend, your script creates a bucket with the Magma storage backend with 128 vBuckets. +Attempting to use MapReduce Views with this bucket results in errors, because Magma buckets do not support this feature.
+ +Another concern is that versions of Couchbase Server earlier than 8.0 do not support XDCR replication between buckets with different numbers of vBuckets. +Therefore, you cannot replicate between a bucket you create with the new default backend setting and buckets on an earlier server version. +To be able to replicate with a bucket on an earlier version of Couchbase Server, explicitly set the new bucket's storage backend to Couchstore or to Magma with 1024 vBuckets during creation. + +For more information about storage backends, see xref:buckets-memory-and-storage/storage-engines.adoc#storage-engines[]. // So long as upgrading from 6.x is supported, this notice will need to stay in some form in each new release. === Upgrading to Version 7.x With Earlier Versions of .NET SDK @@ -43,7 +68,7 @@ You cannot upgrade directly from a version of Couchbase Server earlier than 7.1 For example, you can directly upgrade from version 6.6 to version 7.2.3. You cannot directly upgrade from version 6.6 to version 7.2.4. A compatibility issue with the Erlang version used by these earlier server versions prevents a direct upgrade to later versions of the server. -To upgrade from server versions 6.5, 6.6, or 7.0 to version 7.6 or later, first upgrade to version between 7.1 and 7.2.3. +To upgrade from server versions 6.5, 6.6, or 7.0 to version 7.6 or later, first upgrade to a version between 7.1 and 7.2.3. Then upgrade to version 7.6 or later.
[#understanding-upgrade] diff --git a/modules/introduction/partials/new-features-80.adoc b/modules/introduction/partials/new-features-80.adoc index e609f68246..577eef77b7 100644 --- a/modules/introduction/partials/new-features-80.adoc +++ b/modules/introduction/partials/new-features-80.adoc @@ -62,6 +62,45 @@ curl --get -u \ [#section-new-feature-data-service] === Data Service +[#mb-62777] +https://jira.issues.couchbase.com/browse/MB-62777[MB-62777] Magma with 128 vBuckets is now the default storage engine:: +In earlier versions of Couchbase Server, if you did not specify which storage engine to use when creating a new bucket, it would use Couchstore. +Once you upgrade your entire cluster to Couchbase Server Enterprise 8.0, the default storage engine for new buckets is Magma with 128 vBuckets. + ++ +Magma buckets with 128 vBuckets is a new feature that provides the advantages of the Magma storage engine while reducing the minimum memory quota. +Before Couchbase Server 8.0, Magma only supported using 1024 vBuckets and had a minimum memory quota of 1{nbsp}GiB per node. +Buckets that use Magma with 128 vBuckets have a minimum memory quota of 100{nbsp}MiB per node, which is the same as Couchstore. + ++ +NOTE: These changes do not affect Couchbase Server Community Edition because it does not support the Magma storage engine. + ++ +This change in the default storage engine does not affect existing buckets. +You can still create buckets that use the Couchstore storage engine or the Magma storage engine with 1024 vBuckets by explicitly specifying them when you create the bucket. + ++ +See xref:learn:buckets-memory-and-storage/storage-engines.adoc[] for more information about storage engines. + ++ +.Default behavior change +[IMPORTANT] +==== +The new default storage backend for buckets is a behavior change that may cause issues if you rely on the previous behavior.
+ +Before upgrading to Couchbase Server 8.0, consider the following: + +* If you have deployment scripts that create buckets without specifying the storage engine, those scripts create Magma buckets with 128 vBuckets instead of Couchstore buckets after the upgrade. +This may affect your deployment if you depend on buckets using the Couchstore storage engine. +* You cannot use XDCR to replicate Magma buckets using 128 vBuckets with pre-8.0 clusters. +XDCR in pre-8.0 clusters only supports replication between buckets that contain the same number of vBuckets. +Pre-8.0 versions of Couchbase Server only support Couchstore and Magma buckets with 1024 vBuckets (or 64 vBuckets on macOS). + ++ +If you're manually deploying a new cluster, set the number of vBuckets to 1024 for any Magma bucket you need to replicate with a pre-8.0 cluster. +If you have a deployment script that creates Magma buckets for replication with pre-8.0 clusters, have it set the number of vBuckets to 1024. +To set this value, use the `numVBuckets` parameter in xref:rest-api:rest-bucket-create.adoc#numvbuckets[the REST API] or `--num-vbuckets` in the xref:cli:cbcli/couchbase-cli-bucket-create.adoc[`couchbase-cli` command line tool]. +==== https://jira.issues.couchbase.com/browse/MB-9418[MB-9418]:: The previous warmup behavior was to load values and not complete warmup until the Bucket reaches one of: diff --git a/modules/learn/pages/buckets-memory-and-storage/storage-engines.adoc b/modules/learn/pages/buckets-memory-and-storage/storage-engines.adoc index 83e6dbd97f..f4f98bdc41 100644 --- a/modules/learn/pages/buckets-memory-and-storage/storage-engines.adoc +++ b/modules/learn/pages/buckets-memory-and-storage/storage-engines.adoc @@ -1,95 +1,119 @@ = Storage Engines -:description: pass:q[Couchbase supports two different backend storage mechanisms: Couchstore and Magma.] +:description: pass:q[Couchbase supports two different backend storage engines: Couchstore and Magma.]
[abstract] {description} -It is important to understand which backend storage is best suited to your requirements. +These storage engines organize the data both on disk and in memory. +This page explains how to choose a backend storage to suit your needs. -== Couchstore - -Couchstore is the default bucket storage engine. -It is optimized for high performance with large data sets, -while using fewer system resources (the minimum bucket size for the Couchstore backend is 100{nbsp}MB). -If you have a small data set that can fit in memory, then you should consider using Couchstore. [#storage-engine-magma] == Magma [.edition]#{enterprise}# -Magma is designed for high performance with very large datasets that do not fit in memory. -It is ideal for use cases that rely primarily on disk access. -The performance of disk access will be as good as the underlying disk sub-systems -- -for example, using NVMe SSDs will give higher performance. +Magma is designed for high performance with large datasets that do not fit in memory. +It's ideal for use cases that rely primarily on data stored on disk. +The performance of disk access is as good as the underlying disk sub-systems. +For example, using NVMe SSDs gives better performance than spinning drives. -In order to get maximum performance from Magma for disk-oriented workloads, it is recommended to set the Writer Threads to `Disk i/o optimized`. This setting will ensure there are enough threads to sustain high write rates. +When you create a bucket using the Magma storage engine, you choose the number of xref:learn:buckets-memory-and-storage/vbuckets.adoc[vBuckets] a bucket uses. +Magma supports two vBucket configurations: 128 or 1024. +This choice affects the minimum memory quota for the bucket. +The 128 vBucket configuration is the default for new buckets in Couchbase Server Enterprise Edition 8.0 and later. +It has a minimum memory quota of 100{nbsp}MiB per node.
-To learn more about Writer Thread settings, see xref:manage:manage-settings/general-settings.adoc#data-settings[Data Settings] +NOTE: Magma buckets with 128 vBuckets are only available in clusters that have fully migrated to Couchbase Server 8.0 or later. -Magma can work with very low amounts of memory for large datasets: a minimum memory-to-data ratio of 1% is required. -For example, if a node is holding 5{nbsp}TB of data, Magma can be used with only 64{nbsp}GB RAM. +Magma using 1024 vBuckets has a minimum memory quota of 1{nbsp}GiB per node. +As your dataset grows, Magma has a minimum memory-to-data ratio of 1%. +For example, a node with 5{nbsp}TiB data in a Magma bucket must allocate at least 51{nbsp}GiB of RAM for the bucket. -.Magma Supported Services -|=== -|Couchbase Version |Services Supported +=== Magma Writer Thread Settings -|*Version 7.1* -|Query, Index, XDCR, Backup +Couchbase Server lets you set the number of threads for reading and writing data to the disk. +Under some circumstances, you may want to set the Writer Threads to `Disk i/o optimized` for Magma buckets. +This setting makes sure there are enough threads to sustain high write rates. -|*Version 7.1.2 and Higher* -|[[magma-support-ref]]Search, Eventing, Analytics<> -|=== +To learn more about how you should configure the Writer Thread settings for your Magma bucket, see xref:manage:manage-settings/general-settings.adoc#data-settings[Data Settings] + +== Couchstore -[horizontal] -[[magma-support-note]]<>:: If these services are required in versions prior to 7.1.2, Couchstore should be used. +Couchstore is the original storage engine for Couchbase Server. +It's the only storage engine available in Couchbase Server Community Edition. +Couchstore is designed for high performance with datasets that fit in memory. +It has a minimum memory requirement of 100{nbsp}MB per node, and a minimum memory-to-data ratio of 10%. 
+If you have a small dataset whose working set (frequently accessed data) can fit in memory, then you should consider using Couchstore. -== Couchstore and Magma at a Glance +== Couchstore versus Magma at a Glance + +The following table summarizes the differences between Couchstore and Magma storage engines. |=== -| {empty} | Couchstore |Magma +| {empty} | Couchstore |Magma 128 vBuckets | Magma 1024 vBuckets | Minimum bucket memory quota -| 100{nbsp}MB -| [[quota-ref]]1{nbsp}GB<> +| 100{nbsp}MiB +| 100{nbsp}MiB +| 1{nbsp}GiB | Minimum memory to data ratio | 10% | 1% +| 1% | Maximum data per node | 3{nbsp}TB +| 10{nbsp}TB, but with much higher possibility of data imbalance. | 10{nbsp}TB |=== -[horizontal] -[[quota-note]]<>:: Magma's minimum memory requirement is higher at 1GB per node due to the more complex data structures it has to maintain. - -== When should you use Couchstore? -The choice of Couchstore or Magma is set at the bucket level xref:manage:manage-buckets/create-bucket.adoc[when the bucket is created]. -A single Couchbase cluster can have a mix of Couchstore and Magma buckets. +== Which Storage Engine Should You Use? -You should use the Couchstore backend if: +The choice of which storage engine to use depends on your use case and the size of your dataset. +As of Couchbase Server 8.0, Magma is the recommended storage engine for most use cases. +The choice between 128 and 1024 vBuckets gives you flexibility when configuring your bucket. -* You have a dataset with a working set that will fit into available memory (and the working set is >{nbsp}20%). -* You are running the Couchbase server on low-end hardware. -* You are running a version prior to 7.1.2, and your bucket needs to support the Search, Eventing, or Analytics Service. -* You are running the legacy xref:learn:views/views-intro.adoc[MapReduce Views] Service, which will not run on Magma storage.
+When to choose Magma with 128 vBuckets:: +You should use the Magma backend with 128 vBuckets if any of the following are true: -== When should you use Magma? ++ +* You want to minimize memory use and have a working dataset that does not fit in the available memory or is more than 20% of the total dataset. +Magma with 128 vBuckets is more efficient in this case than Couchstore. +* You want to conserve memory as your dataset grows. +Because Magma has a 1% memory-to-data ratio, its memory requirements grow more slowly than Couchstore's 10% memory-to-data ratio. +* You have a dataset size that's less than 100{nbsp}GiB. +With this small dataset size, the 128 vBucket configuration saves you memory versus the minimum 1{nbsp}GiB memory required by the 1024 vBucket Magma configuration. +* You're configuring a POC or testing system and plan to use Magma with 1024 vBuckets in production. -You should use the Magma backend if: +When to choose Magma with 1024 vBuckets:: +You should use the Magma backend with 1024 vBuckets if any of the following are true: -* Your working set is much larger than the available memory, and you need high disk-access speed. -* You need to store and access large amounts of data (several terabytes) using a small amount of memory. ++ +* Your working set is larger than the available memory (or you expect it to outgrow memory), and you need high disk-access speed. +* You need to store and access large amounts of data (multiple terabytes) using a small amount of memory. +In this case, the 1024 vBucket configuration is less likely to have data imbalance issues than the 128 vBucket configuration. +Also, Magma with 1024 vBuckets uses less CPU for operations such as compaction versus the 128 vBucket configuration. * Your applications make heavy use of transactions with persistence-based durability. +* Your dataset will grow beyond Couchstore's 3{nbsp}TB limit.
+ +When to choose Couchstore:: +You should use the Couchstore backend if any of the following are true: + ++ +* You're using Couchbase Server Community Edition, which only supports the Couchstore storage engine. +* You have a dataset with a working set that fits into available memory and the working set is less than 20% of the total dataset. +* You're running Couchbase Server on low-end hardware. +* You're running the legacy xref:learn:views/views-intro.adoc[] Service, which does not run on Magma storage. + -== Migrating Between Storage Engines +== Migrating between Storage Engines You can migrate a bucket to use a different storage engine. Consider migrating a bucket if it no longer meets the criteria explained in the previous sections. -For example, suppose you have a bucket using the Couchstore backend that has grown to the point where it no longer fits in memory. +For example, suppose you have a bucket using the Couchstore backend whose working dataset has grown to the point where it no longer fits in memory. +In this case, you should migrate the bucket to use Magma as a storage backend. To learn how to migrate a bucket's storage backend, see xref:manage:manage-buckets/migrate-bucket.adoc[]. diff --git a/modules/learn/pages/buckets-memory-and-storage/vbuckets.adoc b/modules/learn/pages/buckets-memory-and-storage/vbuckets.adoc index 2945b1f895..e074182734 100644 --- a/modules/learn/pages/buckets-memory-and-storage/vbuckets.adoc +++ b/modules/learn/pages/buckets-memory-and-storage/vbuckets.adoc @@ -1,5 +1,5 @@ = vBuckets -:description: pass:q[_vBuckets_ are virtual buckets that help distribute data effectively across a cluster, and support replication across multiple nodes.] +:description: pass:q[vBuckets are virtual buckets that break bucket data into smaller pieces to make distributing data across the cluster and replicating data across multiple nodes easier.]
:page-aliases: understanding-couchbase:buckets-memory-and-storage/vbuckets,architecture:core-data-access-bucket-disk-storage,architecture:core-data-access-vbuckets-bucket-partition [abstract] @@ -8,40 +8,69 @@ [#understanding-vbuckets] == Understanding vBuckets -Couchbase Server allows users and applications to save data, in binary or JSON format, in named _buckets_. -Each bucket therefore contains _keys_ and associated _values_. -See xref:buckets-memory-and-storage/buckets.adoc[Buckets], for detailed information. +Couchbase Server lets users and applications save data in binary or JSON format in named buckets. +Each bucket contains keys and their associated values. +See xref:buckets-memory-and-storage/buckets.adoc[Buckets] for detailed information about buckets. -Within the memory and storage management system of Couchbase Server, both Couchbase and Ephemeral buckets are implemented as _vBuckets_, 1024 of which are created for every bucket (except on MacOS, where the number is 64). -vBuckets are distributed evenly across the memory and storage facilities of the cluster; and the bucket's items are distributed evenly across its vBuckets. -This evenness of distribution ensures that all instances of the xref:services-and-indexes/services/data-service.adoc[Data Service] take an approximately equal share of the workload, in terms of numbers of documents to maintain, and operations to handle. +Couchbase Server breaks the data in buckets into smaller units called vBuckets (short for virtual buckets). +Some people refer to vBuckets as shards. +The vBuckets let Couchbase Server work with smaller chunks of data to ease distributing work around the cluster and maintaining data availability through replication. -The 1024 vBuckets that implement a defined bucket are referred to as _active_ vBuckets. -If a bucket is replicated, each replica is implemented as a further 1024 (or 64) vBuckets, referred to as _replica_ vBuckets. 
-Thus, a bucket configured on Linux to be replicated twice results in a total of 3072 vBuckets, distributed across the cluster. -_Write_ operations are performed only on _active_ vBuckets. -Most _read_ operations are performed on _active_ vBuckets, though items can also be read from _replica_ vBuckets when necessary. +When it creates the bucket, Couchbase Server breaks it into a fixed number of vBuckets . +Once created, the number of vBuckets in a bucket does not change. +The number of vBuckets depends on the bucket's storage backend (the system that manages the data storage) and the operating system running Couchbase Server: -Note that vBuckets are sometimes referred to as _shards_. +* On MacOS, Couchbase Server creates 64 vBuckets for each bucket, no matter what the storage engine is. +* Buckets that use the Couchstore storage engine use 1024 vBuckets on Linux and Windows. +* Buckets that use Magma storage engine can use either 128 or 1024 vBuckets on Linux and Windows. +You choose the number of vBuckets when you create the bucket. -Items are written to and retrieved from vBuckets by means of a _CRC32_ hashing algorithm, which is applied to the item's key, and so produces the number of the vBucket in which the item resides. -vBuckets are mapped to individual nodes by the Cluster Manager: the mapping is constantly updated and made generally available to SDK and other clients. +The system distributes vBuckets evenly across the memory and storage resources of nodes that run the xref:services-and-indexes/services/data-service.adoc[Data Service]. +The bucket's data is distributed evenly across its vBuckets. +This even distribution balances the workload of processing and maintaining data across all of the Data Service instances in the cluster. 
-The relationships between a bucket, its keys (and their associated values), the hashing algorithm, vBuckets, server-mappings, and servers, is illustrated below: +=== Accessing Data in vBuckets + +When reading or writing data, Couchbase Server uses a CRC32 hashing algorithm to map items to vBuckets. +It hashes the item's key to determine which vBucket stores the item. + +The Cluster Manager tracks which nodes contain each vBucket. +It also determines which vBuckets are active vs which are replicas (see <>). +When the mapping changes, the Cluster Manager updates the vBucket map and notifies clients of the change. + +The following diagram shows the relationships between a bucket, its keys and values, the hashing algorithm, vBuckets, server mappings, and servers: [#vbucket_to_node_mapping] image::buckets-memory-and-storage/vbucketToNodeMapping.png[,820,align=left] -Thus, an authorized client attempting to access data performs a hash operation on the appropriate key, and thereby calculates the number of the vBucket that owns the key. -The client then examines the vBucket map to determine the server-node on which the vBucket resides; and finally performs its operation directly on that server-node. +When accessing data via keys, a client hashes the key to calculate which vBucket contains the key. +It checks the vBucket map it got from the Cluster Manager to find the node containing the active vBucket. +The client then directly connects to the node to read or modify the data. + +Buckets organize their documents into xref:learn:data/scopes-and-collections.adoc[Scopes and Collections]. +Scopes and collections do not affect the way in which keys are allocated to vBuckets. +However, each vBucket is aware of the scope and collection containing each of its keys. + +[[active-vs-replica]] +=== Active and Replica vBuckets + +The vBuckets that Couchbase Server uses to access and store data in a bucket are called active vBuckets.
+If you enable replication for a bucket, each replica uses another set of vBuckets, called replica vBuckets. +These replicas are stored across the cluster, similar to active vBuckets. +The active vBucket and its replicas are always on different nodes in the cluster to protect against data loss from node failovers. + +For example, suppose you have a Magma bucket configured with 1024 vBuckets and two replicas on Linux. +Then, Couchbase Server has a total of 3072 vBuckets distributed across the cluster for the bucket. +In most cases, replica vBuckets are not actively used for data access. +Most data read operations use active vBuckets, but the system can read items from replica vBuckets when needed. +Data write operations write only to active vBuckets. +However, durable writes can require that data be replicated from an active vBucket to a replica vBucket before Couchbase Server considers the write operation a success. +See xref:learn:data/durability.adoc[] for more information about durable writes. -In Couchbase Server Version 7.0+, documents within a bucket are organized into xref:learn:data/scopes-and-collections.adoc[Scopes and Collections]. -Scopes and collections do _not_ affect the way in which keys are allocated to vBuckets. -However, each vBucket _is_ aware, for each of its allocated keys, of the scope and collection with which that particular key is associated. +When the configuration of the cluster changes due to rebalance, failover, or node-addition, Couchbase Server promotes replica vBuckets to active if necessary. +For example, if a node fails over, any active vBuckets on it become unavailable. +Couchbase Server promotes replicas of the lost vBuckets to active to maintain data availability.
-The vBucket map is duly updated by the Cluster Manager. -The updated map is then sent to all cluster-participants. -For additional information on the distribution of vBuckets across the cluster, see xref:clusters-and-availability/replication-architecture.adoc[Availability]. +During a rebalance, Couchbase Server redistributes active and replica vBuckets across the available nodes. +For additional information about the distribution of vBuckets across the cluster, see xref:clusters-and-availability/replication-architecture.adoc[Availability]. -Note that this use of client-side hashing for access to Couchbase and Ephemeral bucket-data contrasts with the Memcached data-access method; which requires active management of the server-list, and a specific hashing algorithm, such as Ketama, to handle topology-changes. diff --git a/modules/learn/pages/clusters-and-availability/xdcr-overview.adoc b/modules/learn/pages/clusters-and-availability/xdcr-overview.adoc index 16b2a536d9..3cf6e4a57d 100644 --- a/modules/learn/pages/clusters-and-availability/xdcr-overview.adoc +++ b/modules/learn/pages/clusters-and-availability/xdcr-overview.adoc @@ -1,6 +1,7 @@ = Cross Data Center Replication (XDCR) :description: pass:q[_Cross Data Center Replication_ (XDCR) allows data to be replicated across clusters that are potentially located in different data centers.] :page-aliases: ha-dr:ha-dr-intro,learn:clusters-and-availability/xdcr-cloud-deployment.adoc,install:deployments-xdcr,xdcr:xdcr-tuning-performance +:page-edition: Enterprise Edition [abstract] {description} @@ -8,11 +9,12 @@ [#introduction-to-xdcr] == Introduction to XDCR -Cross Data Center Replication (XDCR) replicates data between a source bucket and a target bucket. +Cross data center replication (XDCR) replicates data between a source bucket and a target bucket. 
The buckets may be located on different clusters, and in different data centers: this provides protection against data-center failure, and also provides high-performance data-access for globally distributed, mission-critical applications. -Note that Couchbase has modified the license restrictions to its Couchbase Server Community Edition package, for version 7.0 and higher: in consequence, XDCR is promoted to a commercial-only feature of Enterprise Edition. -See https://blog.couchbase.com/couchbase-modifies-license-free-community-edition-package/[Couchbase Modifies License of Free Community Edition Package^], for more information on the license restrictions; and see xref:manage:manage-xdcr/xdcr-management-overview.adoc#xdcr-and-community-edition[XDCR and Community Edition], for information on how the new restrictions affect the experience of Community-Edition administrators. +NOTE: In Version 7.0, Couchbase made XDCR a commercial-only feature of Enterprise Edition. +See https://blog.couchbase.com/couchbase-modifies-license-free-community-edition-package/[Couchbase Modifies License of Free Community Edition Package^], for more information about the license restrictions. +Also see xref:manage:manage-xdcr/xdcr-management-overview.adoc#xdcr-and-community-edition[XDCR and Community Edition], for information about how the new restrictions affect the experience of Community-Edition administrators. Data from the source bucket is pushed to the target bucket by means of an XDCR agent, running on the source cluster, using the Database Change Protocol. Any bucket (Couchbase or Ephemeral) on any cluster can be specified as a source or a target for one or more XDCR definitions. @@ -28,6 +30,7 @@ Cross Data Center Replication differs from intra-cluster replication in the foll The starting, stopping, and pausing of XDCR all occur independently of whatever intra-cluster replication is in progress on either the source or target cluster. 
While running, XDCR continuously propagates mutations from the source to the target bucket. +include::learn:partial$xdcr-magma-128-vbucket-incompatibility.adoc[] [#tools-for-managing-xdcr] == Tools and Procedures for Managing XDCR diff --git a/modules/learn/partials/xdcr-magma-128-vbucket-incompatibility.adoc b/modules/learn/partials/xdcr-magma-128-vbucket-incompatibility.adoc new file mode 100644 index 0000000000..f7addfade8 --- /dev/null +++ b/modules/learn/partials/xdcr-magma-128-vbucket-incompatibility.adoc @@ -0,0 +1,3 @@ +NOTE: Versions of Couchbase Server before 8.0 do not support XDCR replication between buckets with different numbers of vBuckets. +They also do not support Magma buckets with 128 vBuckets. +Due to both these limitations, you cannot replicate a Magma bucket with 128 vBuckets on an 8.0 or later cluster to a bucket on a pre-8.0 cluster. diff --git a/modules/manage/assets/images/manage-buckets/addDataBucketDialogInitial.png b/modules/manage/assets/images/manage-buckets/addDataBucketDialogInitial.png index a3b1c93d4d..fbb640a849 100644 Binary files a/modules/manage/assets/images/manage-buckets/addDataBucketDialogInitial.png and b/modules/manage/assets/images/manage-buckets/addDataBucketDialogInitial.png differ diff --git a/modules/manage/examples/duplicate-scopes-collections.py b/modules/manage/examples/duplicate-scopes-collections.py new file mode 100644 index 0000000000..5955635535 --- /dev/null +++ b/modules/manage/examples/duplicate-scopes-collections.py @@ -0,0 +1,43 @@ +from couchbase.cluster import Cluster +from couchbase.options import ClusterOptions +from couchbase.auth import PasswordAuthenticator +from couchbase.management.collections import CollectionManager, CollectionSpec +from couchbase.exceptions import ScopeAlreadyExistsException, CollectionAlreadyExistsException + +# Connect to source and target clusters +# Here, the target and source are the same.
+src_cluster = Cluster('couchbase://127.0.0.1', ClusterOptions(PasswordAuthenticator('Administrator', 'password'))) +tgt_cluster = Cluster('couchbase://127.0.0.1', ClusterOptions(PasswordAuthenticator('Administrator', 'password'))) +src_bucket = src_cluster.bucket('travel-sample') +tgt_bucket = tgt_cluster.bucket('travel-sample-1024') + +src_coll_mgr = src_bucket.collections() +tgt_coll_mgr = tgt_bucket.collections() + +# Get all scopes and their collections from source +scopes = src_coll_mgr.get_all_scopes() + +for scope in scopes: + scope_name = scope.name + if scope_name.startswith('_'): + continue # Skip system scopes + # Create scope in target + try: + print(f"Creating scope: {scope_name}") + tgt_coll_mgr.create_scope(scope_name) + except ScopeAlreadyExistsException: + pass + except Exception as e: + print(f"Error creating scope {scope_name}: {e}") + exit(1) + # Create collections in target + for collection in scope.collections: + try: + print(f"Creating collection: {collection.name} in scope: {scope_name}") + # tgt_coll_mgr.create_collection(CollectionSpec(collection.name, scope_name=scope_name)) + tgt_coll_mgr.create_collection(scope_name, collection.name) + except CollectionAlreadyExistsException: + pass + except Exception as e: + print(f"Error creating collection {collection.name} in scope {scope_name}: {e}") + exit(1) diff --git a/modules/manage/pages/manage-buckets/create-bucket.adoc b/modules/manage/pages/manage-buckets/create-bucket.adoc index 7290327995..438b04e4eb 100644 --- a/modules/manage/pages/manage-buckets/create-bucket.adoc +++ b/modules/manage/pages/manage-buckets/create-bucket.adoc @@ -1,18 +1,23 @@ = Create a Bucket -:description: pass:q[_Full_ and _Cluster_ Administrators can use Couchbase Web Console, the CLI, or the REST API to create a bucket.] +:description: pass:q[Full, Cluster, and Backup Administrators can use Couchbase Web Console, the CLI, or the REST API to create a bucket.] 
:page-aliases: clustersetup:create-bucket :page-topic-type: guide [abstract] {description} -You can create a bucket with the xref:manage:manage-buckets/create-bucket.adoc#create-bucket-using-couchbase-web-console[Couchbase Server UI], xref:manage:manage-buckets/create-bucket.adoc#create-bucket-with-the-cli[CLI] or the xref:manage:manage-buckets/create-bucket.adoc#create-bucket-with-the-rest-api[REST API]. +You can create a bucket using the following methods: + +* <> +* <> +* <> You can create a maximum of 30 buckets per cluster. == Prerequisites -* You must be a Full or Cluster Administrator. +* You have the Full, Cluster, or Backup Administrator role. +* Your cluster must have less than 30 buckets. [#create-bucket-using-couchbase-web-console] == Create a Bucket with the UI @@ -20,29 +25,27 @@ You can create a maximum of 30 buckets per cluster. To create a bucket with the Couchbase UI: . Log in to the Couchbase Server Web Console. -. Select btn:[Add Bucket]. +. Click menu:Buckets[]. + +. Click btn:[Add Bucket]. + The [.ui]*Add Data Bucket* dialog appears: [#add-data-bucket-dialog-initial] -image::manage-buckets/addDataBucketDialogInitial.png[,320,align=center, alt="An image that displays the Add Data Bucket dialog. The Name field is empty. Bucket Type is set to Couchbase, and the Storage Backend is set to CouchStore. The Memory Quota is set to 18488MiB. The Advanced bucket settings are collapsed."] +image::manage-buckets/addDataBucketDialogInitial.png[align=center, alt="An image that displays the Add Data Bucket dialog. The Name field is empty. Bucket Type is set to Couchbase, and the Storage Backend is set to Magma with 128 vBuckets. The Memory Quota is set to 312MiB. The Advanced bucket settings is collapsed."] [start="4"] . In the *Name* field, enter a name for the new bucket. 
-+ -[NOTE] -==== A bucket name can be up to 100 characters in length and contain: ++ * Uppercase and lowercase characters (A-Z and a-z) * Digits (0-9) * Underscores (_), periods (.), dashes (-), and percent symbols (%) -==== -[start="5"] + . Choose a *Bucket Type* for the bucket: + * *Couchbase* -* *Memcached* * *Ephemeral* + @@ -54,16 +57,38 @@ For more information about bucket types, see xref:learn:buckets-memory-and-stora * *Magma* + +If you choose *Magma*, you can also choose the number of xref:learn:buckets-memory-and-storage/vbuckets.adoc[vBuckets] for the bucket. For more information about the available storage engines, see xref:learn:buckets-memory-and-storage/storage-engines.adoc[Storage Engines]. -. In the *Memory Quota* field, enter a value in MiB per node for the total RAM available for the bucket. This value can't exceed the total RAM quota for your cluster. +. In the *Memory Quota* field, enter a value in MiB per node for the total RAM available for the bucket. +This value cannot exceed the total RAM quota for your cluster. +It must also be equal to or greater than the minimum memory quota required by your chosen storage engine: + ++ +|=== +| Storage Engine | Minimum Memory Quota + +| Couchstore +| 100{nbsp}MiB + +| Magma (128 vBuckets) +| 100{nbsp}MiB + +| Magma (1024 vBuckets) +| 1{nbsp}GiB +|=== + + -NOTE: Your memory quota needs to match the minimum memory resident ratio required by your chosen storage engine. +Your setting should also meet or surpass the memory resident requirement of the storage backend for your anticipated dataset size. +For example, Magma has a minimum memory-to-data ratio of 1% of the data size. +Suppose you expect the dataset in your Magma bucket to be 5{nbsp}TiB. +Then set the memory quota to at least 51{nbsp}GiB. For more information, see xref:learn:buckets-memory-and-storage/memory.adoc#bucket-memory[Bucket Memory Quotas]. [start="8"] . Expand *Advanced bucket settings*. -. Set any advanced settings for your bucket. 
See <>. +. Set any advanced settings for your bucket. +See <>. . Select btn:[Add Bucket]. The bucket appears on the *Buckets* screen. @@ -263,7 +288,7 @@ image::manage-buckets/addDataBucketDialogExpandedForEphemeral.png[,350,align=cen [#create-bucket-with-the-cli] == Create a Bucket with the CLI -To create a bucket with the Couchbase CLI, use the `bucket-create` command. +To create a bucket with the Couchbase command-line tool, use the `bucket-create` command. For example: @@ -281,7 +306,7 @@ For example: --enable-flush 0 ---- -The preceding example creates a `Couchbase` bucket named `testBucket`, with a RAM size of `1024`. +The preceding example creates a `Couchbase` bucket named `testBucket`, with a RAM size of `1024` with the default storage backed (Magma with 128 vBucket). It sets a Maximum Time-to-Live and disables Flush. It also sets a Minimum Durability Level of `persistToMajority`. @@ -306,5 +331,6 @@ curl -v -X POST http://10.143.201.101:8091/pools/default/buckets \ The preceding example creates a `Couchbase` bucket named `testBucket`, with a RAM size of `512`. It sets a Minimum Durability Level of `majorityAndPersistActive`. +This bucket uses the default storage backend (Magma with 128 vBuckets). For more information about the `/pools/default/buckets` endpoint and its parameters, see xref:rest-api:rest-bucket-create.adoc[Creating and Editing Buckets] in the Buckets API reference. diff --git a/modules/manage/pages/manage-buckets/migrate-bucket.adoc b/modules/manage/pages/manage-buckets/migrate-bucket.adoc index fa90d38807..cd5c695e11 100644 --- a/modules/manage/pages/manage-buckets/migrate-bucket.adoc +++ b/modules/manage/pages/manage-buckets/migrate-bucket.adoc @@ -7,8 +7,15 @@ You can migrate a bucket's storage backend if you find the bucket's current performance is not meeting your needs. For example, you can migrate a bucket from Couchstore to Magma if the bucket's working set grows beyond its memory quota. 
- You can migrate from Couchstore to Magma, or from Magma to Couchstore. +Migrating to a Magma bucket always results in a bucket with 1024 vBuckets, regardless of the number of vBuckets in the original bucket. + +NOTE: The backend migration described in this section does not support migrating between buckets with different numbers of vBuckets. +You cannot migrate a Couchstore or Magma bucket with 1024 vBuckets to a Magma bucket with 128 vBuckets. +Similarly, you cannot migrate from a Magma bucket with 128 vBuckets to a Couchstore or a Magma bucket with 128 vBuckets. +To migrate between buckets with different number of vBuckets, you can use a local cross datacenter replication (XDCR). +See <> for more information. + You start a bucket's migration by calling the REST API to edit the bucket's `storageBackend` setting. This call changes the bucket's global storage backend parameter. However, it does not trigger an immediate conversion of the vBuckets to the new backend. @@ -185,3 +192,122 @@ include::manage:example$migrate-bucket-storage-backend.sh[tag=rebalance-cluster] ---- . Repeat the previous step until all nodes that you'd migrated have rolled back to their original storage backend. + + +[#xdcr-migration] +== XDCR Storage Backend Migration + +You can use xref:learn:clusters-and-availability/xdcr-overview.adoc[] to migrate data between two buckets with different storage backends, including between Magma buckets using different numbers of vBuckets. +You can perform this migration on the same cluster or between two clusters. + +include::learn:partial$xdcr-magma-128-vbucket-incompatibility.adoc[] + +To perform an XDCR storage backed migration on the same cluster, it must have enough memory and storage for two copies of the bucket's data. +After the migration, you can drop the original bucket to free the resources it uses. + +The process for performing a backend migration using XDCR is similar to configuring any other XDCR replication. 
+The only difference is that the source and destination of the replication are the same cluster. + +The following steps demonstrate migrating a Magma bucket with 128 vBuckets named `travel-sample` to a Magma bucket with 1024 vBuckets named `travel-sample-1024`: + +. Create a new bucket named `travel-sample-1024` using the Magma storage backend with 1024 vBuckets. +For more information about creating a bucket, see xref:manage:manage-buckets/create-bucket.adoc[]. +The following example uses the REST API to create the new bucket: + ++ +[source,console] +---- +curl -X POST http://127.0.0.1:8091/pools/default/buckets \ + -u Administrator:password \ + -d name=travel-sample-1024 \ + -d storageBackend=magma \ + -d numVbuckets=1024 \ + -d ramQuota=1024 +---- + +. Recreate any scopes and collections in the new bucket that are in the original bucket. +Replication does not recreating missing scopes and collections for you. +You can create the scopes and collections manually or reuse any deployment scripts you have. +See xref:manage:manage-scopes-and-collections/manage-scopes-and-collections.adoc[] for details on creating scopes and collections. + ++ +You can also create a script to recreate the scopes and collections in the new bucket. +For example, the following Python script uses the Python SDK to accomplish this task: + ++ +[source,python] +---- +include::manage:example$duplicate-scopes-collections.py[] +---- + +. Add a loopback reference to the cluster. 
+The following example uses the REST API to add an XDCR reference named `self` to the cluster that uses the loopback IP address as the hostname: + ++ +[source,console] +---- + curl -X POST http://127.0.0.1:8091/pools/default/remoteClusters -u Administrator:password \ +-d username=Administrator \ +-d password=password \ +-d hostname=127.0.0.1 \ +-d name=self \ +-d demandEncryption=0 | jq +---- + ++ +The out of previous command is: + ++ +[source,json] +---- +{ + "connectivityErrors": null, + "deleted": false, + "hostname": "127.0.0.1:8091", + "name": "self", + "network_type": "", + "secureType": "none", + "uri": "/pools/default/remoteClusters/self", + "username": "Administrator", + "uuid": "a43e930240738b5aee16e2688a65d08f", + "validateURI": "/pools/default/remoteClusters/self?just_validate=1" +} +---- + +. Create an XDCR replication from the original bucket to the new bucket. +The following example uses the REST API to create the replication: + ++ +[source,console] +---- +curl -v -X POST -u Administrator:password \ +http://127.0.0.1:8091/controller/createReplication \ +-d fromBucket=travel-sample \ +-d toCluster=self \ +-d toBucket=travel-sample-1024 \ +-d replicationType=continuous \ +-d createTarget=true \ +-d enableCompression=1 | jq +---- + ++ +The result of the previous command looks like this: + ++ +[source,json] +---- +{ + "id": "a43e930240738b5aee16e2688a65d08f/travel-sample/travel-sample-1024" +} +---- + ++ +The replication process starts. + +. Monitor the replication process until it completes. +You can monitor the replication process xref:manage:manage-xdcr/create-xdcr-replication.adoc#monitor-current-replications[via the Couchbase Server Web Console] or by xref:rest-api:rest-xdcr-statistics.adoc[calling the REST API]. +Once the replication has duplicated all of the documents in the original bucket without errors, you can stop and delete it. +Then you can drop the original bucket. 
+ ++ +IMPORTANT: Be sure to update all clients to use the new bucket before you stop the replication. diff --git a/modules/manage/pages/manage-xdcr/prepare-for-xdcr.adoc b/modules/manage/pages/manage-xdcr/prepare-for-xdcr.adoc index 7120d407f2..a532b83b7b 100644 --- a/modules/manage/pages/manage-xdcr/prepare-for-xdcr.adoc +++ b/modules/manage/pages/manage-xdcr/prepare-for-xdcr.adoc @@ -33,6 +33,8 @@ If a cluster is not sized to handle _both_ the existing workload _and_ the new X * Couchbase Server uses TCP/IP port `8091` to exchange cluster configuration information. If you are communicating with a destination cluster over a dedicated connection, or over the Internet, ensure that all nodes in the destination and source clusters can communicate with each other over ports `8091` and `8092`. +include::learn:partial$xdcr-magma-128-vbucket-incompatibility.adoc[] + [#next-xdcr-steps-after-preparation] == Next Steps diff --git a/modules/rest-api/pages/rest-bucket-create.adoc b/modules/rest-api/pages/rest-bucket-create.adoc index fbc196b313..b012eddd7d 100644 --- a/modules/rest-api/pages/rest-bucket-create.adoc +++ b/modules/rest-api/pages/rest-bucket-create.adoc @@ -46,6 +46,7 @@ curl -X POST -u : -d bucketType=[ couchbase | ephemeral | memcached ] -d ramQuota= -d storageBackend=[ couchstore | magma ] + -d numVbuckets=[ 128 | 1024 ] -d evictionPolicy=[ [ valueOnly | fullEviction ] | [ noEviction | nruEviction ] @@ -104,7 +105,7 @@ xref:rest-api:rest-bucket-create.adoc#historyretentioncollectiondefault[historyR xref:rest-api:rest-bucket-create.adoc#historyretentionbytes[historyRetentionBytes], xref:rest-api:rest-bucket-create.adoc#storagebackend[storageBackend], and xref:rest-api:rest-bucket-create.adoc#historyretentionseconds[historyRetentionSeconds]. 
-** Parameters that _cannot_ be edited after bucket creation; these being xref:rest-api:rest-bucket-create.adoc#buckettype[bucketType], xref:rest-api:rest-bucket-create.adoc#replicaindex[replicaIndex], and xref:rest-api:rest-bucket-create.adoc#conflictresolutiontype[conflictResolutionType]. +** Parameters that _cannot_ be edited after bucket creation; these being xref:rest-api:rest-bucket-create.adoc#buckettype[bucketType], xref:rest-api:rest-bucket-create.adoc#replicaindex[replicaIndex], <>, and xref:rest-api:rest-bucket-create.adoc#conflictresolutiontype[conflictResolutionType]. For full details and examples, see xref:rest-api:rest-bucket-create.adoc#general-parameters[General Parameters], below. @@ -249,21 +250,27 @@ No object is returned. [#storagebackend] === storageBackend -The _storage backend_ to be assigned to and used by the bucket. -This can be either `couchstore` (which is the default) or `magma`. -For information, see xref:learn:buckets-memory-and-storage/storage-engines.adoc[Storage Engines]. +The storage backend to use for the new bucket. +In Enterprise Edition, this value can be set to either `couchstore` or `magma` (the default). +In Couchbase Server Community Edition, the default and only valid value is `couchstore`. +For more information, see xref:learn:buckets-memory-and-storage/storage-engines.adoc[Storage Engines]. -NOTE: You can edit this value after initially creating the bucket. Couchbase Server sets the new backend value globally. However, this change does not convert the bucket to the new backend storage engine. Instead, Couchbase Server adds overrides to every node containing the bucket to indicate that their vBuckets are still in the old format. You must take additional steps to complete the migration to the new storage backend. See xref:manage:manage-buckets/migrate-bucket.adoc[] for more information. +NOTE: You can edit this value after initially creating the bucket. +Couchbase Server sets the new backend value globally. 
+However, this change does not convert the bucket to the new backend storage engine. +Instead, Couchbase Server adds overrides to every node containing the bucket to indicate that their vBuckets are still in the old format. +You must take additional steps to complete the migration to the new storage backend. +See xref:manage:manage-buckets/migrate-bucket.adoc[] for more information. [#example-storage-backend] ==== Example: Specifying the Storage Backend -A minimum of 1024 MiB is required if the `magma` option is used; a minimum of 100 MiB if the default `couchstore` is used. +The following example creates a new bucket, named `testBucket`, with the Magma storage backend. ---- curl -v -X POST http://127.0.0.1:8091/pools/default/buckets \ -u Administrator:password \ --d ramQuota=1024 \ +-d ramQuota=300 \ -d storageBackend=magma \ -d name=testBucket ---- @@ -271,6 +278,38 @@ curl -v -X POST http://127.0.0.1:8091/pools/default/buckets \ If successful, the call returns a `202 Accepted` notification. No object is returned. + +[#numvbuckets] +=== numVbuckets + +Sets the number of vBuckets for a Magma bucket. +The possible values are `128` or `1024`. +If you do not supply this value (or if you supply a value other than `128` or `1024`), Couchbase Server uses the default value of `128`. + +If you set `storageBackend` to `couchstore`, the number of vBuckets is always 1024, and Couchbase Server ignores this parameter if you provide it. + +NOTE: You cannot change the number of vBuckets for a bucket after creating it. +If you need a bucket with a different number of vBuckets, you must create a new bucket with the desired number of vBuckets and then migrate your data to it. +See xref:manage:manage-buckets/migrate-bucket.adoc#xdcr-migration[XDCR Storage Backend Migration] for more information. 
+ +[#example-numvbuckets-create] +==== Example: Setting the Number of vBuckets + +The following example creates a new bucket named `testBucket` with the Magma stage backend assigns it `1024` vBuckets. +It also sets the `ramQuota` to `1024`, which is the minimum value allowed for a Magma bucket with 1024 vBuckets. + +[source,console] +---- +curl -X POST http://127.0.0.1:8091/pools/default/buckets \ + -u Administrator:password \ + -d name=testBucket \ + -d storageBackend=magma \ + -d numVbuckets=1024 \ + -d ramQuota=1024 +---- + +This example returns the status code `202 Accepted` and no additional output. + [#evictionpolicy] === evictionPolicy diff --git a/modules/rest-api/pages/rest-buckets-summary.adoc b/modules/rest-api/pages/rest-buckets-summary.adoc index ffc8c71160..3982e909a3 100644 --- a/modules/rest-api/pages/rest-buckets-summary.adoc +++ b/modules/rest-api/pages/rest-buckets-summary.adoc @@ -41,18 +41,19 @@ If an internal error prevents successful execution, `500 Internal Server Error` [#example] == Example -The following example returns information on a single bucket named `travel-sample`. -The output of the call is piped to https://stedolan.github.io/jq/[jq^] to improve readability. +The following example demonstrates getting information about a bucket named `travel-sample`. +It uses the https://stedolan.github.io/jq/[jq^] command-line JSON processor to format the output for readability and also filter out some of the less relevant fields. + ---- curl -X GET -u Administrator:password \ -http://localhost:8091/pools/default/buckets/travel-sample | jq '.' + http://localhost:8091/pools/default/buckets/travel-sample \ + | jq '.vBucketServerMap.vBucketMap = "" + | .nodes = ""' ---- If successful, the call returns `200 OK`, and an object similar to the one shown in the following example. -NOTE: The output contains an extensive array of vBucket data that is omitted from this example. 
- The fields `historyRetentionCollectionDefault`, `historyRetentionCollectionBytes`, and `historyRetentionCollectionSeconds` are specific to Magma storage. When the bucket does not use Magma as its storage backend, these properties do not appear in the output. @@ -70,5 +71,5 @@ See the xref:rest-bucket-create.adoc#notes[Notes] section of xref:rest-bucket-cr [#see-also] == See Also -An overview of buckets is provided in xref:learn:buckets-memory-and-storage/buckets.adoc[Buckets]. -An introduction to scopes and collections is provided in xref:learn:data/scopes-and-collections.adoc[Scopes and Collections]. +* xref:learn:buckets-memory-and-storage/buckets.adoc[] for an overview of buckets. +* xref:learn:data/scopes-and-collections.adoc[] for an overview of scopes and collections. diff --git a/modules/rest-api/partials/get_bucket_travel_sample.json b/modules/rest-api/partials/get_bucket_travel_sample.json index bbe2812419..12f793a49a 100644 --- a/modules/rest-api/partials/get_bucket_travel_sample.json +++ b/modules/rest-api/partials/get_bucket_travel_sample.json @@ -2,25 +2,27 @@ "name": "travel-sample", "nodeLocator": "vbucket", "bucketType": "membase", - "storageBackend": "couchstore", - "uuid": "85ff541d1f4cfbc9e67cda3db698cac6", - "uri": "/pools/default/buckets/travel-sample?bucket_uuid=85ff541d1f4cfbc9e67cda3db698cac6", - "streamingUri": "/pools/default/bucketsStreaming/travel-sample?bucket_uuid=85ff541d1f4cfbc9e67cda3db698cac6", - "numVBuckets": 1024, + "storageBackend": "magma", + "uuid": "823f32a2b2abd57581230b39e31b7a34", + "uri": "/pools/default/buckets/travel-sample?bucket_uuid=823f32a2b2abd57581230b39e31b7a34", + "streamingUri": "/pools/default/bucketsStreaming/travel-sample?bucket_uuid=823f32a2b2abd57581230b39e31b7a34", + "numVBuckets": 128, "bucketCapabilitiesVer": "", "bucketCapabilities": [ "collections", "durableWrite", "tombstonedUserXAttrs", - "couchapi", "subdoc.ReplaceBodyWithXattr", "subdoc.DocumentMacroSupport", "subdoc.ReviveDocument", + 
"nonDedupedHistory", "dcp.IgnorePurgedTombstones", "preserveExpiry", "querySystemCollection", "mobileSystemCollection", "subdoc.ReplicaRead", + "subdoc.BinaryXattr", + "subdoc.AccessDeleted", "rangeScan", "dcp", "cbhello", @@ -31,9 +33,6 @@ "xattr" ], "collectionsManifestUid": "2", - "ddocs": { - "uri": "/pools/default/buckets/travel-sample/ddocs" - }, "vBucketServerMap": { "hashAlgorithm": "CRC", "numReplicas": 1, @@ -42,23 +41,7 @@ "node2.:11210", "node3.:11210" ], - "vBucketMap": [ - [ - 0, - 1 - ], - [ - 0, - 1 - ], - . - . - . - [ - 2, - 1 - ] - ] + "vBucketMap": "" }, "localRandomKeyUri": "/pools/default/buckets/travel-sample/localRandomKey", "controllers": { @@ -67,235 +50,7 @@ "purgeDeletes": "/pools/default/buckets/travel-sample/controller/unsafePurgeBucket", "startRecovery": "/pools/default/buckets/travel-sample/controller/startRecovery" }, - "nodes": [ - { - "couchApiBaseHTTPS": "https://node3.:18092/travel-sample%2B85ff541d1f4cfbc9e67cda3db698cac6", - "couchApiBase": "http://node3.:8092/travel-sample%2B85ff541d1f4cfbc9e67cda3db698cac6", - "clusterMembership": "active", - "recoveryType": "none", - "status": "healthy", - "otpNode": "ns_1@node3.", - "hostname": "node3.:8091", - "nodeUUID": "d6bfd3cccf28f3e648bca46cb30ac271", - "clusterCompatibility": 524288, - "version": "8.0.0-1649-enterprise", - "os": "aarch64-unknown-linux-gnu", - "cpuCount": 4, - "ports": { - "direct": 11210, - "httpsMgmt": 18091, - "httpsCAPI": 18092, - "distTCP": 21100, - "distTLS": 21150 - }, - "services": [ - "backup", - "index", - "kv", - "n1ql" - ], - "nodeEncryption": false, - "nodeEncryptionClientCertVerification": false, - "addressFamilyOnly": false, - "configuredHostname": "node3.:8091", - "addressFamily": "inet", - "externalListeners": [ - { - "afamily": "inet", - "nodeEncryption": false - } - ], - "serverGroup": "Group 1", - "replication": 1, - "nodeHash": 48264202, - "systemStats": { - "cpu_utilization_rate": 10.20000000018626, - "cpu_stolen_rate": 0, - "swap_total": 
2147479552, - "swap_used": 396525568, - "mem_total": 8327258112, - "mem_free": 1855406080, - "mem_limit": 8327258112, - "cpu_cores_available": 4, - "allocstall": 37181 - }, - "interestingStats": { - "cmd_get": 0, - "couch_docs_actual_disk_size": 48142369, - "couch_docs_data_size": 32943627, - "couch_spatial_data_size": 0, - "couch_spatial_disk_size": 0, - "couch_views_actual_disk_size": 0, - "couch_views_data_size": 0, - "curr_items": 21189, - "curr_items_tot": 42289, - "ep_bg_fetched": 0, - "get_hits": 0, - "index_data_size": 37010997, - "index_disk_size": 16332886, - "mem_used": 63213008, - "ops": 0, - "vb_active_num_non_resident": 0, - "vb_replica_curr_items": 21100 - }, - "uptime": "788913", - "memoryTotal": 8327258112, - "memoryFree": 1855406080, - "mcdMemoryReserved": 6353, - "mcdMemoryAllocated": 6353 - }, - { - "couchApiBaseHTTPS": "https://node2.:18092/travel-sample%2B85ff541d1f4cfbc9e67cda3db698cac6", - "couchApiBase": "http://node2.:8092/travel-sample%2B85ff541d1f4cfbc9e67cda3db698cac6", - "clusterMembership": "active", - "recoveryType": "none", - "status": "healthy", - "otpNode": "ns_1@node2.", - "hostname": "node2.:8091", - "nodeUUID": "b737df3d566f6c6ccb2bcafec61e85a2", - "clusterCompatibility": 524288, - "version": "8.0.0-1649-enterprise", - "os": "aarch64-unknown-linux-gnu", - "cpuCount": 4, - "ports": { - "direct": 11210, - "httpsMgmt": 18091, - "httpsCAPI": 18092, - "distTCP": 21100, - "distTLS": 21150 - }, - "services": [ - "eventing", - "fts", - "kv", - "n1ql" - ], - "nodeEncryption": false, - "nodeEncryptionClientCertVerification": false, - "addressFamilyOnly": false, - "configuredHostname": "node2.:8091", - "addressFamily": "inet", - "externalListeners": [ - { - "afamily": "inet", - "nodeEncryption": false - } - ], - "serverGroup": "Group 1", - "replication": 1, - "nodeHash": 34469021, - "systemStats": { - "cpu_utilization_rate": 10.23397660196727, - "cpu_stolen_rate": 0, - "swap_total": 2147479552, - "swap_used": 396525568, - "mem_total": 
8327258112, - "mem_free": 1855901696, - "mem_limit": 8327258112, - "cpu_cores_available": 4, - "allocstall": 37181 - }, - "interestingStats": { - "cmd_get": 0, - "couch_docs_actual_disk_size": 56100897, - "couch_docs_data_size": 32866921, - "couch_spatial_data_size": 0, - "couch_spatial_disk_size": 0, - "couch_views_actual_disk_size": 0, - "couch_views_data_size": 0, - "curr_items": 21118, - "curr_items_tot": 42167, - "ep_bg_fetched": 0, - "get_hits": 0, - "mem_used": 63213888, - "ops": 0, - "vb_active_num_non_resident": 0, - "vb_replica_curr_items": 21049 - }, - "uptime": "788913", - "memoryTotal": 8327258112, - "memoryFree": 1855901696, - "mcdMemoryReserved": 6353, - "mcdMemoryAllocated": 6353 - }, - { - "couchApiBaseHTTPS": "https://node1.:18092/travel-sample%2B85ff541d1f4cfbc9e67cda3db698cac6", - "couchApiBase": "http://node1.:8092/travel-sample%2B85ff541d1f4cfbc9e67cda3db698cac6", - "clusterMembership": "active", - "recoveryType": "none", - "status": "healthy", - "otpNode": "ns_1@node1.", - "thisNode": true, - "hostname": "node1.:8091", - "nodeUUID": "87a797d06f374f8006cc4a3a683db4e1", - "clusterCompatibility": 524288, - "version": "8.0.0-1649-enterprise", - "os": "aarch64-unknown-linux-gnu", - "cpuCount": 4, - "ports": { - "direct": 11210, - "httpsMgmt": 18091, - "httpsCAPI": 18092, - "distTCP": 21100, - "distTLS": 21150 - }, - "services": [ - "cbas", - "index", - "kv", - "n1ql" - ], - "nodeEncryption": false, - "nodeEncryptionClientCertVerification": false, - "addressFamilyOnly": false, - "configuredHostname": "node1.:8091", - "addressFamily": "inet", - "externalListeners": [ - { - "afamily": "inet", - "nodeEncryption": false - } - ], - "serverGroup": "Group 1", - "replication": 1, - "nodeHash": 72627629, - "systemStats": { - "cpu_utilization_rate": 10.24295140934561, - "cpu_stolen_rate": 0, - "swap_total": 2147479552, - "swap_used": 396525568, - "mem_total": 8327258112, - "mem_free": 1854889984, - "mem_limit": 8327258112, - "cpu_cores_available": 4, - 
"allocstall": 37181 - }, - "interestingStats": { - "cmd_get": 0, - "couch_docs_actual_disk_size": 44320702, - "couch_docs_data_size": 32823159, - "couch_spatial_data_size": 0, - "couch_spatial_disk_size": 0, - "couch_views_actual_disk_size": 0, - "couch_views_data_size": 0, - "curr_items": 21036, - "curr_items_tot": 42230, - "ep_bg_fetched": 0, - "get_hits": 0, - "index_data_size": 38186104, - "index_disk_size": 23976600, - "mem_used": 62882016, - "ops": 0, - "vb_active_num_non_resident": 0, - "vb_replica_curr_items": 21194 - }, - "uptime": "788913", - "memoryTotal": 8327258112, - "memoryFree": 1854889984, - "mcdMemoryReserved": 6353, - "mcdMemoryAllocated": 6353 - } - ], + "nodes": "", "stats": { "uri": "/pools/default/buckets/travel-sample/stats", "directoryURI": "/pools/default/buckets/travel-sample/stats/Directory", @@ -303,7 +58,6 @@ }, "authType": "sasl", "autoCompactionSettings": false, - "replicaIndex": false, "rank": 0, "enableCrossClusterVersioning": false, "versionPruningWindowHrs": 720, @@ -314,22 +68,48 @@ "rawRAM": 209715200 }, "basicStats": { - "quotaPercentUsed": 30.08984120686849, + "quotaPercentUsed": 21.90842056274414, "opsPerSec": 0, "diskFetches": 0, - "itemCount": 63343, - "diskUsed": 148563968, - "dataUsed": 98633707, - "memUsed": 189308912, + "itemCount": 63321, + "diskUsed": 119897122, + "dataUsed": 119897122, + "memUsed": 137835864, "vbActiveNumNonResident": 0 }, "evictionPolicy": "fullEviction", "durabilityMinLevel": "none", - "pitrEnabled": false, - "pitrGranularity": 600, - "pitrMaxHistoryAge": 86400, + "storageQuotaPercentage": 50, + "historyRetentionSeconds": 0, + "historyRetentionBytes": 0, + "historyRetentionCollectionDefault": true, + "magmaKeyTreeDataBlockSize": 4096, + "magmaSeqTreeDataBlockSize": 4096, + "continuousBackupEnabled": false, + "continuousBackupInterval": 2, + "continuousBackupLocation": "", "conflictResolutionType": "seqno", + "workloadPatternDefault": "undefined", "maxTTL": 0, "compressionMode": "passive", - 
"accessScannerEnabled": false + "expiryPagerSleepTime": 600, + "memoryLowWatermark": 75, + "memoryHighWatermark": 85, + "durabilityImpossibleFallback": "disabled", + "warmupBehavior": "background", + "invalidHlcStrategy": "error", + "hlcMaxFutureThreshold": 3900, + "dcpConnectionsBetweenNodes": 1, + "dcpBackfillIdleProtectionEnabled": true, + "dcpBackfillIdleLimitSeconds": 720, + "dcpBackfillIdleDiskThreshold": 90, + "accessScannerEnabled": true, + "encryptionAtRestInfo": { + "dataStatus": "unencrypted", + "dekNumber": 0, + "issues": [] + }, + "encryptionAtRestKeyId": -1, + "encryptionAtRestDekRotationInterval": 2592000, + "encryptionAtRestDekLifetime": 31536000 } \ No newline at end of file diff --git a/preview/HEAD.yml b/preview/HEAD.yml index 0075f29c45..3602f574b3 100644 --- a/preview/HEAD.yml +++ b/preview/HEAD.yml @@ -1,13 +1,13 @@ sources: docs-devex: - branches: DOC-12565_vector_search_concepts + branches: release/8.0 docs-analytics: branches: release/8.0 couchbase-cli: # url: ../../docs-includes/couchbase-cli url: https://github.com/couchbaselabs/couchbase-cli-doc # branches: HEAD - branches: master + branches: morpheus startPaths: docs/ backup: # url: ../../docs-includes/backup