diff --git a/common/api-review/firestore.api.md b/common/api-review/firestore.api.md index 34b56b97f21..26c379a6e34 100644 --- a/common/api-review/firestore.api.md +++ b/common/api-review/firestore.api.md @@ -375,15 +375,14 @@ export interface LoadBundleTaskProgress { export { LogLevel } // @public -export interface MemoryCacheSettings { +export type MemoryCacheSettings = { garbageCollector?: MemoryGarbageCollector; -} +}; // @public -export interface MemoryEagerGarbageCollector { - // (undocumented) +export type MemoryEagerGarbageCollector = { kind: 'memoryEager'; -} +}; // @public export function memoryEagerGarbageCollector(): MemoryEagerGarbageCollector; @@ -392,19 +391,17 @@ export function memoryEagerGarbageCollector(): MemoryEagerGarbageCollector; export type MemoryGarbageCollector = MemoryEagerGarbageCollector | MemoryLruGarbageCollector; // @public -export interface MemoryLocalCache { - // (undocumented) +export type MemoryLocalCache = { kind: 'memory'; -} +}; // @public export function memoryLocalCache(settings?: MemoryCacheSettings): MemoryLocalCache; // @public -export interface MemoryLruGarbageCollector { - // (undocumented) +export type MemoryLruGarbageCollector = { kind: 'memoryLru'; -} +}; // @public export function memoryLruGarbageCollector(settings?: { @@ -494,42 +491,39 @@ export class PersistentCacheIndexManager { } // @public -export interface PersistentCacheSettings { +export type PersistentCacheSettings = { cacheSizeBytes?: number; tabManager?: PersistentTabManager; -} +}; // @public -export interface PersistentLocalCache { - // (undocumented) +export type PersistentLocalCache = { kind: 'persistent'; -} +}; // @public export function persistentLocalCache(settings?: PersistentCacheSettings): PersistentLocalCache; // @public -export interface PersistentMultipleTabManager { - // (undocumented) +export type PersistentMultipleTabManager = { kind: 'PersistentMultipleTab'; -} +}; // @public export function persistentMultipleTabManager(): 
PersistentMultipleTabManager; // @public -export interface PersistentSingleTabManager { - // (undocumented) +export type PersistentSingleTabManager = { kind: 'persistentSingleTab'; -} +}; // @public export function persistentSingleTabManager(settings: PersistentSingleTabManagerSettings | undefined): PersistentSingleTabManager; // @public -export interface PersistentSingleTabManagerSettings { +export type PersistentSingleTabManagerSettings = { forceOwnership?: boolean; -} +}; // @public export type PersistentTabManager = PersistentSingleTabManager | PersistentMultipleTabManager; diff --git a/common/api-review/vertexai.api.md b/common/api-review/vertexai.api.md index 1650bf3381e..8758d25bdce 100644 --- a/common/api-review/vertexai.api.md +++ b/common/api-review/vertexai.api.md @@ -112,7 +112,8 @@ export class BooleanSchema extends Schema { // @public export class ChatSession { - constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined); + // Warning: (ae-forgotten-export) The symbol "ChromeAdapter" needs to be exported by the entry point index.d.ts + constructor(apiSettings: ApiSettings, model: string, chromeAdapter: ChromeAdapter, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined); getHistory(): Promise; // (undocumented) model: string; @@ -392,8 +393,9 @@ export interface GenerativeContentBlob { // @public export class GenerativeModel extends AIModel { - constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions); + constructor(ai: AI, modelParams: ModelParams, chromeAdapter: ChromeAdapter, requestOptions?: RequestOptions); countTokens(request: CountTokensRequest | string | Array): Promise; + static DEFAULT_HYBRID_IN_CLOUD_MODEL: string; generateContent(request: GenerateContentRequest | string | Array): Promise; generateContentStream(request: GenerateContentRequest | string | Array): Promise; // (undocumented) @@ -415,7 +417,7 @@ export 
class GenerativeModel extends AIModel { export function getAI(app?: FirebaseApp, options?: AIOptions): AI; // @public -export function getGenerativeModel(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel; +export function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel; // @beta export function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel; @@ -547,6 +549,14 @@ export enum HarmSeverity { HARM_SEVERITY_UNSUPPORTED = "HARM_SEVERITY_UNSUPPORTED" } +// @public +export interface HybridParams { + inCloudParams?: ModelParams; + mode: InferenceMode; + // Warning: (ae-forgotten-export) The symbol "LanguageModelCreateOptions" needs to be exported by the entry point index.d.ts + onDeviceParams?: LanguageModelCreateOptions; +} + // @beta export enum ImagenAspectRatio { LANDSCAPE_16x9 = "16:9", @@ -631,6 +641,9 @@ export interface ImagenSafetySettings { safetyFilterLevel?: ImagenSafetyFilterLevel; } +// @public +export type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud'; + // @public export interface InlineDataPart { // (undocumented) diff --git a/docs-devsite/_toc.yaml b/docs-devsite/_toc.yaml index 03d6b5f6ec7..c1a10429ad7 100644 --- a/docs-devsite/_toc.yaml +++ b/docs-devsite/_toc.yaml @@ -245,28 +245,10 @@ toc: path: /docs/reference/js/firestore_.loadbundletask.md - title: LoadBundleTaskProgress path: /docs/reference/js/firestore_.loadbundletaskprogress.md - - title: MemoryCacheSettings - path: /docs/reference/js/firestore_.memorycachesettings.md - - title: MemoryEagerGarbageCollector - path: /docs/reference/js/firestore_.memoryeagergarbagecollector.md - - title: MemoryLocalCache - path: /docs/reference/js/firestore_.memorylocalcache.md - - title: MemoryLruGarbageCollector - path: /docs/reference/js/firestore_.memorylrugarbagecollector.md - title: PersistenceSettings path: 
/docs/reference/js/firestore_.persistencesettings.md - title: PersistentCacheIndexManager path: /docs/reference/js/firestore_.persistentcacheindexmanager.md - - title: PersistentCacheSettings - path: /docs/reference/js/firestore_.persistentcachesettings.md - - title: PersistentLocalCache - path: /docs/reference/js/firestore_.persistentlocalcache.md - - title: PersistentMultipleTabManager - path: /docs/reference/js/firestore_.persistentmultipletabmanager.md - - title: PersistentSingleTabManager - path: /docs/reference/js/firestore_.persistentsingletabmanager.md - - title: PersistentSingleTabManagerSettings - path: /docs/reference/js/firestore_.persistentsingletabmanagersettings.md - title: Query path: /docs/reference/js/firestore_.query.md - title: QueryCompositeFilterConstraint @@ -482,6 +464,8 @@ toc: path: /docs/reference/js/vertexai.aioptions.md - title: ArraySchema path: /docs/reference/js/vertexai.arrayschema.md + - title: Backend + path: /docs/reference/js/vertexai.backend.md - title: BaseParams path: /docs/reference/js/vertexai.baseparams.md - title: BooleanSchema @@ -540,10 +524,14 @@ toc: path: /docs/reference/js/vertexai.generativecontentblob.md - title: GenerativeModel path: /docs/reference/js/vertexai.generativemodel.md + - title: GoogleAIBackend + path: /docs/reference/js/vertexai.googleaibackend.md - title: GroundingAttribution path: /docs/reference/js/vertexai.groundingattribution.md - title: GroundingMetadata path: /docs/reference/js/vertexai.groundingmetadata.md + - title: HybridParams + path: /docs/reference/js/vertexai.hybridparams.md - title: ImagenGCSImage path: /docs/reference/js/vertexai.imagengcsimage.md - title: ImagenGenerationConfig @@ -606,6 +594,8 @@ toc: path: /docs/reference/js/vertexai.toolconfig.md - title: UsageMetadata path: /docs/reference/js/vertexai.usagemetadata.md + - title: VertexAIBackend + path: /docs/reference/js/vertexai.vertexaibackend.md - title: VertexAIOptions path: /docs/reference/js/vertexai.vertexaioptions.md - 
title: VideoMetadata diff --git a/docs-devsite/firestore_.md b/docs-devsite/firestore_.md index 91d21e32708..7dfde135ab0 100644 --- a/docs-devsite/firestore_.md +++ b/docs-devsite/firestore_.md @@ -174,16 +174,7 @@ https://github.com/firebase/firebase-js-sdk | [IndexConfiguration](./firestore_.indexconfiguration.md#indexconfiguration_interface) | (Public Preview) A list of Firestore indexes to speed up local query execution.See [JSON Format](https://firebase.google.com/docs/reference/firestore/indexes/#json_format) for a description of the format of the index definition. | | [IndexField](./firestore_.indexfield.md#indexfield_interface) | (Public Preview) A single field element in an index configuration. | | [LoadBundleTaskProgress](./firestore_.loadbundletaskprogress.md#loadbundletaskprogress_interface) | Represents a progress update or a final state from loading bundles. | -| [MemoryCacheSettings](./firestore_.memorycachesettings.md#memorycachesettings_interface) | An settings object to configure an MemoryLocalCache instance. | -| [MemoryEagerGarbageCollector](./firestore_.memoryeagergarbagecollector.md#memoryeagergarbagecollector_interface) | A garbage collector deletes documents whenever they are not part of any active queries, and have no local mutations attached to them.This collector tries to ensure lowest memory footprints from the SDK, at the risk of documents not being cached for offline queries or for direct queries to the cache.Use factory function to create an instance of this collector. | -| [MemoryLocalCache](./firestore_.memorylocalcache.md#memorylocalcache_interface) | Provides an in-memory cache to the SDK. This is the default cache unless explicitly configured otherwise.To use, create an instance using the factory function , then set the instance to FirestoreSettings.cache and call initializeFirestore using the settings object. 
| -| [MemoryLruGarbageCollector](./firestore_.memorylrugarbagecollector.md#memorylrugarbagecollector_interface) | A garbage collector deletes Least-Recently-Used documents in multiple batches.This collector is configured with a target size, and will only perform collection when the cached documents exceed the target size. It avoids querying backend repeated for the same query or document, at the risk of having a larger memory footprint.Use factory function to create a instance of this collector. | | [PersistenceSettings](./firestore_.persistencesettings.md#persistencesettings_interface) | Settings that can be passed to enableIndexedDbPersistence() to configure Firestore persistence.Persistence cannot be used in a Node.js environment. | -| [PersistentCacheSettings](./firestore_.persistentcachesettings.md#persistentcachesettings_interface) | An settings object to configure an PersistentLocalCache instance.Persistent cache cannot be used in a Node.js environment. | -| [PersistentLocalCache](./firestore_.persistentlocalcache.md#persistentlocalcache_interface) | Provides a persistent cache backed by IndexedDb to the SDK.To use, create an instance using the factory function , then set the instance to FirestoreSettings.cache and call initializeFirestore using the settings object. | -| [PersistentMultipleTabManager](./firestore_.persistentmultipletabmanager.md#persistentmultipletabmanager_interface) | A tab manager supporting multiple tabs. SDK will synchronize queries and mutations done across all tabs using the SDK. | -| [PersistentSingleTabManager](./firestore_.persistentsingletabmanager.md#persistentsingletabmanager_interface) | A tab manager supporting only one tab, no synchronization will be performed across tabs. | -| [PersistentSingleTabManagerSettings](./firestore_.persistentsingletabmanagersettings.md#persistentsingletabmanagersettings_interface) | Type to configure an PersistentSingleTabManager instance. 
| | [SnapshotListenOptions](./firestore_.snapshotlistenoptions.md#snapshotlistenoptions_interface) | An options object that can be passed to [onSnapshot()](./firestore_.md#onsnapshot_0312fd7) and [QuerySnapshot.docChanges()](./firestore_.querysnapshot.md#querysnapshotdocchanges) to control which types of changes to include in the result set. | | [SnapshotOptions](./firestore_.snapshotoptions.md#snapshotoptions_interface) | Options that configure how data is retrieved from a DocumentSnapshot (for example the desired behavior for server timestamps that have not yet been set to their final value). | | [TransactionOptions](./firestore_.transactionoptions.md#transactionoptions_interface) | Options to customize transaction behavior. | @@ -208,10 +199,19 @@ https://github.com/firebase/firebase-js-sdk | [FirestoreErrorCode](./firestore_.md#firestoreerrorcode) | The set of Firestore status codes. The codes are the same at the ones exposed by gRPC here: https://github.com/grpc/grpc/blob/master/doc/statuscodes.mdPossible values: - 'cancelled': The operation was cancelled (typically by the caller). - 'unknown': Unknown error or an error from a different error domain. - 'invalid-argument': Client specified an invalid argument. Note that this differs from 'failed-precondition'. 'invalid-argument' indicates arguments that are problematic regardless of the state of the system (e.g. an invalid field name). - 'deadline-exceeded': Deadline expired before operation could complete. For operations that change the state of the system, this error may be returned even if the operation has completed successfully. For example, a successful response from a server could have been delayed long enough for the deadline to expire. - 'not-found': Some requested document was not found. - 'already-exists': Some document that we attempted to create already exists. - 'permission-denied': The caller does not have permission to execute the specified operation. 
- 'resource-exhausted': Some resource has been exhausted, perhaps a per-user quota, or perhaps the entire file system is out of space. - 'failed-precondition': Operation was rejected because the system is not in a state required for the operation's execution. - 'aborted': The operation was aborted, typically due to a concurrency issue like transaction aborts, etc. - 'out-of-range': Operation was attempted past the valid range. - 'unimplemented': Operation is not implemented or not supported/enabled. - 'internal': Internal errors. Means some invariants expected by underlying system has been broken. If you see one of these errors, something is very broken. - 'unavailable': The service is currently unavailable. This is most likely a transient condition and may be corrected by retrying with a backoff. - 'data-loss': Unrecoverable data loss or corruption. - 'unauthenticated': The request does not have valid authentication credentials for the operation. | | [FirestoreLocalCache](./firestore_.md#firestorelocalcache) | Union type from all supported SDK cache layer. | | [ListenSource](./firestore_.md#listensource) | Describe the source a query listens to.Set to default to listen to both cache and server changes. Set to cache to listen to changes in cache only. | +| [MemoryCacheSettings](./firestore_.md#memorycachesettings) | An settings object to configure an MemoryLocalCache instance. | +| [MemoryEagerGarbageCollector](./firestore_.md#memoryeagergarbagecollector) | A garbage collector deletes documents whenever they are not part of any active queries, and have no local mutations attached to them.This collector tries to ensure lowest memory footprints from the SDK, at the risk of documents not being cached for offline queries or for direct queries to the cache.Use factory function to create an instance of this collector. | | [MemoryGarbageCollector](./firestore_.md#memorygarbagecollector) | Union type from all support garbage collectors for memory local cache. 
| +| [MemoryLocalCache](./firestore_.md#memorylocalcache) | Provides an in-memory cache to the SDK. This is the default cache unless explicitly configured otherwise.To use, create an instance using the factory function , then set the instance to FirestoreSettings.cache and call initializeFirestore using the settings object. | +| [MemoryLruGarbageCollector](./firestore_.md#memorylrugarbagecollector) | A garbage collector deletes Least-Recently-Used documents in multiple batches.This collector is configured with a target size, and will only perform collection when the cached documents exceed the target size. It avoids querying backend repeated for the same query or document, at the risk of having a larger memory footprint.Use factory function to create a instance of this collector. | | [NestedUpdateFields](./firestore_.md#nestedupdatefields) | For each field (e.g. 'bar'), find all nested keys (e.g. {'bar.baz': T1, 'bar.qux': T2}). Intersect them together to make a single map containing all possible keys that are all marked as optional | | [OrderByDirection](./firestore_.md#orderbydirection) | The direction of a [orderBy()](./firestore_.md#orderby_006d61f) clause is specified as 'desc' or 'asc' (descending or ascending). | | [PartialWithFieldValue](./firestore_.md#partialwithfieldvalue) | Similar to TypeScript's Partial<T>, but allows nested fields to be omitted and FieldValues to be passed in as property values. | +| [PersistentCacheSettings](./firestore_.md#persistentcachesettings) | An settings object to configure an PersistentLocalCache instance.Persistent cache cannot be used in a Node.js environment. | +| [PersistentLocalCache](./firestore_.md#persistentlocalcache) | Provides a persistent cache backed by IndexedDb to the SDK.To use, create an instance using the factory function , then set the instance to FirestoreSettings.cache and call initializeFirestore using the settings object. 
| +| [PersistentMultipleTabManager](./firestore_.md#persistentmultipletabmanager) | A tab manager supporting multiple tabs. SDK will synchronize queries and mutations done across all tabs using the SDK. | +| [PersistentSingleTabManager](./firestore_.md#persistentsingletabmanager) | A tab manager supporting only one tab, no synchronization will be performed across tabs. | +| [PersistentSingleTabManagerSettings](./firestore_.md#persistentsingletabmanagersettings) | Type to configure an PersistentSingleTabManager instance. | | [PersistentTabManager](./firestore_.md#persistenttabmanager) | A union of all available tab managers. | | [Primitive](./firestore_.md#primitive) | Primitive types. | | [QueryConstraintType](./firestore_.md#queryconstrainttype) | Describes the different query constraints available in this SDK. | @@ -924,7 +924,7 @@ export declare function memoryEagerGarbageCollector(): MemoryEagerGarbageCollect ``` Returns: -[MemoryEagerGarbageCollector](./firestore_.memoryeagergarbagecollector.md#memoryeagergarbagecollector_interface) +[MemoryEagerGarbageCollector](./firestore_.md#memoryeagergarbagecollector) ### persistentMultipleTabManager() {:#persistentmultipletabmanager} @@ -937,7 +937,7 @@ export declare function persistentMultipleTabManager(): PersistentMultipleTabMan ``` Returns: -[PersistentMultipleTabManager](./firestore_.persistentmultipletabmanager.md#persistentmultipletabmanager_interface) +[PersistentMultipleTabManager](./firestore_.md#persistentmultipletabmanager) ### serverTimestamp() {:#servertimestamp} @@ -2293,11 +2293,11 @@ export declare function memoryLocalCache(settings?: MemoryCacheSettings): Memory | Parameter | Type | Description | | --- | --- | --- | -| settings | [MemoryCacheSettings](./firestore_.memorycachesettings.md#memorycachesettings_interface) | | +| settings | [MemoryCacheSettings](./firestore_.md#memorycachesettings) | | Returns: -[MemoryLocalCache](./firestore_.memorylocalcache.md#memorylocalcache_interface) 
+[MemoryLocalCache](./firestore_.md#memorylocalcache) ### memoryLruGarbageCollector(settings) {:#memorylrugarbagecollector_5ee014c} @@ -2321,7 +2321,7 @@ export declare function memoryLruGarbageCollector(settings?: { Returns: -[MemoryLruGarbageCollector](./firestore_.memorylrugarbagecollector.md#memorylrugarbagecollector_interface) +[MemoryLruGarbageCollector](./firestore_.md#memorylrugarbagecollector) ### persistentLocalCache(settings) {:#persistentlocalcache_d312f71} @@ -2339,11 +2339,11 @@ export declare function persistentLocalCache(settings?: PersistentCacheSettings) | Parameter | Type | Description | | --- | --- | --- | -| settings | [PersistentCacheSettings](./firestore_.persistentcachesettings.md#persistentcachesettings_interface) | | +| settings | [PersistentCacheSettings](./firestore_.md#persistentcachesettings) | | Returns: -[PersistentLocalCache](./firestore_.persistentlocalcache.md#persistentlocalcache_interface) +[PersistentLocalCache](./firestore_.md#persistentlocalcache) ### persistentSingleTabManager(settings) {:#persistentsingletabmanager_c99c68d} @@ -2359,11 +2359,11 @@ export declare function persistentSingleTabManager(settings: PersistentSingleTab | Parameter | Type | Description | | --- | --- | --- | -| settings | [PersistentSingleTabManagerSettings](./firestore_.persistentsingletabmanagersettings.md#persistentsingletabmanagersettings_interface) \| undefined | Configures the created tab manager. | +| settings | [PersistentSingleTabManagerSettings](./firestore_.md#persistentsingletabmanagersettings) \| undefined | Configures the created tab manager. | Returns: -[PersistentSingleTabManager](./firestore_.persistentsingletabmanager.md#persistentsingletabmanager_interface) +[PersistentSingleTabManager](./firestore_.md#persistentsingletabmanager) ## function(snapshot, ...) @@ -2591,6 +2591,34 @@ Set to `default` to listen to both cache and server changes. 
Set to `cache` to l export declare type ListenSource = 'default' | 'cache'; ``` +## MemoryCacheSettings + +An settings object to configure an `MemoryLocalCache` instance. + +Signature: + +```typescript +export declare type MemoryCacheSettings = { + garbageCollector?: MemoryGarbageCollector; +}; +``` + +## MemoryEagerGarbageCollector + +A garbage collector deletes documents whenever they are not part of any active queries, and have no local mutations attached to them. + +This collector tries to ensure lowest memory footprints from the SDK, at the risk of documents not being cached for offline queries or for direct queries to the cache. + +Use factory function to create an instance of this collector. + +Signature: + +```typescript +export declare type MemoryEagerGarbageCollector = { + kind: 'memoryEager'; +}; +``` + ## MemoryGarbageCollector Union type from all support garbage collectors for memory local cache. @@ -2601,6 +2629,36 @@ Union type from all support garbage collectors for memory local cache. export declare type MemoryGarbageCollector = MemoryEagerGarbageCollector | MemoryLruGarbageCollector; ``` +## MemoryLocalCache + +Provides an in-memory cache to the SDK. This is the default cache unless explicitly configured otherwise. + +To use, create an instance using the factory function , then set the instance to `FirestoreSettings.cache` and call `initializeFirestore` using the settings object. + +Signature: + +```typescript +export declare type MemoryLocalCache = { + kind: 'memory'; +}; +``` + +## MemoryLruGarbageCollector + +A garbage collector deletes Least-Recently-Used documents in multiple batches. + +This collector is configured with a target size, and will only perform collection when the cached documents exceed the target size. It avoids querying backend repeated for the same query or document, at the risk of having a larger memory footprint. + +Use factory function to create a instance of this collector. 
+ +Signature: + +```typescript +export declare type MemoryLruGarbageCollector = { + kind: 'memoryLru'; +}; +``` + ## NestedUpdateFields For each field (e.g. 'bar'), find all nested keys (e.g. {'bar.baz': T1, 'bar.qux': T2}). Intersect them together to make a single map containing all possible keys that are all marked as optional @@ -2635,6 +2693,71 @@ export declare type PartialWithFieldValue = Partial | (T extends Primitive } : never); ``` +## PersistentCacheSettings + +An settings object to configure an `PersistentLocalCache` instance. + +Persistent cache cannot be used in a Node.js environment. + +Signature: + +```typescript +export declare type PersistentCacheSettings = { + cacheSizeBytes?: number; + tabManager?: PersistentTabManager; +}; +``` + +## PersistentLocalCache + +Provides a persistent cache backed by IndexedDb to the SDK. + +To use, create an instance using the factory function , then set the instance to `FirestoreSettings.cache` and call `initializeFirestore` using the settings object. + +Signature: + +```typescript +export declare type PersistentLocalCache = { + kind: 'persistent'; +}; +``` + +## PersistentMultipleTabManager + +A tab manager supporting multiple tabs. SDK will synchronize queries and mutations done across all tabs using the SDK. + +Signature: + +```typescript +export declare type PersistentMultipleTabManager = { + kind: 'PersistentMultipleTab'; +}; +``` + +## PersistentSingleTabManager + +A tab manager supporting only one tab, no synchronization will be performed across tabs. + +Signature: + +```typescript +export declare type PersistentSingleTabManager = { + kind: 'persistentSingleTab'; +}; +``` + +## PersistentSingleTabManagerSettings + +Type to configure an `PersistentSingleTabManager` instance. + +Signature: + +```typescript +export declare type PersistentSingleTabManagerSettings = { + forceOwnership?: boolean; +}; +``` + ## PersistentTabManager A union of all available tab managers. 
diff --git a/docs-devsite/firestore_.memorycachesettings.md b/docs-devsite/firestore_.memorycachesettings.md deleted file mode 100644 index 69f46acdf7c..00000000000 --- a/docs-devsite/firestore_.memorycachesettings.md +++ /dev/null @@ -1,35 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# MemoryCacheSettings interface -An settings object to configure an `MemoryLocalCache` instance. - -Signature: - -```typescript -export declare interface MemoryCacheSettings -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [garbageCollector](./firestore_.memorycachesettings.md#memorycachesettingsgarbagecollector) | [MemoryGarbageCollector](./firestore_.md#memorygarbagecollector) | The garbage collector to use, for the memory cache layer. A MemoryEagerGarbageCollector is used when this is undefined. | - -## MemoryCacheSettings.garbageCollector - -The garbage collector to use, for the memory cache layer. A `MemoryEagerGarbageCollector` is used when this is undefined. - -Signature: - -```typescript -garbageCollector?: MemoryGarbageCollector; -``` diff --git a/docs-devsite/firestore_.memoryeagergarbagecollector.md b/docs-devsite/firestore_.memoryeagergarbagecollector.md deleted file mode 100644 index 01e7341611a..00000000000 --- a/docs-devsite/firestore_.memoryeagergarbagecollector.md +++ /dev/null @@ -1,37 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. 
Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# MemoryEagerGarbageCollector interface -A garbage collector deletes documents whenever they are not part of any active queries, and have no local mutations attached to them. - -This collector tries to ensure lowest memory footprints from the SDK, at the risk of documents not being cached for offline queries or for direct queries to the cache. - -Use factory function to create an instance of this collector. - -Signature: - -```typescript -export declare interface MemoryEagerGarbageCollector -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [kind](./firestore_.memoryeagergarbagecollector.md#memoryeagergarbagecollectorkind) | 'memoryEager' | | - -## MemoryEagerGarbageCollector.kind - -Signature: - -```typescript -kind: 'memoryEager'; -``` diff --git a/docs-devsite/firestore_.memorylocalcache.md b/docs-devsite/firestore_.memorylocalcache.md deleted file mode 100644 index 92b7d3a2c72..00000000000 --- a/docs-devsite/firestore_.memorylocalcache.md +++ /dev/null @@ -1,35 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# MemoryLocalCache interface -Provides an in-memory cache to the SDK. This is the default cache unless explicitly configured otherwise. - -To use, create an instance using the factory function , then set the instance to `FirestoreSettings.cache` and call `initializeFirestore` using the settings object. 
- -Signature: - -```typescript -export declare interface MemoryLocalCache -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [kind](./firestore_.memorylocalcache.md#memorylocalcachekind) | 'memory' | | - -## MemoryLocalCache.kind - -Signature: - -```typescript -kind: 'memory'; -``` diff --git a/docs-devsite/firestore_.memorylrugarbagecollector.md b/docs-devsite/firestore_.memorylrugarbagecollector.md deleted file mode 100644 index 6e15513934a..00000000000 --- a/docs-devsite/firestore_.memorylrugarbagecollector.md +++ /dev/null @@ -1,37 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# MemoryLruGarbageCollector interface -A garbage collector deletes Least-Recently-Used documents in multiple batches. - -This collector is configured with a target size, and will only perform collection when the cached documents exceed the target size. It avoids querying backend repeated for the same query or document, at the risk of having a larger memory footprint. - -Use factory function to create a instance of this collector. 
- -Signature: - -```typescript -export declare interface MemoryLruGarbageCollector -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [kind](./firestore_.memorylrugarbagecollector.md#memorylrugarbagecollectorkind) | 'memoryLru' | | - -## MemoryLruGarbageCollector.kind - -Signature: - -```typescript -kind: 'memoryLru'; -``` diff --git a/docs-devsite/firestore_.persistentcachesettings.md b/docs-devsite/firestore_.persistentcachesettings.md deleted file mode 100644 index a32d05e4e8e..00000000000 --- a/docs-devsite/firestore_.persistentcachesettings.md +++ /dev/null @@ -1,50 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# PersistentCacheSettings interface -An settings object to configure an `PersistentLocalCache` instance. - -Persistent cache cannot be used in a Node.js environment. - -Signature: - -```typescript -export declare interface PersistentCacheSettings -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [cacheSizeBytes](./firestore_.persistentcachesettings.md#persistentcachesettingscachesizebytes) | number | An approximate cache size threshold for the on-disk data. If the cache grows beyond this size, Firestore will start removing data that hasn't been recently used. The SDK does not guarantee that the cache will stay below that size, only that if the cache exceeds the given size, cleanup will be attempted.The default value is 40 MB. The threshold must be set to at least 1 MB, and can be set to CACHE_SIZE_UNLIMITED to disable garbage collection. 
| -| [tabManager](./firestore_.persistentcachesettings.md#persistentcachesettingstabmanager) | [PersistentTabManager](./firestore_.md#persistenttabmanager) | Specifies how multiple tabs/windows will be managed by the SDK. | - -## PersistentCacheSettings.cacheSizeBytes - -An approximate cache size threshold for the on-disk data. If the cache grows beyond this size, Firestore will start removing data that hasn't been recently used. The SDK does not guarantee that the cache will stay below that size, only that if the cache exceeds the given size, cleanup will be attempted. - -The default value is 40 MB. The threshold must be set to at least 1 MB, and can be set to `CACHE_SIZE_UNLIMITED` to disable garbage collection. - -Signature: - -```typescript -cacheSizeBytes?: number; -``` - -## PersistentCacheSettings.tabManager - -Specifies how multiple tabs/windows will be managed by the SDK. - -Signature: - -```typescript -tabManager?: PersistentTabManager; -``` diff --git a/docs-devsite/firestore_.persistentlocalcache.md b/docs-devsite/firestore_.persistentlocalcache.md deleted file mode 100644 index 48d876d15bd..00000000000 --- a/docs-devsite/firestore_.persistentlocalcache.md +++ /dev/null @@ -1,35 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# PersistentLocalCache interface -Provides a persistent cache backed by IndexedDb to the SDK. - -To use, create an instance using the factory function , then set the instance to `FirestoreSettings.cache` and call `initializeFirestore` using the settings object. 
- -Signature: - -```typescript -export declare interface PersistentLocalCache -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [kind](./firestore_.persistentlocalcache.md#persistentlocalcachekind) | 'persistent' | | - -## PersistentLocalCache.kind - -Signature: - -```typescript -kind: 'persistent'; -``` diff --git a/docs-devsite/firestore_.persistentmultipletabmanager.md b/docs-devsite/firestore_.persistentmultipletabmanager.md deleted file mode 100644 index 20d9cc24452..00000000000 --- a/docs-devsite/firestore_.persistentmultipletabmanager.md +++ /dev/null @@ -1,33 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# PersistentMultipleTabManager interface -A tab manager supporting multiple tabs. SDK will synchronize queries and mutations done across all tabs using the SDK. - -Signature: - -```typescript -export declare interface PersistentMultipleTabManager -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [kind](./firestore_.persistentmultipletabmanager.md#persistentmultipletabmanagerkind) | 'PersistentMultipleTab' | | - -## PersistentMultipleTabManager.kind - -Signature: - -```typescript -kind: 'PersistentMultipleTab'; -``` diff --git a/docs-devsite/firestore_.persistentsingletabmanager.md b/docs-devsite/firestore_.persistentsingletabmanager.md deleted file mode 100644 index 22601cf31fb..00000000000 --- a/docs-devsite/firestore_.persistentsingletabmanager.md +++ /dev/null @@ -1,33 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. 
Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# PersistentSingleTabManager interface -A tab manager supporting only one tab, no synchronization will be performed across tabs. - -Signature: - -```typescript -export declare interface PersistentSingleTabManager -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [kind](./firestore_.persistentsingletabmanager.md#persistentsingletabmanagerkind) | 'persistentSingleTab' | | - -## PersistentSingleTabManager.kind - -Signature: - -```typescript -kind: 'persistentSingleTab'; -``` diff --git a/docs-devsite/firestore_.persistentsingletabmanagersettings.md b/docs-devsite/firestore_.persistentsingletabmanagersettings.md deleted file mode 100644 index afe2842d4c4..00000000000 --- a/docs-devsite/firestore_.persistentsingletabmanagersettings.md +++ /dev/null @@ -1,35 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# PersistentSingleTabManagerSettings interface -Type to configure an `PersistentSingleTabManager` instance. - -Signature: - -```typescript -export declare interface PersistentSingleTabManagerSettings -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [forceOwnership](./firestore_.persistentsingletabmanagersettings.md#persistentsingletabmanagersettingsforceownership) | boolean | Whether to force-enable persistent (IndexedDB) cache for the client. This cannot be used with multi-tab synchronization and is primarily intended for use with Web Workers. Setting this to true will enable IndexedDB, but cause other tabs using IndexedDB cache to fail. 
| - -## PersistentSingleTabManagerSettings.forceOwnership - -Whether to force-enable persistent (IndexedDB) cache for the client. This cannot be used with multi-tab synchronization and is primarily intended for use with Web Workers. Setting this to `true` will enable IndexedDB, but cause other tabs using IndexedDB cache to fail. - -Signature: - -```typescript -forceOwnership?: boolean; -``` diff --git a/docs-devsite/vertexai.ai.md b/docs-devsite/vertexai.ai.md index 2901c2ccd01..3be9ea5d488 100644 --- a/docs-devsite/vertexai.ai.md +++ b/docs-devsite/vertexai.ai.md @@ -25,7 +25,7 @@ export interface AI | Property | Type | Description | | --- | --- | --- | | [app](./vertexai.ai.md#aiapp) | [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) | The [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) this [AI](./vertexai.ai.md#ai_interface) instance is associated with. | -| [backend](./vertexai.ai.md#aibackend) | [Backend](./vertexai.md#backend) | A [Backend](./vertexai.md#backend) instance that specifies the backend configuration. | +| [backend](./vertexai.ai.md#aibackend) | [Backend](./vertexai.backend.md#backend_class) | A [Backend](./vertexai.backend.md#backend_class) instance that specifies the backend configuration. | | [location](./vertexai.ai.md#ailocation) | string | The location configured for this AI service instance, relevant for Vertex AI backends. | ## AI.app @@ -40,7 +40,7 @@ app: FirebaseApp; ## AI.backend -A [Backend](./vertexai.md#backend) instance that specifies the backend configuration. +A [Backend](./vertexai.backend.md#backend_class) instance that specifies the backend configuration. 
Signature: diff --git a/docs-devsite/vertexai.aioptions.md b/docs-devsite/vertexai.aioptions.md index 4d5e7117740..393a83b3f9c 100644 --- a/docs-devsite/vertexai.aioptions.md +++ b/docs-devsite/vertexai.aioptions.md @@ -22,11 +22,11 @@ export interface AIOptions | Property | Type | Description | | --- | --- | --- | -| [backend](./vertexai.aioptions.md#aioptionsbackend) | [Backend](./vertexai.md#backend) | The backend configuration to use for the AI service instance. Use [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) to create this configuration. | +| [backend](./vertexai.aioptions.md#aioptionsbackend) | [Backend](./vertexai.backend.md#backend_class) | The backend configuration to use for the AI service instance. | ## AIOptions.backend -The backend configuration to use for the AI service instance. Use [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) to create this configuration. +The backend configuration to use for the AI service instance. Signature: diff --git a/docs-devsite/vertexai.backend.md b/docs-devsite/vertexai.backend.md new file mode 100644 index 00000000000..b55224f5205 --- /dev/null +++ b/docs-devsite/vertexai.backend.md @@ -0,0 +1,57 @@ +Project: /docs/reference/js/_project.yaml +Book: /docs/reference/_book.yaml +page_type: reference + +{% comment %} +DO NOT EDIT THIS FILE! +This is generated by the JS SDK team, and any local changes will be +overwritten. Changes should be made in the source code at +https://github.com/firebase/firebase-js-sdk +{% endcomment %} + +# Backend class +Abstract base class representing the configuration for an AI service backend. This class should not be instantiated directly. Use its subclasses [GoogleAIBackend](./vertexai.googleaibackend.md#googleaibackend_class) or [VertexAIBackend](./vertexai.vertexaibackend.md#vertexaibackend_class). 
+ +Signature: + +```typescript +export declare abstract class Backend +``` + +## Constructors + +| Constructor | Modifiers | Description | +| --- | --- | --- | +| [(constructor)(type)](./vertexai.backend.md#backendconstructor) | | Protected constructor for use by subclasses. | + +## Properties + +| Property | Modifiers | Type | Description | +| --- | --- | --- | --- | +| [backendType](./vertexai.backend.md#backendbackendtype) | | [BackendType](./vertexai.md#backendtype) | Specifies the backend type (either 'GOOGLE\_AI' or 'VERTEX\_AI'). | + +## Backend.(constructor) + +Protected constructor for use by subclasses. + +Signature: + +```typescript +protected constructor(type: BackendType); +``` + +#### Parameters + +| Parameter | Type | Description | +| --- | --- | --- | +| type | [BackendType](./vertexai.md#backendtype) | The specific backend type constant (e.g., BackendType.GOOGLE\_AI). | + +## Backend.backendType + +Specifies the backend type (either 'GOOGLE\_AI' or 'VERTEX\_AI'). + +Signature: + +```typescript +readonly backendType: BackendType; +``` diff --git a/docs-devsite/vertexai.chatsession.md b/docs-devsite/vertexai.chatsession.md index ed359f7e08c..c4a06206bfd 100644 --- a/docs-devsite/vertexai.chatsession.md +++ b/docs-devsite/vertexai.chatsession.md @@ -22,7 +22,7 @@ export declare class ChatSession | Constructor | Modifiers | Description | | --- | --- | --- | -| [(constructor)(apiSettings, model, params, requestOptions)](./vertexai.chatsession.md#chatsessionconstructor) | | Constructs a new instance of the ChatSession class | +| [(constructor)(apiSettings, model, chromeAdapter, params, requestOptions)](./vertexai.chatsession.md#chatsessionconstructor) | | Constructs a new instance of the ChatSession class | ## Properties @@ -47,7 +47,7 @@ Constructs a new instance of the `ChatSession` class Signature: ```typescript -constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined); 
+constructor(apiSettings: ApiSettings, model: string, chromeAdapter: ChromeAdapter, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined); ``` #### Parameters @@ -56,6 +56,7 @@ constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams | | --- | --- | --- | | apiSettings | ApiSettings | | | model | string | | +| chromeAdapter | ChromeAdapter | | | params | [StartChatParams](./vertexai.startchatparams.md#startchatparams_interface) \| undefined | | | requestOptions | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) \| undefined | | diff --git a/docs-devsite/vertexai.citation.md b/docs-devsite/vertexai.citation.md index b5f5a19f231..f2e4e2581cb 100644 --- a/docs-devsite/vertexai.citation.md +++ b/docs-devsite/vertexai.citation.md @@ -24,9 +24,9 @@ export interface Citation | --- | --- | --- | | [endIndex](./vertexai.citation.md#citationendindex) | number | | | [license](./vertexai.citation.md#citationlicense) | string | | -| [publicationDate](./vertexai.citation.md#citationpublicationdate) | Date | | +| [publicationDate](./vertexai.citation.md#citationpublicationdate) | Date | This field is not supported in Google AI. | | [startIndex](./vertexai.citation.md#citationstartindex) | number | | -| [title](./vertexai.citation.md#citationtitle) | string | | +| [title](./vertexai.citation.md#citationtitle) | string | This field is not supported in Google AI. | | [uri](./vertexai.citation.md#citationuri) | string | | ## Citation.endIndex @@ -47,6 +47,8 @@ license?: string; ## Citation.publicationDate +This field is not supported in Google AI. + Signature: ```typescript @@ -63,6 +65,8 @@ startIndex?: number; ## Citation.title +This field is not supported in Google AI. 
+ Signature: ```typescript diff --git a/docs-devsite/vertexai.counttokensresponse.md b/docs-devsite/vertexai.counttokensresponse.md index d67cc99fab2..dab373586b4 100644 --- a/docs-devsite/vertexai.counttokensresponse.md +++ b/docs-devsite/vertexai.counttokensresponse.md @@ -23,7 +23,7 @@ export interface CountTokensResponse | Property | Type | Description | | --- | --- | --- | | [promptTokensDetails](./vertexai.counttokensresponse.md#counttokensresponseprompttokensdetails) | [ModalityTokenCount](./vertexai.modalitytokencount.md#modalitytokencount_interface)\[\] | The breakdown, by modality, of how many tokens are consumed by the prompt. | -| [totalBillableCharacters](./vertexai.counttokensresponse.md#counttokensresponsetotalbillablecharacters) | number | The total number of billable characters counted across all instances from the request. | +| [totalBillableCharacters](./vertexai.counttokensresponse.md#counttokensresponsetotalbillablecharacters) | number | The total number of billable characters counted across all instances from the request.This field is not supported in Google AI, so it will default to 0 when using Google AI. | | [totalTokens](./vertexai.counttokensresponse.md#counttokensresponsetotaltokens) | number | The total number of tokens counted across all instances from the request. | ## CountTokensResponse.promptTokensDetails @@ -40,6 +40,8 @@ promptTokensDetails?: ModalityTokenCount[]; The total number of billable characters counted across all instances from the request. +This field is not supported in Google AI, so it will default to 0 when using Google AI. 
+ Signature: ```typescript diff --git a/docs-devsite/vertexai.generativemodel.md b/docs-devsite/vertexai.generativemodel.md index ba82b65aceb..4012cf53665 100644 --- a/docs-devsite/vertexai.generativemodel.md +++ b/docs-devsite/vertexai.generativemodel.md @@ -23,12 +23,13 @@ export declare class GenerativeModel extends AIModel | Constructor | Modifiers | Description | | --- | --- | --- | -| [(constructor)(ai, modelParams, requestOptions)](./vertexai.generativemodel.md#generativemodelconstructor) | | Constructs a new instance of the GenerativeModel class | +| [(constructor)(ai, modelParams, chromeAdapter, requestOptions)](./vertexai.generativemodel.md#generativemodelconstructor) | | Constructs a new instance of the GenerativeModel class | ## Properties | Property | Modifiers | Type | Description | | --- | --- | --- | --- | +| [DEFAULT\_HYBRID\_IN\_CLOUD\_MODEL](./vertexai.generativemodel.md#generativemodeldefault_hybrid_in_cloud_model) | static | string | Defines the name of the default in-cloud model to use for hybrid inference. 
| | [generationConfig](./vertexai.generativemodel.md#generativemodelgenerationconfig) | | [GenerationConfig](./vertexai.generationconfig.md#generationconfig_interface) | | | [requestOptions](./vertexai.generativemodel.md#generativemodelrequestoptions) | | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | | | [safetySettings](./vertexai.generativemodel.md#generativemodelsafetysettings) | | [SafetySetting](./vertexai.safetysetting.md#safetysetting_interface)\[\] | | @@ -52,7 +53,7 @@ Constructs a new instance of the `GenerativeModel` class Signature: ```typescript -constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions); +constructor(ai: AI, modelParams: ModelParams, chromeAdapter: ChromeAdapter, requestOptions?: RequestOptions); ``` #### Parameters @@ -61,8 +62,19 @@ constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions); | --- | --- | --- | | ai | [AI](./vertexai.ai.md#ai_interface) | | | modelParams | [ModelParams](./vertexai.modelparams.md#modelparams_interface) | | +| chromeAdapter | ChromeAdapter | | | requestOptions | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | | +## GenerativeModel.DEFAULT\_HYBRID\_IN\_CLOUD\_MODEL + +Defines the name of the default in-cloud model to use for hybrid inference. + +Signature: + +```typescript +static DEFAULT_HYBRID_IN_CLOUD_MODEL: string; +``` + ## GenerativeModel.generationConfig Signature: diff --git a/docs-devsite/vertexai.googleaibackend.md b/docs-devsite/vertexai.googleaibackend.md new file mode 100644 index 00000000000..99e9bd18a14 --- /dev/null +++ b/docs-devsite/vertexai.googleaibackend.md @@ -0,0 +1,36 @@ +Project: /docs/reference/js/_project.yaml +Book: /docs/reference/_book.yaml +page_type: reference + +{% comment %} +DO NOT EDIT THIS FILE! +This is generated by the JS SDK team, and any local changes will be +overwritten. 
Changes should be made in the source code at +https://github.com/firebase/firebase-js-sdk +{% endcomment %} + +# GoogleAIBackend class +Represents the configuration class for the Google AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the service with [getAI()](./vertexai.md#getai_a94a413). + +Signature: + +```typescript +export declare class GoogleAIBackend extends Backend +``` +Extends: [Backend](./vertexai.backend.md#backend_class) + +## Constructors + +| Constructor | Modifiers | Description | +| --- | --- | --- | +| [(constructor)()](./vertexai.googleaibackend.md#googleaibackendconstructor) | | Creates a configuration object for the Google AI backend. | + +## GoogleAIBackend.(constructor) + +Creates a configuration object for the Google AI backend. + +Signature: + +```typescript +constructor(); +``` diff --git a/docs-devsite/vertexai.hybridparams.md b/docs-devsite/vertexai.hybridparams.md new file mode 100644 index 00000000000..cf847b40fa7 --- /dev/null +++ b/docs-devsite/vertexai.hybridparams.md @@ -0,0 +1,57 @@ +Project: /docs/reference/js/_project.yaml +Book: /docs/reference/_book.yaml +page_type: reference + +{% comment %} +DO NOT EDIT THIS FILE! +This is generated by the JS SDK team, and any local changes will be +overwritten. Changes should be made in the source code at +https://github.com/firebase/firebase-js-sdk +{% endcomment %} + +# HybridParams interface +Toggles hybrid inference. + +Signature: + +```typescript +export interface HybridParams +``` + +## Properties + +| Property | Type | Description | +| --- | --- | --- | +| [inCloudParams](./vertexai.hybridparams.md#hybridparamsincloudparams) | [ModelParams](./vertexai.modelparams.md#modelparams_interface) | Optional. Specifies advanced params for in-cloud inference. | +| [mode](./vertexai.hybridparams.md#hybridparamsmode) | [InferenceMode](./vertexai.md#inferencemode) | Specifies on-device or in-cloud inference. Defaults to prefer on-device. 
| +| [onDeviceParams](./vertexai.hybridparams.md#hybridparamsondeviceparams) | LanguageModelCreateOptions | Optional. Specifies advanced params for on-device inference. | + +## HybridParams.inCloudParams + +Optional. Specifies advanced params for in-cloud inference. + +Signature: + +```typescript +inCloudParams?: ModelParams; +``` + +## HybridParams.mode + +Specifies on-device or in-cloud inference. Defaults to prefer on-device. + +Signature: + +```typescript +mode: InferenceMode; +``` + +## HybridParams.onDeviceParams + +Optional. Specifies advanced params for on-device inference. + +Signature: + +```typescript +onDeviceParams?: LanguageModelCreateOptions; +``` diff --git a/docs-devsite/vertexai.md b/docs-devsite/vertexai.md index 544deb2987d..46eafd41e80 100644 --- a/docs-devsite/vertexai.md +++ b/docs-devsite/vertexai.md @@ -18,14 +18,10 @@ The Firebase AI Web SDK. | --- | --- | | function(app, ...) | | [getAI(app, options)](./vertexai.md#getai_a94a413) | Returns the default [AI](./vertexai.ai.md#ai_interface) instance that is associated with the provided [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface). If no instance exists, initializes a new instance with the default settings. | -| [getVertexAI(app, options)](./vertexai.md#getvertexai_04094cf) | Returns a [VertexAI](./vertexai.md#vertexai) instance for the given app. | -| function() | -| [googleAIBackend()](./vertexai.md#googleaibackend) | Creates a [Backend](./vertexai.md#backend) instance configured to use Google AI. | +| [getVertexAI(app, options)](./vertexai.md#getvertexai_04094cf) | It is recommended to use the new [getAI()](./vertexai.md#getai_a94a413).Returns a [VertexAI](./vertexai.md#vertexai) instance for the given app. | | function(ai, ...) | -| [getGenerativeModel(ai, modelParams, requestOptions)](./vertexai.md#getgenerativemodel_80bd839) | Returns a [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. 
| +| [getGenerativeModel(ai, modelParams, requestOptions)](./vertexai.md#getgenerativemodel_c63f46a) | Returns a [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. | | [getImagenModel(ai, modelParams, requestOptions)](./vertexai.md#getimagenmodel_e1f6645) | (Public Preview) Returns an [ImagenModel](./vertexai.imagenmodel.md#imagenmodel_class) class with methods for using Imagen.Only Imagen 3 models (named imagen-3.0-*) are supported. | -| function(location, ...) | -| [vertexAIBackend(location)](./vertexai.md#vertexaibackend_d0a4534) | Creates a [Backend](./vertexai.md#backend) instance configured to use Vertex AI. | ## Classes @@ -34,9 +30,11 @@ The Firebase AI Web SDK. | [AIError](./vertexai.aierror.md#aierror_class) | Error class for the Firebase AI SDK. | | [AIModel](./vertexai.aimodel.md#aimodel_class) | Base class for Firebase AI model APIs. | | [ArraySchema](./vertexai.arrayschema.md#arrayschema_class) | Schema class for "array" types. The items param should refer to the type of item that can be a member of the array. | +| [Backend](./vertexai.backend.md#backend_class) | Abstract base class representing the configuration for an AI service backend. This class should not be instantiated directly. Use its subclasses [GoogleAIBackend](./vertexai.googleaibackend.md#googleaibackend_class) or [VertexAIBackend](./vertexai.vertexaibackend.md#vertexaibackend_class). | | [BooleanSchema](./vertexai.booleanschema.md#booleanschema_class) | Schema class for "boolean" types. | | [ChatSession](./vertexai.chatsession.md#chatsession_class) | ChatSession class that enables sending chat messages and stores history of sent and received messages so far. | | [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) | Class for generative model APIs. | +| [GoogleAIBackend](./vertexai.googleaibackend.md#googleaibackend_class) | Represents the configuration class for the Google AI backend. 
Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the service with [getAI()](./vertexai.md#getai_a94a413). | | [ImagenImageFormat](./vertexai.imagenimageformat.md#imagenimageformat_class) | (Public Preview) Defines the image format for images generated by Imagen.Use this class to specify the desired format (JPEG or PNG) and compression quality for images generated by Imagen. This is typically included as part of [ImagenModelParams](./vertexai.imagenmodelparams.md#imagenmodelparams_interface). | | [ImagenModel](./vertexai.imagenmodel.md#imagenmodel_class) | (Public Preview) Class for Imagen model APIs.This class provides methods for generating images using the Imagen model. | | [IntegerSchema](./vertexai.integerschema.md#integerschema_class) | Schema class for "integer" types. | @@ -44,6 +42,7 @@ The Firebase AI Web SDK. | [ObjectSchema](./vertexai.objectschema.md#objectschema_class) | Schema class for "object" types. The properties param must be a map of Schema objects. | | [Schema](./vertexai.schema.md#schema_class) | Parent class encompassing all Schema types, with static methods that allow building specific Schema types. This class can be converted with JSON.stringify() into a JSON string accepted by Vertex AI REST endpoints. (This string conversion is automatically done when calling SDK methods.) | | [StringSchema](./vertexai.stringschema.md#stringschema_class) | Schema class for "string" types. Can be used with or without enum values. | +| [VertexAIBackend](./vertexai.vertexaibackend.md#vertexaibackend_class) | Represents the configuration class for the Vertex AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the service with [getAI()](./vertexai.md#getai_a94a413). | ## Enumerations @@ -98,6 +97,7 @@ The Firebase AI Web SDK. | [GenerativeContentBlob](./vertexai.generativecontentblob.md#generativecontentblob_interface) | Interface for sending an image.
| | [GroundingAttribution](./vertexai.groundingattribution.md#groundingattribution_interface) | | | [GroundingMetadata](./vertexai.groundingmetadata.md#groundingmetadata_interface) | Metadata returned to client when grounding is enabled. | +| [HybridParams](./vertexai.hybridparams.md#hybridparams_interface) | Toggles hybrid inference. | | [ImagenGCSImage](./vertexai.imagengcsimage.md#imagengcsimage_interface) | An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.This feature is not available yet. | | [ImagenGenerationConfig](./vertexai.imagengenerationconfig.md#imagengenerationconfig_interface) | (Public Preview) Configuration options for generating images with Imagen.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images-imagen) for more details. | | [ImagenGenerationResponse](./vertexai.imagengenerationresponse.md#imagengenerationresponse_interface) | (Public Preview) The response from a request to generate images with Imagen. | @@ -106,10 +106,10 @@ The Firebase AI Web SDK. | [ImagenSafetySettings](./vertexai.imagensafetysettings.md#imagensafetysettings_interface) | (Public Preview) Settings for controlling the aggressiveness of filtering out sensitive content.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details. | | [InlineDataPart](./vertexai.inlinedatapart.md#inlinedatapart_interface) | Content part interface if the part represents an image. | | [ModalityTokenCount](./vertexai.modalitytokencount.md#modalitytokencount_interface) | Represents token counting info for a single modality. | -| [ModelParams](./vertexai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_80bd839). | +| [ModelParams](./vertexai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_c63f46a). 
| | [ObjectSchemaInterface](./vertexai.objectschemainterface.md#objectschemainterface_interface) | Interface for [ObjectSchema](./vertexai.objectschema.md#objectschema_class) class. | | [PromptFeedback](./vertexai.promptfeedback.md#promptfeedback_interface) | If the prompt was blocked, this will be populated with blockReason and the relevant safetyRatings. | -| [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_80bd839). | +| [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_c63f46a). | | [RetrievedContextAttribution](./vertexai.retrievedcontextattribution.md#retrievedcontextattribution_interface) | | | [SafetyRating](./vertexai.safetyrating.md#safetyrating_interface) | A safety rating associated with a [GenerateContentCandidate](./vertexai.generatecontentcandidate.md#generatecontentcandidate_interface) | | [SafetySetting](./vertexai.safetysetting.md#safetysetting_interface) | Safety setting that can be sent as part of request parameters. | @@ -130,7 +130,7 @@ The Firebase AI Web SDK. | Variable | Description | | --- | --- | -| [BackendType](./vertexai.md#backendtype) | An enum-like object containing constants that represent the supported backends for the Firebase AI SDK.These values are assigned to the backendType property within the specific backend configuration objects ([GoogleAIBackend](./vertexai.md#googleaibackend) or [VertexAIBackend](./vertexai.md#vertexaibackend)) to identify which service to target. 
| +| [BackendType](./vertexai.md#backendtype) | An enum-like object containing constants that represent the supported backends for the Firebase AI SDK.These values are assigned to the backendType property within the specific backend configuration objects ([GoogleAIBackend](./vertexai.googleaibackend.md#googleaibackend_class) or [VertexAIBackend](./vertexai.vertexaibackend.md#vertexaibackend_class)) to identify which service to target. | | [POSSIBLE\_ROLES](./vertexai.md#possible_roles) | Possible roles. | | [VertexAIError](./vertexai.md#vertexaierror) | Error class for the Firebase AI SDK.For more information, refer to the documentation for the new [AIError](./vertexai.aierror.md#aierror_class). | | [VertexAIModel](./vertexai.md#vertexaimodel) | Base class for Firebase AI model APIs.For more information, refer to the documentation for the new [AIModel](./vertexai.aimodel.md#aimodel_class). | @@ -139,15 +139,13 @@ The Firebase AI Web SDK. | Type Alias | Description | | --- | --- | -| [Backend](./vertexai.md#backend) | Union type representing the backend configuration for the AI service. This can be either a [GoogleAIBackend](./vertexai.md#googleaibackend) or a [VertexAIBackend](./vertexai.md#vertexaibackend) configuration object.Create instances using [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534). | | [BackendType](./vertexai.md#backendtype) | Type alias representing valid backend types. It can be either 'VERTEX_AI' or 'GOOGLE_AI'. | -| [GoogleAIBackend](./vertexai.md#googleaibackend) | Represents the configuration object for the Google AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the service with [getAI()](./vertexai.md#getai_a94a413). Create an instance using [googleAIBackend()](./vertexai.md#googleaibackend). | +| [InferenceMode](./vertexai.md#inferencemode) | Determines whether inference happens on-device or in-cloud. 
| | [Part](./vertexai.md#part) | Content part - includes text, image/video, or function call/response part types. | | [Role](./vertexai.md#role) | Role is the producer of the content. | | [Tool](./vertexai.md#tool) | Defines a tool that model can call to access external knowledge. | | [TypedSchema](./vertexai.md#typedschema) | A type that includes all specific Schema types. | -| [VertexAI](./vertexai.md#vertexai) | An instance of the Firebase AI SDK.For more information, refer to the documentation for the new [AI](./vertexai.ai.md#ai_interface). | -| [VertexAIBackend](./vertexai.md#vertexaibackend) | Represents the configuration object for the Vertex AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the server with [getAI()](./vertexai.md#getai_a94a413). Create an instance using [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) function. | +| [VertexAI](./vertexai.md#vertexai) | An instance of the Firebase AI SDK.For more information, refer to the documentation for the new [AI](./vertexai.ai.md#ai_interface) interface. | ## function(app, ...) @@ -187,7 +185,7 @@ const ai = getAI(app); ```javascript // Get an AI instance configured to use Google AI. -const ai = getAI(app, { backend: googleAIBackend() }); +const ai = getAI(app, { backend: new GoogleAIBackend() }); ``` @@ -196,12 +194,14 @@ const ai = getAI(app, { backend: googleAIBackend() }); ```javascript // Get an AI instance configured to use Vertex AI. -const ai = getAI(app, { backend: vertexAIBackend() }); +const ai = getAI(app, { backend: new VertexAIBackend() }); ``` ### getVertexAI(app, options) {:#getvertexai_04094cf} +It is recommended to use the new [getAI()](./vertexai.md#getai_a94a413). + Returns a [VertexAI](./vertexai.md#vertexai) instance for the given app. 
Signature: @@ -221,33 +221,16 @@ export declare function getVertexAI(app?: FirebaseApp, options?: VertexAIOptions [VertexAI](./vertexai.md#vertexai) -## function() - -### googleAIBackend() {:#googleaibackend} - -Creates a [Backend](./vertexai.md#backend) instance configured to use Google AI. - -Signature: - -```typescript -export declare function googleAIBackend(): GoogleAIBackend; -``` -Returns: - -[GoogleAIBackend](./vertexai.md#googleaibackend) - -A [GoogleAIBackend](./vertexai.md#googleaibackend) object. - ## function(ai, ...) -### getGenerativeModel(ai, modelParams, requestOptions) {:#getgenerativemodel_80bd839} +### getGenerativeModel(ai, modelParams, requestOptions) {:#getgenerativemodel_c63f46a} Returns a [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. Signature: ```typescript -export declare function getGenerativeModel(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel; +export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel; ``` #### Parameters @@ -255,7 +238,7 @@ export declare function getGenerativeModel(ai: AI, modelParams: ModelParams, req | Parameter | Type | Description | | --- | --- | --- | | ai | [AI](./vertexai.ai.md#ai_interface) | | -| modelParams | [ModelParams](./vertexai.modelparams.md#modelparams_interface) | | +| modelParams | [ModelParams](./vertexai.modelparams.md#modelparams_interface) \| [HybridParams](./vertexai.hybridparams.md#hybridparams_interface) | | | requestOptions | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | | Returns: @@ -293,35 +276,11 @@ export declare function getImagenModel(ai: AI, modelParams: ImagenModelParams, r If the `apiKey` or `projectId` fields are missing in your Firebase config. -## function(location, ...) 
- -### vertexAIBackend(location) {:#vertexaibackend_d0a4534} - -Creates a [Backend](./vertexai.md#backend) instance configured to use Vertex AI. - -Signature: - -```typescript -export declare function vertexAIBackend(location?: string): VertexAIBackend; -``` - -#### Parameters - -| Parameter | Type | Description | -| --- | --- | --- | -| location | string | The region identifier, defaulting to us-central1; see [Vertex AI locations](https://firebase.google.com/docs/vertex-ai/locations?platform=ios#available-locations) for a list of supported locations. | - -Returns: - -[VertexAIBackend](./vertexai.md#vertexaibackend) - -A [VertexAIBackend](./vertexai.md#vertexaibackend) object. - ## BackendType An enum-like object containing constants that represent the supported backends for the Firebase AI SDK. -These values are assigned to the `backendType` property within the specific backend configuration objects ([GoogleAIBackend](./vertexai.md#googleaibackend) or [VertexAIBackend](./vertexai.md#vertexaibackend)) to identify which service to target. +These values are assigned to the `backendType` property within the specific backend configuration objects ([GoogleAIBackend](./vertexai.googleaibackend.md#googleaibackend_class) or [VertexAIBackend](./vertexai.vertexaibackend.md#vertexaibackend_class)) to identify which service to target. Signature: @@ -366,18 +325,6 @@ For more information, refer to the documentation for the new [AIModel](./vertexa VertexAIModel: typeof AIModel ``` -## Backend - -Union type representing the backend configuration for the AI service. This can be either a [GoogleAIBackend](./vertexai.md#googleaibackend) or a [VertexAIBackend](./vertexai.md#vertexaibackend) configuration object. - -Create instances using [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534). 
- -Signature: - -```typescript -export type Backend = GoogleAIBackend | VertexAIBackend; -``` - ## BackendType Type alias representing valid backend types. It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`. @@ -388,16 +335,14 @@ Type alias representing valid backend types. It can be either `'VERTEX_AI'` or ` export type BackendType = (typeof BackendType)[keyof typeof BackendType]; ``` -## GoogleAIBackend +## InferenceMode -Represents the configuration object for the Google AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the service with [getAI()](./vertexai.md#getai_a94a413). Create an instance using [googleAIBackend()](./vertexai.md#googleaibackend). +Determines whether inference happens on-device or in-cloud. Signature: ```typescript -export type GoogleAIBackend = { - backendType: typeof BackendType.GOOGLE_AI; -}; +export type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud'; ``` ## Part @@ -444,7 +389,7 @@ export type TypedSchema = IntegerSchema | NumberSchema | StringSchema | BooleanS An instance of the Firebase AI SDK. -For more information, refer to the documentation for the new [AI](./vertexai.ai.md#ai_interface). +For more information, refer to the documentation for the new [AI](./vertexai.ai.md#ai_interface) interface. Signature: @@ -452,19 +397,6 @@ For more information, refer to the documentation for the new [AI](./vertexai.ai. export type VertexAI = AI; ``` -## VertexAIBackend - -Represents the configuration object for the Vertex AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the server with [getAI()](./vertexai.md#getai_a94a413). Create an instance using [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) function. 
- -Signature: - -```typescript -export type VertexAIBackend = { - backendType: typeof BackendType.VERTEX_AI; - location: string; -}; -``` - ## AIErrorCode Standardized error codes that [AIError](./vertexai.aierror.md#aierror_class) can have. diff --git a/docs-devsite/vertexai.modelparams.md b/docs-devsite/vertexai.modelparams.md index bb8a87d5fb2..b4930cf9895 100644 --- a/docs-devsite/vertexai.modelparams.md +++ b/docs-devsite/vertexai.modelparams.md @@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # ModelParams interface -Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_80bd839). +Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_c63f46a). Signature: diff --git a/docs-devsite/vertexai.promptfeedback.md b/docs-devsite/vertexai.promptfeedback.md index 369ef02051d..64332244e23 100644 --- a/docs-devsite/vertexai.promptfeedback.md +++ b/docs-devsite/vertexai.promptfeedback.md @@ -23,7 +23,7 @@ export interface PromptFeedback | Property | Type | Description | | --- | --- | --- | | [blockReason](./vertexai.promptfeedback.md#promptfeedbackblockreason) | [BlockReason](./vertexai.md#blockreason) | | -| [blockReasonMessage](./vertexai.promptfeedback.md#promptfeedbackblockreasonmessage) | string | | +| [blockReasonMessage](./vertexai.promptfeedback.md#promptfeedbackblockreasonmessage) | string | This field is unsupported in Google AI. | | [safetyRatings](./vertexai.promptfeedback.md#promptfeedbacksafetyratings) | [SafetyRating](./vertexai.safetyrating.md#safetyrating_interface)\[\] | | ## PromptFeedback.blockReason @@ -36,6 +36,8 @@ blockReason?: BlockReason; ## PromptFeedback.blockReasonMessage +This field is unsupported in Google AI. 
+ Signature: ```typescript diff --git a/docs-devsite/vertexai.requestoptions.md b/docs-devsite/vertexai.requestoptions.md index 3c233d72b90..aec60365a0f 100644 --- a/docs-devsite/vertexai.requestoptions.md +++ b/docs-devsite/vertexai.requestoptions.md @@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # RequestOptions interface -Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_80bd839). +Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_c63f46a). Signature: diff --git a/docs-devsite/vertexai.safetyrating.md b/docs-devsite/vertexai.safetyrating.md index 28493bafef0..34739830a60 100644 --- a/docs-devsite/vertexai.safetyrating.md +++ b/docs-devsite/vertexai.safetyrating.md @@ -25,9 +25,9 @@ export interface SafetyRating | [blocked](./vertexai.safetyrating.md#safetyratingblocked) | boolean | | | [category](./vertexai.safetyrating.md#safetyratingcategory) | [HarmCategory](./vertexai.md#harmcategory) | | | [probability](./vertexai.safetyrating.md#safetyratingprobability) | [HarmProbability](./vertexai.md#harmprobability) | | -| [probabilityScore](./vertexai.safetyrating.md#safetyratingprobabilityscore) | number | | -| [severity](./vertexai.safetyrating.md#safetyratingseverity) | [HarmSeverity](./vertexai.md#harmseverity) | | -| [severityScore](./vertexai.safetyrating.md#safetyratingseverityscore) | number | | +| [probabilityScore](./vertexai.safetyrating.md#safetyratingprobabilityscore) | number | This field is not supported in Google AI, so it will default to 0 when using Google AI. | +| [severity](./vertexai.safetyrating.md#safetyratingseverity) | [HarmSeverity](./vertexai.md#harmseverity) | This field is not supported in Google AI, so it will default to HarmSeverity.UNSUPPORTED when using Google AI. | +| [severityScore](./vertexai.safetyrating.md#safetyratingseverityscore) | number | This field is not supported in Google AI, so it will default to 0 when using Google AI. 
| ## SafetyRating.blocked @@ -55,6 +55,8 @@ probability: HarmProbability; ## SafetyRating.probabilityScore +This field is not supported in Google AI, so it will default to 0 when using Google AI. + Signature: ```typescript @@ -63,6 +65,8 @@ probabilityScore: number; ## SafetyRating.severity +This field is not supported in Google AI, so it will default to `HarmSeverity.UNSUPPORTED` when using Google AI. + Signature: ```typescript @@ -71,6 +75,8 @@ severity: HarmSeverity; ## SafetyRating.severityScore +This field is not supported in Google AI, so it will default to 0 when using Google AI. + Signature: ```typescript diff --git a/docs-devsite/vertexai.vertexaibackend.md b/docs-devsite/vertexai.vertexaibackend.md new file mode 100644 index 00000000000..cd255e30034 --- /dev/null +++ b/docs-devsite/vertexai.vertexaibackend.md @@ -0,0 +1,58 @@ +Project: /docs/reference/js/_project.yaml +Book: /docs/reference/_book.yaml +page_type: reference + +{% comment %} +DO NOT EDIT THIS FILE! +This is generated by the JS SDK team, and any local changes will be +overwritten. Changes should be made in the source code at +https://github.com/firebase/firebase-js-sdk +{% endcomment %} + +# VertexAIBackend class +Represents the configuration class for the Vertex AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the server with [getAI()](./vertexai.md#getai_a94a413). + +Signature: + +```typescript +export declare class VertexAIBackend extends Backend +``` +Extends: [Backend](./vertexai.backend.md#backend_class) + +## Constructors + +| Constructor | Modifiers | Description | +| --- | --- | --- | +| [(constructor)(location)](./vertexai.vertexaibackend.md#vertexaibackendconstructor) | | Creates a configuration object for the Vertex AI backend. | + +## Properties + +| Property | Modifiers | Type | Description | +| --- | --- | --- | --- | +| [location](./vertexai.vertexaibackend.md#vertexaibackendlocation) | | string | The region identifier. 
See [Vertex AI locations](https://firebase.google.com/docs/vertex-ai/locations?platform=ios#available-locations) for a list of supported locations. | + +## VertexAIBackend.(constructor) + +Creates a configuration object for the Vertex AI backend. + +Signature: + +```typescript +constructor(location?: string); +``` + +#### Parameters + +| Parameter | Type | Description | +| --- | --- | --- | +| location | string | The region identifier, defaulting to us-central1; see [Vertex AI locations](https://firebase.google.com/docs/vertex-ai/locations?platform=ios#available-locations) for a list of supported locations. | + +## VertexAIBackend.location + +The region identifier. See [Vertex AI locations](https://firebase.google.com/docs/vertex-ai/locations?platform=ios#available-locations) for a list of supported locations. + +Signature: + +```typescript +readonly location: string; +``` diff --git a/e2e/sample-apps/modular.js b/e2e/sample-apps/modular.js index 9e943e04494..aeebe19a4b1 100644 --- a/e2e/sample-apps/modular.js +++ b/e2e/sample-apps/modular.js @@ -58,7 +58,7 @@ import { onValue, off } from 'firebase/database'; -import { getGenerativeModel, getVertexAI, VertexAI } from 'firebase/vertexai'; +import { getGenerativeModel, getVertexAI } from 'firebase/vertexai'; import { getDataConnect, DataConnect } from 'firebase/data-connect'; /** @@ -313,9 +313,15 @@ function callPerformance(app) { async function callVertexAI(app) { console.log('[VERTEXAI] start'); const vertexAI = getVertexAI(app); - const model = getGenerativeModel(vertexAI, { model: 'gemini-1.5-flash' }); - const result = await model.countTokens('abcdefg'); - console.log(`[VERTEXAI] counted tokens: ${result.totalTokens}`); + const model = getGenerativeModel(vertexAI, { + mode: 'only_on_device' + }); + const singleResult = await model.generateContent([ + { text: 'describe the following:' }, + { text: 'the mojave desert' } + ]); + console.log(`Generated text: ${singleResult.response.text()}`); + 
console.log(`[VERTEXAI] end`); } /** @@ -341,18 +347,18 @@ async function main() { const app = initializeApp(config); setLogLevel('warn'); - callAppCheck(app); - await authLogin(app); - await callStorage(app); - await callFirestore(app); - await callDatabase(app); - await callMessaging(app); - callAnalytics(app); - callPerformance(app); - await callFunctions(app); + // callAppCheck(app); + // await authLogin(app); + // await callStorage(app); + // await callFirestore(app); + // await callDatabase(app); + // await callMessaging(app); + // callAnalytics(app); + // callPerformance(app); + // await callFunctions(app); await callVertexAI(app); - callDataConnect(app); - await authLogout(app); + // callDataConnect(app); + // await authLogout(app); console.log('DONE'); } diff --git a/packages/vertexai/src/api.test.ts b/packages/vertexai/src/api.test.ts index 0554ff46441..9d21dedb14e 100644 --- a/packages/vertexai/src/api.test.ts +++ b/packages/vertexai/src/api.test.ts @@ -101,6 +101,21 @@ describe('Top level API', () => { expect(genModel).to.be.an.instanceOf(GenerativeModel); expect(genModel.model).to.equal('publishers/google/models/my-model'); }); + it('getGenerativeModel with HybridParams sets a default model', () => { + const genModel = getGenerativeModel(fakeAI, { + mode: 'only_on_device' + }); + expect(genModel.model).to.equal( + `publishers/google/models/${GenerativeModel.DEFAULT_HYBRID_IN_CLOUD_MODEL}` + ); + }); + it('getGenerativeModel with HybridParams honors a model override', () => { + const genModel = getGenerativeModel(fakeAI, { + mode: 'prefer_on_device', + inCloudParams: { model: 'my-model' } + }); + expect(genModel.model).to.equal('publishers/google/models/my-model'); + }); it('getImagenModel throws if no model is provided', () => { try { getImagenModel(fakeAI, {} as ImagenModelParams); diff --git a/packages/vertexai/src/api.ts b/packages/vertexai/src/api.ts index 4f0c407e397..1da5914682d 100644 --- a/packages/vertexai/src/api.ts +++ 
b/packages/vertexai/src/api.ts @@ -23,6 +23,7 @@ import { AIService } from './service'; import { AI, AIOptions, VertexAI, VertexAIOptions } from './public-types'; import { ImagenModelParams, + HybridParams, ModelParams, RequestOptions, AIErrorCode @@ -31,6 +32,8 @@ import { AIError } from './errors'; import { AIModel, GenerativeModel, ImagenModel } from './models'; import { encodeInstanceIdentifier } from './helpers'; import { GoogleAIBackend, VertexAIBackend } from './backend'; +import { ChromeAdapter } from './methods/chrome-adapter'; +import { LanguageModel } from './types/language-model'; export { ChatSession } from './methods/chat-session'; export * from './requests/schema-builder'; @@ -138,16 +141,36 @@ export function getAI( */ export function getGenerativeModel( ai: AI, - modelParams: ModelParams, + modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions ): GenerativeModel { - if (!modelParams.model) { + // Uses the existence of HybridParams.mode to clarify the type of the modelParams input. + const hybridParams = modelParams as HybridParams; + let inCloudParams: ModelParams; + if (hybridParams.mode) { + inCloudParams = hybridParams.inCloudParams || { + model: GenerativeModel.DEFAULT_HYBRID_IN_CLOUD_MODEL + }; + } else { + inCloudParams = modelParams as ModelParams; + } + + if (!inCloudParams.model) { throw new AIError( AIErrorCode.NO_MODEL, `Must provide a model name. 
Example: getGenerativeModel({ model: 'my-model-name' })` ); } - return new GenerativeModel(ai, modelParams, requestOptions); + return new GenerativeModel( + ai, + inCloudParams, + new ChromeAdapter( + window.LanguageModel as LanguageModel, + hybridParams.mode, + hybridParams.onDeviceParams + ), + requestOptions + ); } /** diff --git a/packages/vertexai/src/backwards-compatbility.test.ts b/packages/vertexai/src/backwards-compatbility.test.ts index 62463009b24..da0b613bf21 100644 --- a/packages/vertexai/src/backwards-compatbility.test.ts +++ b/packages/vertexai/src/backwards-compatbility.test.ts @@ -28,6 +28,7 @@ import { } from './api'; import { AI, VertexAI, AIErrorCode } from './public-types'; import { VertexAIBackend } from './backend'; +import { ChromeAdapter } from './methods/chrome-adapter'; function assertAssignable(): void {} @@ -65,7 +66,11 @@ describe('backwards-compatible types', () => { it('AIModel is backwards compatible with VertexAIModel', () => { assertAssignable(); - const model = new GenerativeModel(fakeAI, { model: 'model-name' }); + const model = new GenerativeModel( + fakeAI, + { model: 'model-name' }, + new ChromeAdapter() + ); expect(model).to.be.instanceOf(AIModel); expect(model).to.be.instanceOf(VertexAIModel); }); diff --git a/packages/vertexai/src/methods/chat-session.test.ts b/packages/vertexai/src/methods/chat-session.test.ts index 0564aa84ed6..ed0b4d4877f 100644 --- a/packages/vertexai/src/methods/chat-session.test.ts +++ b/packages/vertexai/src/methods/chat-session.test.ts @@ -24,6 +24,7 @@ import { GenerateContentStreamResult } from '../types'; import { ChatSession } from './chat-session'; import { ApiSettings } from '../types/internal'; import { VertexAIBackend } from '../backend'; +import { ChromeAdapter } from './chrome-adapter'; use(sinonChai); use(chaiAsPromised); @@ -46,7 +47,11 @@ describe('ChatSession', () => { generateContentMethods, 'generateContent' ).rejects('generateContent failed'); - const chatSession = new 
ChatSession(fakeApiSettings, 'a-model'); + const chatSession = new ChatSession( + fakeApiSettings, + 'a-model', + new ChromeAdapter() + ); await expect(chatSession.sendMessage('hello')).to.be.rejected; expect(generateContentStub).to.be.calledWith( fakeApiSettings, @@ -63,7 +68,11 @@ describe('ChatSession', () => { generateContentMethods, 'generateContentStream' ).rejects('generateContentStream failed'); - const chatSession = new ChatSession(fakeApiSettings, 'a-model'); + const chatSession = new ChatSession( + fakeApiSettings, + 'a-model', + new ChromeAdapter() + ); await expect(chatSession.sendMessageStream('hello')).to.be.rejected; expect(generateContentStreamStub).to.be.calledWith( fakeApiSettings, @@ -82,7 +91,11 @@ describe('ChatSession', () => { generateContentMethods, 'generateContentStream' ).resolves({} as unknown as GenerateContentStreamResult); - const chatSession = new ChatSession(fakeApiSettings, 'a-model'); + const chatSession = new ChatSession( + fakeApiSettings, + 'a-model', + new ChromeAdapter() + ); await chatSession.sendMessageStream('hello'); expect(generateContentStreamStub).to.be.calledWith( fakeApiSettings, diff --git a/packages/vertexai/src/methods/chat-session.ts b/packages/vertexai/src/methods/chat-session.ts index 60794001e37..112ddf5857e 100644 --- a/packages/vertexai/src/methods/chat-session.ts +++ b/packages/vertexai/src/methods/chat-session.ts @@ -30,6 +30,7 @@ import { validateChatHistory } from './chat-session-helpers'; import { generateContent, generateContentStream } from './generate-content'; import { ApiSettings } from '../types/internal'; import { logger } from '../logger'; +import { ChromeAdapter } from './chrome-adapter'; /** * Do not log a message for this error. 
@@ -50,6 +51,7 @@ export class ChatSession { constructor( apiSettings: ApiSettings, public model: string, + private chromeAdapter: ChromeAdapter, public params?: StartChatParams, public requestOptions?: RequestOptions ) { @@ -95,6 +97,7 @@ export class ChatSession { this._apiSettings, this.model, generateContentRequest, + this.chromeAdapter, this.requestOptions ) ) @@ -146,6 +149,7 @@ export class ChatSession { this._apiSettings, this.model, generateContentRequest, + this.chromeAdapter, this.requestOptions ); diff --git a/packages/vertexai/src/methods/chrome-adapter.test.ts b/packages/vertexai/src/methods/chrome-adapter.test.ts new file mode 100644 index 00000000000..abdbd08c401 --- /dev/null +++ b/packages/vertexai/src/methods/chrome-adapter.test.ts @@ -0,0 +1,466 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { AIError } from '../errors'; +import { expect, use } from 'chai'; +import sinonChai from 'sinon-chai'; +import chaiAsPromised from 'chai-as-promised'; +import { ChromeAdapter } from './chrome-adapter'; +import { + Availability, + LanguageModel, + LanguageModelCreateOptions, + LanguageModelMessageContent +} from '../types/language-model'; +import { match, stub } from 'sinon'; +import { GenerateContentRequest, AIErrorCode } from '../types'; + +use(sinonChai); +use(chaiAsPromised); + +/** + * Converts the ReadableStream from response.body to an array of strings. 
+ */ +async function toStringArray( + stream: ReadableStream +): Promise { + const decoder = new TextDecoder(); + const actual = []; + const reader = stream.getReader(); + while (true) { + const { done, value } = await reader.read(); + if (done) { + break; + } + actual.push(decoder.decode(value)); + } + return actual; +} + +describe('ChromeAdapter', () => { + describe('isAvailable', () => { + it('returns false if mode is only cloud', async () => { + const adapter = new ChromeAdapter(undefined, 'only_in_cloud'); + expect( + await adapter.isAvailable({ + contents: [] + }) + ).to.be.false; + }); + it('returns false if LanguageModel API is undefined', async () => { + const adapter = new ChromeAdapter(undefined, 'prefer_on_device'); + expect( + await adapter.isAvailable({ + contents: [] + }) + ).to.be.false; + }); + it('returns false if request contents empty', async () => { + const adapter = new ChromeAdapter( + { + availability: async () => Availability.available + } as LanguageModel, + 'prefer_on_device' + ); + expect( + await adapter.isAvailable({ + contents: [] + }) + ).to.be.false; + }); + it('returns false if request content has non-user role', async () => { + const adapter = new ChromeAdapter( + { + availability: async () => Availability.available + } as LanguageModel, + 'prefer_on_device' + ); + expect( + await adapter.isAvailable({ + contents: [ + { + role: 'model', + parts: [] + } + ] + }) + ).to.be.false; + }); + it('returns true if model is readily available', async () => { + const languageModelProvider = { + availability: () => Promise.resolve(Availability.available) + } as LanguageModel; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device' + ); + expect( + await adapter.isAvailable({ + contents: [{ role: 'user', parts: [{ text: 'hi' }] }] + }) + ).to.be.true; + }); + it('returns false and triggers download when model is available after download', async () => { + const languageModelProvider = { + availability: () => 
Promise.resolve(Availability.downloadable), + create: () => Promise.resolve({}) + } as LanguageModel; + const createStub = stub(languageModelProvider, 'create').resolves( + {} as LanguageModel + ); + const expectedOnDeviceParams = { + expectedInputs: [{ type: 'image' }] + } as LanguageModelCreateOptions; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device', + expectedOnDeviceParams + ); + expect( + await adapter.isAvailable({ + contents: [{ role: 'user', parts: [{ text: 'hi' }] }] + }) + ).to.be.false; + expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams); + }); + it('avoids redundant downloads', async () => { + const languageModelProvider = { + availability: () => Promise.resolve(Availability.downloadable), + create: () => Promise.resolve({}) + } as LanguageModel; + const downloadPromise = new Promise(() => { + /* never resolves */ + }); + const createStub = stub(languageModelProvider, 'create').returns( + downloadPromise + ); + const adapter = new ChromeAdapter(languageModelProvider); + await adapter.isAvailable({ + contents: [{ role: 'user', parts: [{ text: 'hi' }] }] + }); + await adapter.isAvailable({ + contents: [{ role: 'user', parts: [{ text: 'hi' }] }] + }); + expect(createStub).to.have.been.calledOnce; + }); + it('clears state when download completes', async () => { + const languageModelProvider = { + availability: () => Promise.resolve(Availability.downloadable), + create: () => Promise.resolve({}) + } as LanguageModel; + let resolveDownload; + const downloadPromise = new Promise(resolveCallback => { + resolveDownload = resolveCallback; + }); + const createStub = stub(languageModelProvider, 'create').returns( + downloadPromise + ); + const adapter = new ChromeAdapter(languageModelProvider); + await adapter.isAvailable({ + contents: [{ role: 'user', parts: [{ text: 'hi' }] }] + }); + resolveDownload!(); + await adapter.isAvailable({ + contents: [{ role: 'user', parts: [{ text: 'hi' }] }] + }); + 
expect(createStub).to.have.been.calledTwice; + }); + it('returns false when model is never available', async () => { + const languageModelProvider = { + availability: () => Promise.resolve(Availability.unavailable), + create: () => Promise.resolve({}) + } as LanguageModel; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device' + ); + expect( + await adapter.isAvailable({ + contents: [{ role: 'user', parts: [{ text: 'hi' }] }] + }) + ).to.be.false; + }); + }); + describe('generateContent', () => { + it('throws if Chrome API is undefined', async () => { + const adapter = new ChromeAdapter(undefined, 'only_on_device'); + await expect( + adapter.generateContent({ + contents: [] + }) + ) + .to.eventually.be.rejectedWith( + AIError, + 'Chrome AI requested for unsupported browser version.' + ) + .and.have.property('code', AIErrorCode.REQUEST_ERROR); + }); + it('generates content', async () => { + const languageModelProvider = { + create: () => Promise.resolve({}) + } as LanguageModel; + const languageModel = { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + prompt: (p: LanguageModelMessageContent[]) => Promise.resolve('') + } as LanguageModel; + const createStub = stub(languageModelProvider, 'create').resolves( + languageModel + ); + const promptOutput = 'hi'; + const promptStub = stub(languageModel, 'prompt').resolves(promptOutput); + const expectedOnDeviceParams = { + systemPrompt: 'be yourself', + expectedInputs: [{ type: 'image' }] + } as LanguageModelCreateOptions; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device', + expectedOnDeviceParams + ); + const request = { + contents: [{ role: 'user', parts: [{ text: 'anything' }] }] + } as GenerateContentRequest; + const response = await adapter.generateContent(request); + // Asserts initialization params are proxied. + expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams); + // Asserts Vertex input type is mapped to Chrome type. 
+ expect(promptStub).to.have.been.calledOnceWith([ + { + type: 'text', + content: request.contents[0].parts[0].text + } + ]); + // Asserts expected output. + expect(await response.json()).to.deep.equal({ + candidates: [ + { + content: { + parts: [{ text: promptOutput }] + } + } + ] + }); + }); + it('generates content using image type input', async () => { + const languageModelProvider = { + create: () => Promise.resolve({}) + } as LanguageModel; + const languageModel = { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + prompt: (p: LanguageModelMessageContent[]) => Promise.resolve('') + } as LanguageModel; + const createStub = stub(languageModelProvider, 'create').resolves( + languageModel + ); + const promptOutput = 'hi'; + const promptStub = stub(languageModel, 'prompt').resolves(promptOutput); + const expectedOnDeviceParams = { + systemPrompt: 'be yourself', + expectedInputs: [{ type: 'image' }] + } as LanguageModelCreateOptions; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device', + expectedOnDeviceParams + ); + const request = { + contents: [ + { + role: 'user', + parts: [ + { text: 'anything' }, + { + inlineData: { + data: sampleBase64EncodedImage, + mimeType: 'image/jpeg' + } + } + ] + } + ] + } as GenerateContentRequest; + const response = await adapter.generateContent(request); + // Asserts initialization params are proxied. + expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams); + // Asserts Vertex input type is mapped to Chrome type. + expect(promptStub).to.have.been.calledOnceWith([ + { + type: 'text', + content: request.contents[0].parts[0].text + }, + { + type: 'image', + content: match.instanceOf(ImageBitmap) + } + ]); + // Asserts expected output. 
+ expect(await response.json()).to.deep.equal({ + candidates: [ + { + content: { + parts: [{ text: promptOutput }] + } + } + ] + }); + }); + }); + describe('countTokens', () => { + it('counts tokens is not yet available', async () => { + const inputText = 'first'; + // setting up stubs + const languageModelProvider = { + create: () => Promise.resolve({}) + } as LanguageModel; + const languageModel = { + measureInputUsage: _i => Promise.resolve(123) + } as LanguageModel; + const createStub = stub(languageModelProvider, 'create').resolves( + languageModel + ); + + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device' + ); + + const countTokenRequest = { + contents: [{ role: 'user', parts: [{ text: inputText }] }] + } as GenerateContentRequest; + + try { + await adapter.countTokens(countTokenRequest); + } catch (e) { + // the call to countToken should be rejected with Error + expect((e as AIError).code).to.equal(AIErrorCode.REQUEST_ERROR); + expect((e as AIError).message).includes('not yet available'); + } + + // Asserts that no language model was initialized + expect(createStub).not.called; + }); + }); + describe('generateContentStream', () => { + it('generates content stream', async () => { + const languageModelProvider = { + create: () => Promise.resolve({}) + } as LanguageModel; + const languageModel = { + promptStreaming: _i => new ReadableStream() + } as LanguageModel; + const createStub = stub(languageModelProvider, 'create').resolves( + languageModel + ); + const part = 'hi'; + const promptStub = stub(languageModel, 'promptStreaming').returns( + new ReadableStream({ + start(controller) { + controller.enqueue([part]); + controller.close(); + } + }) + ); + const expectedOnDeviceParams = { + expectedInputs: [{ type: 'image' }] + } as LanguageModelCreateOptions; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device', + expectedOnDeviceParams + ); + const request = { + contents: [{ role: 'user', parts: [{ text: 
'anything' }] }] + } as GenerateContentRequest; + const response = await adapter.generateContentStream(request); + expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams); + expect(promptStub).to.have.been.calledOnceWith([ + { + type: 'text', + content: request.contents[0].parts[0].text + } + ]); + const actual = await toStringArray(response.body!); + expect(actual).to.deep.equal([ + `data: {"candidates":[{"content":{"role":"model","parts":[{"text":["${part}"]}]}}]}\n\n` + ]); + }); + it('generates content stream with image input', async () => { + const languageModelProvider = { + create: () => Promise.resolve({}) + } as LanguageModel; + const languageModel = { + promptStreaming: _i => new ReadableStream() + } as LanguageModel; + const createStub = stub(languageModelProvider, 'create').resolves( + languageModel + ); + const part = 'hi'; + const promptStub = stub(languageModel, 'promptStreaming').returns( + new ReadableStream({ + start(controller) { + controller.enqueue([part]); + controller.close(); + } + }) + ); + const expectedOnDeviceParams = { + expectedInputs: [{ type: 'image' }] + } as LanguageModelCreateOptions; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device', + expectedOnDeviceParams + ); + const request = { + contents: [ + { + role: 'user', + parts: [ + { text: 'anything' }, + { + inlineData: { + data: sampleBase64EncodedImage, + mimeType: 'image/jpeg' + } + } + ] + } + ] + } as GenerateContentRequest; + const response = await adapter.generateContentStream(request); + expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams); + expect(promptStub).to.have.been.calledOnceWith([ + { + type: 'text', + content: request.contents[0].parts[0].text + }, + { + type: 'image', + content: match.instanceOf(ImageBitmap) + } + ]); + const actual = await toStringArray(response.body!); + expect(actual).to.deep.equal([ + `data: {"candidates":[{"content":{"role":"model","parts":[{"text":["${part}"]}]}}]}\n\n` + 
]); + }); + }); +}); + +// TODO: Move to using image from test-utils. +const sampleBase64EncodedImage = + '/9j/4QDeRXhpZgAASUkqAAgAAAAGABIBAwABAAAAAQAAABoBBQABAAAAVgAAABsBBQABAAAAXgAAACgBAwABAAAAAgAAABMCAwABAAAAAQAAAGmHBAABAAAAZgAAAAAAAABIAAAAAQAAAEgAAAABAAAABwAAkAcABAAAADAyMTABkQcABAAAAAECAwCGkgcAFgAAAMAAAAAAoAcABAAAADAxMDABoAMAAQAAAP//AAACoAQAAQAAAMgAAAADoAQAAQAAACwBAAAAAAAAQVNDSUkAAABQaWNzdW0gSUQ6IDM5MP/bAEMACAYGBwYFCAcHBwkJCAoMFA0MCwsMGRITDxQdGh8eHRocHCAkLicgIiwjHBwoNyksMDE0NDQfJzk9ODI8LjM0Mv/bAEMBCQkJDAsMGA0NGDIhHCEyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMv/CABEIASwAyAMBIgACEQEDEQH/xAAbAAABBQEBAAAAAAAAAAAAAAAAAQIDBAUGB//EABgBAQEBAQEAAAAAAAAAAAAAAAABAgME/9oADAMBAAIQAxAAAAHfA7ZFFgBQAAUUBQFBFABSUBQBQBZQUiqC7wAoigooQKACgCigKIoAosIKSigABWBdZAUAUAUQUUUAFIBQAWAFAUVFABSKoLqAKAKAKJVt4BvrFLAqKooArHgoQAoKiqDyKKoaiqhSqhCqgLFKHKdBiZmbodX5n2MbWHkdZS2kWhUBQIVUBwgUucv8Oad7nUzey3vPO5q4UrlOEWjzT0vhssDpea9Gy03BsqooKhCgCgCgHIcd0fN5DnuWHseY0Ureh+ZelLIqFq+f+gQJ5f6V5r6pE4i2ioDhCFVAVWrCiBxvJdlzFzVc56GjFoy4/a8d2q2TmpN3V1OF2MWp1/NrL0hzinRnO5Sdwc+L0Jz5HQLzyy9AYQYmDrZfXkyxVs5m4yVt3F0/M7l1YotpQnScdumqsFSb0yElm4zf5hjvV56bOtteViXq3ecRMbJgG+L4tzGqNyTDJNqMx5rfSHGRdpAcidPqLyFbuBeWrdmyONg7TJTBTrqZg3b6GGzbSzILYW8uSuF2hPG9l6uFdbPQRxzU8M2Lc62fpUJZNGC5TXAseNuVc2abO0pSKUsjdI+OdNoTzYc3fIANzF1LVTalK9KU72e1coa1TOqe3naA8inKGZ0QV5ZGzSywKWVrSAUROTjuno8lSLQbFq5kNrXsYAvQu5xmW9y18l0tjmrFu8ZM66C0nLabEsPGrT3xOlnIyXjkzC8tSxh2zRbWlsVNZtY6a9SKq1ZCd0rLHS17SPlgUtvpvatrVetlYJJZRpNcOOfmRaEN+s3Vctl0qCWs+PLljs19iWw+RdZEcU1VBFVUR6Kr5a6rplEzvnH5krF9Y33LnNFkqWIynAqZ3Zno3U03xO1mVY1HrGDxgOREpURkjiMXDUXOlsVpjRIJ0RXhix3KbUuzn6DLla6nK1RwFAKKK+GNsuigXReXW6mpRS2yWu6Zgr64Rq90abqclllYVJiJxIrAkI1JXRvJZoJJqUcY1yzmrvLnMLJX1QngWQrF9hTW01IZmwlt1F5bWtMTPruLc+fYltSVo83SKpnX/8QALRAAAQQCAQMDBAIBBQAAAAAAAQACAwQREgUQExQgITAVIjEyI0AkJTM0QXD/2gAIAQEAAQUC/wDH5Z2wu/scrHmBjg+P0hzXf0pGCSPjpnwT2bDa0LOWe6dEgCW06yYIWwRf0uVrbNdf79Grg2ZeUrxkMsco+CFleP4uRuyQvPITOjdyLzS4yy+Znqts7dtcbSZOgAB8V6Yw1nlziCE39obclR8EzZ4YrUM7vRy2PLVBpbT+P
lv+Nn0RPZU42jJpc9HIwOhtqk8yU/j5dxMq+1YbrVaH2eUd/lsDpJG516zRMnjLSHRt0i+PlYss613Fli5OLBhOkwv1ShNG4PlDIqdzyunjd/l/k5NwFWu0dw/gMLlXhfFyHLD+SpGZbTq8GIR3Y7NCGKvRrd9fT5F4VgLxboXZ5ALXkgs8mFZt3I5vIvLzLYXnzL6lhfVYwvq9dfVqy5IEpzTG93618me0P9S5T96GPNQDWm+f8HifZuVlZWVlZXJnPILKysoytXsuUe0y27LHxzS92Y/ca72xzmWOW1cMcklSSKIMkbIzzYNrs8b6dO1HXYLsBaHAqS0yOTKyvLb37crZOQm5Bkcw5GFykuyqZ81iJ0mru9JgJ8bmHoGly1ds+KSNMikkXZsAduVo+5HKBwmW5mFzy5z70r43WJXEyuKz9ywjs8wzSQPdkuwUAcch/u9InavA0s2maqnMYpC1rmtjAV1zvHpVi1hiiQghz4cC8SsnUqxX0+svDrix9KgzLxeHHiiG/SX4+lyI8ZMFLVmgFz9nY2UELioNnqSRz5KEa/6AUpe0Miyrf8Dadnug6uQwOjgSyKye+WyIbAEgLuRoSxORwVLU2tTyOfJj2QlkY3ua8dGN0MhO2LmkK3bkgn7Ykjk4+KQ14BXj67YNkydqtE/VahagLVqwFo3f0PHlwe4NOSWRrh7agqxUEyZmGF9+IKG/G53Q7YPfaou9amEzV+wAI9BkY0k5PWtHOwy1d3V4zC38oKaq6WQfiw+FrIIqxXutiPRlfatWLVi0YvZTU4bDnVV4zkKpRrvUbS1F3tG4hbhbhbhS2WxtmmM0nHt0gysrZZWfR7rPXKysrZbFblblbruFZ990Nc7BCYpsxXdXcWy2WyysrPXuxrvMK7sa1ytF212120RqMZGFhY6BAoFArZZWVlZWfTC1zi+0c15y9+q1WgT4F33KOUl+0a7jMtfl2PTn4K+S0xPDoIe2srKyrE2vSGPuP7LF22/EEFq5dtybDlMAYMrZbLdOsgJ7t3KJj4xn4crK2QkKDgfTnpMThmNU1jXMbNogc/DlZWVno1+FsAvz6H5x0/KhZ7/GR0wgPd7tjD1x0f8Auoxs/wCHCwtemOuUx4ag8FZHV8bcqu33+LKysArt5WpWq1WOmShIQnSZBTBs4eyz1z8AKygvZaharC1RYsdQcESLcL8rJWVn0Z6gdG9MrKys9CAUWLtuWvUEhCRbDp7rZbLKCCygvx6s9AUCisBYRCPTKyUPQ0ooOKBK/8QAIhEAAwACAgIBBQAAAAAAAAAAAAEREBIgIQIwURMiMUBQ/9oACAEDAQE/Af5k9E9yWITC9S7RCCIQhCEGuyEcPFMTYrCYsxTrDYmVQTKhPouPJ9GyNj6iG7mEIRkZGPxZGR8aTofiRkZGM6OjY/OahNFp38lZWX5NkXxPtxuzZlNjZm5ubmxc01RqakIak4XhSl9NJxf6cJxvNCxCelMp/8QAIhEAAwACAgIBBQAAAAAAAAAAAAERECASMAIhIjFAQVBx/9oACAECAQE/Af1d6LumXZs5MTLhn51pR5WlKUulz5JLFLrR/XH8ITEIQhCCHld3IbRUesez2Px0jI8PERxIz5HyPZxRxWkIQmvI5FLil6Z137C9NJ2XFL0MhD//xAA2EAABAwEFBQcDBAEFAAAAAAABAAIRIQMQEjFBEyAiMlEEMDNSYXGRQIGhIzRCklAUQ1Nwcv/aAAgBAQAGPwL/AKfYHfyMfUttf+M1TXNyIpvHCQY+icw5OEI9ktdKBbR3sAmjZDZkxnW6TQI2HZK+a00CDG/Ri3Zm3mjonWNtGMZOTJgCdTCIaS8+ixOOCyCDLMU7sWVnQxJKaHEyMy2kqWyLSYxJwtHS5u/atiOK5z7USGmIQAHdktMONAsTnEn1WQKnojgjCdE21FAUW2b5I3aHStzZ1r3jP/d5uDbV1XyWgKzrAy3Xn+L+IXWTj5e8s2aRN2SOhVm1w
oXLDo1oQazmOSGLOK7hY9shYdckxvQDvGWvQxuMeBiIOSbNjs36kpjvKZXihSHhOfnhE0TuDDHrdaECGMdLu9w6khYncrBiKlBozJhWTHiHAqyd6Qms+VJsmfCwhh9k97C8EDqn/quZHlVO2Wi4e2OVO2KnamrxbIr/AGimi0OA9GL9qFXsZVeyPVezWirY2qq20H2Wbv6qy+E5hzFEFZgecKwI1Vh91bOGmV1B6K1Vr9t9vsN3mCqAm7N7SOjdE0NqQZTrTrc1ztCrJ4PC3VWDcQnF+FbvLhzfhYmmicMfKuF04skQ+eI6LFtBms0xhNXH4v2MVWIHhELCDiGvoqHWE6rWwadUHTJb5dQuE16ojaEjOt0OEX0ErDBk6IF7YnqjgYTGcLw3wpwOj2WqqFTNE4qnOViJWCaR0VXnKKKr/wAKTfJMlTEjVsolZXNoAIzRuBmEHWwaGnJzRRbTZ8PnCLZaGn0WS5KrCLM1WK0xD0OS8Jhn0RH+nZ/VeC1eC1eEFyflYHWsTkAuZ/yoZaf2Xij7hTtW/YLnb+Vzs+VLsvRybaEV6SjhENu2kNwN8yfbFoMcrf4p1o9pwikTQIl1nXQkXVXCGhYiYJ8rl+4tGTlAR5nR/IthQVS4j4WztHEnQlgVLX5YtFUwvFHyqWjflcy2r3WZZ5SjifiAyXpdha8hvRCGzwprA0kzWEABT3XCQPcKpCwsIy6IY/xRTjeD7ysAM+u5ov07LaHoVithx9JyvoB8LIfCyU7Ie+60sPG3MXHEeEZIVr7qoaUDQP6obR0x0CptPhBhDhN9Ci9xDoya0IutHusmt/iFBIXDakey8QlZ31c0fdTuY2wAeqxC0OI5yoxk+l+MWpb6XfrAV0WOyAprcOAn23ch8LLcxPxfK4XfKzCqVkhxqhquMrNZrNTzegWM0U6uP00rJThF2ar3WfdSPo5mAFDcuqwu3JYYN3EQAuZRKw4e+e3QhYYWI825hGt0aLJZd5kslxKBu5IuN2hnvc+4gIzdzQVhNfX6CqpuZX0VR39d83D6ckG7F/kafT0/xf8A/8QAKhABAAIBAwMDBAIDAQAAAAAAAQARITFBURBhcSCBkTChscHR8EBQ4fH/2gAIAQEAAT8h/wAiv8iof60/24fSvm0naH+R2aUdppQR8PVerRTWafXUA+lrvlRRsJt2f+xcK5o6rMHN0LZb9Fagaq0EyEPYezzAGwavL67l+jb1sex1ucH2lNKQvo1+4DXUq1qO8JQuOPmZPNWNPbllNUa93l+m+Nx3niXqZkfLEtIvwwS75Bt1qXL9H43mjIKjs5hxLIxhtWEwAKAMH07uBuNpYwtVXCGs7xLQcmZjdZmpBJoLnaFJ1hXpOcFSE2YaxxFP5/qcz+iXToFmTpK7yt+RC1GWVyrPaHXZjILVX8kNe0A+l+w+psg/PfTViLG0CD8QCO8wRgYDiC7aYcs8evd6Brtt3jBCFweZUJVb7fUI7W74YEcS8LFVhJzjk4dy8SodQh3BdmyEXRzd7TFspRGYByYeUzF14jPPEuXLly5cuX1voJWze2sQ9Q9zg+amaprCQ2IEoCSuY63Ir4MUahd+BmIVIZuUJECnsXWXLxBDX26+XmU6Xz/7B6iXK05n8hGGqPmbfyP/ACbwnQ2SxsPmU6p4Z+gVlGn8XL6L7f8AJtJ7Q/KUi17sMo5YxypaCW4JWPpGGnmOw2v8iFmYsfKLYjkdZeDFDDg0nxh+YLPL+3rAovb+8vPUvzA65saxNfuiJo4RLXF13F2lmFXuvaKkPabIc4ZYEFrumMtNnH9E5U7Xd/MEFXvNB7FuMe0c02mB3mVhstCBhU0/pNAtCaNTXRMJW6svWpfUs6vbSB84N+NZSDuiCsttdle72mPNFBy4gHLLvAbbzAzStbf3M1+rqfeaZZioic9GqZcBKxw6mYehtWyxgJ6A0l8UrYI2w+TpmbVfCc8e01A7G4Am8NmW9XzxHqqqOF68w02AWwwaR
0UXXYymRduZhOHzFc3L8ydyHa660DiXiJbc7qbQ68TJeQN5lUp3IxjxlldJXAGhvzGQDjQla/mO1nlbX8SpaWtplxI3wfuMXhYM1gea6UwzwhqIoFb6IX3dfboerh4s/c7Ku7jYbcZBKfAP4hEIvg/xCqWcYJrnusF0L2ilrPtY/UeCdwsCgzQq1kzPaNZXE8vB0QuFCtP2R/SzWKmP5lZq66aINj8zdH3JY2L3b/EUWNVZT7SgKpYEv6iCaNkipsd5QBFfMK7/ADLhKuriEWio7PmWrwcAzdF4xALHlbKs4Z1wsK+kLuRnGtlWvBMmobbEsBvLa4Ra2bGWPmIdgfeWyhbQxMealG6ViFVJbmACj/e8MOBdG1M5KoWzlPfQP2TdqXYgVMbhBCOIfJjqCjWwEDunsDxEaxiLGc+YGofiC6/tph0fEbq08FzOOphG5asjVVFSkYRPapngwWxcu0vBdTFabfWF2AxjqRcMdpCHIuhjHRaq1shjR+YLyRaBfeDFw3B95hI3XGcc98n5iGQXeCM9ykB5sGtyXMwjvSacC9j0UgA0epLcxoY1vwIuGsVEyJgECgfuUxBo3SqX0bqmOle5Fwz9XSSp7y5TclPW+DjyysaQ2D7yoIZQUVASNWtGaMDyJZG1bMueKBkF4emONKdQe8fmlpZKmGwDaCjdRVzyl+r5RZctlwODPeW5l5eWnej0a07kyste7Cuz4iOp+IbRXiF0fvmcLfaBgGB59RCuYRi1grWpmq3zACxuMsW4ipmHSFCF5eEAxPoFO6HfPOX6g+h0Hr241UgcciUSu9EJR2iYsUkpMCjTWLHiCiA7Cd0TDl5ljaUzMJfQMGEBfQvMZ3mqnuQnZf4ej09wdMswMrA4BbDfiY6VK6VAgQ6e2d5Ei4qWqn5s+itCbuWLqhlWkq2LKEXLOty5cvqlICFMPQZcHouVl00QXXQwuRGdtTZDAmnruX12bcwwxnnJGlohhFSuj0Ybtvo6KU/mKNxw06XL6X6UuLMxjxEbIUS+eOldNT7zpWodT1r8S0So9Fsy1mBrWLawbfpjeawPRVbNOteu6hB2RJpKbpkjKiWOgWj0pKSXuUpKCg6bJfRcuX1GX0CxLzOdyKnhMtou0sa9L5JmoXcg2sE0PQOcoy+lstCp7dIO81QWXhJAJh0Zhme2lG0EaxxLeickGmHRljeW3gYGMiJWUqDT0rLS24nU3GkrAgLhBQ5orOopHhhHWKMs/9oADAMBAAIAAwAAABASIMVBgAVIggAJsGy6fNBiyj4Y5ptsnyTbFtvCz9pNNPGuqMCNo42YQIEExL6CRYMEGT8YCBzUGdVEHKQHraFgCRaW/wDNpnycuGNdceiyLtY4mcgOiOu29EEGuHlAnRrvBwEb0uqOJE43dRwqzkz2egbGwwUOslkwzPIcsSwSNhRUkWEw1v62L+JMcNPr2AmjywACL2YgqfCuq0/Cz+/jqnaGEcefx1OE4WV4cia8oyMQ8U8lMsIgsWO//8QAHREAAwACAwEBAAAAAAAAAAAAAAERECEgMVFBMP/aAAgBAwEBPxBc1+a/BIhCcITMI8QhCYQhCEJkvMQmYQhMwSNeZGhNUhCEIQb2JLs6VO48HoK5+AEVawVlRxOosomXwd8GnZFXhBRoo6jcWhEUOTSFpEsbUKcC6hquh+Q9qiTHo2Gy+i7hlYQVKEyMkG6xMadEsQVNWsKSdaxKa3svsSIaTUmSLsaJEyxoR7dxN2w294KG1dcCJhIQvQkXwVG3IpKLNtFFEf038E3ME6JsbQ4LKEhtzEIQgmkJBlpkEt46D4xkZcREF0PMJiix8T5k1yH+A//EAB4RAAMBAQADAQEBAAAAAAAAAAABERAhIDFBMFFh/9oACAECAQE/EPwf5PaPLlKXwo8u0pSlHxtGUpcdGmMo/RWlC6rOhZS5zhwLrp0UmC+CpFGXTp0aFzo0Khvgvd8QpR+8Uo8UY3hhO7WUKvQfs9qhB/Q1c
MLofRRZwoyLzYIjmNwtyoqx5BNoX9YkbbejnwfUEgxiqXWPwCf4cfBQoKFzOCBKesbMOHCLwvBFnCFFE4bIRBUylKUqIyEEGxKimUpcjwmijeLKUuVFHlekUospdpk/Fii0nkmn/8QAJhABAAICAgICAgIDAQAAAAAAAQARITFBURBhcYGRobHBINHw4f/aAAgBAQABPxDweDX+J4P8jfk14NeVQJUNf4G/J4NeKleKh4JQyvDDwHipXivFQJUJUrxUrxUDuVK8ceArxUJUqVA8HioeK8VAzKglSoVUqVDLKhiV4rzUCoFwxKlSpXgPBAuVK8VKrwF+K8VApm5UCV4rxmVCVA81KlngPAY8V4qV1L8DfCB7N8RCCVTnDfgMeK8G5UJXgPJhh5NeefBszFrbCQytzUeUao/D74+vBr/AgAyf4TDfk8BC0HvMPJrzz5Du/sDX4afqAmGh09Z6tZ8y6HhnL0DxVZuAzNHW4FtX6iIo7J/LlggsaQei6lY9npH/AFNo2ptfvweTUuoeUhnWfias6ur9zmvJvwbOtJ6ixUpjK35UfuXT0sbc6a5cGnnUL5mcCXrzLchY3eC3HuH3Uh0/D9mofTOTtN9iw35PBr/Ac8U7vqA+qD5uBejEvV1kHSBKE5R22G1rFxXpUFJYPmYeA58heEtci8c45jURYWjAr6YsPtTBr6p1QtXvZiUhnAA9EqG/BL8GvF+HPAhZtt/Ep6IEFjWWXZEyZxhjcAsIVY6kJuM7G4jJYFaxpL6xBJXdgs7L3DZCXPuskrndJk1KfdVNat1CRLa/LF/QQxLhuX4PA/4VRxeHLBSZcWf99S27qvcugnIGo2dXu2sS82b2g/GU/MunLN0XKR9RXnZipcJeTeMnCR4FO+1/In8VEYLeinvEoIwVXoGXnxcJcGpfi/Fy21LB7I/QfuXRjHXqK8gK5zKKcge5qpOkLtH81MXGMwG1V9/qBRMNPJuMY1SJ6Zg5lwzDEepTJTCOyvUSXhBnJM/khigpQ1Qv9+L8DDEuGZcuXLmJy595j8JEMc8nuC1NlOYZQwYgoYo0vrHxDJYqMeAChgzKA1gouBzr1iKCjyip+TcPydMB03LYrV5B7uOogpwsP/EaDsTkPzzK6RwxgYYzbLC2ZleUPuA7/crA3mse/AtMIMvwuKgIR/JSndEl3GvmUJdIWrx7blVdY7bq36i1x4YU2iJHJpkW20V/ZNdWx0Fv1REywUgayt8QlCxGmUPVal73duXYUnWY+VQ5Vkvp1Ag0hWzxDsCsXKtreYa0/wDbifph/wDkpH0qKek5slT+CIaofwlXT1a/9MP+GH5h/wB0PqaXb0oftGVjP1D/ALmeGP0e9zIIYbq2kjuNCnKUn9MAvw3aQZgIXxSv8XKN2Iv0f+yWSW7IOyCu8DX+CATBIHSMWMyI3ofUAs5L8mJc6D+IMN6h7ePz/cKYvEpSSoVxhPc7rmPMHW38zcW1eWqOWAiW1MVH4jixHSNPq63CEMEwbVAtddYleJbjRl+6qUt1UOMD8x6hdbNH3OdTEKNn3uYnWIotw22VL6i1l282Y3BCipGSWhRzahznsOD76iAbC4lVV25rqG3MRWFkeviCur66Mct/MICcbEf7V7ghVYEpzTpqFMewB7H7lg2lxHBUByqDApdpbLOHlsg7m7CgEPbvqc3VboZs7UcmYEolD8gcGV/UE4ubQVrDspUiXl23DrBwRa6lX2IrB2HTqLvOkKi3pemJetOKgvvC7GOIgruagHj22wp4akoviWsDVT8BmYYyWD9LnBBXAfoYpCBtFdrgibPAo/mGxbGKaEFBQIhVs1BrbVCoYrPUGI40OBqpS3BgF9lwUjdg5be4fSpbgAbN6lmQ2Jw5hzC5q1qIuyH3/uYsKtqcFEDqLQa8BadkDjGVt7gxY52EBmfsodOLYW6TiLZmtcnpllt3zKfRULQeUNkDIQVQ9Ff5lSnC/dWRunxDrAWE/T/CKLUlTl81i
G04NeTdNFhBjiqVjdUX+Suos14DB3m7/UOlfVaPshiMBuGIXw1mWaer/wCkSLT+T/2Jf936ilV+I/7iREraYdFtsuA2+RGbJMKx8lJYIdJ/YV/UCVpV0n+iYILiy/qU5FqApirNIF6v1dxZbfwGYPzAryVXA85iHAPqGrsbZbeqMsKUJysHNv7I/FtkKAdFZwOIWOYw1Zsbz+IgC2um/lhhRL7yfqGKZ7xXaBmJzVNxbsY+KgZZbSfOFX3AboByDpRcx0HPYk/gIWAGjp9wJXC+oGmdIVbhE/uPyjmUfUb9WRDCBz+3CRAtrtSX6iStHACJ00uQJG30oN/zKAObBH5ghoDQbNAZh0hYGwesRpxTYNn3M8XUvGTdAbhRDqWQ5RfxLD8hS2NZ0IWX0ypT1Yqgdo3KBm0HyWMsIkDDQv7QutMrDgjS9trKAWqfiVhQ0OEdVHLE4pVKutai4IfbcRaHwVMBT9kIKi7Mv43KuOoPkbgk66BXXANRgEnuq/qUdpdmQ/1HgPoCBsd/B+poNfRSMQzT7Vxof3CgoFBxqV1DBEmURG919Ra5zFyNa+O4EC9qA4O+YLAIWyXNPMVlScBr5qcc8llH2wMABLUvYO/cGGRtbVwVnqYQBQ1/lg49ExPtDEHJvqC8nyxGE4ZV9wS4xFo6tbFUaFKj1/b+ojAGFMH1RhzbxQv7shIe6Av4JyvmEsVZAvISkembc1pl36c0Hmqz+5VygUUjd0R6OEhZTwJxHTZzQpPUpWRUKrftCMsCANFcymG0C8uqmp7kBXsgC3pZW4zFwW+kJkYmEfZbK8MpBpD8za0H5LYpgE5HmLL4S6a/E4AHRiLberLAAIU3doNi6JaY16Kl3gMYQQpHqXCTGK7iiHAEfctwAMl1ACDZGZIjAHhP9gmxYd0uZuDgbf8AyJllcAPVzMwCAqjBDDZgm385nymeL8C93FMbMMoyZIXZLu/zBTUZr2mXdxLcTNsaNvzO1Ms51/cA1T5ifvUIfUIUCO6GYMBDWH8SyIsutf4gQfGEPKHVDNpOYIr0gO7gJRge4B5I+k+5R4RBU1OiEBXdSdBaaYgwASymJ0xOmNu0DxLy8HMxgR5IdcC4IhiA9koep6SYdwzbCrCJ8qWgo3cHRiW6i1t8uplil/Gm+EDlhl7+IQriMAIlZgIkN1wwlhiFNqmbEbag5Z+WVoNtRWRiYR/HxADMInphBTljsbtmU1Z/gbzMPSuJWSeADDBlpK9R844ZlatMdyuLdW9S1tSrb3KFEVL9Eq0s0bgUsaYAOAPipUv1LmagX4Lwxu4kjlTQJqPVKbt6jpQ8BuZKUtrtcE6f3BHMwzcvFNF7iaBOiwmzwsOjqWBytSlBIVYSImoGtQTiAMqnDiEA6geoV4hhglzidqIWLEpFPq4I5H7lBiHJntZbuDhMI21AlSVV7uN2K5gwnXtqV7OxsqN3aLINwxATklvqX8RQiHuNdXFDzHOdDEsiibDDMuKdysqyYxKoqwgiWhZDUs7auJaGZbGLNcNRmwMZ4mIAqoKcwvLy3uWlstiyyDpAe40mHDcNKMM4mrBo9Rql+0o0V4q6xLhQY9w1j6eBRspuziNNtwcwblPH35CF9ZnqSnZHWZbiUjAm7j7cIfkQo4s4nLrTcUFojCAm0WJlBumAvA0YCENztcMQS5Y+BCDbCzczZgiXYl6wgbC/MM1MTBZNUS1kgJOBItSqTRheZaluO2c2/Ex/A6gOYM4Z8LlvH4wctYPgKMrrNz0kaSFfBcQMbTjNkVebSsAZEYVpqUXFUIMTOEVEzSZaSS9QXSoEwwdZSWPNSnWYcxGiy1hd7QEtxE6VC8oBhFOZbOXuCXgQz1JRZhEsa8GAimGoqB4BcGhixA8DEQc3Fc1LW7gsweg3Lo024ah5Q0wDmHMZ3IicQl3RmGShHATpwWJEjhZUcytCWLOYRDCktgtnuAFhmYO5vRP/2Q=='; diff --git 
a/packages/vertexai/src/methods/chrome-adapter.ts b/packages/vertexai/src/methods/chrome-adapter.ts new file mode 100644 index 00000000000..521e9ca7101 --- /dev/null +++ b/packages/vertexai/src/methods/chrome-adapter.ts @@ -0,0 +1,281 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { AIError } from '../errors'; +import { + CountTokensRequest, + GenerateContentRequest, + InferenceMode, + Part, + AIErrorCode +} from '../types'; +import { + Availability, + LanguageModel, + LanguageModelCreateOptions, + LanguageModelMessageContent +} from '../types/language-model'; + +/** + * Defines an inference "backend" that uses Chrome's on-device model, + * and encapsulates logic for detecting when on-device is possible. + */ +export class ChromeAdapter { + private isDownloading = false; + private downloadPromise: Promise | undefined; + private oldSession: LanguageModel | undefined; + constructor( + private languageModelProvider?: LanguageModel, + private mode?: InferenceMode, + private onDeviceParams: LanguageModelCreateOptions = {} + ) {} + + /** + * Checks if a given request can be made on-device. + * + *
    Encapsulates a few concerns: + *
  1. the mode
  2. API existence
  3. prompt formatting
  4. model availability, including triggering download if necessary
+ * + *

Pros: callers needn't be concerned with details of on-device availability.

+ *

Cons: this method spans a few concerns and splits request validation from usage. + * If instance variables weren't already part of the API, we could consider a better + * separation of concerns.

+ */ + async isAvailable(request: GenerateContentRequest): Promise { + if (this.mode === 'only_in_cloud') { + return false; + } + + // Triggers out-of-band download so model will eventually become available. + const availability = await this.downloadIfAvailable(); + + if (this.mode === 'only_on_device') { + return true; + } + + // Applies prefer_on_device logic. + return ( + availability === Availability.available && + ChromeAdapter.isOnDeviceRequest(request) + ); + } + + /** + * Generates content on device. + * + *

This is comparable to {@link GenerativeModel.generateContent} for generating content in + * Cloud.

+ * @param request a standard Vertex {@link GenerateContentRequest} + * @returns {@link Response}, so we can reuse common response formatting. + */ + async generateContent(request: GenerateContentRequest): Promise { + const session = await this.createSession(); + // TODO: support multiple content objects when Chrome supports + // sequence + const contents = await Promise.all( + request.contents[0].parts.map(ChromeAdapter.toLanguageModelMessageContent) + ); + const text = await session.prompt(contents); + return ChromeAdapter.toResponse(text); + } + + /** + * Generates content stream on device. + * + *

This is comparable to {@link GenerativeModel.generateContentStream} for generating content in + * Cloud.

+ * @param request a standard Vertex {@link GenerateContentRequest} + * @returns {@link Response}, so we can reuse common response formatting. + */ + async generateContentStream( + request: GenerateContentRequest + ): Promise { + const session = await this.createSession(); + // TODO: support multiple content objects when Chrome supports + // sequence + const contents = await Promise.all( + request.contents[0].parts.map(ChromeAdapter.toLanguageModelMessageContent) + ); + const stream = await session.promptStreaming(contents); + return ChromeAdapter.toStreamResponse(stream); + } + + async countTokens(_request: CountTokensRequest): Promise { + throw new AIError( + AIErrorCode.REQUEST_ERROR, + 'Count Tokens is not yet available for on-device model.' + ); + } + + /** + * Asserts inference for the given request can be performed by an on-device model. + */ + private static isOnDeviceRequest(request: GenerateContentRequest): boolean { + // Returns false if the prompt is empty. + if (request.contents.length === 0) { + return false; + } + + for (const content of request.contents) { + // Returns false if the request contains multiple roles, eg a chat history. + // TODO: remove this guard once LanguageModelMessage is supported. + if (content.role !== 'user') { + return false; + } + } + + return true; + } + + /** + * Encapsulates logic to get availability and download a model if one is downloadable. + */ + private async downloadIfAvailable(): Promise { + const availability = await this.languageModelProvider?.availability( + this.onDeviceParams + ); + + if (availability === Availability.downloadable) { + this.download(); + } + + return availability; + } + + /** + * Triggers out-of-band download of an on-device model. + * + *

Chrome only downloads models as needed. Chrome knows a model is needed when code calls + * LanguageModel.create.

+ * + *

Since Chrome manages the download, the SDK can only avoid redundant download requests by + * tracking if a download has previously been requested.

+ */ + private download(): void { + if (this.isDownloading) { + return; + } + this.isDownloading = true; + this.downloadPromise = this.languageModelProvider + ?.create(this.onDeviceParams) + .then(() => { + this.isDownloading = false; + }); + } + + /** + * Converts a Vertex Part object to a Chrome LanguageModelMessageContent object. + */ + private static async toLanguageModelMessageContent( + part: Part + ): Promise { + if (part.text) { + return { + type: 'text', + content: part.text + }; + } else if (part.inlineData) { + const formattedImageContent = await fetch( + `data:${part.inlineData.mimeType};base64,${part.inlineData.data}` + ); + const imageBlob = await formattedImageContent.blob(); + const imageBitmap = await createImageBitmap(imageBlob); + return { + type: 'image', + content: imageBitmap + }; + } + // Assumes contents have been verified to contain only a single TextPart. + // TODO: support other input types + throw new Error('Not yet implemented'); + } + + /** + * Abstracts Chrome session creation. + * + *

Chrome uses a multi-turn session for all inference. Vertex uses single-turn for all + * inference. To map the Vertex API to Chrome's API, the SDK creates a new session for all + * inference.

+ * + *

Chrome will remove a model from memory if it's no longer in use, so this method ensures a + * new session is created before an old session is destroyed.

+ */ + private async createSession(): Promise { + if (!this.languageModelProvider) { + throw new AIError( + AIErrorCode.REQUEST_ERROR, + 'Chrome AI requested for unsupported browser version.' + ); + } + const newSession = await this.languageModelProvider.create( + this.onDeviceParams + ); + if (this.oldSession) { + this.oldSession.destroy(); + } + // Holds session reference, so model isn't unloaded from memory. + this.oldSession = newSession; + return newSession; + } + + /** + * Formats string returned by Chrome as a {@link Response} returned by Vertex. + */ + private static toResponse(text: string): Response { + return { + json: async () => ({ + candidates: [ + { + content: { + parts: [{ text }] + } + } + ] + }) + } as Response; + } + + /** + * Formats string stream returned by Chrome as SSE returned by Vertex. + */ + private static toStreamResponse(stream: ReadableStream): Response { + const encoder = new TextEncoder(); + return { + body: stream.pipeThrough( + new TransformStream({ + transform(chunk, controller) { + const json = JSON.stringify({ + candidates: [ + { + content: { + role: 'model', + parts: [{ text: chunk }] + } + } + ] + }); + controller.enqueue(encoder.encode(`data: ${json}\n\n`)); + } + }) + ) + } as Response; + } +} diff --git a/packages/vertexai/src/methods/count-tokens.test.ts b/packages/vertexai/src/methods/count-tokens.test.ts index 7e04ddb3561..78c51d3f5b7 100644 --- a/packages/vertexai/src/methods/count-tokens.test.ts +++ b/packages/vertexai/src/methods/count-tokens.test.ts @@ -27,6 +27,7 @@ import { ApiSettings } from '../types/internal'; import { Task } from '../requests/request'; import { mapCountTokensRequest } from '../googleai-mappers'; import { GoogleAIBackend, VertexAIBackend } from '../backend'; +import { ChromeAdapter } from './chrome-adapter'; use(sinonChai); use(chaiAsPromised); @@ -66,7 +67,8 @@ describe('countTokens()', () => { const result = await countTokens( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, 
+ new ChromeAdapter() ); expect(result.totalTokens).to.equal(6); expect(result.totalBillableCharacters).to.equal(16); @@ -92,7 +94,8 @@ describe('countTokens()', () => { const result = await countTokens( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.totalTokens).to.equal(1837); expect(result.totalBillableCharacters).to.equal(117); @@ -120,7 +123,8 @@ describe('countTokens()', () => { const result = await countTokens( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.totalTokens).to.equal(258); expect(result).to.not.have.property('totalBillableCharacters'); @@ -146,7 +150,12 @@ describe('countTokens()', () => { json: mockResponse.json } as Response); await expect( - countTokens(fakeApiSettings, 'model', fakeRequestParams) + countTokens( + fakeApiSettings, + 'model', + fakeRequestParams, + new ChromeAdapter() + ) ).to.be.rejectedWith(/404.*not found/); expect(mockFetch).to.be.called; }); @@ -164,7 +173,12 @@ describe('countTokens()', () => { it('maps request to GoogleAI format', async () => { makeRequestStub.resolves({ ok: true, json: () => {} } as Response); // Unused - await countTokens(fakeGoogleAIApiSettings, 'model', fakeRequestParams); + await countTokens( + fakeGoogleAIApiSettings, + 'model', + fakeRequestParams, + new ChromeAdapter() + ); expect(makeRequestStub).to.be.calledWith( 'model', @@ -176,4 +190,24 @@ describe('countTokens()', () => { ); }); }); + it('on-device', async () => { + const chromeAdapter = new ChromeAdapter(); + const isAvailableStub = stub(chromeAdapter, 'isAvailable').resolves(true); + const mockResponse = getMockResponse( + 'vertexAI', + 'unary-success-total-tokens.json' + ); + const countTokensStub = stub(chromeAdapter, 'countTokens').resolves( + mockResponse as Response + ); + const result = await countTokens( + fakeApiSettings, + 'model', + fakeRequestParams, + chromeAdapter + ); + expect(result.totalTokens).eq(6); + 
expect(isAvailableStub).to.be.called; + expect(countTokensStub).to.be.calledWith(fakeRequestParams); + }); }); diff --git a/packages/vertexai/src/methods/count-tokens.ts b/packages/vertexai/src/methods/count-tokens.ts index b1e60e3a182..81fb3ad061d 100644 --- a/packages/vertexai/src/methods/count-tokens.ts +++ b/packages/vertexai/src/methods/count-tokens.ts @@ -24,8 +24,9 @@ import { Task, makeRequest } from '../requests/request'; import { ApiSettings } from '../types/internal'; import * as GoogleAIMapper from '../googleai-mappers'; import { BackendType } from '../public-types'; +import { ChromeAdapter } from './chrome-adapter'; -export async function countTokens( +export async function countTokensOnCloud( apiSettings: ApiSettings, model: string, params: CountTokensRequest, @@ -48,3 +49,17 @@ export async function countTokens( ); return response.json(); } + +export async function countTokens( + apiSettings: ApiSettings, + model: string, + params: CountTokensRequest, + chromeAdapter: ChromeAdapter, + requestOptions?: RequestOptions +): Promise { + if (await chromeAdapter.isAvailable(params)) { + return (await chromeAdapter.countTokens(params)).json(); + } + + return countTokensOnCloud(apiSettings, model, params, requestOptions); +} diff --git a/packages/vertexai/src/methods/generate-content.test.ts b/packages/vertexai/src/methods/generate-content.test.ts index 13250fd83dd..16a48f473ad 100644 --- a/packages/vertexai/src/methods/generate-content.test.ts +++ b/packages/vertexai/src/methods/generate-content.test.ts @@ -34,6 +34,7 @@ import { Task } from '../requests/request'; import { AIError } from '../api'; import { mapGenerateContentRequest } from '../googleai-mappers'; import { GoogleAIBackend, VertexAIBackend } from '../backend'; +import { ChromeAdapter } from './chrome-adapter'; use(sinonChai); use(chaiAsPromised); @@ -96,7 +97,8 @@ describe('generateContent()', () => { const result = await generateContent( fakeApiSettings, 'model', - fakeRequestParams + 
fakeRequestParams, + new ChromeAdapter() ); expect(result.response.text()).to.include('Mountain View, California'); expect(makeRequestStub).to.be.calledWith( @@ -119,7 +121,8 @@ describe('generateContent()', () => { const result = await generateContent( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.response.text()).to.include('Use Freshly Ground Coffee'); expect(result.response.text()).to.include('30 minutes of brewing'); @@ -142,7 +145,8 @@ describe('generateContent()', () => { const result = await generateContent( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.response.usageMetadata?.totalTokenCount).to.equal(1913); expect(result.response.usageMetadata?.candidatesTokenCount).to.equal(76); @@ -177,7 +181,8 @@ describe('generateContent()', () => { const result = await generateContent( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.response.text()).to.include( 'Some information cited from an external source' @@ -204,7 +209,8 @@ describe('generateContent()', () => { const result = await generateContent( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.response.text).to.throw('SAFETY'); expect(makeRequestStub).to.be.calledWith( @@ -226,7 +232,8 @@ describe('generateContent()', () => { const result = await generateContent( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.response.text).to.throw('SAFETY'); expect(makeRequestStub).to.be.calledWith( @@ -248,7 +255,8 @@ describe('generateContent()', () => { const result = await generateContent( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.response.text()).to.equal(''); expect(makeRequestStub).to.be.calledWith( @@ -270,7 +278,8 @@ describe('generateContent()', () => { const 
result = await generateContent( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.response.text()).to.include('Some text'); expect(makeRequestStub).to.be.calledWith( @@ -292,7 +301,12 @@ describe('generateContent()', () => { json: mockResponse.json } as Response); await expect( - generateContent(fakeApiSettings, 'model', fakeRequestParams) + generateContent( + fakeApiSettings, + 'model', + fakeRequestParams, + new ChromeAdapter() + ) ).to.be.rejectedWith(/400.*invalid argument/); expect(mockFetch).to.be.called; }); @@ -307,7 +321,12 @@ describe('generateContent()', () => { json: mockResponse.json } as Response); await expect( - generateContent(fakeApiSettings, 'model', fakeRequestParams) + generateContent( + fakeApiSettings, + 'model', + fakeRequestParams, + new ChromeAdapter() + ) ).to.be.rejectedWith( /firebasevertexai\.googleapis[\s\S]*my-project[\s\S]*api-not-enabled/ ); @@ -347,7 +366,8 @@ describe('generateContent()', () => { generateContent( fakeGoogleAIApiSettings, 'model', - requestParamsWithMethod + requestParamsWithMethod, + new ChromeAdapter() ) ).to.be.rejectedWith(AIError, AIErrorCode.UNSUPPORTED); expect(makeRequestStub).to.not.be.called; @@ -362,7 +382,8 @@ describe('generateContent()', () => { await generateContent( fakeGoogleAIApiSettings, 'model', - fakeGoogleAIRequestParams + fakeGoogleAIRequestParams, + new ChromeAdapter() ); expect(makeRequestStub).to.be.calledWith( @@ -375,4 +396,25 @@ describe('generateContent()', () => { ); }); }); + // TODO: define a similar test for generateContentStream + it('on-device', async () => { + const chromeAdapter = new ChromeAdapter(); + const isAvailableStub = stub(chromeAdapter, 'isAvailable').resolves(true); + const mockResponse = getMockResponse( + 'vertexAI', + 'unary-success-basic-reply-short.json' + ); + const generateContentStub = stub(chromeAdapter, 'generateContent').resolves( + mockResponse as Response + ); + const result = await 
generateContent( + fakeApiSettings, + 'model', + fakeRequestParams, + chromeAdapter + ); + expect(result.response.text()).to.include('Mountain View, California'); + expect(isAvailableStub).to.be.called; + expect(generateContentStub).to.be.calledWith(fakeRequestParams); + }); }); diff --git a/packages/vertexai/src/methods/generate-content.ts b/packages/vertexai/src/methods/generate-content.ts index 5f7902f5954..ff99b306855 100644 --- a/packages/vertexai/src/methods/generate-content.ts +++ b/packages/vertexai/src/methods/generate-content.ts @@ -28,17 +28,18 @@ import { processStream } from '../requests/stream-reader'; import { ApiSettings } from '../types/internal'; import * as GoogleAIMapper from '../googleai-mappers'; import { BackendType } from '../public-types'; +import { ChromeAdapter } from './chrome-adapter'; -export async function generateContentStream( +async function generateContentStreamOnCloud( apiSettings: ApiSettings, model: string, params: GenerateContentRequest, requestOptions?: RequestOptions -): Promise { +): Promise { if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { params = GoogleAIMapper.mapGenerateContentRequest(params); } - const response = await makeRequest( + return makeRequest( model, Task.STREAM_GENERATE_CONTENT, apiSettings, @@ -46,19 +47,39 @@ export async function generateContentStream( JSON.stringify(params), requestOptions ); +} + +export async function generateContentStream( + apiSettings: ApiSettings, + model: string, + params: GenerateContentRequest, + chromeAdapter: ChromeAdapter, + requestOptions?: RequestOptions +): Promise { + let response; + if (await chromeAdapter.isAvailable(params)) { + response = await chromeAdapter.generateContentStream(params); + } else { + response = await generateContentStreamOnCloud( + apiSettings, + model, + params, + requestOptions + ); + } return processStream(response, apiSettings); // TODO: Map streaming responses } -export async function generateContent( +async function 
generateContentOnCloud( apiSettings: ApiSettings, model: string, params: GenerateContentRequest, requestOptions?: RequestOptions -): Promise { +): Promise { if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { params = GoogleAIMapper.mapGenerateContentRequest(params); } - const response = await makeRequest( + return makeRequest( model, Task.GENERATE_CONTENT, apiSettings, @@ -66,6 +87,26 @@ export async function generateContent( JSON.stringify(params), requestOptions ); +} + +export async function generateContent( + apiSettings: ApiSettings, + model: string, + params: GenerateContentRequest, + chromeAdapter: ChromeAdapter, + requestOptions?: RequestOptions +): Promise { + let response; + if (await chromeAdapter.isAvailable(params)) { + response = await chromeAdapter.generateContent(params); + } else { + response = await generateContentOnCloud( + apiSettings, + model, + params, + requestOptions + ); + } const generateContentResponse = await processGenerateContentResponse( response, apiSettings diff --git a/packages/vertexai/src/models/generative-model.test.ts b/packages/vertexai/src/models/generative-model.test.ts index 3ce7173e03e..71d4be823ee 100644 --- a/packages/vertexai/src/models/generative-model.test.ts +++ b/packages/vertexai/src/models/generative-model.test.ts @@ -22,6 +22,7 @@ import { match, restore, stub } from 'sinon'; import { getMockResponse } from '../../test-utils/mock-response'; import sinonChai from 'sinon-chai'; import { VertexAIBackend } from '../backend'; +import { ChromeAdapter } from '../methods/chrome-adapter'; use(sinonChai); @@ -41,21 +42,27 @@ const fakeAI: AI = { describe('GenerativeModel', () => { it('passes params through to generateContent', async () => { - const genModel = new GenerativeModel(fakeAI, { - model: 'my-model', - tools: [ - { - functionDeclarations: [ - { - name: 'myfunc', - description: 'mydesc' - } - ] - } - ], - toolConfig: { functionCallingConfig: { mode: FunctionCallingMode.NONE } }, - systemInstruction: { 
role: 'system', parts: [{ text: 'be friendly' }] } - }); + const genModel = new GenerativeModel( + fakeAI, + { + model: 'my-model', + tools: [ + { + functionDeclarations: [ + { + name: 'myfunc', + description: 'mydesc' + } + ] + } + ], + toolConfig: { + functionCallingConfig: { mode: FunctionCallingMode.NONE } + }, + systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] } + }, + new ChromeAdapter() + ); expect(genModel.tools?.length).to.equal(1); expect(genModel.toolConfig?.functionCallingConfig?.mode).to.equal( FunctionCallingMode.NONE @@ -86,10 +93,14 @@ describe('GenerativeModel', () => { restore(); }); it('passes text-only systemInstruction through to generateContent', async () => { - const genModel = new GenerativeModel(fakeAI, { - model: 'my-model', - systemInstruction: 'be friendly' - }); + const genModel = new GenerativeModel( + fakeAI, + { + model: 'my-model', + systemInstruction: 'be friendly' + }, + new ChromeAdapter() + ); expect(genModel.systemInstruction?.parts[0].text).to.equal('be friendly'); const mockResponse = getMockResponse( 'vertexAI', @@ -112,21 +123,27 @@ describe('GenerativeModel', () => { restore(); }); it('generateContent overrides model values', async () => { - const genModel = new GenerativeModel(fakeAI, { - model: 'my-model', - tools: [ - { - functionDeclarations: [ - { - name: 'myfunc', - description: 'mydesc' - } - ] - } - ], - toolConfig: { functionCallingConfig: { mode: FunctionCallingMode.NONE } }, - systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] } - }); + const genModel = new GenerativeModel( + fakeAI, + { + model: 'my-model', + tools: [ + { + functionDeclarations: [ + { + name: 'myfunc', + description: 'mydesc' + } + ] + } + ], + toolConfig: { + functionCallingConfig: { mode: FunctionCallingMode.NONE } + }, + systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] } + }, + new ChromeAdapter() + ); expect(genModel.tools?.length).to.equal(1); 
expect(genModel.toolConfig?.functionCallingConfig?.mode).to.equal( FunctionCallingMode.NONE @@ -168,14 +185,20 @@ describe('GenerativeModel', () => { restore(); }); it('passes params through to chat.sendMessage', async () => { - const genModel = new GenerativeModel(fakeAI, { - model: 'my-model', - tools: [ - { functionDeclarations: [{ name: 'myfunc', description: 'mydesc' }] } - ], - toolConfig: { functionCallingConfig: { mode: FunctionCallingMode.NONE } }, - systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] } - }); + const genModel = new GenerativeModel( + fakeAI, + { + model: 'my-model', + tools: [ + { functionDeclarations: [{ name: 'myfunc', description: 'mydesc' }] } + ], + toolConfig: { + functionCallingConfig: { mode: FunctionCallingMode.NONE } + }, + systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] } + }, + new ChromeAdapter() + ); expect(genModel.tools?.length).to.equal(1); expect(genModel.toolConfig?.functionCallingConfig?.mode).to.equal( FunctionCallingMode.NONE @@ -206,10 +229,14 @@ describe('GenerativeModel', () => { restore(); }); it('passes text-only systemInstruction through to chat.sendMessage', async () => { - const genModel = new GenerativeModel(fakeAI, { - model: 'my-model', - systemInstruction: 'be friendly' - }); + const genModel = new GenerativeModel( + fakeAI, + { + model: 'my-model', + systemInstruction: 'be friendly' + }, + new ChromeAdapter() + ); expect(genModel.systemInstruction?.parts[0].text).to.equal('be friendly'); const mockResponse = getMockResponse( 'vertexAI', @@ -232,14 +259,20 @@ describe('GenerativeModel', () => { restore(); }); it('startChat overrides model values', async () => { - const genModel = new GenerativeModel(fakeAI, { - model: 'my-model', - tools: [ - { functionDeclarations: [{ name: 'myfunc', description: 'mydesc' }] } - ], - toolConfig: { functionCallingConfig: { mode: FunctionCallingMode.NONE } }, - systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] } - 
}); + const genModel = new GenerativeModel( + fakeAI, + { + model: 'my-model', + tools: [ + { functionDeclarations: [{ name: 'myfunc', description: 'mydesc' }] } + ], + toolConfig: { + functionCallingConfig: { mode: FunctionCallingMode.NONE } + }, + systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] } + }, + new ChromeAdapter() + ); expect(genModel.tools?.length).to.equal(1); expect(genModel.toolConfig?.functionCallingConfig?.mode).to.equal( FunctionCallingMode.NONE @@ -284,7 +317,11 @@ describe('GenerativeModel', () => { restore(); }); it('calls countTokens', async () => { - const genModel = new GenerativeModel(fakeAI, { model: 'my-model' }); + const genModel = new GenerativeModel( + fakeAI, + { model: 'my-model' }, + new ChromeAdapter() + ); const mockResponse = getMockResponse( 'vertexAI', 'unary-success-total-tokens.json' diff --git a/packages/vertexai/src/models/generative-model.ts b/packages/vertexai/src/models/generative-model.ts index 2e7ed93eeb8..02965043d4e 100644 --- a/packages/vertexai/src/models/generative-model.ts +++ b/packages/vertexai/src/models/generative-model.ts @@ -43,12 +43,17 @@ import { } from '../requests/request-helpers'; import { AI } from '../public-types'; import { AIModel } from './genai-model'; +import { ChromeAdapter } from '../methods/chrome-adapter'; /** * Class for generative model APIs. * @public */ export class GenerativeModel extends AIModel { + /** + * Defines the name of the default in-cloud model to use for hybrid inference. 
+ */ + static DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.0-flash-lite'; generationConfig: GenerationConfig; safetySettings: SafetySetting[]; requestOptions?: RequestOptions; @@ -59,6 +64,7 @@ export class GenerativeModel extends AIModel { constructor( ai: AI, modelParams: ModelParams, + private chromeAdapter: ChromeAdapter, requestOptions?: RequestOptions ) { super(ai, modelParams.model); @@ -91,6 +97,7 @@ export class GenerativeModel extends AIModel { systemInstruction: this.systemInstruction, ...formattedParams }, + this.chromeAdapter, this.requestOptions ); } @@ -116,6 +123,7 @@ export class GenerativeModel extends AIModel { systemInstruction: this.systemInstruction, ...formattedParams }, + this.chromeAdapter, this.requestOptions ); } @@ -128,6 +136,7 @@ export class GenerativeModel extends AIModel { return new ChatSession( this._apiSettings, this.model, + this.chromeAdapter, { tools: this.tools, toolConfig: this.toolConfig, @@ -145,6 +154,11 @@ export class GenerativeModel extends AIModel { request: CountTokensRequest | string | Array ): Promise { const formattedParams = formatGenerateContentInput(request); - return countTokens(this._apiSettings, this.model, formattedParams); + return countTokens( + this._apiSettings, + this.model, + formattedParams, + this.chromeAdapter + ); } } diff --git a/packages/vertexai/src/types/language-model.ts b/packages/vertexai/src/types/language-model.ts new file mode 100644 index 00000000000..cd84f22dbdb --- /dev/null +++ b/packages/vertexai/src/types/language-model.ts @@ -0,0 +1,82 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +export interface LanguageModel extends EventTarget { + create(options?: LanguageModelCreateOptions): Promise; + availability(options?: LanguageModelCreateCoreOptions): Promise; + prompt( + input: LanguageModelPrompt, + options?: LanguageModelPromptOptions + ): Promise; + promptStreaming( + input: LanguageModelPrompt, + options?: LanguageModelPromptOptions + ): ReadableStream; + measureInputUsage( + input: LanguageModelPrompt, + options?: LanguageModelPromptOptions + ): Promise; + destroy(): undefined; +} +export enum Availability { + 'unavailable' = 'unavailable', + 'downloadable' = 'downloadable', + 'downloading' = 'downloading', + 'available' = 'available' +} +export interface LanguageModelCreateCoreOptions { + topK?: number; + temperature?: number; + expectedInputs?: LanguageModelExpectedInput[]; +} +export interface LanguageModelCreateOptions + extends LanguageModelCreateCoreOptions { + signal?: AbortSignal; + systemPrompt?: string; + initialPrompts?: LanguageModelInitialPrompts; +} +interface LanguageModelPromptOptions { + signal?: AbortSignal; +} +interface LanguageModelExpectedInput { + type: LanguageModelMessageType; + languages?: string[]; +} +// TODO: revert to type from Prompt API explainer once it's supported. 
+export type LanguageModelPrompt = LanguageModelMessageContent[]; +type LanguageModelInitialPrompts = + | LanguageModelMessage[] + | LanguageModelMessageShorthand[]; +interface LanguageModelMessage { + role: LanguageModelMessageRole; + content: LanguageModelMessageContent[]; +} +interface LanguageModelMessageShorthand { + role: LanguageModelMessageRole; + content: string; +} +export interface LanguageModelMessageContent { + type: LanguageModelMessageType; + content: LanguageModelMessageContentValue; +} +type LanguageModelMessageRole = 'system' | 'user' | 'assistant'; +type LanguageModelMessageType = 'text' | 'image' | 'audio'; +type LanguageModelMessageContentValue = + | ImageBitmapSource + | AudioBuffer + | BufferSource + | string; diff --git a/packages/vertexai/src/types/requests.ts b/packages/vertexai/src/types/requests.ts index 33ed804bb9f..36700b5a936 100644 --- a/packages/vertexai/src/types/requests.ts +++ b/packages/vertexai/src/types/requests.ts @@ -17,6 +17,7 @@ import { TypedSchema } from '../requests/schema-builder'; import { Content, Part } from './content'; +import { LanguageModelCreateOptions } from './language-model'; import { FunctionCallingMode, HarmBlockMethod, @@ -218,3 +219,29 @@ export interface FunctionCallingConfig { mode?: FunctionCallingMode; allowedFunctionNames?: string[]; } + +/** + * Toggles hybrid inference. + */ +export interface HybridParams { + /** + * Specifies on-device or in-cloud inference. Defaults to prefer on-device. + */ + mode: InferenceMode; + /** + * Optional. Specifies advanced params for on-device inference. + */ + onDeviceParams?: LanguageModelCreateOptions; + /** + * Optional. Specifies advanced params for in-cloud inference. + */ + inCloudParams?: ModelParams; +} + +/** + * Determines whether inference happens on-device or in-cloud. + */ +export type InferenceMode = + | 'prefer_on_device' + | 'only_on_device' + | 'only_in_cloud';