1 |
- {"version":3,"file":"firebase-database.js","sources":["../util/src/constants.ts","../util/src/assert.ts","../util/src/crypt.ts","../util/src/deepCopy.ts","../util/src/defaults.ts","../util/src/global.ts","../util/src/deferred.ts","../util/src/environment.ts","../util/src/json.ts","../util/src/jwt.ts","../util/src/obj.ts","../util/src/sha1.ts","../util/src/validation.ts","../util/src/utf8.ts","../util/src/compat.ts","../component/src/component.ts","../logger/src/logger.ts","../database/src/core/version.ts","../database/src/core/storage/DOMStorageWrapper.ts","../database/src/core/storage/MemoryStorage.ts","../database/src/core/storage/storage.ts","../database/src/core/util/util.ts","../database/src/core/AppCheckTokenProvider.ts","../database/src/core/AuthTokenProvider.ts","../database/src/realtime/Constants.ts","../database/src/core/RepoInfo.ts","../database/src/core/stats/StatsCollection.ts","../database/src/core/stats/StatsManager.ts","../database/src/realtime/polling/PacketReceiver.ts","../database/src/realtime/BrowserPollConnection.ts","../database/src/realtime/WebSocketConnection.ts","../database/src/realtime/TransportManager.ts","../database/src/realtime/Connection.ts","../database/src/core/ServerActions.ts","../database/src/core/util/EventEmitter.ts","../database/src/core/util/OnlineMonitor.ts","../database/src/core/util/Path.ts","../database/src/core/util/VisibilityMonitor.ts","../database/src/core/PersistentConnection.ts","../database/src/core/snap/Node.ts","../database/src/core/snap/indexes/Index.ts","../database/src/core/snap/indexes/KeyIndex.ts","../database/src/core/util/SortedMap.ts","../database/src/core/snap/comparators.ts","../database/src/core/snap/snap.ts","../database/src/core/snap/LeafNode.ts","../database/src/core/snap/indexes/PriorityIndex.ts","../database/src/core/snap/childSet.ts","../database/src/core/snap/IndexMap.ts","../database/src/core/snap/ChildrenNode.ts","../database/src/core/snap/nodeFromJSON.ts","../database/src/core/snap/indexes/PathIndex.ts","../database/src/core/snap/indexes/ValueIndex.ts","../database/src/core/view/Change.ts","../database/src/core/view/filter/IndexedFilter.ts","../database/src/core/view/filter/RangedFilter.ts","../database/src/core/view/filter/LimitedFilter.ts","../database/src/core/view/QueryParams.ts","../database/src/core/ReadonlyRestClient.ts","../util/src/query.ts","../database/src/core/SnapshotHolder.ts","../database/src/core/SparseSnapshotTree.ts","../database/src/core/stats/StatsListener.ts","../database/src/core/stats/StatsReporter.ts","../database/src/core/operation/Operation.ts","../database/src/core/operation/AckUserWrite.ts","../database/src/core/operation/ListenComplete.ts","../database/src/core/operation/Overwrite.ts","../database/src/core/operation/Merge.ts","../database/src/core/view/CacheNode.ts","../database/src/core/view/EventGenerator.ts","../database/src/core/view/ViewCache.ts","../database/src/core/util/ImmutableTree.ts","../database/src/core/CompoundWrite.ts","../database/src/core/WriteTree.ts","../database/src/core/view/ChildChangeAccumulator.ts","../database/src/core/view/CompleteChildSource.ts","../database/src/core/view/ViewProcessor.ts","../database/src/core/view/View.ts","../database/src/core/SyncPoint.ts","../database/src/core/SyncTree.ts","../database/src/core/util/ServerValues.ts","../database/src/core/util/Tree.ts","../database/src/core/util/validation.ts","../database/src/core/view/EventQueue.ts","../database/src/core/Repo.ts","../database/src/core/util/libs/parser.ts","../database/src/core/util/NextPushId.ts","../database/src/core/view/Event.ts","../database/src/core/view/EventRegistration.ts","../database/src/api/OnDisconnect.ts","../database/src/api/Reference_impl.ts","../database/src/api/Database.ts","../util/src/emulator.ts","../database/src/api/ServerValue.ts","../database/src/api/Transaction.ts","../database/src/api/test_access.ts","../database/src/register.ts"],"sourcesContent":["/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * @fileoverview Firebase constants. Some of these (@defines) can be overridden at compile-time.\n */\n\nexport const CONSTANTS = {\n /**\n * @define {boolean} Whether this is the client Node.js SDK.\n */\n NODE_CLIENT: false,\n /**\n * @define {boolean} Whether this is the Admin Node.js SDK.\n */\n NODE_ADMIN: false,\n\n /**\n * Firebase SDK Version\n */\n SDK_VERSION: '${JSCORE_VERSION}'\n};\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { CONSTANTS } from './constants';\n\n/**\n * Throws an error if the provided assertion is falsy\n */\nexport const assert = function (assertion: unknown, message: string): void {\n if (!assertion) {\n throw assertionError(message);\n }\n};\n\n/**\n * Returns an Error object suitable for throwing.\n */\nexport const assertionError = function (message: string): Error {\n return new Error(\n 'Firebase Database (' +\n CONSTANTS.SDK_VERSION +\n ') INTERNAL ASSERT FAILED: ' +\n message\n );\n};\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nconst stringToByteArray = function (str: string): number[] {\n // TODO(user): Use native implementations if/when available\n const out: number[] = [];\n let p = 0;\n for (let i = 0; i < str.length; i++) {\n let c = str.charCodeAt(i);\n if (c < 128) {\n out[p++] = c;\n } else if (c < 2048) {\n out[p++] = (c >> 6) | 192;\n out[p++] = (c & 63) | 128;\n } else if (\n (c & 0xfc00) === 0xd800 &&\n i + 1 < str.length &&\n (str.charCodeAt(i + 1) & 0xfc00) === 0xdc00\n ) {\n // Surrogate Pair\n c = 0x10000 + ((c & 0x03ff) << 10) + (str.charCodeAt(++i) & 0x03ff);\n out[p++] = (c >> 18) | 240;\n out[p++] = ((c >> 12) & 63) | 128;\n out[p++] = ((c >> 6) & 63) | 128;\n out[p++] = (c & 63) | 128;\n } else {\n out[p++] = (c >> 12) | 224;\n out[p++] = ((c >> 6) & 63) | 128;\n out[p++] = (c & 63) | 128;\n }\n }\n return out;\n};\n\n/**\n * Turns an array of numbers into the string given by the concatenation of the\n * characters to which the numbers correspond.\n * @param bytes Array of numbers representing characters.\n * @return Stringification of the array.\n */\nconst byteArrayToString = function (bytes: number[]): string {\n // TODO(user): Use native implementations if/when available\n const out: string[] = [];\n let pos = 0,\n c = 0;\n while (pos < bytes.length) {\n const c1 = bytes[pos++];\n if (c1 < 128) {\n out[c++] = String.fromCharCode(c1);\n } else if (c1 > 191 && c1 < 224) {\n const c2 = bytes[pos++];\n out[c++] = String.fromCharCode(((c1 & 31) << 6) | (c2 & 63));\n } else if (c1 > 239 && c1 < 365) {\n // Surrogate Pair\n const c2 = bytes[pos++];\n const c3 = bytes[pos++];\n const c4 = bytes[pos++];\n const u =\n (((c1 & 7) << 18) | ((c2 & 63) << 12) | ((c3 & 63) << 6) | (c4 & 63)) -\n 0x10000;\n out[c++] = String.fromCharCode(0xd800 + (u >> 10));\n out[c++] = String.fromCharCode(0xdc00 + (u & 1023));\n } else {\n const c2 = bytes[pos++];\n const c3 = bytes[pos++];\n out[c++] = String.fromCharCode(\n ((c1 & 15) << 12) | ((c2 & 63) << 6) | (c3 & 63)\n );\n }\n }\n return out.join('');\n};\n\ninterface Base64 {\n byteToCharMap_: { [key: number]: string } | null;\n charToByteMap_: { [key: string]: number } | null;\n byteToCharMapWebSafe_: { [key: number]: string } | null;\n charToByteMapWebSafe_: { [key: string]: number } | null;\n ENCODED_VALS_BASE: string;\n readonly ENCODED_VALS: string;\n readonly ENCODED_VALS_WEBSAFE: string;\n HAS_NATIVE_SUPPORT: boolean;\n encodeByteArray(input: number[] | Uint8Array, webSafe?: boolean): string;\n encodeString(input: string, webSafe?: boolean): string;\n decodeString(input: string, webSafe: boolean): string;\n decodeStringToByteArray(input: string, webSafe: boolean): number[];\n init_(): void;\n}\n\n// We define it as an object literal instead of a class because a class compiled down to es5 can't\n// be treeshaked. https://github.com/rollup/rollup/issues/1691\n// Static lookup maps, lazily populated by init_()\nexport const base64: Base64 = {\n /**\n * Maps bytes to characters.\n */\n byteToCharMap_: null,\n\n /**\n * Maps characters to bytes.\n */\n charToByteMap_: null,\n\n /**\n * Maps bytes to websafe characters.\n * @private\n */\n byteToCharMapWebSafe_: null,\n\n /**\n * Maps websafe characters to bytes.\n * @private\n */\n charToByteMapWebSafe_: null,\n\n /**\n * Our default alphabet, shared between\n * ENCODED_VALS and ENCODED_VALS_WEBSAFE\n */\n ENCODED_VALS_BASE:\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + 'abcdefghijklmnopqrstuvwxyz' + '0123456789',\n\n /**\n * Our default alphabet. Value 64 (=) is special; it means \"nothing.\"\n */\n get ENCODED_VALS() {\n return this.ENCODED_VALS_BASE + '+/=';\n },\n\n /**\n * Our websafe alphabet.\n */\n get ENCODED_VALS_WEBSAFE() {\n return this.ENCODED_VALS_BASE + '-_.';\n },\n\n /**\n * Whether this browser supports the atob and btoa functions. This extension\n * started at Mozilla but is now implemented by many browsers. We use the\n * ASSUME_* variables to avoid pulling in the full useragent detection library\n * but still allowing the standard per-browser compilations.\n *\n */\n HAS_NATIVE_SUPPORT: typeof atob === 'function',\n\n /**\n * Base64-encode an array of bytes.\n *\n * @param input An array of bytes (numbers with\n * value in [0, 255]) to encode.\n * @param webSafe Boolean indicating we should use the\n * alternative alphabet.\n * @return The base64 encoded string.\n */\n encodeByteArray(input: number[] | Uint8Array, webSafe?: boolean): string {\n if (!Array.isArray(input)) {\n throw Error('encodeByteArray takes an array as a parameter');\n }\n\n this.init_();\n\n const byteToCharMap = webSafe\n ? this.byteToCharMapWebSafe_!\n : this.byteToCharMap_!;\n\n const output = [];\n\n for (let i = 0; i < input.length; i += 3) {\n const byte1 = input[i];\n const haveByte2 = i + 1 < input.length;\n const byte2 = haveByte2 ? input[i + 1] : 0;\n const haveByte3 = i + 2 < input.length;\n const byte3 = haveByte3 ? input[i + 2] : 0;\n\n const outByte1 = byte1 >> 2;\n const outByte2 = ((byte1 & 0x03) << 4) | (byte2 >> 4);\n let outByte3 = ((byte2 & 0x0f) << 2) | (byte3 >> 6);\n let outByte4 = byte3 & 0x3f;\n\n if (!haveByte3) {\n outByte4 = 64;\n\n if (!haveByte2) {\n outByte3 = 64;\n }\n }\n\n output.push(\n byteToCharMap[outByte1],\n byteToCharMap[outByte2],\n byteToCharMap[outByte3],\n byteToCharMap[outByte4]\n );\n }\n\n return output.join('');\n },\n\n /**\n * Base64-encode a string.\n *\n * @param input A string to encode.\n * @param webSafe If true, we should use the\n * alternative alphabet.\n * @return The base64 encoded string.\n */\n encodeString(input: string, webSafe?: boolean): string {\n // Shortcut for Mozilla browsers that implement\n // a native base64 encoder in the form of \"btoa/atob\"\n if (this.HAS_NATIVE_SUPPORT && !webSafe) {\n return btoa(input);\n }\n return this.encodeByteArray(stringToByteArray(input), webSafe);\n },\n\n /**\n * Base64-decode a string.\n *\n * @param input to decode.\n * @param webSafe True if we should use the\n * alternative alphabet.\n * @return string representing the decoded value.\n */\n decodeString(input: string, webSafe: boolean): string {\n // Shortcut for Mozilla browsers that implement\n // a native base64 encoder in the form of \"btoa/atob\"\n if (this.HAS_NATIVE_SUPPORT && !webSafe) {\n return atob(input);\n }\n return byteArrayToString(this.decodeStringToByteArray(input, webSafe));\n },\n\n /**\n * Base64-decode a string.\n *\n * In base-64 decoding, groups of four characters are converted into three\n * bytes. If the encoder did not apply padding, the input length may not\n * be a multiple of 4.\n *\n * In this case, the last group will have fewer than 4 characters, and\n * padding will be inferred. If the group has one or two characters, it decodes\n * to one byte. If the group has three characters, it decodes to two bytes.\n *\n * @param input Input to decode.\n * @param webSafe True if we should use the web-safe alphabet.\n * @return bytes representing the decoded value.\n */\n decodeStringToByteArray(input: string, webSafe: boolean): number[] {\n this.init_();\n\n const charToByteMap = webSafe\n ? this.charToByteMapWebSafe_!\n : this.charToByteMap_!;\n\n const output: number[] = [];\n\n for (let i = 0; i < input.length; ) {\n const byte1 = charToByteMap[input.charAt(i++)];\n\n const haveByte2 = i < input.length;\n const byte2 = haveByte2 ? charToByteMap[input.charAt(i)] : 0;\n ++i;\n\n const haveByte3 = i < input.length;\n const byte3 = haveByte3 ? charToByteMap[input.charAt(i)] : 64;\n ++i;\n\n const haveByte4 = i < input.length;\n const byte4 = haveByte4 ? charToByteMap[input.charAt(i)] : 64;\n ++i;\n\n if (byte1 == null || byte2 == null || byte3 == null || byte4 == null) {\n throw Error();\n }\n\n const outByte1 = (byte1 << 2) | (byte2 >> 4);\n output.push(outByte1);\n\n if (byte3 !== 64) {\n const outByte2 = ((byte2 << 4) & 0xf0) | (byte3 >> 2);\n output.push(outByte2);\n\n if (byte4 !== 64) {\n const outByte3 = ((byte3 << 6) & 0xc0) | byte4;\n output.push(outByte3);\n }\n }\n }\n\n return output;\n },\n\n /**\n * Lazy static initialization function. Called before\n * accessing any of the static map variables.\n * @private\n */\n init_() {\n if (!this.byteToCharMap_) {\n this.byteToCharMap_ = {};\n this.charToByteMap_ = {};\n this.byteToCharMapWebSafe_ = {};\n this.charToByteMapWebSafe_ = {};\n\n // We want quick mappings back and forth, so we precompute two maps.\n for (let i = 0; i < this.ENCODED_VALS.length; i++) {\n this.byteToCharMap_[i] = this.ENCODED_VALS.charAt(i);\n this.charToByteMap_[this.byteToCharMap_[i]] = i;\n this.byteToCharMapWebSafe_[i] = this.ENCODED_VALS_WEBSAFE.charAt(i);\n this.charToByteMapWebSafe_[this.byteToCharMapWebSafe_[i]] = i;\n\n // Be forgiving when decoding and correctly decode both encodings.\n if (i >= this.ENCODED_VALS_BASE.length) {\n this.charToByteMap_[this.ENCODED_VALS_WEBSAFE.charAt(i)] = i;\n this.charToByteMapWebSafe_[this.ENCODED_VALS.charAt(i)] = i;\n }\n }\n }\n }\n};\n\n/**\n * URL-safe base64 encoding\n */\nexport const base64Encode = function (str: string): string {\n const utf8Bytes = stringToByteArray(str);\n return base64.encodeByteArray(utf8Bytes, true);\n};\n\n/**\n * URL-safe base64 encoding (without \".\" padding in the end).\n * e.g. Used in JSON Web Token (JWT) parts.\n */\nexport const base64urlEncodeWithoutPadding = function (str: string): string {\n // Use base64url encoding and remove padding in the end (dot characters).\n return base64Encode(str).replace(/\\./g, '');\n};\n\n/**\n * URL-safe base64 decoding\n *\n * NOTE: DO NOT use the global atob() function - it does NOT support the\n * base64Url variant encoding.\n *\n * @param str To be decoded\n * @return Decoded result, if possible\n */\nexport const base64Decode = function (str: string): string | null {\n try {\n return base64.decodeString(str, true);\n } catch (e) {\n console.error('base64Decode failed: ', e);\n }\n return null;\n};\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * Do a deep-copy of basic JavaScript Objects or Arrays.\n */\nexport function deepCopy<T>(value: T): T {\n return deepExtend(undefined, value) as T;\n}\n\n/**\n * Copy properties from source to target (recursively allows extension\n * of Objects and Arrays). Scalar values in the target are over-written.\n * If target is undefined, an object of the appropriate type will be created\n * (and returned).\n *\n * We recursively copy all child properties of plain Objects in the source- so\n * that namespace- like dictionaries are merged.\n *\n * Note that the target can be a function, in which case the properties in\n * the source Object are copied onto it as static properties of the Function.\n *\n * Note: we don't merge __proto__ to prevent prototype pollution\n */\nexport function deepExtend(target: unknown, source: unknown): unknown {\n if (!(source instanceof Object)) {\n return source;\n }\n\n switch (source.constructor) {\n case Date:\n // Treat Dates like scalars; if the target date object had any child\n // properties - they will be lost!\n const dateValue = source as Date;\n return new Date(dateValue.getTime());\n\n case Object:\n if (target === undefined) {\n target = {};\n }\n break;\n case Array:\n // Always copy the array source and overwrite the target.\n target = [];\n break;\n\n default:\n // Not a plain Object - treat it as a scalar.\n return source;\n }\n\n for (const prop in source) {\n // use isValidKey to guard against prototype pollution. See https://snyk.io/vuln/SNYK-JS-LODASH-450202\n if (!source.hasOwnProperty(prop) || !isValidKey(prop)) {\n continue;\n }\n (target as Record<string, unknown>)[prop] = deepExtend(\n (target as Record<string, unknown>)[prop],\n (source as Record<string, unknown>)[prop]\n );\n }\n\n return target;\n}\n\nfunction isValidKey(key: string): boolean {\n return key !== '__proto__';\n}\n","/**\n * @license\n * Copyright 2022 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { base64Decode } from './crypt';\nimport { getGlobal } from './global';\n\n/**\n * Keys for experimental properties on the `FirebaseDefaults` object.\n * @public\n */\nexport type ExperimentalKey = 'authTokenSyncURL' | 'authIdTokenMaxAge';\n\n/**\n * An object that can be injected into the environment as __FIREBASE_DEFAULTS__,\n * either as a property of globalThis, a shell environment variable, or a\n * cookie.\n *\n * This object can be used to automatically configure and initialize\n * a Firebase app as well as any emulators.\n *\n * @public\n */\nexport interface FirebaseDefaults {\n config?: Record<string, string>;\n emulatorHosts?: Record<string, string>;\n _authTokenSyncURL?: string;\n _authIdTokenMaxAge?: number;\n /**\n * Override Firebase's runtime environment detection and\n * force the SDK to act as if it were in the specified environment.\n */\n forceEnvironment?: 'browser' | 'node';\n [key: string]: unknown;\n}\n\ndeclare global {\n // Need `var` for this to work.\n // eslint-disable-next-line no-var\n var __FIREBASE_DEFAULTS__: FirebaseDefaults | undefined;\n}\n\nconst getDefaultsFromGlobal = (): FirebaseDefaults | undefined =>\n getGlobal().__FIREBASE_DEFAULTS__;\n\n/**\n * Attempt to read defaults from a JSON string provided to\n * process(.)env(.)__FIREBASE_DEFAULTS__ or a JSON file whose path is in\n * process(.)env(.)__FIREBASE_DEFAULTS_PATH__\n * The dots are in parens because certain compilers (Vite?) cannot\n * handle seeing that variable in comments.\n * See https://github.com/firebase/firebase-js-sdk/issues/6838\n */\nconst getDefaultsFromEnvVariable = (): FirebaseDefaults | undefined => {\n if (typeof process === 'undefined' || typeof process.env === 'undefined') {\n return;\n }\n const defaultsJsonString = process.env.__FIREBASE_DEFAULTS__;\n if (defaultsJsonString) {\n return JSON.parse(defaultsJsonString);\n }\n};\n\nconst getDefaultsFromCookie = (): FirebaseDefaults | undefined => {\n if (typeof document === 'undefined') {\n return;\n }\n let match;\n try {\n match = document.cookie.match(/__FIREBASE_DEFAULTS__=([^;]+)/);\n } catch (e) {\n // Some environments such as Angular Universal SSR have a\n // `document` object but error on accessing `document.cookie`.\n return;\n }\n const decoded = match && base64Decode(match[1]);\n return decoded && JSON.parse(decoded);\n};\n\n/**\n * Get the __FIREBASE_DEFAULTS__ object. It checks in order:\n * (1) if such an object exists as a property of `globalThis`\n * (2) if such an object was provided on a shell environment variable\n * (3) if such an object exists in a cookie\n * @public\n */\nexport const getDefaults = (): FirebaseDefaults | undefined => {\n try {\n return (\n getDefaultsFromGlobal() ||\n getDefaultsFromEnvVariable() ||\n getDefaultsFromCookie()\n );\n } catch (e) {\n /**\n * Catch-all for being unable to get __FIREBASE_DEFAULTS__ due\n * to any environment case we have not accounted for. Log to\n * info instead of swallowing so we can find these unknown cases\n * and add paths for them if needed.\n */\n console.info(`Unable to get __FIREBASE_DEFAULTS__ due to: ${e}`);\n return;\n }\n};\n\n/**\n * Returns emulator host stored in the __FIREBASE_DEFAULTS__ object\n * for the given product.\n * @returns a URL host formatted like `127.0.0.1:9999` or `[::1]:4000` if available\n * @public\n */\nexport const getDefaultEmulatorHost = (\n productName: string\n): string | undefined => getDefaults()?.emulatorHosts?.[productName];\n\n/**\n * Returns emulator hostname and port stored in the __FIREBASE_DEFAULTS__ object\n * for the given product.\n * @returns a pair of hostname and port like `[\"::1\", 4000]` if available\n * @public\n */\nexport const getDefaultEmulatorHostnameAndPort = (\n productName: string\n): [hostname: string, port: number] | undefined => {\n const host = getDefaultEmulatorHost(productName);\n if (!host) {\n return undefined;\n }\n const separatorIndex = host.lastIndexOf(':'); // Finding the last since IPv6 addr also has colons.\n if (separatorIndex <= 0 || separatorIndex + 1 === host.length) {\n throw new Error(`Invalid host ${host} with no separate hostname and port!`);\n }\n // eslint-disable-next-line no-restricted-globals\n const port = parseInt(host.substring(separatorIndex + 1), 10);\n if (host[0] === '[') {\n // Bracket-quoted `[ipv6addr]:port` => return \"ipv6addr\" (without brackets).\n return [host.substring(1, separatorIndex - 1), port];\n } else {\n return [host.substring(0, separatorIndex), port];\n }\n};\n\n/**\n * Returns Firebase app config stored in the __FIREBASE_DEFAULTS__ object.\n * @public\n */\nexport const getDefaultAppConfig = (): Record<string, string> | undefined =>\n getDefaults()?.config;\n\n/**\n * Returns an experimental setting on the __FIREBASE_DEFAULTS__ object (properties\n * prefixed by \"_\")\n * @public\n */\nexport const getExperimentalSetting = <T extends ExperimentalKey>(\n name: T\n): FirebaseDefaults[`_${T}`] =>\n getDefaults()?.[`_${name}`] as FirebaseDefaults[`_${T}`];\n","/**\n * @license\n * Copyright 2022 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * Polyfill for `globalThis` object.\n * @returns the `globalThis` object for the given environment.\n * @public\n */\nexport function getGlobal(): typeof globalThis {\n if (typeof self !== 'undefined') {\n return self;\n }\n if (typeof window !== 'undefined') {\n return window;\n }\n if (typeof global !== 'undefined') {\n return global;\n }\n throw new Error('Unable to locate global object.');\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nexport class Deferred<R> {\n promise: Promise<R>;\n reject: (value?: unknown) => void = () => {};\n resolve: (value?: unknown) => void = () => {};\n constructor() {\n this.promise = new Promise((resolve, reject) => {\n this.resolve = resolve as (value?: unknown) => void;\n this.reject = reject as (value?: unknown) => void;\n });\n }\n\n /**\n * Our API internals are not promiseified and cannot because our callback APIs have subtle expectations around\n * invoking promises inline, which Promises are forbidden to do. This method accepts an optional node-style callback\n * and returns a node-style callback which will resolve or reject the Deferred's promise.\n */\n wrapCallback(\n callback?: (error?: unknown, value?: unknown) => void\n ): (error: unknown, value?: unknown) => void {\n return (error, value?) => {\n if (error) {\n this.reject(error);\n } else {\n this.resolve(value);\n }\n if (typeof callback === 'function') {\n // Attaching noop handler just in case developer wasn't expecting\n // promises\n this.promise.catch(() => {});\n\n // Some of our callbacks don't expect a value and our own tests\n // assert that the parameter length is 1\n if (callback.length === 1) {\n callback(error);\n } else {\n callback(error, value);\n }\n }\n };\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { CONSTANTS } from './constants';\nimport { getDefaults } from './defaults';\n\n/**\n * Returns navigator.userAgent string or '' if it's not defined.\n * @return user agent string\n */\nexport function getUA(): string {\n if (\n typeof navigator !== 'undefined' &&\n typeof navigator['userAgent'] === 'string'\n ) {\n return navigator['userAgent'];\n } else {\n return '';\n }\n}\n\n/**\n * Detect Cordova / PhoneGap / Ionic frameworks on a mobile device.\n *\n * Deliberately does not rely on checking `file://` URLs (as this fails PhoneGap\n * in the Ripple emulator) nor Cordova `onDeviceReady`, which would normally\n * wait for a callback.\n */\nexport function isMobileCordova(): boolean {\n return (\n typeof window !== 'undefined' &&\n // @ts-ignore Setting up an broadly applicable index signature for Window\n // just to deal with this case would probably be a bad idea.\n !!(window['cordova'] || window['phonegap'] || window['PhoneGap']) &&\n /ios|iphone|ipod|ipad|android|blackberry|iemobile/i.test(getUA())\n );\n}\n\n/**\n * Detect Node.js.\n *\n * @return true if Node.js environment is detected or specified.\n */\n// Node detection logic from: https://github.com/iliakan/detect-node/\nexport function isNode(): boolean {\n const forceEnvironment = getDefaults()?.forceEnvironment;\n if (forceEnvironment === 'node') {\n return true;\n } else if (forceEnvironment === 'browser') {\n return false;\n }\n\n try {\n return (\n Object.prototype.toString.call(global.process) === '[object process]'\n );\n } catch (e) {\n return false;\n }\n}\n\n/**\n * Detect Browser Environment\n */\nexport function isBrowser(): boolean {\n return typeof self === 'object' && self.self === self;\n}\n\n/**\n * Detect browser extensions (Chrome and Firefox at least).\n */\ninterface BrowserRuntime {\n id?: unknown;\n}\ndeclare const chrome: { runtime?: BrowserRuntime };\ndeclare const browser: { runtime?: BrowserRuntime };\nexport function isBrowserExtension(): boolean {\n const runtime =\n typeof chrome === 'object'\n ? chrome.runtime\n : typeof browser === 'object'\n ? browser.runtime\n : undefined;\n return typeof runtime === 'object' && runtime.id !== undefined;\n}\n\n/**\n * Detect React Native.\n *\n * @return true if ReactNative environment is detected.\n */\nexport function isReactNative(): boolean {\n return (\n typeof navigator === 'object' && navigator['product'] === 'ReactNative'\n );\n}\n\n/** Detects Electron apps. */\nexport function isElectron(): boolean {\n return getUA().indexOf('Electron/') >= 0;\n}\n\n/** Detects Internet Explorer. */\nexport function isIE(): boolean {\n const ua = getUA();\n return ua.indexOf('MSIE ') >= 0 || ua.indexOf('Trident/') >= 0;\n}\n\n/** Detects Universal Windows Platform apps. */\nexport function isUWP(): boolean {\n return getUA().indexOf('MSAppHost/') >= 0;\n}\n\n/**\n * Detect whether the current SDK build is the Node version.\n *\n * @return true if it's the Node SDK build.\n */\nexport function isNodeSdk(): boolean {\n return CONSTANTS.NODE_CLIENT === true || CONSTANTS.NODE_ADMIN === true;\n}\n\n/** Returns true if we are running in Safari. */\nexport function isSafari(): boolean {\n return (\n !isNode() &&\n navigator.userAgent.includes('Safari') &&\n !navigator.userAgent.includes('Chrome')\n );\n}\n\n/**\n * This method checks if indexedDB is supported by current browser/service worker context\n * @return true if indexedDB is supported by current browser/service worker context\n */\nexport function isIndexedDBAvailable(): boolean {\n try {\n return typeof indexedDB === 'object';\n } catch (e) {\n return false;\n }\n}\n\n/**\n * This method validates browser/sw context for indexedDB by opening a dummy indexedDB database and reject\n * if errors occur during the database open operation.\n *\n * @throws exception if current browser/sw context can't run idb.open (ex: Safari iframe, Firefox\n * private browsing)\n */\nexport function validateIndexedDBOpenable(): Promise<boolean> {\n return new Promise((resolve, reject) => {\n try {\n let preExist: boolean = true;\n const DB_CHECK_NAME =\n 'validate-browser-context-for-indexeddb-analytics-module';\n const request = self.indexedDB.open(DB_CHECK_NAME);\n request.onsuccess = () => {\n request.result.close();\n // delete database only when it doesn't pre-exist\n if (!preExist) {\n self.indexedDB.deleteDatabase(DB_CHECK_NAME);\n }\n resolve(true);\n };\n request.onupgradeneeded = () => {\n preExist = false;\n };\n\n request.onerror = () => {\n reject(request.error?.message || '');\n };\n } catch (error) {\n reject(error);\n }\n });\n}\n\n/**\n *\n * This method checks whether cookie is enabled within current browser\n * @return true if cookie is enabled within current browser\n */\nexport function areCookiesEnabled(): boolean {\n if (typeof navigator === 'undefined' || !navigator.cookieEnabled) {\n return false;\n }\n return true;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * Evaluates a JSON string into a javascript object.\n *\n * @param {string} str A string containing JSON.\n * @return {*} The javascript object representing the specified JSON.\n */\nexport function jsonEval(str: string): unknown {\n return JSON.parse(str);\n}\n\n/**\n * Returns JSON representing a javascript object.\n * @param {*} data Javascript object to be stringified.\n * @return {string} The JSON contents of the object.\n */\nexport function stringify(data: unknown): string {\n return JSON.stringify(data);\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { base64Decode } from './crypt';\nimport { jsonEval } from './json';\n\ninterface Claims {\n [key: string]: {};\n}\n\ninterface DecodedToken {\n header: object;\n claims: Claims;\n data: object;\n signature: string;\n}\n\n/**\n * Decodes a Firebase auth. token into constituent parts.\n *\n * Notes:\n * - May return with invalid / incomplete claims if there's no native base64 decoding support.\n * - Doesn't check if the token is actually valid.\n */\nexport const decode = function (token: string): DecodedToken {\n let header = {},\n claims: Claims = {},\n data = {},\n signature = '';\n\n try {\n const parts = token.split('.');\n header = jsonEval(base64Decode(parts[0]) || '') as object;\n claims = jsonEval(base64Decode(parts[1]) || '') as Claims;\n signature = parts[2];\n data = claims['d'] || {};\n delete claims['d'];\n } catch (e) {}\n\n return {\n header,\n claims,\n data,\n signature\n };\n};\n\ninterface DecodedToken {\n header: object;\n claims: Claims;\n data: object;\n signature: string;\n}\n\n/**\n * Decodes a Firebase auth. token and checks the validity of its time-based claims. Will return true if the\n * token is within the time window authorized by the 'nbf' (not-before) and 'iat' (issued-at) claims.\n *\n * Notes:\n * - May return a false negative if there's no native base64 decoding support.\n * - Doesn't check if the token is actually valid.\n */\nexport const isValidTimestamp = function (token: string): boolean {\n const claims: Claims = decode(token).claims;\n const now: number = Math.floor(new Date().getTime() / 1000);\n let validSince: number = 0,\n validUntil: number = 0;\n\n if (typeof claims === 'object') {\n if (claims.hasOwnProperty('nbf')) {\n validSince = claims['nbf'] as number;\n } else if (claims.hasOwnProperty('iat')) {\n validSince = claims['iat'] as number;\n }\n\n if (claims.hasOwnProperty('exp')) {\n validUntil = claims['exp'] as number;\n } else {\n // token will expire after 24h by default\n validUntil = validSince + 86400;\n }\n }\n\n return (\n !!now &&\n !!validSince &&\n !!validUntil &&\n now >= validSince &&\n now <= validUntil\n );\n};\n\n/**\n * Decodes a Firebase auth. token and returns its issued at time if valid, null otherwise.\n *\n * Notes:\n * - May return null if there's no native base64 decoding support.\n * - Doesn't check if the token is actually valid.\n */\nexport const issuedAtTime = function (token: string): number | null {\n const claims: Claims = decode(token).claims;\n if (typeof claims === 'object' && claims.hasOwnProperty('iat')) {\n return claims['iat'] as number;\n }\n return null;\n};\n\n/**\n * Decodes a Firebase auth. token and checks the validity of its format. Expects a valid issued-at time.\n *\n * Notes:\n * - May return a false negative if there's no native base64 decoding support.\n * - Doesn't check if the token is actually valid.\n */\nexport const isValidFormat = function (token: string): boolean {\n const decoded = decode(token),\n claims = decoded.claims;\n\n return !!claims && typeof claims === 'object' && claims.hasOwnProperty('iat');\n};\n\n/**\n * Attempts to peer into an auth token and determine if it's an admin auth token by looking at the claims portion.\n *\n * Notes:\n * - May return a false negative if there's no native base64 decoding support.\n * - Doesn't check if the token is actually valid.\n */\nexport const isAdmin = function (token: string): boolean {\n const claims: Claims = decode(token).claims;\n return typeof claims === 'object' && claims['admin'] === true;\n};\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nexport function contains<T extends object>(obj: T, key: string): boolean {\n return Object.prototype.hasOwnProperty.call(obj, key);\n}\n\nexport function safeGet<T extends object, K extends keyof T>(\n obj: T,\n key: K\n): T[K] | undefined {\n if (Object.prototype.hasOwnProperty.call(obj, key)) {\n return obj[key];\n } else {\n return undefined;\n }\n}\n\nexport function isEmpty(obj: object): obj is {} {\n for (const key in obj) {\n if (Object.prototype.hasOwnProperty.call(obj, key)) {\n return false;\n }\n }\n return true;\n}\n\nexport function map<K extends string, V, U>(\n obj: { [key in K]: V },\n fn: (value: V, key: K, obj: { [key in K]: V }) => U,\n contextObj?: unknown\n): { [key in K]: U } {\n const res: Partial<{ [key in K]: U }> = {};\n for (const key in obj) {\n if (Object.prototype.hasOwnProperty.call(obj, key)) {\n res[key] = fn.call(contextObj, obj[key], key, obj);\n }\n }\n return res as { [key in K]: U };\n}\n\n/**\n * Deep equal two objects. Support Arrays and Objects.\n */\nexport function deepEqual(a: object, b: object): boolean {\n if (a === b) {\n return true;\n }\n\n const aKeys = Object.keys(a);\n const bKeys = Object.keys(b);\n for (const k of aKeys) {\n if (!bKeys.includes(k)) {\n return false;\n }\n\n const aProp = (a as Record<string, unknown>)[k];\n const bProp = (b as Record<string, unknown>)[k];\n if (isObject(aProp) && isObject(bProp)) {\n if (!deepEqual(aProp, bProp)) {\n return false;\n }\n } else if (aProp !== bProp) {\n return false;\n }\n }\n\n for (const k of bKeys) {\n if (!aKeys.includes(k)) {\n return false;\n }\n }\n return true;\n}\n\nfunction isObject(thing: unknown): thing is object {\n return thing !== null && typeof thing === 'object';\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * @fileoverview SHA-1 cryptographic hash.\n * Variable names follow the notation in FIPS PUB 180-3:\n * http://csrc.nist.gov/publications/fips/fips180-3/fips180-3_final.pdf.\n *\n * Usage:\n * var sha1 = new sha1();\n * sha1.update(bytes);\n * var hash = sha1.digest();\n *\n * Performance:\n * Chrome 23: ~400 Mbit/s\n * Firefox 16: ~250 Mbit/s\n *\n */\n\n/**\n * SHA-1 cryptographic hash constructor.\n *\n * The properties declared here are discussed in the above algorithm document.\n * @constructor\n * @final\n * @struct\n */\nexport class Sha1 {\n /**\n * Holds the previous values of accumulated variables a-e in the compress_\n * function.\n * @private\n */\n private chain_: number[] = [];\n\n /**\n * A buffer holding the partially computed hash result.\n * @private\n */\n private buf_: number[] = [];\n\n /**\n * An array of 80 bytes, each a part of the message to be hashed. Referred to\n * as the message schedule in the docs.\n * @private\n */\n private W_: number[] = [];\n\n /**\n * Contains data needed to pad messages less than 64 bytes.\n * @private\n */\n private pad_: number[] = [];\n\n /**\n * @private {number}\n */\n private inbuf_: number = 0;\n\n /**\n * @private {number}\n */\n private total_: number = 0;\n\n blockSize: number;\n\n constructor() {\n this.blockSize = 512 / 8;\n\n this.pad_[0] = 128;\n for (let i = 1; i < this.blockSize; ++i) {\n this.pad_[i] = 0;\n }\n\n this.reset();\n }\n\n reset(): void {\n this.chain_[0] = 0x67452301;\n this.chain_[1] = 0xefcdab89;\n this.chain_[2] = 0x98badcfe;\n this.chain_[3] = 0x10325476;\n this.chain_[4] = 0xc3d2e1f0;\n\n this.inbuf_ = 0;\n this.total_ = 0;\n }\n\n /**\n * Internal compress helper function.\n * @param buf Block to compress.\n * @param offset Offset of the block in the buffer.\n * @private\n */\n compress_(buf: number[] | Uint8Array | string, offset?: number): void {\n if (!offset) {\n offset = 0;\n }\n\n const W = this.W_;\n\n // get 16 big endian words\n if (typeof buf === 'string') {\n for (let i = 0; i < 16; i++) {\n // TODO(user): [bug 8140122] Recent versions of Safari for Mac OS and iOS\n // have a bug that turns the post-increment ++ operator into pre-increment\n // during JIT compilation. We have code that depends heavily on SHA-1 for\n // correctness and which is affected by this bug, so I've removed all uses\n // of post-increment ++ in which the result value is used. We can revert\n // this change once the Safari bug\n // (https://bugs.webkit.org/show_bug.cgi?id=109036) has been fixed and\n // most clients have been updated.\n W[i] =\n (buf.charCodeAt(offset) << 24) |\n (buf.charCodeAt(offset + 1) << 16) |\n (buf.charCodeAt(offset + 2) << 8) |\n buf.charCodeAt(offset + 3);\n offset += 4;\n }\n } else {\n for (let i = 0; i < 16; i++) {\n W[i] =\n (buf[offset] << 24) |\n (buf[offset + 1] << 16) |\n (buf[offset + 2] << 8) |\n buf[offset + 3];\n offset += 4;\n }\n }\n\n // expand to 80 words\n for (let i = 16; i < 80; i++) {\n const t = W[i - 3] ^ W[i - 8] ^ W[i - 14] ^ W[i - 16];\n W[i] = ((t << 1) | (t >>> 31)) & 0xffffffff;\n }\n\n let a = this.chain_[0];\n let b = this.chain_[1];\n let c = this.chain_[2];\n let d = this.chain_[3];\n let e = this.chain_[4];\n let f, k;\n\n // TODO(user): Try to unroll this loop to speed up the computation.\n for (let i = 0; i < 80; i++) {\n if (i < 40) {\n if (i < 20) {\n f = d ^ (b & (c ^ d));\n k = 0x5a827999;\n } else {\n f = b ^ c ^ d;\n k = 0x6ed9eba1;\n }\n } else {\n if (i < 60) {\n f = (b & c) | (d & (b | c));\n k = 0x8f1bbcdc;\n } else {\n f = b ^ c ^ d;\n k = 0xca62c1d6;\n }\n }\n\n const t = (((a << 5) | (a >>> 27)) + f + e + k + W[i]) & 0xffffffff;\n e = d;\n d = c;\n c = ((b << 30) | (b >>> 2)) & 0xffffffff;\n b = a;\n a = t;\n }\n\n this.chain_[0] = (this.chain_[0] + a) & 0xffffffff;\n this.chain_[1] = (this.chain_[1] + b) & 0xffffffff;\n this.chain_[2] = (this.chain_[2] + c) & 0xffffffff;\n this.chain_[3] = (this.chain_[3] + d) & 0xffffffff;\n this.chain_[4] = (this.chain_[4] + e) & 0xffffffff;\n }\n\n update(bytes?: number[] | Uint8Array | string, length?: number): void {\n // TODO(johnlenz): tighten the function signature and remove this check\n if (bytes == null) {\n return;\n }\n\n if (length === undefined) {\n length = bytes.length;\n }\n\n const lengthMinusBlock = length - this.blockSize;\n let n = 0;\n // Using local instead of member variables gives ~5% speedup on Firefox 16.\n const buf = this.buf_;\n let inbuf = this.inbuf_;\n\n // The outer while loop should execute at most twice.\n while (n < length) {\n // When we have no data in the block to top up, we can directly process the\n // input buffer (assuming it contains sufficient data). This gives ~25%\n // speedup on Chrome 23 and ~15% speedup on Firefox 16, but requires that\n // the data is provided in large chunks (or in multiples of 64 bytes).\n if (inbuf === 0) {\n while (n <= lengthMinusBlock) {\n this.compress_(bytes, n);\n n += this.blockSize;\n }\n }\n\n if (typeof bytes === 'string') {\n while (n < length) {\n buf[inbuf] = bytes.charCodeAt(n);\n ++inbuf;\n ++n;\n if (inbuf === this.blockSize) {\n this.compress_(buf);\n inbuf = 0;\n // Jump to the outer loop so we use the full-block optimization.\n break;\n }\n }\n } else {\n while (n < length) {\n buf[inbuf] = bytes[n];\n ++inbuf;\n ++n;\n if (inbuf === this.blockSize) {\n this.compress_(buf);\n inbuf = 0;\n // Jump to the outer loop so we use the full-block optimization.\n break;\n }\n }\n }\n }\n\n this.inbuf_ = inbuf;\n this.total_ += length;\n }\n\n /** @override */\n digest(): number[] {\n const digest: number[] = [];\n let totalBits = this.total_ * 8;\n\n // Add pad 0x80 0x00*.\n if (this.inbuf_ < 56) {\n this.update(this.pad_, 56 - this.inbuf_);\n } else {\n this.update(this.pad_, this.blockSize - (this.inbuf_ - 56));\n }\n\n // Add # bits.\n for (let i = this.blockSize - 1; i >= 56; i--) {\n this.buf_[i] = totalBits & 255;\n totalBits /= 256; // Don't use bit-shifting here!\n }\n\n this.compress_(this.buf_);\n\n let n = 0;\n for (let i = 0; i < 5; i++) {\n for (let j = 24; j >= 0; j -= 8) {\n digest[n] = (this.chain_[i] >> j) & 255;\n ++n;\n }\n }\n return digest;\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * Check to make sure the appropriate number of arguments are provided for a public function.\n * Throws an error if it fails.\n *\n * @param fnName The function name\n * @param minCount The minimum number of arguments to allow for the function call\n * @param maxCount The maximum number of argument to allow for the function call\n * @param argCount The actual number of arguments provided.\n */\nexport const validateArgCount = function (\n fnName: string,\n minCount: number,\n maxCount: number,\n argCount: number\n): void {\n let argError;\n if (argCount < minCount) {\n argError = 'at least ' + minCount;\n } else if (argCount > maxCount) {\n argError = maxCount === 0 ? 'none' : 'no more than ' + maxCount;\n }\n if (argError) {\n const error =\n fnName +\n ' failed: Was called with ' +\n argCount +\n (argCount === 1 ? ' argument.' : ' arguments.') +\n ' Expects ' +\n argError +\n '.';\n throw new Error(error);\n }\n};\n\n/**\n * Generates a string to prefix an error message about failed argument validation\n *\n * @param fnName The function name\n * @param argName The name of the argument\n * @return The prefix to add to the error thrown for validation.\n */\nexport function errorPrefix(fnName: string, argName: string): string {\n return `${fnName} failed: ${argName} argument `;\n}\n\n/**\n * @param fnName\n * @param argumentNumber\n * @param namespace\n * @param optional\n */\nexport function validateNamespace(\n fnName: string,\n namespace: string,\n optional: boolean\n): void {\n if (optional && !namespace) {\n return;\n }\n if (typeof namespace !== 'string') {\n //TODO: I should do more validation here. We only allow certain chars in namespaces.\n throw new Error(\n errorPrefix(fnName, 'namespace') + 'must be a valid firebase namespace.'\n );\n }\n}\n\nexport function validateCallback(\n fnName: string,\n argumentName: string,\n // eslint-disable-next-line @typescript-eslint/ban-types\n callback: Function,\n optional: boolean\n): void {\n if (optional && !callback) {\n return;\n }\n if (typeof callback !== 'function') {\n throw new Error(\n errorPrefix(fnName, argumentName) + 'must be a valid function.'\n );\n }\n}\n\nexport function validateContextObject(\n fnName: string,\n argumentName: string,\n context: unknown,\n optional: boolean\n): void {\n if (optional && !context) {\n return;\n }\n if (typeof context !== 'object' || context === null) {\n throw new Error(\n errorPrefix(fnName, argumentName) + 'must be a valid context object.'\n );\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert } from './assert';\n\n// Code originally came from goog.crypt.stringToUtf8ByteArray, but for some reason they\n// automatically replaced '\\r\\n' with '\\n', and they didn't handle surrogate pairs,\n// so it's been modified.\n\n// Note that not all Unicode characters appear as single characters in JavaScript strings.\n// fromCharCode returns the UTF-16 encoding of a character - so some Unicode characters\n// use 2 characters in Javascript. All 4-byte UTF-8 characters begin with a first\n// character in the range 0xD800 - 0xDBFF (the first character of a so-called surrogate\n// pair).\n// See http://www.ecma-international.org/ecma-262/5.1/#sec-15.1.3\n\n/**\n * @param {string} str\n * @return {Array}\n */\nexport const stringToByteArray = function (str: string): number[] {\n const out: number[] = [];\n let p = 0;\n for (let i = 0; i < str.length; i++) {\n let c = str.charCodeAt(i);\n\n // Is this the lead surrogate in a surrogate pair?\n if (c >= 0xd800 && c <= 0xdbff) {\n const high = c - 0xd800; // the high 10 bits.\n i++;\n assert(i < str.length, 'Surrogate pair missing trail surrogate.');\n const low = str.charCodeAt(i) - 0xdc00; // the low 10 bits.\n c = 0x10000 + (high << 10) + low;\n }\n\n if (c < 128) {\n out[p++] = c;\n } else if (c < 2048) {\n out[p++] = (c >> 6) | 192;\n out[p++] = (c & 63) | 128;\n } else if (c < 65536) {\n out[p++] = (c >> 12) | 224;\n out[p++] = ((c >> 6) & 63) | 128;\n out[p++] = (c & 63) | 128;\n } else {\n out[p++] = (c >> 18) | 240;\n out[p++] = ((c >> 12) & 63) | 128;\n out[p++] = ((c >> 6) & 63) | 128;\n out[p++] = (c & 63) | 128;\n }\n }\n return out;\n};\n\n/**\n * Calculate length without actually converting; useful for doing cheaper validation.\n * @param {string} str\n * @return {number}\n */\nexport const stringLength = function (str: string): number {\n let p = 0;\n for (let i = 0; i < str.length; i++) {\n const c = str.charCodeAt(i);\n if (c < 128) {\n p++;\n } else if (c < 2048) {\n p += 2;\n } else if (c >= 0xd800 && c <= 0xdbff) {\n // Lead surrogate of a surrogate pair. The pair together will take 4 bytes to represent.\n p += 4;\n i++; // skip trail surrogate.\n } else {\n p += 3;\n }\n }\n return p;\n};\n","/**\n * @license\n * Copyright 2021 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nexport interface Compat<T> {\n _delegate: T;\n}\n\nexport function getModularInstance<ExpService>(\n service: Compat<ExpService> | ExpService\n): ExpService {\n if (service && (service as Compat<ExpService>)._delegate) {\n return (service as Compat<ExpService>)._delegate;\n } else {\n return service as ExpService;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nimport {\n InstantiationMode,\n InstanceFactory,\n ComponentType,\n Dictionary,\n Name,\n onInstanceCreatedCallback\n} from './types';\n\n/**\n * Component for service name T, e.g. `auth`, `auth-internal`\n */\nexport class Component<T extends Name = Name> {\n multipleInstances = false;\n /**\n * Properties to be added to the service namespace\n */\n serviceProps: Dictionary = {};\n\n instantiationMode = InstantiationMode.LAZY;\n\n onInstanceCreated: onInstanceCreatedCallback<T> | null = null;\n\n /**\n *\n * @param name The public service name, e.g. app, auth, firestore, database\n * @param instanceFactory Service factory responsible for creating the public interface\n * @param type whether the service provided by the component is public or private\n */\n constructor(\n readonly name: T,\n readonly instanceFactory: InstanceFactory<T>,\n readonly type: ComponentType\n ) {}\n\n setInstantiationMode(mode: InstantiationMode): this {\n this.instantiationMode = mode;\n return this;\n }\n\n setMultipleInstances(multipleInstances: boolean): this {\n this.multipleInstances = multipleInstances;\n return this;\n }\n\n setServiceProps(props: Dictionary): this {\n this.serviceProps = props;\n return this;\n }\n\n setInstanceCreatedCallback(callback: onInstanceCreatedCallback<T>): this {\n this.onInstanceCreated = callback;\n return this;\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nexport type LogLevelString =\n | 'debug'\n | 'verbose'\n | 'info'\n | 'warn'\n | 'error'\n | 'silent';\n\nexport interface LogOptions {\n level: LogLevelString;\n}\n\nexport type LogCallback = (callbackParams: LogCallbackParams) => void;\n\nexport interface LogCallbackParams {\n level: LogLevelString;\n message: string;\n args: unknown[];\n type: string;\n}\n\n/**\n * A container for all of the Logger instances\n */\nexport const instances: Logger[] = [];\n\n/**\n * The JS SDK supports 5 log levels and also allows a user the ability to\n * silence the logs altogether.\n *\n * The order is a follows:\n * DEBUG < VERBOSE < INFO < WARN < ERROR\n *\n * All of the log types above the current log level will be captured (i.e. if\n * you set the log level to `INFO`, errors will still be logged, but `DEBUG` and\n * `VERBOSE` logs will not)\n */\nexport enum LogLevel {\n DEBUG,\n VERBOSE,\n INFO,\n WARN,\n ERROR,\n SILENT\n}\n\nconst levelStringToEnum: { [key in LogLevelString]: LogLevel } = {\n 'debug': LogLevel.DEBUG,\n 'verbose': LogLevel.VERBOSE,\n 'info': LogLevel.INFO,\n 'warn': LogLevel.WARN,\n 'error': LogLevel.ERROR,\n 'silent': LogLevel.SILENT\n};\n\n/**\n * The default log level\n */\nconst defaultLogLevel: LogLevel = LogLevel.INFO;\n\n/**\n * We allow users the ability to pass their own log handler. We will pass the\n * type of log, the current log level, and any other arguments passed (i.e. the\n * messages that the user wants to log) to this function.\n */\nexport type LogHandler = (\n loggerInstance: Logger,\n logType: LogLevel,\n ...args: unknown[]\n) => void;\n\n/**\n * By default, `console.debug` is not displayed in the developer console (in\n * chrome). To avoid forcing users to have to opt-in to these logs twice\n * (i.e. once for firebase, and once in the console), we are sending `DEBUG`\n * logs to the `console.log` function.\n */\nconst ConsoleMethod = {\n [LogLevel.DEBUG]: 'log',\n [LogLevel.VERBOSE]: 'log',\n [LogLevel.INFO]: 'info',\n [LogLevel.WARN]: 'warn',\n [LogLevel.ERROR]: 'error'\n};\n\n/**\n * The default log handler will forward DEBUG, VERBOSE, INFO, WARN, and ERROR\n * messages on to their corresponding console counterparts (if the log method\n * is supported by the current log level)\n */\nconst defaultLogHandler: LogHandler = (instance, logType, ...args): void => {\n if (logType < instance.logLevel) {\n return;\n }\n const now = new Date().toISOString();\n const method = ConsoleMethod[logType as keyof typeof ConsoleMethod];\n if (method) {\n console[method as 'log' | 'info' | 'warn' | 'error'](\n `[${now}] ${instance.name}:`,\n ...args\n );\n } else {\n throw new Error(\n `Attempted to log a message with an invalid logType (value: ${logType})`\n );\n }\n};\n\nexport class Logger {\n /**\n * Gives you an instance of a Logger to capture messages according to\n * Firebase's logging scheme.\n *\n * @param name The name that the logs will be associated with\n */\n constructor(public name: string) {\n /**\n * Capture the current instance for later use\n */\n instances.push(this);\n }\n\n /**\n * The log level of the given Logger instance.\n */\n private _logLevel = defaultLogLevel;\n\n get logLevel(): LogLevel {\n return this._logLevel;\n }\n\n set logLevel(val: LogLevel) {\n if (!(val in LogLevel)) {\n throw new TypeError(`Invalid value \"${val}\" assigned to \\`logLevel\\``);\n }\n this._logLevel = val;\n }\n\n // Workaround for setter/getter having to be the same type.\n setLogLevel(val: LogLevel | LogLevelString): void {\n this._logLevel = typeof val === 'string' ? levelStringToEnum[val] : val;\n }\n\n /**\n * The main (internal) log handler for the Logger instance.\n * Can be set to a new function in internal package code but not by user.\n */\n private _logHandler: LogHandler = defaultLogHandler;\n get logHandler(): LogHandler {\n return this._logHandler;\n }\n set logHandler(val: LogHandler) {\n if (typeof val !== 'function') {\n throw new TypeError('Value assigned to `logHandler` must be a function');\n }\n this._logHandler = val;\n }\n\n /**\n * The optional, additional, user-defined log handler for the Logger instance.\n */\n private _userLogHandler: LogHandler | null = null;\n get userLogHandler(): LogHandler | null {\n return this._userLogHandler;\n }\n set userLogHandler(val: LogHandler | null) {\n this._userLogHandler = val;\n }\n\n /**\n * The functions below are all based on the `console` interface\n */\n\n debug(...args: unknown[]): void {\n this._userLogHandler && this._userLogHandler(this, LogLevel.DEBUG, ...args);\n this._logHandler(this, LogLevel.DEBUG, ...args);\n }\n log(...args: unknown[]): void {\n this._userLogHandler &&\n this._userLogHandler(this, LogLevel.VERBOSE, ...args);\n this._logHandler(this, LogLevel.VERBOSE, ...args);\n }\n info(...args: unknown[]): void {\n this._userLogHandler && this._userLogHandler(this, LogLevel.INFO, ...args);\n this._logHandler(this, LogLevel.INFO, ...args);\n }\n warn(...args: unknown[]): void {\n this._userLogHandler && this._userLogHandler(this, LogLevel.WARN, ...args);\n this._logHandler(this, LogLevel.WARN, ...args);\n }\n error(...args: unknown[]): void {\n this._userLogHandler && this._userLogHandler(this, LogLevel.ERROR, ...args);\n this._logHandler(this, LogLevel.ERROR, ...args);\n }\n}\n\nexport function setLogLevel(level: LogLevelString | LogLevel): void {\n instances.forEach(inst => {\n inst.setLogLevel(level);\n });\n}\n\nexport function setUserLogHandler(\n logCallback: LogCallback | null,\n options?: LogOptions\n): void {\n for (const instance of instances) {\n let customLogLevel: LogLevel | null = null;\n if (options && options.level) {\n customLogLevel = levelStringToEnum[options.level];\n }\n if (logCallback === null) {\n instance.userLogHandler = null;\n } else {\n instance.userLogHandler = (\n instance: Logger,\n level: LogLevel,\n ...args: unknown[]\n ) => {\n const message = args\n .map(arg => {\n if (arg == null) {\n return null;\n } else if (typeof arg === 'string') {\n return arg;\n } else if (typeof arg === 'number' || typeof arg === 'boolean') {\n return arg.toString();\n } else if (arg instanceof Error) {\n return arg.message;\n } else {\n try {\n return JSON.stringify(arg);\n } catch (ignored) {\n return null;\n }\n }\n })\n .filter(arg => arg)\n .join(' ');\n if (level >= (customLogLevel ?? instance.logLevel)) {\n logCallback({\n level: LogLevel[level].toLowerCase() as LogLevelString,\n message,\n args,\n type: instance.name\n });\n }\n };\n }\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/** The semver (www.semver.org) version of the SDK. */\nexport let SDK_VERSION = '';\n\n/**\n * SDK_VERSION should be set before any database instance is created\n * @internal\n */\nexport function setSDKVersion(version: string): void {\n SDK_VERSION = version;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { jsonEval, stringify } from '@firebase/util';\n\n/**\n * Wraps a DOM Storage object and:\n * - automatically encode objects as JSON strings before storing them to allow us to store arbitrary types.\n * - prefixes names with \"firebase:\" to avoid collisions with app data.\n *\n * We automatically (see storage.js) create two such wrappers, one for sessionStorage,\n * and one for localStorage.\n *\n */\nexport class DOMStorageWrapper {\n // Use a prefix to avoid collisions with other stuff saved by the app.\n private prefix_ = 'firebase:';\n\n /**\n * @param domStorage_ - The underlying storage object (e.g. localStorage or sessionStorage)\n */\n constructor(private domStorage_: Storage) {}\n\n /**\n * @param key - The key to save the value under\n * @param value - The value being stored, or null to remove the key.\n */\n set(key: string, value: unknown | null) {\n if (value == null) {\n this.domStorage_.removeItem(this.prefixedName_(key));\n } else {\n this.domStorage_.setItem(this.prefixedName_(key), stringify(value));\n }\n }\n\n /**\n * @returns The value that was stored under this key, or null\n */\n get(key: string): unknown {\n const storedVal = this.domStorage_.getItem(this.prefixedName_(key));\n if (storedVal == null) {\n return null;\n } else {\n return jsonEval(storedVal);\n }\n }\n\n remove(key: string) {\n this.domStorage_.removeItem(this.prefixedName_(key));\n }\n\n isInMemoryStorage: boolean;\n\n prefixedName_(name: string): string {\n return this.prefix_ + name;\n }\n\n toString(): string {\n return this.domStorage_.toString();\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { contains } from '@firebase/util';\n\n/**\n * An in-memory storage implementation that matches the API of DOMStorageWrapper\n * (TODO: create interface for both to implement).\n */\nexport class MemoryStorage {\n private cache_: { [k: string]: unknown } = {};\n\n set(key: string, value: unknown | null) {\n if (value == null) {\n delete this.cache_[key];\n } else {\n this.cache_[key] = value;\n }\n }\n\n get(key: string): unknown {\n if (contains(this.cache_, key)) {\n return this.cache_[key];\n }\n return null;\n }\n\n remove(key: string) {\n delete this.cache_[key];\n }\n\n isInMemoryStorage = true;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { DOMStorageWrapper } from './DOMStorageWrapper';\nimport { MemoryStorage } from './MemoryStorage';\n\ndeclare const window: Window;\n\n/**\n * Helper to create a DOMStorageWrapper or else fall back to MemoryStorage.\n * TODO: Once MemoryStorage and DOMStorageWrapper have a shared interface this method annotation should change\n * to reflect this type\n *\n * @param domStorageName - Name of the underlying storage object\n * (e.g. 'localStorage' or 'sessionStorage').\n * @returns Turning off type information until a common interface is defined.\n */\nconst createStoragefor = function (\n domStorageName: string\n): DOMStorageWrapper | MemoryStorage {\n try {\n // NOTE: just accessing \"localStorage\" or \"window['localStorage']\" may throw a security exception,\n // so it must be inside the try/catch.\n if (\n typeof window !== 'undefined' &&\n typeof window[domStorageName] !== 'undefined'\n ) {\n // Need to test cache. Just because it's here doesn't mean it works\n const domStorage = window[domStorageName];\n domStorage.setItem('firebase:sentinel', 'cache');\n domStorage.removeItem('firebase:sentinel');\n return new DOMStorageWrapper(domStorage);\n }\n } catch (e) {}\n\n // Failed to create wrapper. Just return in-memory storage.\n // TODO: log?\n return new MemoryStorage();\n};\n\n/** A storage object that lasts across sessions */\nexport const PersistentStorage = createStoragefor('localStorage');\n\n/** A storage object that only lasts one session */\nexport const SessionStorage = createStoragefor('sessionStorage');\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Logger, LogLevel } from '@firebase/logger';\nimport {\n assert,\n base64,\n Sha1,\n stringToByteArray,\n stringify,\n isNodeSdk\n} from '@firebase/util';\n\nimport { SessionStorage } from '../storage/storage';\nimport { QueryContext } from '../view/EventRegistration';\n\ndeclare const window: Window;\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\ndeclare const Windows: any;\n\nconst logClient = new Logger('@firebase/database');\n\n/**\n * Returns a locally-unique ID (generated by just incrementing up from 0 each time its called).\n */\nexport const LUIDGenerator: () => number = (function () {\n let id = 1;\n return function () {\n return id++;\n };\n})();\n\n/**\n * Sha1 hash of the input string\n * @param str - The string to hash\n * @returns {!string} The resulting hash\n */\nexport const sha1 = function (str: string): string {\n const utf8Bytes = stringToByteArray(str);\n const sha1 = new Sha1();\n sha1.update(utf8Bytes);\n const sha1Bytes = sha1.digest();\n return base64.encodeByteArray(sha1Bytes);\n};\n\nconst buildLogMessage_ = function (...varArgs: unknown[]): string {\n let message = '';\n for (let i = 0; i < varArgs.length; i++) {\n const arg = varArgs[i];\n if (\n Array.isArray(arg) ||\n (arg &&\n typeof arg === 'object' &&\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n typeof (arg as any).length === 'number')\n ) {\n message += buildLogMessage_.apply(null, arg);\n } else if (typeof arg === 'object') {\n message += stringify(arg);\n } else {\n message += arg;\n }\n message += ' ';\n }\n\n return message;\n};\n\n/**\n * Use this for all debug messages in Firebase.\n */\nexport let logger: ((a: string) => void) | null = null;\n\n/**\n * Flag to check for log availability on first log message\n */\nlet firstLog_ = true;\n\n/**\n * The implementation of Firebase.enableLogging (defined here to break dependencies)\n * @param logger_ - A flag to turn on logging, or a custom logger\n * @param persistent - Whether or not to persist logging settings across refreshes\n */\nexport const enableLogging = function (\n logger_?: boolean | ((a: string) => void) | null,\n persistent?: boolean\n) {\n assert(\n !persistent || logger_ === true || logger_ === false,\n \"Can't turn on custom loggers persistently.\"\n );\n if (logger_ === true) {\n logClient.logLevel = LogLevel.VERBOSE;\n logger = logClient.log.bind(logClient);\n if (persistent) {\n SessionStorage.set('logging_enabled', true);\n }\n } else if (typeof logger_ === 'function') {\n logger = logger_;\n } else {\n logger = null;\n SessionStorage.remove('logging_enabled');\n }\n};\n\nexport const log = function (...varArgs: unknown[]) {\n if (firstLog_ === true) {\n firstLog_ = false;\n if (logger === null && SessionStorage.get('logging_enabled') === true) {\n enableLogging(true);\n }\n }\n\n if (logger) {\n const message = buildLogMessage_.apply(null, varArgs);\n logger(message);\n }\n};\n\nexport const logWrapper = function (\n prefix: string\n): (...varArgs: unknown[]) => void {\n return function (...varArgs: unknown[]) {\n log(prefix, ...varArgs);\n };\n};\n\nexport const error = function (...varArgs: string[]) {\n const message = 'FIREBASE INTERNAL ERROR: ' + buildLogMessage_(...varArgs);\n logClient.error(message);\n};\n\nexport const fatal = function (...varArgs: string[]) {\n const message = `FIREBASE FATAL ERROR: ${buildLogMessage_(...varArgs)}`;\n logClient.error(message);\n throw new Error(message);\n};\n\nexport const warn = function (...varArgs: unknown[]) {\n const message = 'FIREBASE WARNING: ' + buildLogMessage_(...varArgs);\n logClient.warn(message);\n};\n\n/**\n * Logs a warning if the containing page uses https. Called when a call to new Firebase\n * does not use https.\n */\nexport const warnIfPageIsSecure = function () {\n // Be very careful accessing browser globals. Who knows what may or may not exist.\n if (\n typeof window !== 'undefined' &&\n window.location &&\n window.location.protocol &&\n window.location.protocol.indexOf('https:') !== -1\n ) {\n warn(\n 'Insecure Firebase access from a secure page. ' +\n 'Please use https in calls to new Firebase().'\n );\n }\n};\n\nexport const warnAboutUnsupportedMethod = function (methodName: string) {\n warn(\n methodName +\n ' is unsupported and will likely change soon. ' +\n 'Please do not use.'\n );\n};\n\n/**\n * Returns true if data is NaN, or +/- Infinity.\n */\nexport const isInvalidJSONNumber = function (data: unknown): boolean {\n return (\n typeof data === 'number' &&\n (data !== data || // NaN\n data === Number.POSITIVE_INFINITY ||\n data === Number.NEGATIVE_INFINITY)\n );\n};\n\nexport const executeWhenDOMReady = function (fn: () => void) {\n if (isNodeSdk() || document.readyState === 'complete') {\n fn();\n } else {\n // Modeled after jQuery. Try DOMContentLoaded and onreadystatechange (which\n // fire before onload), but fall back to onload.\n\n let called = false;\n const wrappedFn = function () {\n if (!document.body) {\n setTimeout(wrappedFn, Math.floor(10));\n return;\n }\n\n if (!called) {\n called = true;\n fn();\n }\n };\n\n if (document.addEventListener) {\n document.addEventListener('DOMContentLoaded', wrappedFn, false);\n // fallback to onload.\n window.addEventListener('load', wrappedFn, false);\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n } else if ((document as any).attachEvent) {\n // IE.\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n (document as any).attachEvent('onreadystatechange', () => {\n if (document.readyState === 'complete') {\n wrappedFn();\n }\n });\n // fallback to onload.\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n (window as any).attachEvent('onload', wrappedFn);\n\n // jQuery has an extra hack for IE that we could employ (based on\n // http://javascript.nwbox.com/IEContentLoaded/) But it looks really old.\n // I'm hoping we don't need it.\n }\n }\n};\n\n/**\n * Minimum key name. Invalid for actual data, used as a marker to sort before any valid names\n */\nexport const MIN_NAME = '[MIN_NAME]';\n\n/**\n * Maximum key name. Invalid for actual data, used as a marker to sort above any valid names\n */\nexport const MAX_NAME = '[MAX_NAME]';\n\n/**\n * Compares valid Firebase key names, plus min and max name\n */\nexport const nameCompare = function (a: string, b: string): number {\n if (a === b) {\n return 0;\n } else if (a === MIN_NAME || b === MAX_NAME) {\n return -1;\n } else if (b === MIN_NAME || a === MAX_NAME) {\n return 1;\n } else {\n const aAsInt = tryParseInt(a),\n bAsInt = tryParseInt(b);\n\n if (aAsInt !== null) {\n if (bAsInt !== null) {\n return aAsInt - bAsInt === 0 ? a.length - b.length : aAsInt - bAsInt;\n } else {\n return -1;\n }\n } else if (bAsInt !== null) {\n return 1;\n } else {\n return a < b ? -1 : 1;\n }\n }\n};\n\n/**\n * @returns {!number} comparison result.\n */\nexport const stringCompare = function (a: string, b: string): number {\n if (a === b) {\n return 0;\n } else if (a < b) {\n return -1;\n } else {\n return 1;\n }\n};\n\nexport const requireKey = function (\n key: string,\n obj: { [k: string]: unknown }\n): unknown {\n if (obj && key in obj) {\n return obj[key];\n } else {\n throw new Error(\n 'Missing required key (' + key + ') in object: ' + stringify(obj)\n );\n }\n};\n\nexport const ObjectToUniqueKey = function (obj: unknown): string {\n if (typeof obj !== 'object' || obj === null) {\n return stringify(obj);\n }\n\n const keys = [];\n // eslint-disable-next-line guard-for-in\n for (const k in obj) {\n keys.push(k);\n }\n\n // Export as json, but with the keys sorted.\n keys.sort();\n let key = '{';\n for (let i = 0; i < keys.length; i++) {\n if (i !== 0) {\n key += ',';\n }\n key += stringify(keys[i]);\n key += ':';\n key += ObjectToUniqueKey(obj[keys[i]]);\n }\n\n key += '}';\n return key;\n};\n\n/**\n * Splits a string into a number of smaller segments of maximum size\n * @param str - The string\n * @param segsize - The maximum number of chars in the string.\n * @returns The string, split into appropriately-sized chunks\n */\nexport const splitStringBySize = function (\n str: string,\n segsize: number\n): string[] {\n const len = str.length;\n\n if (len <= segsize) {\n return [str];\n }\n\n const dataSegs = [];\n for (let c = 0; c < len; c += segsize) {\n if (c + segsize > len) {\n dataSegs.push(str.substring(c, len));\n } else {\n dataSegs.push(str.substring(c, c + segsize));\n }\n }\n return dataSegs;\n};\n\n/**\n * Apply a function to each (key, value) pair in an object or\n * apply a function to each (index, value) pair in an array\n * @param obj - The object or array to iterate over\n * @param fn - The function to apply\n */\nexport function each(obj: object, fn: (k: string, v: unknown) => void) {\n for (const key in obj) {\n if (obj.hasOwnProperty(key)) {\n fn(key, obj[key]);\n }\n }\n}\n\n/**\n * Like goog.bind, but doesn't bother to create a closure if opt_context is null/undefined.\n * @param callback - Callback function.\n * @param context - Optional context to bind to.\n *\n */\nexport const bindCallback = function (\n callback: (a: unknown) => void,\n context?: object | null\n): (a: unknown) => void {\n return context ? callback.bind(context) : callback;\n};\n\n/**\n * Borrowed from http://hg.secondlife.com/llsd/src/tip/js/typedarray.js (MIT License)\n * I made one modification at the end and removed the NaN / Infinity\n * handling (since it seemed broken [caused an overflow] and we don't need it). See MJL comments.\n * @param v - A double\n *\n */\nexport const doubleToIEEE754String = function (v: number): string {\n assert(!isInvalidJSONNumber(v), 'Invalid JSON number'); // MJL\n\n const ebits = 11,\n fbits = 52;\n const bias = (1 << (ebits - 1)) - 1;\n let s, e, f, ln, i;\n\n // Compute sign, exponent, fraction\n // Skip NaN / Infinity handling --MJL.\n if (v === 0) {\n e = 0;\n f = 0;\n s = 1 / v === -Infinity ? 1 : 0;\n } else {\n s = v < 0;\n v = Math.abs(v);\n\n if (v >= Math.pow(2, 1 - bias)) {\n // Normalized\n ln = Math.min(Math.floor(Math.log(v) / Math.LN2), bias);\n e = ln + bias;\n f = Math.round(v * Math.pow(2, fbits - ln) - Math.pow(2, fbits));\n } else {\n // Denormalized\n e = 0;\n f = Math.round(v / Math.pow(2, 1 - bias - fbits));\n }\n }\n\n // Pack sign, exponent, fraction\n const bits = [];\n for (i = fbits; i; i -= 1) {\n bits.push(f % 2 ? 1 : 0);\n f = Math.floor(f / 2);\n }\n for (i = ebits; i; i -= 1) {\n bits.push(e % 2 ? 1 : 0);\n e = Math.floor(e / 2);\n }\n bits.push(s ? 1 : 0);\n bits.reverse();\n const str = bits.join('');\n\n // Return the data as a hex string. --MJL\n let hexByteString = '';\n for (i = 0; i < 64; i += 8) {\n let hexByte = parseInt(str.substr(i, 8), 2).toString(16);\n if (hexByte.length === 1) {\n hexByte = '0' + hexByte;\n }\n hexByteString = hexByteString + hexByte;\n }\n return hexByteString.toLowerCase();\n};\n\n/**\n * Used to detect if we're in a Chrome content script (which executes in an\n * isolated environment where long-polling doesn't work).\n */\nexport const isChromeExtensionContentScript = function (): boolean {\n return !!(\n typeof window === 'object' &&\n window['chrome'] &&\n window['chrome']['extension'] &&\n !/^chrome/.test(window.location.href)\n );\n};\n\n/**\n * Used to detect if we're in a Windows 8 Store app.\n */\nexport const isWindowsStoreApp = function (): boolean {\n // Check for the presence of a couple WinRT globals\n return typeof Windows === 'object' && typeof Windows.UI === 'object';\n};\n\n/**\n * Converts a server error code to a Javascript Error\n */\nexport function errorForServerCode(code: string, query: QueryContext): Error {\n let reason = 'Unknown Error';\n if (code === 'too_big') {\n reason =\n 'The data requested exceeds the maximum size ' +\n 'that can be accessed with a single request.';\n } else if (code === 'permission_denied') {\n reason = \"Client doesn't have permission to access the desired data.\";\n } else if (code === 'unavailable') {\n reason = 'The service is unavailable';\n }\n\n const error = new Error(\n code + ' at ' + query._path.toString() + ': ' + reason\n );\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n (error as any).code = code.toUpperCase();\n return error;\n}\n\n/**\n * Used to test for integer-looking strings\n */\nexport const INTEGER_REGEXP_ = new RegExp('^-?(0*)\\\\d{1,10}$');\n\n/**\n * For use in keys, the minimum possible 32-bit integer.\n */\nexport const INTEGER_32_MIN = -2147483648;\n\n/**\n * For use in kyes, the maximum possible 32-bit integer.\n */\nexport const INTEGER_32_MAX = 2147483647;\n\n/**\n * If the string contains a 32-bit integer, return it. Else return null.\n */\nexport const tryParseInt = function (str: string): number | null {\n if (INTEGER_REGEXP_.test(str)) {\n const intVal = Number(str);\n if (intVal >= INTEGER_32_MIN && intVal <= INTEGER_32_MAX) {\n return intVal;\n }\n }\n return null;\n};\n\n/**\n * Helper to run some code but catch any exceptions and re-throw them later.\n * Useful for preventing user callbacks from breaking internal code.\n *\n * Re-throwing the exception from a setTimeout is a little evil, but it's very\n * convenient (we don't have to try to figure out when is a safe point to\n * re-throw it), and the behavior seems reasonable:\n *\n * * If you aren't pausing on exceptions, you get an error in the console with\n * the correct stack trace.\n * * If you're pausing on all exceptions, the debugger will pause on your\n * exception and then again when we rethrow it.\n * * If you're only pausing on uncaught exceptions, the debugger will only pause\n * on us re-throwing it.\n *\n * @param fn - The code to guard.\n */\nexport const exceptionGuard = function (fn: () => void) {\n try {\n fn();\n } catch (e) {\n // Re-throw exception when it's safe.\n setTimeout(() => {\n // It used to be that \"throw e\" would result in a good console error with\n // relevant context, but as of Chrome 39, you just get the firebase.js\n // file/line number where we re-throw it, which is useless. So we log\n // e.stack explicitly.\n const stack = e.stack || '';\n warn('Exception was thrown by user callback.', stack);\n throw e;\n }, Math.floor(0));\n }\n};\n\n/**\n * Helper function to safely call opt_callback with the specified arguments. It:\n * 1. Turns into a no-op if opt_callback is null or undefined.\n * 2. Wraps the call inside exceptionGuard to prevent exceptions from breaking our state.\n *\n * @param callback - Optional onComplete callback.\n * @param varArgs - Arbitrary args to be passed to opt_onComplete\n */\nexport const callUserCallback = function (\n // eslint-disable-next-line @typescript-eslint/ban-types\n callback?: Function | null,\n ...varArgs: unknown[]\n) {\n if (typeof callback === 'function') {\n exceptionGuard(() => {\n callback(...varArgs);\n });\n }\n};\n\n/**\n * @returns {boolean} true if we think we're currently being crawled.\n */\nexport const beingCrawled = function (): boolean {\n const userAgent =\n (typeof window === 'object' &&\n window['navigator'] &&\n window['navigator']['userAgent']) ||\n '';\n\n // For now we whitelist the most popular crawlers. We should refine this to be the set of crawlers we\n // believe to support JavaScript/AJAX rendering.\n // NOTE: Google Webmaster Tools doesn't really belong, but their \"This is how a visitor to your website\n // would have seen the page\" is flaky if we don't treat it as a crawler.\n return (\n userAgent.search(\n /googlebot|google webmaster tools|bingbot|yahoo! slurp|baiduspider|yandexbot|duckduckbot/i\n ) >= 0\n );\n};\n\n/**\n * Export a property of an object using a getter function.\n */\nexport const exportPropGetter = function (\n object: object,\n name: string,\n fnGet: () => unknown\n) {\n Object.defineProperty(object, name, { get: fnGet });\n};\n\n/**\n * Same as setTimeout() except on Node.JS it will /not/ prevent the process from exiting.\n *\n * It is removed with clearTimeout() as normal.\n *\n * @param fn - Function to run.\n * @param time - Milliseconds to wait before running.\n * @returns The setTimeout() return value.\n */\nexport const setTimeoutNonBlocking = function (\n fn: () => void,\n time: number\n): number | object {\n const timeout: number | object = setTimeout(fn, time);\n // Note: at the time of this comment, unrefTimer is under the unstable set of APIs. Run with --unstable to enable the API.\n if (\n typeof timeout === 'number' &&\n // @ts-ignore Is only defined in Deno environments.\n typeof Deno !== 'undefined' &&\n // @ts-ignore Deno and unrefTimer are only defined in Deno environments.\n Deno['unrefTimer']\n ) {\n // @ts-ignore Deno and unrefTimer are only defined in Deno environments.\n Deno.unrefTimer(timeout);\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n } else if (typeof timeout === 'object' && (timeout as any)['unref']) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n (timeout as any)['unref']();\n }\n\n return timeout;\n};\n","/**\n * @license\n * Copyright 2021 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n AppCheckInternalComponentName,\n AppCheckTokenListener,\n AppCheckTokenResult,\n FirebaseAppCheckInternal\n} from '@firebase/app-check-interop-types';\nimport { Provider } from '@firebase/component';\n\nimport { warn } from './util/util';\n\n/**\n * Abstraction around AppCheck's token fetching capabilities.\n */\nexport class AppCheckTokenProvider {\n private appCheck?: FirebaseAppCheckInternal;\n constructor(\n private appName_: string,\n private appCheckProvider?: Provider<AppCheckInternalComponentName>\n ) {\n this.appCheck = appCheckProvider?.getImmediate({ optional: true });\n if (!this.appCheck) {\n appCheckProvider?.get().then(appCheck => (this.appCheck = appCheck));\n }\n }\n\n getToken(forceRefresh?: boolean): Promise<AppCheckTokenResult> {\n if (!this.appCheck) {\n return new Promise<AppCheckTokenResult>((resolve, reject) => {\n // Support delayed initialization of FirebaseAppCheck. This allows our\n // customers to initialize the RTDB SDK before initializing Firebase\n // AppCheck and ensures that all requests are authenticated if a token\n // becomes available before the timoeout below expires.\n setTimeout(() => {\n if (this.appCheck) {\n this.getToken(forceRefresh).then(resolve, reject);\n } else {\n resolve(null);\n }\n }, 0);\n });\n }\n return this.appCheck.getToken(forceRefresh);\n }\n\n addTokenChangeListener(listener: AppCheckTokenListener) {\n this.appCheckProvider\n ?.get()\n .then(appCheck => appCheck.addTokenListener(listener));\n }\n\n notifyForInvalidToken(): void {\n warn(\n `Provided AppCheck credentials for the app named \"${this.appName_}\" ` +\n 'are invalid. This usually indicates your app was not initialized correctly.'\n );\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FirebaseAuthTokenData } from '@firebase/app-types/private';\nimport {\n FirebaseAuthInternal,\n FirebaseAuthInternalName\n} from '@firebase/auth-interop-types';\nimport { Provider } from '@firebase/component';\n\nimport { log, warn } from './util/util';\n\nexport interface AuthTokenProvider {\n getToken(forceRefresh: boolean): Promise<FirebaseAuthTokenData>;\n addTokenChangeListener(listener: (token: string | null) => void): void;\n removeTokenChangeListener(listener: (token: string | null) => void): void;\n notifyForInvalidToken(): void;\n}\n\n/**\n * Abstraction around FirebaseApp's token fetching capabilities.\n */\nexport class FirebaseAuthTokenProvider implements AuthTokenProvider {\n private auth_: FirebaseAuthInternal | null = null;\n\n constructor(\n private appName_: string,\n private firebaseOptions_: object,\n private authProvider_: Provider<FirebaseAuthInternalName>\n ) {\n this.auth_ = authProvider_.getImmediate({ optional: true });\n if (!this.auth_) {\n authProvider_.onInit(auth => (this.auth_ = auth));\n }\n }\n\n getToken(forceRefresh: boolean): Promise<FirebaseAuthTokenData> {\n if (!this.auth_) {\n return new Promise<FirebaseAuthTokenData>((resolve, reject) => {\n // Support delayed initialization of FirebaseAuth. This allows our\n // customers to initialize the RTDB SDK before initializing Firebase\n // Auth and ensures that all requests are authenticated if a token\n // becomes available before the timoeout below expires.\n setTimeout(() => {\n if (this.auth_) {\n this.getToken(forceRefresh).then(resolve, reject);\n } else {\n resolve(null);\n }\n }, 0);\n });\n }\n\n return this.auth_.getToken(forceRefresh).catch(error => {\n // TODO: Need to figure out all the cases this is raised and whether\n // this makes sense.\n if (error && error.code === 'auth/token-not-initialized') {\n log('Got auth/token-not-initialized error. Treating as null token.');\n return null;\n } else {\n return Promise.reject(error);\n }\n });\n }\n\n addTokenChangeListener(listener: (token: string | null) => void): void {\n // TODO: We might want to wrap the listener and call it with no args to\n // avoid a leaky abstraction, but that makes removing the listener harder.\n if (this.auth_) {\n this.auth_.addAuthTokenListener(listener);\n } else {\n this.authProvider_\n .get()\n .then(auth => auth.addAuthTokenListener(listener));\n }\n }\n\n removeTokenChangeListener(listener: (token: string | null) => void): void {\n this.authProvider_\n .get()\n .then(auth => auth.removeAuthTokenListener(listener));\n }\n\n notifyForInvalidToken(): void {\n let errorMessage =\n 'Provided authentication credentials for the app named \"' +\n this.appName_ +\n '\" are invalid. This usually indicates your app was not ' +\n 'initialized correctly. ';\n if ('credential' in this.firebaseOptions_) {\n errorMessage +=\n 'Make sure the \"credential\" property provided to initializeApp() ' +\n 'is authorized to access the specified \"databaseURL\" and is from the correct ' +\n 'project.';\n } else if ('serviceAccount' in this.firebaseOptions_) {\n errorMessage +=\n 'Make sure the \"serviceAccount\" property provided to initializeApp() ' +\n 'is authorized to access the specified \"databaseURL\" and is from the correct ' +\n 'project.';\n } else {\n errorMessage +=\n 'Make sure the \"apiKey\" and \"databaseURL\" properties provided to ' +\n 'initializeApp() match the values provided for your app at ' +\n 'https://console.firebase.google.com/.';\n }\n warn(errorMessage);\n }\n}\n\n/* AuthTokenProvider that supplies a constant token. Used by Admin SDK or mockUserToken with emulators. */\nexport class EmulatorTokenProvider implements AuthTokenProvider {\n /** A string that is treated as an admin access token by the RTDB emulator. Used by Admin SDK. */\n static OWNER = 'owner';\n\n constructor(private accessToken: string) {}\n\n getToken(forceRefresh: boolean): Promise<FirebaseAuthTokenData> {\n return Promise.resolve({\n accessToken: this.accessToken\n });\n }\n\n addTokenChangeListener(listener: (token: string | null) => void): void {\n // Invoke the listener immediately to match the behavior in Firebase Auth\n // (see packages/auth/src/auth.js#L1807)\n listener(this.accessToken);\n }\n\n removeTokenChangeListener(listener: (token: string | null) => void): void {}\n\n notifyForInvalidToken(): void {}\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nexport const PROTOCOL_VERSION = '5';\n\nexport const VERSION_PARAM = 'v';\n\nexport const TRANSPORT_SESSION_PARAM = 's';\n\nexport const REFERER_PARAM = 'r';\n\nexport const FORGE_REF = 'f';\n\n// Matches console.firebase.google.com, firebase-console-*.corp.google.com and\n// firebase.corp.google.com\nexport const FORGE_DOMAIN_RE =\n /(console\\.firebase|firebase-console-\\w+\\.corp|firebase\\.corp)\\.google\\.com/;\n\nexport const LAST_SESSION_PARAM = 'ls';\n\nexport const APPLICATION_ID_PARAM = 'p';\n\nexport const APP_CHECK_TOKEN_PARAM = 'ac';\n\nexport const WEBSOCKET = 'websocket';\n\nexport const LONG_POLLING = 'long_polling';\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert } from '@firebase/util';\n\nimport { LONG_POLLING, WEBSOCKET } from '../realtime/Constants';\n\nimport { PersistentStorage } from './storage/storage';\nimport { each } from './util/util';\n\n/**\n * A class that holds metadata about a Repo object\n */\nexport class RepoInfo {\n private _host: string;\n private _domain: string;\n internalHost: string;\n\n /**\n * @param host - Hostname portion of the url for the repo\n * @param secure - Whether or not this repo is accessed over ssl\n * @param namespace - The namespace represented by the repo\n * @param webSocketOnly - Whether to prefer websockets over all other transports (used by Nest).\n * @param nodeAdmin - Whether this instance uses Admin SDK credentials\n * @param persistenceKey - Override the default session persistence storage key\n */\n constructor(\n host: string,\n public readonly secure: boolean,\n public readonly namespace: string,\n public readonly webSocketOnly: boolean,\n public readonly nodeAdmin: boolean = false,\n public readonly persistenceKey: string = '',\n public readonly includeNamespaceInQueryParams: boolean = false\n ) {\n this._host = host.toLowerCase();\n this._domain = this._host.substr(this._host.indexOf('.') + 1);\n this.internalHost =\n (PersistentStorage.get('host:' + host) as string) || this._host;\n }\n\n isCacheableHost(): boolean {\n return this.internalHost.substr(0, 2) === 's-';\n }\n\n isCustomHost() {\n return (\n this._domain !== 'firebaseio.com' &&\n this._domain !== 'firebaseio-demo.com'\n );\n }\n\n get host() {\n return this._host;\n }\n\n set host(newHost: string) {\n if (newHost !== this.internalHost) {\n this.internalHost = newHost;\n if (this.isCacheableHost()) {\n PersistentStorage.set('host:' + this._host, this.internalHost);\n }\n }\n }\n\n toString(): string {\n let str = this.toURLString();\n if (this.persistenceKey) {\n str += '<' + this.persistenceKey + '>';\n }\n return str;\n }\n\n toURLString(): string {\n const protocol = this.secure ? 'https://' : 'http://';\n const query = this.includeNamespaceInQueryParams\n ? `?ns=${this.namespace}`\n : '';\n return `${protocol}${this.host}/${query}`;\n }\n}\n\nfunction repoInfoNeedsQueryParam(repoInfo: RepoInfo): boolean {\n return (\n repoInfo.host !== repoInfo.internalHost ||\n repoInfo.isCustomHost() ||\n repoInfo.includeNamespaceInQueryParams\n );\n}\n\n/**\n * Returns the websocket URL for this repo\n * @param repoInfo - RepoInfo object\n * @param type - of connection\n * @param params - list\n * @returns The URL for this repo\n */\nexport function repoInfoConnectionURL(\n repoInfo: RepoInfo,\n type: string,\n params: { [k: string]: string }\n): string {\n assert(typeof type === 'string', 'typeof type must == string');\n assert(typeof params === 'object', 'typeof params must == object');\n\n let connURL: string;\n if (type === WEBSOCKET) {\n connURL =\n (repoInfo.secure ? 'wss://' : 'ws://') + repoInfo.internalHost + '/.ws?';\n } else if (type === LONG_POLLING) {\n connURL =\n (repoInfo.secure ? 'https://' : 'http://') +\n repoInfo.internalHost +\n '/.lp?';\n } else {\n throw new Error('Unknown connection type: ' + type);\n }\n if (repoInfoNeedsQueryParam(repoInfo)) {\n params['ns'] = repoInfo.namespace;\n }\n\n const pairs: string[] = [];\n\n each(params, (key: string, value: string) => {\n pairs.push(key + '=' + value);\n });\n\n return connURL + pairs.join('&');\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { deepCopy, contains } from '@firebase/util';\n\n/**\n * Tracks a collection of stats.\n */\nexport class StatsCollection {\n private counters_: { [k: string]: number } = {};\n\n incrementCounter(name: string, amount: number = 1) {\n if (!contains(this.counters_, name)) {\n this.counters_[name] = 0;\n }\n\n this.counters_[name] += amount;\n }\n\n get() {\n return deepCopy(this.counters_);\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { RepoInfo } from '../RepoInfo';\n\nimport { StatsCollection } from './StatsCollection';\n\nconst collections: { [k: string]: StatsCollection } = {};\nconst reporters: { [k: string]: unknown } = {};\n\nexport function statsManagerGetCollection(repoInfo: RepoInfo): StatsCollection {\n const hashString = repoInfo.toString();\n\n if (!collections[hashString]) {\n collections[hashString] = new StatsCollection();\n }\n\n return collections[hashString];\n}\n\nexport function statsManagerGetOrCreateReporter<T>(\n repoInfo: RepoInfo,\n creatorFunction: () => T\n): T {\n const hashString = repoInfo.toString();\n\n if (!reporters[hashString]) {\n reporters[hashString] = creatorFunction();\n }\n\n return reporters[hashString] as T;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { exceptionGuard } from '../../core/util/util';\n\n/**\n * This class ensures the packets from the server arrive in order\n * This class takes data from the server and ensures it gets passed into the callbacks in order.\n */\nexport class PacketReceiver {\n pendingResponses: unknown[] = [];\n currentResponseNum = 0;\n closeAfterResponse = -1;\n onClose: (() => void) | null = null;\n\n /**\n * @param onMessage_\n */\n constructor(private onMessage_: (a: {}) => void) {}\n\n closeAfter(responseNum: number, callback: () => void) {\n this.closeAfterResponse = responseNum;\n this.onClose = callback;\n if (this.closeAfterResponse < this.currentResponseNum) {\n this.onClose();\n this.onClose = null;\n }\n }\n\n /**\n * Each message from the server comes with a response number, and an array of data. The responseNumber\n * allows us to ensure that we process them in the right order, since we can't be guaranteed that all\n * browsers will respond in the same order as the requests we sent\n */\n handleResponse(requestNum: number, data: unknown[]) {\n this.pendingResponses[requestNum] = data;\n while (this.pendingResponses[this.currentResponseNum]) {\n const toProcess = this.pendingResponses[\n this.currentResponseNum\n ] as unknown[];\n delete this.pendingResponses[this.currentResponseNum];\n for (let i = 0; i < toProcess.length; ++i) {\n if (toProcess[i]) {\n exceptionGuard(() => {\n this.onMessage_(toProcess[i]);\n });\n }\n }\n if (this.currentResponseNum === this.closeAfterResponse) {\n if (this.onClose) {\n this.onClose();\n this.onClose = null;\n }\n break;\n }\n this.currentResponseNum++;\n }\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { base64Encode, isNodeSdk, stringify } from '@firebase/util';\n\nimport { RepoInfo, repoInfoConnectionURL } from '../core/RepoInfo';\nimport { StatsCollection } from '../core/stats/StatsCollection';\nimport { statsManagerGetCollection } from '../core/stats/StatsManager';\nimport {\n executeWhenDOMReady,\n isChromeExtensionContentScript,\n isWindowsStoreApp,\n log,\n logWrapper,\n LUIDGenerator,\n splitStringBySize\n} from '../core/util/util';\n\nimport {\n APP_CHECK_TOKEN_PARAM,\n APPLICATION_ID_PARAM,\n FORGE_DOMAIN_RE,\n FORGE_REF,\n LAST_SESSION_PARAM,\n LONG_POLLING,\n PROTOCOL_VERSION,\n REFERER_PARAM,\n TRANSPORT_SESSION_PARAM,\n VERSION_PARAM\n} from './Constants';\nimport { PacketReceiver } from './polling/PacketReceiver';\nimport { Transport } from './Transport';\n\n// URL query parameters associated with longpolling\nexport const FIREBASE_LONGPOLL_START_PARAM = 'start';\nexport const FIREBASE_LONGPOLL_CLOSE_COMMAND = 'close';\nexport const FIREBASE_LONGPOLL_COMMAND_CB_NAME = 'pLPCommand';\nexport const FIREBASE_LONGPOLL_DATA_CB_NAME = 'pRTLPCB';\nexport const FIREBASE_LONGPOLL_ID_PARAM = 'id';\nexport const FIREBASE_LONGPOLL_PW_PARAM = 'pw';\nexport const FIREBASE_LONGPOLL_SERIAL_PARAM = 'ser';\nexport const FIREBASE_LONGPOLL_CALLBACK_ID_PARAM = 'cb';\nexport const FIREBASE_LONGPOLL_SEGMENT_NUM_PARAM = 'seg';\nexport const FIREBASE_LONGPOLL_SEGMENTS_IN_PACKET = 'ts';\nexport const FIREBASE_LONGPOLL_DATA_PARAM = 'd';\nexport const FIREBASE_LONGPOLL_DISCONN_FRAME_PARAM = 'disconn';\nexport const FIREBASE_LONGPOLL_DISCONN_FRAME_REQUEST_PARAM = 'dframe';\n\n//Data size constants.\n//TODO: Perf: the maximum length actually differs from browser to browser.\n// We should check what browser we're on and set accordingly.\nconst MAX_URL_DATA_SIZE = 1870;\nconst SEG_HEADER_SIZE = 30; //ie: &seg=8299234&ts=982389123&d=\nconst MAX_PAYLOAD_SIZE = MAX_URL_DATA_SIZE - SEG_HEADER_SIZE;\n\n/**\n * Keepalive period\n * send a fresh request at minimum every 25 seconds. Opera has a maximum request\n * length of 30 seconds that we can't exceed.\n */\nconst KEEPALIVE_REQUEST_INTERVAL = 25000;\n\n/**\n * How long to wait before aborting a long-polling connection attempt.\n */\nconst LP_CONNECT_TIMEOUT = 30000;\n\n/**\n * This class manages a single long-polling connection.\n */\nexport class BrowserPollConnection implements Transport {\n bytesSent = 0;\n bytesReceived = 0;\n urlFn: (params: object) => string;\n scriptTagHolder: FirebaseIFrameScriptHolder;\n myDisconnFrame: HTMLIFrameElement;\n curSegmentNum: number;\n myPacketOrderer: PacketReceiver;\n id: string;\n password: string;\n private log_: (...a: unknown[]) => void;\n private stats_: StatsCollection;\n private everConnected_ = false;\n private isClosed_: boolean;\n private connectTimeoutTimer_: number | null;\n private onDisconnect_: ((a?: boolean) => void) | null;\n\n /**\n * @param connId An identifier for this connection, used for logging\n * @param repoInfo The info for the endpoint to send data to.\n * @param applicationId The Firebase App ID for this project.\n * @param appCheckToken The AppCheck token for this client.\n * @param authToken The AuthToken to use for this connection.\n * @param transportSessionId Optional transportSessionid if we are\n * reconnecting for an existing transport session\n * @param lastSessionId Optional lastSessionId if the PersistentConnection has\n * already created a connection previously\n */\n constructor(\n public connId: string,\n public repoInfo: RepoInfo,\n private applicationId?: string,\n private appCheckToken?: string,\n private authToken?: string,\n public transportSessionId?: string,\n public lastSessionId?: string\n ) {\n this.log_ = logWrapper(connId);\n this.stats_ = statsManagerGetCollection(repoInfo);\n this.urlFn = (params: { [k: string]: string }) => {\n // Always add the token if we have one.\n if (this.appCheckToken) {\n params[APP_CHECK_TOKEN_PARAM] = this.appCheckToken;\n }\n return repoInfoConnectionURL(repoInfo, LONG_POLLING, params);\n };\n }\n\n /**\n * @param onMessage - Callback when messages arrive\n * @param onDisconnect - Callback with connection lost.\n */\n open(onMessage: (msg: {}) => void, onDisconnect: (a?: boolean) => void) {\n this.curSegmentNum = 0;\n this.onDisconnect_ = onDisconnect;\n this.myPacketOrderer = new PacketReceiver(onMessage);\n this.isClosed_ = false;\n\n this.connectTimeoutTimer_ = setTimeout(() => {\n this.log_('Timed out trying to connect.');\n // Make sure we clear the host cache\n this.onClosed_();\n this.connectTimeoutTimer_ = null;\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n }, Math.floor(LP_CONNECT_TIMEOUT)) as any;\n\n // Ensure we delay the creation of the iframe until the DOM is loaded.\n executeWhenDOMReady(() => {\n if (this.isClosed_) {\n return;\n }\n\n //Set up a callback that gets triggered once a connection is set up.\n this.scriptTagHolder = new FirebaseIFrameScriptHolder(\n (...args) => {\n const [command, arg1, arg2, arg3, arg4] = args;\n this.incrementIncomingBytes_(args);\n if (!this.scriptTagHolder) {\n return; // we closed the connection.\n }\n\n if (this.connectTimeoutTimer_) {\n clearTimeout(this.connectTimeoutTimer_);\n this.connectTimeoutTimer_ = null;\n }\n this.everConnected_ = true;\n if (command === FIREBASE_LONGPOLL_START_PARAM) {\n this.id = arg1 as string;\n this.password = arg2 as string;\n } else if (command === FIREBASE_LONGPOLL_CLOSE_COMMAND) {\n // Don't clear the host cache. We got a response from the server, so we know it's reachable\n if (arg1) {\n // We aren't expecting any more data (other than what the server's already in the process of sending us\n // through our already open polls), so don't send any more.\n this.scriptTagHolder.sendNewPolls = false;\n\n // arg1 in this case is the last response number sent by the server. We should try to receive\n // all of the responses up to this one before closing\n this.myPacketOrderer.closeAfter(arg1 as number, () => {\n this.onClosed_();\n });\n } else {\n this.onClosed_();\n }\n } else {\n throw new Error('Unrecognized command received: ' + command);\n }\n },\n (...args) => {\n const [pN, data] = args;\n this.incrementIncomingBytes_(args);\n this.myPacketOrderer.handleResponse(pN as number, data as unknown[]);\n },\n () => {\n this.onClosed_();\n },\n this.urlFn\n );\n\n //Send the initial request to connect. The serial number is simply to keep the browser from pulling previous results\n //from cache.\n const urlParams: { [k: string]: string | number } = {};\n urlParams[FIREBASE_LONGPOLL_START_PARAM] = 't';\n urlParams[FIREBASE_LONGPOLL_SERIAL_PARAM] = Math.floor(\n Math.random() * 100000000\n );\n if (this.scriptTagHolder.uniqueCallbackIdentifier) {\n urlParams[FIREBASE_LONGPOLL_CALLBACK_ID_PARAM] =\n this.scriptTagHolder.uniqueCallbackIdentifier;\n }\n urlParams[VERSION_PARAM] = PROTOCOL_VERSION;\n if (this.transportSessionId) {\n urlParams[TRANSPORT_SESSION_PARAM] = this.transportSessionId;\n }\n if (this.lastSessionId) {\n urlParams[LAST_SESSION_PARAM] = this.lastSessionId;\n }\n if (this.applicationId) {\n urlParams[APPLICATION_ID_PARAM] = this.applicationId;\n }\n if (this.appCheckToken) {\n urlParams[APP_CHECK_TOKEN_PARAM] = this.appCheckToken;\n }\n if (\n typeof location !== 'undefined' &&\n location.hostname &&\n FORGE_DOMAIN_RE.test(location.hostname)\n ) {\n urlParams[REFERER_PARAM] = FORGE_REF;\n }\n const connectURL = this.urlFn(urlParams);\n this.log_('Connecting via long-poll to ' + connectURL);\n this.scriptTagHolder.addTag(connectURL, () => {\n /* do nothing */\n });\n });\n }\n\n /**\n * Call this when a handshake has completed successfully and we want to consider the connection established\n */\n start() {\n this.scriptTagHolder.startLongPoll(this.id, this.password);\n this.addDisconnectPingFrame(this.id, this.password);\n }\n\n static forceAllow_: boolean;\n\n /**\n * Forces long polling to be considered as a potential transport\n */\n static forceAllow() {\n BrowserPollConnection.forceAllow_ = true;\n }\n\n static forceDisallow_: boolean;\n\n /**\n * Forces longpolling to not be considered as a potential transport\n */\n static forceDisallow() {\n BrowserPollConnection.forceDisallow_ = true;\n }\n\n // Static method, use string literal so it can be accessed in a generic way\n static isAvailable() {\n if (isNodeSdk()) {\n return false;\n } else if (BrowserPollConnection.forceAllow_) {\n return true;\n } else {\n // NOTE: In React-Native there's normally no 'document', but if you debug a React-Native app in\n // the Chrome debugger, 'document' is defined, but document.createElement is null (2015/06/08).\n return (\n !BrowserPollConnection.forceDisallow_ &&\n typeof document !== 'undefined' &&\n document.createElement != null &&\n !isChromeExtensionContentScript() &&\n !isWindowsStoreApp()\n );\n }\n }\n\n /**\n * No-op for polling\n */\n markConnectionHealthy() {}\n\n /**\n * Stops polling and cleans up the iframe\n */\n private shutdown_() {\n this.isClosed_ = true;\n\n if (this.scriptTagHolder) {\n this.scriptTagHolder.close();\n this.scriptTagHolder = null;\n }\n\n //remove the disconnect frame, which will trigger an XHR call to the server to tell it we're leaving.\n if (this.myDisconnFrame) {\n document.body.removeChild(this.myDisconnFrame);\n this.myDisconnFrame = null;\n }\n\n if (this.connectTimeoutTimer_) {\n clearTimeout(this.connectTimeoutTimer_);\n this.connectTimeoutTimer_ = null;\n }\n }\n\n /**\n * Triggered when this transport is closed\n */\n private onClosed_() {\n if (!this.isClosed_) {\n this.log_('Longpoll is closing itself');\n this.shutdown_();\n\n if (this.onDisconnect_) {\n this.onDisconnect_(this.everConnected_);\n this.onDisconnect_ = null;\n }\n }\n }\n\n /**\n * External-facing close handler. RealTime has requested we shut down. Kill our connection and tell the server\n * that we've left.\n */\n close() {\n if (!this.isClosed_) {\n this.log_('Longpoll is being closed.');\n this.shutdown_();\n }\n }\n\n /**\n * Send the JSON object down to the server. It will need to be stringified, base64 encoded, and then\n * broken into chunks (since URLs have a small maximum length).\n * @param data - The JSON data to transmit.\n */\n send(data: {}) {\n const dataStr = stringify(data);\n this.bytesSent += dataStr.length;\n this.stats_.incrementCounter('bytes_sent', dataStr.length);\n\n //first, lets get the base64-encoded data\n const base64data = base64Encode(dataStr);\n\n //We can only fit a certain amount in each URL, so we need to split this request\n //up into multiple pieces if it doesn't fit in one request.\n const dataSegs = splitStringBySize(base64data, MAX_PAYLOAD_SIZE);\n\n //Enqueue each segment for transmission. We assign each chunk a sequential ID and a total number\n //of segments so that we can reassemble the packet on the server.\n for (let i = 0; i < dataSegs.length; i++) {\n this.scriptTagHolder.enqueueSegment(\n this.curSegmentNum,\n dataSegs.length,\n dataSegs[i]\n );\n this.curSegmentNum++;\n }\n }\n\n /**\n * This is how we notify the server that we're leaving.\n * We aren't able to send requests with DHTML on a window close event, but we can\n * trigger XHR requests in some browsers (everything but Opera basically).\n */\n addDisconnectPingFrame(id: string, pw: string) {\n if (isNodeSdk()) {\n return;\n }\n this.myDisconnFrame = document.createElement('iframe');\n const urlParams: { [k: string]: string } = {};\n urlParams[FIREBASE_LONGPOLL_DISCONN_FRAME_REQUEST_PARAM] = 't';\n urlParams[FIREBASE_LONGPOLL_ID_PARAM] = id;\n urlParams[FIREBASE_LONGPOLL_PW_PARAM] = pw;\n this.myDisconnFrame.src = this.urlFn(urlParams);\n this.myDisconnFrame.style.display = 'none';\n\n document.body.appendChild(this.myDisconnFrame);\n }\n\n /**\n * Used to track the bytes received by this client\n */\n private incrementIncomingBytes_(args: unknown) {\n // TODO: This is an annoying perf hit just to track the number of incoming bytes. Maybe it should be opt-in.\n const bytesReceived = stringify(args).length;\n this.bytesReceived += bytesReceived;\n this.stats_.incrementCounter('bytes_received', bytesReceived);\n }\n}\n\n// eslint-disable-next-line @typescript-eslint/naming-convention\nexport interface IFrameElement extends HTMLIFrameElement {\n doc: Document;\n}\n\n/*********************************************************************************************\n * A wrapper around an iframe that is used as a long-polling script holder.\n *********************************************************************************************/\nexport class FirebaseIFrameScriptHolder {\n //We maintain a count of all of the outstanding requests, because if we have too many active at once it can cause\n //problems in some browsers.\n outstandingRequests = new Set<number>();\n\n //A queue of the pending segments waiting for transmission to the server.\n pendingSegs: Array<{ seg: number; ts: number; d: unknown }> = [];\n\n //A serial number. We use this for two things:\n // 1) A way to ensure the browser doesn't cache responses to polls\n // 2) A way to make the server aware when long-polls arrive in a different order than we started them. The\n // server needs to release both polls in this case or it will cause problems in Opera since Opera can only execute\n // JSONP code in the order it was added to the iframe.\n currentSerial = Math.floor(Math.random() * 100000000);\n\n // This gets set to false when we're \"closing down\" the connection (e.g. we're switching transports but there's still\n // incoming data from the server that we're waiting for).\n sendNewPolls = true;\n\n uniqueCallbackIdentifier: number;\n myIFrame: IFrameElement;\n alive: boolean;\n myID: string;\n myPW: string;\n commandCB: (command: string, ...args: unknown[]) => void;\n onMessageCB: (...args: unknown[]) => void;\n\n /**\n * @param commandCB - The callback to be called when control commands are recevied from the server.\n * @param onMessageCB - The callback to be triggered when responses arrive from the server.\n * @param onDisconnect - The callback to be triggered when this tag holder is closed\n * @param urlFn - A function that provides the URL of the endpoint to send data to.\n */\n constructor(\n commandCB: (command: string, ...args: unknown[]) => void,\n onMessageCB: (...args: unknown[]) => void,\n public onDisconnect: () => void,\n public urlFn: (a: object) => string\n ) {\n if (!isNodeSdk()) {\n //Each script holder registers a couple of uniquely named callbacks with the window. These are called from the\n //iframes where we put the long-polling script tags. We have two callbacks:\n // 1) Command Callback - Triggered for control issues, like starting a connection.\n // 2) Message Callback - Triggered when new data arrives.\n this.uniqueCallbackIdentifier = LUIDGenerator();\n window[\n FIREBASE_LONGPOLL_COMMAND_CB_NAME + this.uniqueCallbackIdentifier\n ] = commandCB;\n window[FIREBASE_LONGPOLL_DATA_CB_NAME + this.uniqueCallbackIdentifier] =\n onMessageCB;\n\n //Create an iframe for us to add script tags to.\n this.myIFrame = FirebaseIFrameScriptHolder.createIFrame_();\n\n // Set the iframe's contents.\n let script = '';\n // if we set a javascript url, it's IE and we need to set the document domain. The javascript url is sufficient\n // for ie9, but ie8 needs to do it again in the document itself.\n if (\n this.myIFrame.src &&\n this.myIFrame.src.substr(0, 'javascript:'.length) === 'javascript:'\n ) {\n const currentDomain = document.domain;\n script = '<script>document.domain=\"' + currentDomain + '\";</script>';\n }\n const iframeContents = '<html><body>' + script + '</body></html>';\n try {\n this.myIFrame.doc.open();\n this.myIFrame.doc.write(iframeContents);\n this.myIFrame.doc.close();\n } catch (e) {\n log('frame writing exception');\n if (e.stack) {\n log(e.stack);\n }\n log(e);\n }\n } else {\n this.commandCB = commandCB;\n this.onMessageCB = onMessageCB;\n }\n }\n\n /**\n * Each browser has its own funny way to handle iframes. Here we mush them all together into one object that I can\n * actually use.\n */\n private static createIFrame_(): IFrameElement {\n const iframe = document.createElement('iframe') as IFrameElement;\n iframe.style.display = 'none';\n\n // This is necessary in order to initialize the document inside the iframe\n if (document.body) {\n document.body.appendChild(iframe);\n try {\n // If document.domain has been modified in IE, this will throw an error, and we need to set the\n // domain of the iframe's document manually. We can do this via a javascript: url as the src attribute\n // Also note that we must do this *after* the iframe has been appended to the page. Otherwise it doesn't work.\n const a = iframe.contentWindow.document;\n if (!a) {\n // Apologies for the log-spam, I need to do something to keep closure from optimizing out the assignment above.\n log('No IE domain setting required');\n }\n } catch (e) {\n const domain = document.domain;\n iframe.src =\n \"javascript:void((function(){document.open();document.domain='\" +\n domain +\n \"';document.close();})())\";\n }\n } else {\n // LongPollConnection attempts to delay initialization until the document is ready, so hopefully this\n // never gets hit.\n throw 'Document body has not initialized. Wait to initialize Firebase until after the document is ready.';\n }\n\n // Get the document of the iframe in a browser-specific way.\n if (iframe.contentDocument) {\n iframe.doc = iframe.contentDocument; // Firefox, Opera, Safari\n } else if (iframe.contentWindow) {\n iframe.doc = iframe.contentWindow.document; // Internet Explorer\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n } else if ((iframe as any).document) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n iframe.doc = (iframe as any).document; //others?\n }\n\n return iframe;\n }\n\n /**\n * Cancel all outstanding queries and remove the frame.\n */\n close() {\n //Mark this iframe as dead, so no new requests are sent.\n this.alive = false;\n\n if (this.myIFrame) {\n //We have to actually remove all of the html inside this iframe before removing it from the\n //window, or IE will continue loading and executing the script tags we've already added, which\n //can lead to some errors being thrown. Setting textContent seems to be the safest way to do this.\n this.myIFrame.doc.body.textContent = '';\n setTimeout(() => {\n if (this.myIFrame !== null) {\n document.body.removeChild(this.myIFrame);\n this.myIFrame = null;\n }\n }, Math.floor(0));\n }\n\n // Protect from being called recursively.\n const onDisconnect = this.onDisconnect;\n if (onDisconnect) {\n this.onDisconnect = null;\n onDisconnect();\n }\n }\n\n /**\n * Actually start the long-polling session by adding the first script tag(s) to the iframe.\n * @param id - The ID of this connection\n * @param pw - The password for this connection\n */\n startLongPoll(id: string, pw: string) {\n this.myID = id;\n this.myPW = pw;\n this.alive = true;\n\n //send the initial request. If there are requests queued, make sure that we transmit as many as we are currently able to.\n while (this.newRequest_()) {}\n }\n\n /**\n * This is called any time someone might want a script tag to be added. It adds a script tag when there aren't\n * too many outstanding requests and we are still alive.\n *\n * If there are outstanding packet segments to send, it sends one. If there aren't, it sends a long-poll anyways if\n * needed.\n */\n private newRequest_() {\n // We keep one outstanding request open all the time to receive data, but if we need to send data\n // (pendingSegs.length > 0) then we create a new request to send the data. The server will automatically\n // close the old request.\n if (\n this.alive &&\n this.sendNewPolls &&\n this.outstandingRequests.size < (this.pendingSegs.length > 0 ? 2 : 1)\n ) {\n //construct our url\n this.currentSerial++;\n const urlParams: { [k: string]: string | number } = {};\n urlParams[FIREBASE_LONGPOLL_ID_PARAM] = this.myID;\n urlParams[FIREBASE_LONGPOLL_PW_PARAM] = this.myPW;\n urlParams[FIREBASE_LONGPOLL_SERIAL_PARAM] = this.currentSerial;\n let theURL = this.urlFn(urlParams);\n //Now add as much data as we can.\n let curDataString = '';\n let i = 0;\n\n while (this.pendingSegs.length > 0) {\n //first, lets see if the next segment will fit.\n const nextSeg = this.pendingSegs[0];\n if (\n (nextSeg.d as unknown[]).length +\n SEG_HEADER_SIZE +\n curDataString.length <=\n MAX_URL_DATA_SIZE\n ) {\n //great, the segment will fit. Lets append it.\n const theSeg = this.pendingSegs.shift();\n curDataString =\n curDataString +\n '&' +\n FIREBASE_LONGPOLL_SEGMENT_NUM_PARAM +\n i +\n '=' +\n theSeg.seg +\n '&' +\n FIREBASE_LONGPOLL_SEGMENTS_IN_PACKET +\n i +\n '=' +\n theSeg.ts +\n '&' +\n FIREBASE_LONGPOLL_DATA_PARAM +\n i +\n '=' +\n theSeg.d;\n i++;\n } else {\n break;\n }\n }\n\n theURL = theURL + curDataString;\n this.addLongPollTag_(theURL, this.currentSerial);\n\n return true;\n } else {\n return false;\n }\n }\n\n /**\n * Queue a packet for transmission to the server.\n * @param segnum - A sequential id for this packet segment used for reassembly\n * @param totalsegs - The total number of segments in this packet\n * @param data - The data for this segment.\n */\n enqueueSegment(segnum: number, totalsegs: number, data: unknown) {\n //add this to the queue of segments to send.\n this.pendingSegs.push({ seg: segnum, ts: totalsegs, d: data });\n\n //send the data immediately if there isn't already data being transmitted, unless\n //startLongPoll hasn't been called yet.\n if (this.alive) {\n this.newRequest_();\n }\n }\n\n /**\n * Add a script tag for a regular long-poll request.\n * @param url - The URL of the script tag.\n * @param serial - The serial number of the request.\n */\n private addLongPollTag_(url: string, serial: number) {\n //remember that we sent this request.\n this.outstandingRequests.add(serial);\n\n const doNewRequest = () => {\n this.outstandingRequests.delete(serial);\n this.newRequest_();\n };\n\n // If this request doesn't return on its own accord (by the server sending us some data), we'll\n // create a new one after the KEEPALIVE interval to make sure we always keep a fresh request open.\n const keepaliveTimeout = setTimeout(\n doNewRequest,\n Math.floor(KEEPALIVE_REQUEST_INTERVAL)\n );\n\n const readyStateCB = () => {\n // Request completed. Cancel the keepalive.\n clearTimeout(keepaliveTimeout);\n\n // Trigger a new request so we can continue receiving data.\n doNewRequest();\n };\n\n this.addTag(url, readyStateCB);\n }\n\n /**\n * Add an arbitrary script tag to the iframe.\n * @param url - The URL for the script tag source.\n * @param loadCB - A callback to be triggered once the script has loaded.\n */\n addTag(url: string, loadCB: () => void) {\n if (isNodeSdk()) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n (this as any).doNodeLongPoll(url, loadCB);\n } else {\n setTimeout(() => {\n try {\n // if we're already closed, don't add this poll\n if (!this.sendNewPolls) {\n return;\n }\n const newScript = this.myIFrame.doc.createElement('script');\n newScript.type = 'text/javascript';\n newScript.async = true;\n newScript.src = url;\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n newScript.onload = (newScript as any).onreadystatechange =\n function () {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const rstate = (newScript as any).readyState;\n if (!rstate || rstate === 'loaded' || rstate === 'complete') {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n newScript.onload = (newScript as any).onreadystatechange = null;\n if (newScript.parentNode) {\n newScript.parentNode.removeChild(newScript);\n }\n loadCB();\n }\n };\n newScript.onerror = () => {\n log('Long-poll script failed to load: ' + url);\n this.sendNewPolls = false;\n this.close();\n };\n this.myIFrame.doc.body.appendChild(newScript);\n } catch (e) {\n // TODO: we should make this error visible somehow\n }\n }, Math.floor(1));\n }\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert, isNodeSdk, jsonEval, stringify } from '@firebase/util';\n\nimport { RepoInfo, repoInfoConnectionURL } from '../core/RepoInfo';\nimport { StatsCollection } from '../core/stats/StatsCollection';\nimport { statsManagerGetCollection } from '../core/stats/StatsManager';\nimport { PersistentStorage } from '../core/storage/storage';\nimport { logWrapper, splitStringBySize } from '../core/util/util';\nimport { SDK_VERSION } from '../core/version';\n\nimport {\n APPLICATION_ID_PARAM,\n APP_CHECK_TOKEN_PARAM,\n FORGE_DOMAIN_RE,\n FORGE_REF,\n LAST_SESSION_PARAM,\n PROTOCOL_VERSION,\n REFERER_PARAM,\n TRANSPORT_SESSION_PARAM,\n VERSION_PARAM,\n WEBSOCKET\n} from './Constants';\nimport { Transport } from './Transport';\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\ndeclare const MozWebSocket: any;\n\nconst WEBSOCKET_MAX_FRAME_SIZE = 16384;\nconst WEBSOCKET_KEEPALIVE_INTERVAL = 45000;\n\nlet WebSocketImpl = null;\nif (typeof MozWebSocket !== 'undefined') {\n WebSocketImpl = MozWebSocket;\n} else if (typeof WebSocket !== 'undefined') {\n WebSocketImpl = WebSocket;\n}\n\nexport function setWebSocketImpl(impl) {\n WebSocketImpl = impl;\n}\n\n/**\n * Create a new websocket connection with the given callbacks.\n */\nexport class WebSocketConnection implements Transport {\n keepaliveTimer: number | null = null;\n frames: string[] | null = null;\n totalFrames = 0;\n bytesSent = 0;\n bytesReceived = 0;\n connURL: string;\n onDisconnect: (a?: boolean) => void;\n onMessage: (msg: {}) => void;\n mySock: WebSocket | null;\n private log_: (...a: unknown[]) => void;\n private stats_: StatsCollection;\n private everConnected_: boolean;\n private isClosed_: boolean;\n private nodeAdmin: boolean;\n\n /**\n * @param connId identifier for this transport\n * @param repoInfo The info for the websocket endpoint.\n * @param applicationId The Firebase App ID for this project.\n * @param appCheckToken The App Check Token for this client.\n * @param authToken The Auth Token for this client.\n * @param transportSessionId Optional transportSessionId if this is connecting\n * to an existing transport session\n * @param lastSessionId Optional lastSessionId if there was a previous\n * connection\n */\n constructor(\n public connId: string,\n repoInfo: RepoInfo,\n private applicationId?: string,\n private appCheckToken?: string,\n private authToken?: string,\n transportSessionId?: string,\n lastSessionId?: string\n ) {\n this.log_ = logWrapper(this.connId);\n this.stats_ = statsManagerGetCollection(repoInfo);\n this.connURL = WebSocketConnection.connectionURL_(\n repoInfo,\n transportSessionId,\n lastSessionId,\n appCheckToken,\n applicationId\n );\n this.nodeAdmin = repoInfo.nodeAdmin;\n }\n\n /**\n * @param repoInfo - The info for the websocket endpoint.\n * @param transportSessionId - Optional transportSessionId if this is connecting to an existing transport\n * session\n * @param lastSessionId - Optional lastSessionId if there was a previous connection\n * @returns connection url\n */\n private static connectionURL_(\n repoInfo: RepoInfo,\n transportSessionId?: string,\n lastSessionId?: string,\n appCheckToken?: string,\n applicationId?: string\n ): string {\n const urlParams: { [k: string]: string } = {};\n urlParams[VERSION_PARAM] = PROTOCOL_VERSION;\n\n if (\n !isNodeSdk() &&\n typeof location !== 'undefined' &&\n location.hostname &&\n FORGE_DOMAIN_RE.test(location.hostname)\n ) {\n urlParams[REFERER_PARAM] = FORGE_REF;\n }\n if (transportSessionId) {\n urlParams[TRANSPORT_SESSION_PARAM] = transportSessionId;\n }\n if (lastSessionId) {\n urlParams[LAST_SESSION_PARAM] = lastSessionId;\n }\n if (appCheckToken) {\n urlParams[APP_CHECK_TOKEN_PARAM] = appCheckToken;\n }\n if (applicationId) {\n urlParams[APPLICATION_ID_PARAM] = applicationId;\n }\n\n return repoInfoConnectionURL(repoInfo, WEBSOCKET, urlParams);\n }\n\n /**\n * @param onMessage - Callback when messages arrive\n * @param onDisconnect - Callback with connection lost.\n */\n open(onMessage: (msg: {}) => void, onDisconnect: (a?: boolean) => void) {\n this.onDisconnect = onDisconnect;\n this.onMessage = onMessage;\n\n this.log_('Websocket connecting to ' + this.connURL);\n\n this.everConnected_ = false;\n // Assume failure until proven otherwise.\n PersistentStorage.set('previous_websocket_failure', true);\n\n try {\n let options: { [k: string]: object };\n if (isNodeSdk()) {\n const device = this.nodeAdmin ? 'AdminNode' : 'Node';\n // UA Format: Firebase/<wire_protocol>/<sdk_version>/<platform>/<device>\n options = {\n headers: {\n 'User-Agent': `Firebase/${PROTOCOL_VERSION}/${SDK_VERSION}/${process.platform}/${device}`,\n 'X-Firebase-GMPID': this.applicationId || ''\n }\n };\n\n // If using Node with admin creds, AppCheck-related checks are unnecessary.\n // Note that we send the credentials here even if they aren't admin credentials, which is\n // not a problem.\n // Note that this header is just used to bypass appcheck, and the token should still be sent\n // through the websocket connection once it is established.\n if (this.authToken) {\n options.headers['Authorization'] = `Bearer ${this.authToken}`;\n }\n if (this.appCheckToken) {\n options.headers['X-Firebase-AppCheck'] = this.appCheckToken;\n }\n\n // Plumb appropriate http_proxy environment variable into faye-websocket if it exists.\n const env = process['env'];\n const proxy =\n this.connURL.indexOf('wss://') === 0\n ? env['HTTPS_PROXY'] || env['https_proxy']\n : env['HTTP_PROXY'] || env['http_proxy'];\n\n if (proxy) {\n options['proxy'] = { origin: proxy };\n }\n }\n this.mySock = new WebSocketImpl(this.connURL, [], options);\n } catch (e) {\n this.log_('Error instantiating WebSocket.');\n const error = e.message || e.data;\n if (error) {\n this.log_(error);\n }\n this.onClosed_();\n return;\n }\n\n this.mySock.onopen = () => {\n this.log_('Websocket connected.');\n this.everConnected_ = true;\n };\n\n this.mySock.onclose = () => {\n this.log_('Websocket connection was disconnected.');\n this.mySock = null;\n this.onClosed_();\n };\n\n this.mySock.onmessage = m => {\n this.handleIncomingFrame(m as {});\n };\n\n this.mySock.onerror = e => {\n this.log_('WebSocket error. Closing connection.');\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const error = (e as any).message || (e as any).data;\n if (error) {\n this.log_(error);\n }\n this.onClosed_();\n };\n }\n\n /**\n * No-op for websockets, we don't need to do anything once the connection is confirmed as open\n */\n start() {}\n\n static forceDisallow_: boolean;\n\n static forceDisallow() {\n WebSocketConnection.forceDisallow_ = true;\n }\n\n static isAvailable(): boolean {\n let isOldAndroid = false;\n if (typeof navigator !== 'undefined' && navigator.userAgent) {\n const oldAndroidRegex = /Android ([0-9]{0,}\\.[0-9]{0,})/;\n const oldAndroidMatch = navigator.userAgent.match(oldAndroidRegex);\n if (oldAndroidMatch && oldAndroidMatch.length > 1) {\n if (parseFloat(oldAndroidMatch[1]) < 4.4) {\n isOldAndroid = true;\n }\n }\n }\n\n return (\n !isOldAndroid &&\n WebSocketImpl !== null &&\n !WebSocketConnection.forceDisallow_\n );\n }\n\n /**\n * Number of response before we consider the connection \"healthy.\"\n */\n static responsesRequiredToBeHealthy = 2;\n\n /**\n * Time to wait for the connection te become healthy before giving up.\n */\n static healthyTimeout = 30000;\n\n /**\n * Returns true if we previously failed to connect with this transport.\n */\n static previouslyFailed(): boolean {\n // If our persistent storage is actually only in-memory storage,\n // we default to assuming that it previously failed to be safe.\n return (\n PersistentStorage.isInMemoryStorage ||\n PersistentStorage.get('previous_websocket_failure') === true\n );\n }\n\n markConnectionHealthy() {\n PersistentStorage.remove('previous_websocket_failure');\n }\n\n private appendFrame_(data: string) {\n this.frames.push(data);\n if (this.frames.length === this.totalFrames) {\n const fullMess = this.frames.join('');\n this.frames = null;\n const jsonMess = jsonEval(fullMess) as object;\n\n //handle the message\n this.onMessage(jsonMess);\n }\n }\n\n /**\n * @param frameCount - The number of frames we are expecting from the server\n */\n private handleNewFrameCount_(frameCount: number) {\n this.totalFrames = frameCount;\n this.frames = [];\n }\n\n /**\n * Attempts to parse a frame count out of some text. If it can't, assumes a value of 1\n * @returns Any remaining data to be process, or null if there is none\n */\n private extractFrameCount_(data: string): string | null {\n assert(this.frames === null, 'We already have a frame buffer');\n // TODO: The server is only supposed to send up to 9999 frames (i.e. length <= 4), but that isn't being enforced\n // currently. So allowing larger frame counts (length <= 6). See https://app.asana.com/0/search/8688598998380/8237608042508\n if (data.length <= 6) {\n const frameCount = Number(data);\n if (!isNaN(frameCount)) {\n this.handleNewFrameCount_(frameCount);\n return null;\n }\n }\n this.handleNewFrameCount_(1);\n return data;\n }\n\n /**\n * Process a websocket frame that has arrived from the server.\n * @param mess - The frame data\n */\n handleIncomingFrame(mess: { [k: string]: unknown }) {\n if (this.mySock === null) {\n return; // Chrome apparently delivers incoming packets even after we .close() the connection sometimes.\n }\n const data = mess['data'] as string;\n this.bytesReceived += data.length;\n this.stats_.incrementCounter('bytes_received', data.length);\n\n this.resetKeepAlive();\n\n if (this.frames !== null) {\n // we're buffering\n this.appendFrame_(data);\n } else {\n // try to parse out a frame count, otherwise, assume 1 and process it\n const remainingData = this.extractFrameCount_(data);\n if (remainingData !== null) {\n this.appendFrame_(remainingData);\n }\n }\n }\n\n /**\n * Send a message to the server\n * @param data - The JSON object to transmit\n */\n send(data: {}) {\n this.resetKeepAlive();\n\n const dataStr = stringify(data);\n this.bytesSent += dataStr.length;\n this.stats_.incrementCounter('bytes_sent', dataStr.length);\n\n //We can only fit a certain amount in each websocket frame, so we need to split this request\n //up into multiple pieces if it doesn't fit in one request.\n\n const dataSegs = splitStringBySize(dataStr, WEBSOCKET_MAX_FRAME_SIZE);\n\n //Send the length header\n if (dataSegs.length > 1) {\n this.sendString_(String(dataSegs.length));\n }\n\n //Send the actual data in segments.\n for (let i = 0; i < dataSegs.length; i++) {\n this.sendString_(dataSegs[i]);\n }\n }\n\n private shutdown_() {\n this.isClosed_ = true;\n if (this.keepaliveTimer) {\n clearInterval(this.keepaliveTimer);\n this.keepaliveTimer = null;\n }\n\n if (this.mySock) {\n this.mySock.close();\n this.mySock = null;\n }\n }\n\n private onClosed_() {\n if (!this.isClosed_) {\n this.log_('WebSocket is closing itself');\n this.shutdown_();\n\n // since this is an internal close, trigger the close listener\n if (this.onDisconnect) {\n this.onDisconnect(this.everConnected_);\n this.onDisconnect = null;\n }\n }\n }\n\n /**\n * External-facing close handler.\n * Close the websocket and kill the connection.\n */\n close() {\n if (!this.isClosed_) {\n this.log_('WebSocket is being closed');\n this.shutdown_();\n }\n }\n\n /**\n * Kill the current keepalive timer and start a new one, to ensure that it always fires N seconds after\n * the last activity.\n */\n resetKeepAlive() {\n clearInterval(this.keepaliveTimer);\n this.keepaliveTimer = setInterval(() => {\n //If there has been no websocket activity for a while, send a no-op\n if (this.mySock) {\n this.sendString_('0');\n }\n this.resetKeepAlive();\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n }, Math.floor(WEBSOCKET_KEEPALIVE_INTERVAL)) as any;\n }\n\n /**\n * Send a string over the websocket.\n *\n * @param str - String to send.\n */\n private sendString_(str: string) {\n // Firefox seems to sometimes throw exceptions (NS_ERROR_UNEXPECTED) from websocket .send()\n // calls for some unknown reason. We treat these as an error and disconnect.\n // See https://app.asana.com/0/58926111402292/68021340250410\n try {\n this.mySock.send(str);\n } catch (e) {\n this.log_(\n 'Exception thrown from WebSocket.send():',\n e.message || e.data,\n 'Closing connection.'\n );\n setTimeout(this.onClosed_.bind(this), 0);\n }\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { RepoInfo } from '../core/RepoInfo';\nimport { warn } from '../core/util/util';\n\nimport { BrowserPollConnection } from './BrowserPollConnection';\nimport { TransportConstructor } from './Transport';\nimport { WebSocketConnection } from './WebSocketConnection';\n\n/**\n * Currently simplistic, this class manages what transport a Connection should use at various stages of its\n * lifecycle.\n *\n * It starts with longpolling in a browser, and httppolling on node. It then upgrades to websockets if\n * they are available.\n */\nexport class TransportManager {\n private transports_: TransportConstructor[];\n\n // Keeps track of whether the TransportManager has already chosen a transport to use\n static globalTransportInitialized_ = false;\n\n static get ALL_TRANSPORTS() {\n return [BrowserPollConnection, WebSocketConnection];\n }\n\n /**\n * Returns whether transport has been selected to ensure WebSocketConnection or BrowserPollConnection are not called after\n * TransportManager has already set up transports_\n */\n static get IS_TRANSPORT_INITIALIZED() {\n return this.globalTransportInitialized_;\n }\n\n /**\n * @param repoInfo - Metadata around the namespace we're connecting to\n */\n constructor(repoInfo: RepoInfo) {\n this.initTransports_(repoInfo);\n }\n\n private initTransports_(repoInfo: RepoInfo) {\n const isWebSocketsAvailable: boolean =\n WebSocketConnection && WebSocketConnection['isAvailable']();\n let isSkipPollConnection =\n isWebSocketsAvailable && !WebSocketConnection.previouslyFailed();\n\n if (repoInfo.webSocketOnly) {\n if (!isWebSocketsAvailable) {\n warn(\n \"wss:// URL used, but browser isn't known to support websockets. Trying anyway.\"\n );\n }\n\n isSkipPollConnection = true;\n }\n\n if (isSkipPollConnection) {\n this.transports_ = [WebSocketConnection];\n } else {\n const transports = (this.transports_ = [] as TransportConstructor[]);\n for (const transport of TransportManager.ALL_TRANSPORTS) {\n if (transport && transport['isAvailable']()) {\n transports.push(transport);\n }\n }\n TransportManager.globalTransportInitialized_ = true;\n }\n }\n\n /**\n * @returns The constructor for the initial transport to use\n */\n initialTransport(): TransportConstructor {\n if (this.transports_.length > 0) {\n return this.transports_[0];\n } else {\n throw new Error('No transports available');\n }\n }\n\n /**\n * @returns The constructor for the next transport, or null\n */\n upgradeTransport(): TransportConstructor | null {\n if (this.transports_.length > 1) {\n return this.transports_[1];\n } else {\n return null;\n }\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { RepoInfo } from '../core/RepoInfo';\nimport { PersistentStorage } from '../core/storage/storage';\nimport { Indexable } from '../core/util/misc';\nimport {\n error,\n logWrapper,\n requireKey,\n setTimeoutNonBlocking,\n warn\n} from '../core/util/util';\n\nimport { PROTOCOL_VERSION } from './Constants';\nimport { Transport, TransportConstructor } from './Transport';\nimport { TransportManager } from './TransportManager';\n\n// Abort upgrade attempt if it takes longer than 60s.\nconst UPGRADE_TIMEOUT = 60000;\n\n// For some transports (WebSockets), we need to \"validate\" the transport by exchanging a few requests and responses.\n// If we haven't sent enough requests within 5s, we'll start sending noop ping requests.\nconst DELAY_BEFORE_SENDING_EXTRA_REQUESTS = 5000;\n\n// If the initial data sent triggers a lot of bandwidth (i.e. it's a large put or a listen for a large amount of data)\n// then we may not be able to exchange our ping/pong requests within the healthy timeout. So if we reach the timeout\n// but we've sent/received enough bytes, we don't cancel the connection.\nconst BYTES_SENT_HEALTHY_OVERRIDE = 10 * 1024;\nconst BYTES_RECEIVED_HEALTHY_OVERRIDE = 100 * 1024;\n\nconst enum RealtimeState {\n CONNECTING,\n CONNECTED,\n DISCONNECTED\n}\n\nconst MESSAGE_TYPE = 't';\nconst MESSAGE_DATA = 'd';\nconst CONTROL_SHUTDOWN = 's';\nconst CONTROL_RESET = 'r';\nconst CONTROL_ERROR = 'e';\nconst CONTROL_PONG = 'o';\nconst SWITCH_ACK = 'a';\nconst END_TRANSMISSION = 'n';\nconst PING = 'p';\n\nconst SERVER_HELLO = 'h';\n\n/**\n * Creates a new real-time connection to the server using whichever method works\n * best in the current browser.\n */\nexport class Connection {\n connectionCount = 0;\n pendingDataMessages: unknown[] = [];\n sessionId: string;\n\n private conn_: Transport;\n private healthyTimeout_: number;\n private isHealthy_: boolean;\n private log_: (...args: unknown[]) => void;\n private primaryResponsesRequired_: number;\n private rx_: Transport;\n private secondaryConn_: Transport;\n private secondaryResponsesRequired_: number;\n private state_ = RealtimeState.CONNECTING;\n private transportManager_: TransportManager;\n private tx_: Transport;\n\n /**\n * @param id - an id for this connection\n * @param repoInfo_ - the info for the endpoint to connect to\n * @param applicationId_ - the Firebase App ID for this project\n * @param appCheckToken_ - The App Check Token for this device.\n * @param authToken_ - The auth token for this session.\n * @param onMessage_ - the callback to be triggered when a server-push message arrives\n * @param onReady_ - the callback to be triggered when this connection is ready to send messages.\n * @param onDisconnect_ - the callback to be triggered when a connection was lost\n * @param onKill_ - the callback to be triggered when this connection has permanently shut down.\n * @param lastSessionId - last session id in persistent connection. is used to clean up old session in real-time server\n */\n constructor(\n public id: string,\n private repoInfo_: RepoInfo,\n private applicationId_: string | undefined,\n private appCheckToken_: string | undefined,\n private authToken_: string | undefined,\n private onMessage_: (a: {}) => void,\n private onReady_: (a: number, b: string) => void,\n private onDisconnect_: () => void,\n private onKill_: (a: string) => void,\n public lastSessionId?: string\n ) {\n this.log_ = logWrapper('c:' + this.id + ':');\n this.transportManager_ = new TransportManager(repoInfo_);\n this.log_('Connection created');\n this.start_();\n }\n\n /**\n * Starts a connection attempt\n */\n private start_(): void {\n const conn = this.transportManager_.initialTransport();\n this.conn_ = new conn(\n this.nextTransportId_(),\n this.repoInfo_,\n this.applicationId_,\n this.appCheckToken_,\n this.authToken_,\n null,\n this.lastSessionId\n );\n\n // For certain transports (WebSockets), we need to send and receive several messages back and forth before we\n // can consider the transport healthy.\n this.primaryResponsesRequired_ = conn['responsesRequiredToBeHealthy'] || 0;\n\n const onMessageReceived = this.connReceiver_(this.conn_);\n const onConnectionLost = this.disconnReceiver_(this.conn_);\n this.tx_ = this.conn_;\n this.rx_ = this.conn_;\n this.secondaryConn_ = null;\n this.isHealthy_ = false;\n\n /*\n * Firefox doesn't like when code from one iframe tries to create another iframe by way of the parent frame.\n * This can occur in the case of a redirect, i.e. we guessed wrong on what server to connect to and received a reset.\n * Somehow, setTimeout seems to make this ok. That doesn't make sense from a security perspective, since you should\n * still have the context of your originating frame.\n */\n setTimeout(() => {\n // this.conn_ gets set to null in some of the tests. Check to make sure it still exists before using it\n this.conn_ && this.conn_.open(onMessageReceived, onConnectionLost);\n }, Math.floor(0));\n\n const healthyTimeoutMS = conn['healthyTimeout'] || 0;\n if (healthyTimeoutMS > 0) {\n this.healthyTimeout_ = setTimeoutNonBlocking(() => {\n this.healthyTimeout_ = null;\n if (!this.isHealthy_) {\n if (\n this.conn_ &&\n this.conn_.bytesReceived > BYTES_RECEIVED_HEALTHY_OVERRIDE\n ) {\n this.log_(\n 'Connection exceeded healthy timeout but has received ' +\n this.conn_.bytesReceived +\n ' bytes. Marking connection healthy.'\n );\n this.isHealthy_ = true;\n this.conn_.markConnectionHealthy();\n } else if (\n this.conn_ &&\n this.conn_.bytesSent > BYTES_SENT_HEALTHY_OVERRIDE\n ) {\n this.log_(\n 'Connection exceeded healthy timeout but has sent ' +\n this.conn_.bytesSent +\n ' bytes. Leaving connection alive.'\n );\n // NOTE: We don't want to mark it healthy, since we have no guarantee that the bytes have made it to\n // the server.\n } else {\n this.log_('Closing unhealthy connection after timeout.');\n this.close();\n }\n }\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n }, Math.floor(healthyTimeoutMS)) as any;\n }\n }\n\n private nextTransportId_(): string {\n return 'c:' + this.id + ':' + this.connectionCount++;\n }\n\n private disconnReceiver_(conn) {\n return everConnected => {\n if (conn === this.conn_) {\n this.onConnectionLost_(everConnected);\n } else if (conn === this.secondaryConn_) {\n this.log_('Secondary connection lost.');\n this.onSecondaryConnectionLost_();\n } else {\n this.log_('closing an old connection');\n }\n };\n }\n\n private connReceiver_(conn: Transport) {\n return (message: Indexable) => {\n if (this.state_ !== RealtimeState.DISCONNECTED) {\n if (conn === this.rx_) {\n this.onPrimaryMessageReceived_(message);\n } else if (conn === this.secondaryConn_) {\n this.onSecondaryMessageReceived_(message);\n } else {\n this.log_('message on old connection');\n }\n }\n };\n }\n\n /**\n * @param dataMsg - An arbitrary data message to be sent to the server\n */\n sendRequest(dataMsg: object) {\n // wrap in a data message envelope and send it on\n const msg = { t: 'd', d: dataMsg };\n this.sendData_(msg);\n }\n\n tryCleanupConnection() {\n if (this.tx_ === this.secondaryConn_ && this.rx_ === this.secondaryConn_) {\n this.log_(\n 'cleaning up and promoting a connection: ' + this.secondaryConn_.connId\n );\n this.conn_ = this.secondaryConn_;\n this.secondaryConn_ = null;\n // the server will shutdown the old connection\n }\n }\n\n private onSecondaryControl_(controlData: { [k: string]: unknown }) {\n if (MESSAGE_TYPE in controlData) {\n const cmd = controlData[MESSAGE_TYPE] as string;\n if (cmd === SWITCH_ACK) {\n this.upgradeIfSecondaryHealthy_();\n } else if (cmd === CONTROL_RESET) {\n // Most likely the session wasn't valid. Abandon the switch attempt\n this.log_('Got a reset on secondary, closing it');\n this.secondaryConn_.close();\n // If we were already using this connection for something, than we need to fully close\n if (\n this.tx_ === this.secondaryConn_ ||\n this.rx_ === this.secondaryConn_\n ) {\n this.close();\n }\n } else if (cmd === CONTROL_PONG) {\n this.log_('got pong on secondary.');\n this.secondaryResponsesRequired_--;\n this.upgradeIfSecondaryHealthy_();\n }\n }\n }\n\n private onSecondaryMessageReceived_(parsedData: Indexable) {\n const layer: string = requireKey('t', parsedData) as string;\n const data: unknown = requireKey('d', parsedData);\n if (layer === 'c') {\n this.onSecondaryControl_(data as Indexable);\n } else if (layer === 'd') {\n // got a data message, but we're still second connection. Need to buffer it up\n this.pendingDataMessages.push(data);\n } else {\n throw new Error('Unknown protocol layer: ' + layer);\n }\n }\n\n private upgradeIfSecondaryHealthy_() {\n if (this.secondaryResponsesRequired_ <= 0) {\n this.log_('Secondary connection is healthy.');\n this.isHealthy_ = true;\n this.secondaryConn_.markConnectionHealthy();\n this.proceedWithUpgrade_();\n } else {\n // Send a ping to make sure the connection is healthy.\n this.log_('sending ping on secondary.');\n this.secondaryConn_.send({ t: 'c', d: { t: PING, d: {} } });\n }\n }\n\n private proceedWithUpgrade_() {\n // tell this connection to consider itself open\n this.secondaryConn_.start();\n // send ack\n this.log_('sending client ack on secondary');\n this.secondaryConn_.send({ t: 'c', d: { t: SWITCH_ACK, d: {} } });\n\n // send end packet on primary transport, switch to sending on this one\n // can receive on this one, buffer responses until end received on primary transport\n this.log_('Ending transmission on primary');\n this.conn_.send({ t: 'c', d: { t: END_TRANSMISSION, d: {} } });\n this.tx_ = this.secondaryConn_;\n\n this.tryCleanupConnection();\n }\n\n private onPrimaryMessageReceived_(parsedData: { [k: string]: unknown }) {\n // Must refer to parsedData properties in quotes, so closure doesn't touch them.\n const layer: string = requireKey('t', parsedData) as string;\n const data: unknown = requireKey('d', parsedData);\n if (layer === 'c') {\n this.onControl_(data as { [k: string]: unknown });\n } else if (layer === 'd') {\n this.onDataMessage_(data);\n }\n }\n\n private onDataMessage_(message: unknown) {\n this.onPrimaryResponse_();\n\n // We don't do anything with data messages, just kick them up a level\n this.onMessage_(message);\n }\n\n private onPrimaryResponse_() {\n if (!this.isHealthy_) {\n this.primaryResponsesRequired_--;\n if (this.primaryResponsesRequired_ <= 0) {\n this.log_('Primary connection is healthy.');\n this.isHealthy_ = true;\n this.conn_.markConnectionHealthy();\n }\n }\n }\n\n private onControl_(controlData: { [k: string]: unknown }) {\n const cmd: string = requireKey(MESSAGE_TYPE, controlData) as string;\n if (MESSAGE_DATA in controlData) {\n const payload = controlData[MESSAGE_DATA];\n if (cmd === SERVER_HELLO) {\n this.onHandshake_(\n payload as {\n ts: number;\n v: string;\n h: string;\n s: string;\n }\n );\n } else if (cmd === END_TRANSMISSION) {\n this.log_('recvd end transmission on primary');\n this.rx_ = this.secondaryConn_;\n for (let i = 0; i < this.pendingDataMessages.length; ++i) {\n this.onDataMessage_(this.pendingDataMessages[i]);\n }\n this.pendingDataMessages = [];\n this.tryCleanupConnection();\n } else if (cmd === CONTROL_SHUTDOWN) {\n // This was previously the 'onKill' callback passed to the lower-level connection\n // payload in this case is the reason for the shutdown. Generally a human-readable error\n this.onConnectionShutdown_(payload as string);\n } else if (cmd === CONTROL_RESET) {\n // payload in this case is the host we should contact\n this.onReset_(payload as string);\n } else if (cmd === CONTROL_ERROR) {\n error('Server Error: ' + payload);\n } else if (cmd === CONTROL_PONG) {\n this.log_('got pong on primary.');\n this.onPrimaryResponse_();\n this.sendPingOnPrimaryIfNecessary_();\n } else {\n error('Unknown control packet command: ' + cmd);\n }\n }\n }\n\n /**\n * @param handshake - The handshake data returned from the server\n */\n private onHandshake_(handshake: {\n ts: number;\n v: string;\n h: string;\n s: string;\n }): void {\n const timestamp = handshake.ts;\n const version = handshake.v;\n const host = handshake.h;\n this.sessionId = handshake.s;\n this.repoInfo_.host = host;\n // if we've already closed the connection, then don't bother trying to progress further\n if (this.state_ === RealtimeState.CONNECTING) {\n this.conn_.start();\n this.onConnectionEstablished_(this.conn_, timestamp);\n if (PROTOCOL_VERSION !== version) {\n warn('Protocol version mismatch detected');\n }\n // TODO: do we want to upgrade? when? maybe a delay?\n this.tryStartUpgrade_();\n }\n }\n\n private tryStartUpgrade_() {\n const conn = this.transportManager_.upgradeTransport();\n if (conn) {\n this.startUpgrade_(conn);\n }\n }\n\n private startUpgrade_(conn: TransportConstructor) {\n this.secondaryConn_ = new conn(\n this.nextTransportId_(),\n this.repoInfo_,\n this.applicationId_,\n this.appCheckToken_,\n this.authToken_,\n this.sessionId\n );\n // For certain transports (WebSockets), we need to send and receive several messages back and forth before we\n // can consider the transport healthy.\n this.secondaryResponsesRequired_ =\n conn['responsesRequiredToBeHealthy'] || 0;\n\n const onMessage = this.connReceiver_(this.secondaryConn_);\n const onDisconnect = this.disconnReceiver_(this.secondaryConn_);\n this.secondaryConn_.open(onMessage, onDisconnect);\n\n // If we haven't successfully upgraded after UPGRADE_TIMEOUT, give up and kill the secondary.\n setTimeoutNonBlocking(() => {\n if (this.secondaryConn_) {\n this.log_('Timed out trying to upgrade.');\n this.secondaryConn_.close();\n }\n }, Math.floor(UPGRADE_TIMEOUT));\n }\n\n private onReset_(host: string) {\n this.log_('Reset packet received. New host: ' + host);\n this.repoInfo_.host = host;\n // TODO: if we're already \"connected\", we need to trigger a disconnect at the next layer up.\n // We don't currently support resets after the connection has already been established\n if (this.state_ === RealtimeState.CONNECTED) {\n this.close();\n } else {\n // Close whatever connections we have open and start again.\n this.closeConnections_();\n this.start_();\n }\n }\n\n private onConnectionEstablished_(conn: Transport, timestamp: number) {\n this.log_('Realtime connection established.');\n this.conn_ = conn;\n this.state_ = RealtimeState.CONNECTED;\n\n if (this.onReady_) {\n this.onReady_(timestamp, this.sessionId);\n this.onReady_ = null;\n }\n\n // If after 5 seconds we haven't sent enough requests to the server to get the connection healthy,\n // send some pings.\n if (this.primaryResponsesRequired_ === 0) {\n this.log_('Primary connection is healthy.');\n this.isHealthy_ = true;\n } else {\n setTimeoutNonBlocking(() => {\n this.sendPingOnPrimaryIfNecessary_();\n }, Math.floor(DELAY_BEFORE_SENDING_EXTRA_REQUESTS));\n }\n }\n\n private sendPingOnPrimaryIfNecessary_() {\n // If the connection isn't considered healthy yet, we'll send a noop ping packet request.\n if (!this.isHealthy_ && this.state_ === RealtimeState.CONNECTED) {\n this.log_('sending ping on primary.');\n this.sendData_({ t: 'c', d: { t: PING, d: {} } });\n }\n }\n\n private onSecondaryConnectionLost_() {\n const conn = this.secondaryConn_;\n this.secondaryConn_ = null;\n if (this.tx_ === conn || this.rx_ === conn) {\n // we are relying on this connection already in some capacity. Therefore, a failure is real\n this.close();\n }\n }\n\n /**\n * @param everConnected - Whether or not the connection ever reached a server. Used to determine if\n * we should flush the host cache\n */\n private onConnectionLost_(everConnected: boolean) {\n this.conn_ = null;\n\n // NOTE: IF you're seeing a Firefox error for this line, I think it might be because it's getting\n // called on window close and RealtimeState.CONNECTING is no longer defined. Just a guess.\n if (!everConnected && this.state_ === RealtimeState.CONNECTING) {\n this.log_('Realtime connection failed.');\n // Since we failed to connect at all, clear any cached entry for this namespace in case the machine went away\n if (this.repoInfo_.isCacheableHost()) {\n PersistentStorage.remove('host:' + this.repoInfo_.host);\n // reset the internal host to what we would show the user, i.e. <ns>.firebaseio.com\n this.repoInfo_.internalHost = this.repoInfo_.host;\n }\n } else if (this.state_ === RealtimeState.CONNECTED) {\n this.log_('Realtime connection lost.');\n }\n\n this.close();\n }\n\n private onConnectionShutdown_(reason: string) {\n this.log_('Connection shutdown command received. Shutting down...');\n\n if (this.onKill_) {\n this.onKill_(reason);\n this.onKill_ = null;\n }\n\n // We intentionally don't want to fire onDisconnect (kill is a different case),\n // so clear the callback.\n this.onDisconnect_ = null;\n\n this.close();\n }\n\n private sendData_(data: object) {\n if (this.state_ !== RealtimeState.CONNECTED) {\n throw 'Connection is not connected';\n } else {\n this.tx_.send(data);\n }\n }\n\n /**\n * Cleans up this connection, calling the appropriate callbacks\n */\n close() {\n if (this.state_ !== RealtimeState.DISCONNECTED) {\n this.log_('Closing realtime connection.');\n this.state_ = RealtimeState.DISCONNECTED;\n\n this.closeConnections_();\n\n if (this.onDisconnect_) {\n this.onDisconnect_();\n this.onDisconnect_ = null;\n }\n }\n }\n\n private closeConnections_() {\n this.log_('Shutting down all connections');\n if (this.conn_) {\n this.conn_.close();\n this.conn_ = null;\n }\n\n if (this.secondaryConn_) {\n this.secondaryConn_.close();\n this.secondaryConn_ = null;\n }\n\n if (this.healthyTimeout_) {\n clearTimeout(this.healthyTimeout_);\n this.healthyTimeout_ = null;\n }\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { QueryContext } from './view/EventRegistration';\n\n/**\n * Interface defining the set of actions that can be performed against the Firebase server\n * (basically corresponds to our wire protocol).\n *\n * @interface\n */\nexport abstract class ServerActions {\n abstract listen(\n query: QueryContext,\n currentHashFn: () => string,\n tag: number | null,\n onComplete: (a: string, b: unknown) => void\n ): void;\n\n /**\n * Remove a listen.\n */\n abstract unlisten(query: QueryContext, tag: number | null): void;\n\n /**\n * Get the server value satisfying this query.\n */\n abstract get(query: QueryContext): Promise<string>;\n\n put(\n pathString: string,\n data: unknown,\n onComplete?: (a: string, b: string) => void,\n hash?: string\n ) {}\n\n merge(\n pathString: string,\n data: unknown,\n onComplete: (a: string, b: string | null) => void,\n hash?: string\n ) {}\n\n /**\n * Refreshes the auth token for the current connection.\n * @param token - The authentication token\n */\n refreshAuthToken(token: string) {}\n\n /**\n * Refreshes the app check token for the current connection.\n * @param token The app check token\n */\n refreshAppCheckToken(token: string) {}\n\n onDisconnectPut(\n pathString: string,\n data: unknown,\n onComplete?: (a: string, b: string) => void\n ) {}\n\n onDisconnectMerge(\n pathString: string,\n data: unknown,\n onComplete?: (a: string, b: string) => void\n ) {}\n\n onDisconnectCancel(\n pathString: string,\n onComplete?: (a: string, b: string) => void\n ) {}\n\n reportStats(stats: { [k: string]: unknown }) {}\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert } from '@firebase/util';\n\n/**\n * Base class to be used if you want to emit events. Call the constructor with\n * the set of allowed event names.\n */\nexport abstract class EventEmitter {\n private listeners_: {\n [eventType: string]: Array<{\n callback(...args: unknown[]): void;\n context: unknown;\n }>;\n } = {};\n\n constructor(private allowedEvents_: string[]) {\n assert(\n Array.isArray(allowedEvents_) && allowedEvents_.length > 0,\n 'Requires a non-empty array'\n );\n }\n\n /**\n * To be overridden by derived classes in order to fire an initial event when\n * somebody subscribes for data.\n *\n * @returns {Array.<*>} Array of parameters to trigger initial event with.\n */\n abstract getInitialEvent(eventType: string): unknown[];\n\n /**\n * To be called by derived classes to trigger events.\n */\n protected trigger(eventType: string, ...varArgs: unknown[]) {\n if (Array.isArray(this.listeners_[eventType])) {\n // Clone the list, since callbacks could add/remove listeners.\n const listeners = [...this.listeners_[eventType]];\n\n for (let i = 0; i < listeners.length; i++) {\n listeners[i].callback.apply(listeners[i].context, varArgs);\n }\n }\n }\n\n on(eventType: string, callback: (a: unknown) => void, context: unknown) {\n this.validateEventType_(eventType);\n this.listeners_[eventType] = this.listeners_[eventType] || [];\n this.listeners_[eventType].push({ callback, context });\n\n const eventData = this.getInitialEvent(eventType);\n if (eventData) {\n callback.apply(context, eventData);\n }\n }\n\n off(eventType: string, callback: (a: unknown) => void, context: unknown) {\n this.validateEventType_(eventType);\n const listeners = this.listeners_[eventType] || [];\n for (let i = 0; i < listeners.length; i++) {\n if (\n listeners[i].callback === callback &&\n (!context || context === listeners[i].context)\n ) {\n listeners.splice(i, 1);\n return;\n }\n }\n }\n\n private validateEventType_(eventType: string) {\n assert(\n this.allowedEvents_.find(et => {\n return et === eventType;\n }),\n 'Unknown event: ' + eventType\n );\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert, isMobileCordova } from '@firebase/util';\n\nimport { EventEmitter } from './EventEmitter';\n\n/**\n * Monitors online state (as reported by window.online/offline events).\n *\n * The expectation is that this could have many false positives (thinks we are online\n * when we're not), but no false negatives. So we can safely use it to determine when\n * we definitely cannot reach the internet.\n */\nexport class OnlineMonitor extends EventEmitter {\n private online_ = true;\n\n static getInstance() {\n return new OnlineMonitor();\n }\n\n constructor() {\n super(['online']);\n\n // We've had repeated complaints that Cordova apps can get stuck \"offline\", e.g.\n // https://forum.ionicframework.com/t/firebase-connection-is-lost-and-never-come-back/43810\n // It would seem that the 'online' event does not always fire consistently. So we disable it\n // for Cordova.\n if (\n typeof window !== 'undefined' &&\n typeof window.addEventListener !== 'undefined' &&\n !isMobileCordova()\n ) {\n window.addEventListener(\n 'online',\n () => {\n if (!this.online_) {\n this.online_ = true;\n this.trigger('online', true);\n }\n },\n false\n );\n\n window.addEventListener(\n 'offline',\n () => {\n if (this.online_) {\n this.online_ = false;\n this.trigger('online', false);\n }\n },\n false\n );\n }\n }\n\n getInitialEvent(eventType: string): boolean[] {\n assert(eventType === 'online', 'Unknown event type: ' + eventType);\n return [this.online_];\n }\n\n currentlyOnline(): boolean {\n return this.online_;\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { stringLength } from '@firebase/util';\n\nimport { nameCompare } from './util';\n\n/** Maximum key depth. */\nconst MAX_PATH_DEPTH = 32;\n\n/** Maximum number of (UTF8) bytes in a Firebase path. */\nconst MAX_PATH_LENGTH_BYTES = 768;\n\n/**\n * An immutable object representing a parsed path. It's immutable so that you\n * can pass them around to other functions without worrying about them changing\n * it.\n */\n\nexport class Path {\n pieces_: string[];\n pieceNum_: number;\n\n /**\n * @param pathOrString - Path string to parse, or another path, or the raw\n * tokens array\n */\n constructor(pathOrString: string | string[], pieceNum?: number) {\n if (pieceNum === void 0) {\n this.pieces_ = (pathOrString as string).split('/');\n\n // Remove empty pieces.\n let copyTo = 0;\n for (let i = 0; i < this.pieces_.length; i++) {\n if (this.pieces_[i].length > 0) {\n this.pieces_[copyTo] = this.pieces_[i];\n copyTo++;\n }\n }\n this.pieces_.length = copyTo;\n\n this.pieceNum_ = 0;\n } else {\n this.pieces_ = pathOrString as string[];\n this.pieceNum_ = pieceNum;\n }\n }\n\n toString(): string {\n let pathString = '';\n for (let i = this.pieceNum_; i < this.pieces_.length; i++) {\n if (this.pieces_[i] !== '') {\n pathString += '/' + this.pieces_[i];\n }\n }\n\n return pathString || '/';\n }\n}\n\nexport function newEmptyPath(): Path {\n return new Path('');\n}\n\nexport function pathGetFront(path: Path): string | null {\n if (path.pieceNum_ >= path.pieces_.length) {\n return null;\n }\n\n return path.pieces_[path.pieceNum_];\n}\n\n/**\n * @returns The number of segments in this path\n */\nexport function pathGetLength(path: Path): number {\n return path.pieces_.length - path.pieceNum_;\n}\n\nexport function pathPopFront(path: Path): Path {\n let pieceNum = path.pieceNum_;\n if (pieceNum < path.pieces_.length) {\n pieceNum++;\n }\n return new Path(path.pieces_, pieceNum);\n}\n\nexport function pathGetBack(path: Path): string | null {\n if (path.pieceNum_ < path.pieces_.length) {\n return path.pieces_[path.pieces_.length - 1];\n }\n\n return null;\n}\n\nexport function pathToUrlEncodedString(path: Path): string {\n let pathString = '';\n for (let i = path.pieceNum_; i < path.pieces_.length; i++) {\n if (path.pieces_[i] !== '') {\n pathString += '/' + encodeURIComponent(String(path.pieces_[i]));\n }\n }\n\n return pathString || '/';\n}\n\n/**\n * Shallow copy of the parts of the path.\n *\n */\nexport function pathSlice(path: Path, begin: number = 0): string[] {\n return path.pieces_.slice(path.pieceNum_ + begin);\n}\n\nexport function pathParent(path: Path): Path | null {\n if (path.pieceNum_ >= path.pieces_.length) {\n return null;\n }\n\n const pieces = [];\n for (let i = path.pieceNum_; i < path.pieces_.length - 1; i++) {\n pieces.push(path.pieces_[i]);\n }\n\n return new Path(pieces, 0);\n}\n\nexport function pathChild(path: Path, childPathObj: string | Path): Path {\n const pieces = [];\n for (let i = path.pieceNum_; i < path.pieces_.length; i++) {\n pieces.push(path.pieces_[i]);\n }\n\n if (childPathObj instanceof Path) {\n for (let i = childPathObj.pieceNum_; i < childPathObj.pieces_.length; i++) {\n pieces.push(childPathObj.pieces_[i]);\n }\n } else {\n const childPieces = childPathObj.split('/');\n for (let i = 0; i < childPieces.length; i++) {\n if (childPieces[i].length > 0) {\n pieces.push(childPieces[i]);\n }\n }\n }\n\n return new Path(pieces, 0);\n}\n\n/**\n * @returns True if there are no segments in this path\n */\nexport function pathIsEmpty(path: Path): boolean {\n return path.pieceNum_ >= path.pieces_.length;\n}\n\n/**\n * @returns The path from outerPath to innerPath\n */\nexport function newRelativePath(outerPath: Path, innerPath: Path): Path {\n const outer = pathGetFront(outerPath),\n inner = pathGetFront(innerPath);\n if (outer === null) {\n return innerPath;\n } else if (outer === inner) {\n return newRelativePath(pathPopFront(outerPath), pathPopFront(innerPath));\n } else {\n throw new Error(\n 'INTERNAL ERROR: innerPath (' +\n innerPath +\n ') is not within ' +\n 'outerPath (' +\n outerPath +\n ')'\n );\n }\n}\n\n/**\n * @returns -1, 0, 1 if left is less, equal, or greater than the right.\n */\nexport function pathCompare(left: Path, right: Path): number {\n const leftKeys = pathSlice(left, 0);\n const rightKeys = pathSlice(right, 0);\n for (let i = 0; i < leftKeys.length && i < rightKeys.length; i++) {\n const cmp = nameCompare(leftKeys[i], rightKeys[i]);\n if (cmp !== 0) {\n return cmp;\n }\n }\n if (leftKeys.length === rightKeys.length) {\n return 0;\n }\n return leftKeys.length < rightKeys.length ? -1 : 1;\n}\n\n/**\n * @returns true if paths are the same.\n */\nexport function pathEquals(path: Path, other: Path): boolean {\n if (pathGetLength(path) !== pathGetLength(other)) {\n return false;\n }\n\n for (\n let i = path.pieceNum_, j = other.pieceNum_;\n i <= path.pieces_.length;\n i++, j++\n ) {\n if (path.pieces_[i] !== other.pieces_[j]) {\n return false;\n }\n }\n\n return true;\n}\n\n/**\n * @returns True if this path is a parent of (or the same as) other\n */\nexport function pathContains(path: Path, other: Path): boolean {\n let i = path.pieceNum_;\n let j = other.pieceNum_;\n if (pathGetLength(path) > pathGetLength(other)) {\n return false;\n }\n while (i < path.pieces_.length) {\n if (path.pieces_[i] !== other.pieces_[j]) {\n return false;\n }\n ++i;\n ++j;\n }\n return true;\n}\n\n/**\n * Dynamic (mutable) path used to count path lengths.\n *\n * This class is used to efficiently check paths for valid\n * length (in UTF8 bytes) and depth (used in path validation).\n *\n * Throws Error exception if path is ever invalid.\n *\n * The definition of a path always begins with '/'.\n */\nexport class ValidationPath {\n parts_: string[];\n /** Initialize to number of '/' chars needed in path. */\n byteLength_: number;\n\n /**\n * @param path - Initial Path.\n * @param errorPrefix_ - Prefix for any error messages.\n */\n constructor(path: Path, public errorPrefix_: string) {\n this.parts_ = pathSlice(path, 0);\n /** Initialize to number of '/' chars needed in path. */\n this.byteLength_ = Math.max(1, this.parts_.length);\n\n for (let i = 0; i < this.parts_.length; i++) {\n this.byteLength_ += stringLength(this.parts_[i]);\n }\n validationPathCheckValid(this);\n }\n}\n\nexport function validationPathPush(\n validationPath: ValidationPath,\n child: string\n): void {\n // Count the needed '/'\n if (validationPath.parts_.length > 0) {\n validationPath.byteLength_ += 1;\n }\n validationPath.parts_.push(child);\n validationPath.byteLength_ += stringLength(child);\n validationPathCheckValid(validationPath);\n}\n\nexport function validationPathPop(validationPath: ValidationPath): void {\n const last = validationPath.parts_.pop();\n validationPath.byteLength_ -= stringLength(last);\n // Un-count the previous '/'\n if (validationPath.parts_.length > 0) {\n validationPath.byteLength_ -= 1;\n }\n}\n\nfunction validationPathCheckValid(validationPath: ValidationPath): void {\n if (validationPath.byteLength_ > MAX_PATH_LENGTH_BYTES) {\n throw new Error(\n validationPath.errorPrefix_ +\n 'has a key path longer than ' +\n MAX_PATH_LENGTH_BYTES +\n ' bytes (' +\n validationPath.byteLength_ +\n ').'\n );\n }\n if (validationPath.parts_.length > MAX_PATH_DEPTH) {\n throw new Error(\n validationPath.errorPrefix_ +\n 'path specified exceeds the maximum depth that can be written (' +\n MAX_PATH_DEPTH +\n ') or object contains a cycle ' +\n validationPathToErrorString(validationPath)\n );\n }\n}\n\n/**\n * String for use in error messages - uses '.' notation for path.\n */\nexport function validationPathToErrorString(\n validationPath: ValidationPath\n): string {\n if (validationPath.parts_.length === 0) {\n return '';\n }\n return \"in property '\" + validationPath.parts_.join('.') + \"'\";\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert } from '@firebase/util';\n\nimport { EventEmitter } from './EventEmitter';\n\ndeclare const document: Document;\n\nexport class VisibilityMonitor extends EventEmitter {\n private visible_: boolean;\n\n static getInstance() {\n return new VisibilityMonitor();\n }\n\n constructor() {\n super(['visible']);\n let hidden: string;\n let visibilityChange: string;\n if (\n typeof document !== 'undefined' &&\n typeof document.addEventListener !== 'undefined'\n ) {\n if (typeof document['hidden'] !== 'undefined') {\n // Opera 12.10 and Firefox 18 and later support\n visibilityChange = 'visibilitychange';\n hidden = 'hidden';\n } else if (typeof document['mozHidden'] !== 'undefined') {\n visibilityChange = 'mozvisibilitychange';\n hidden = 'mozHidden';\n } else if (typeof document['msHidden'] !== 'undefined') {\n visibilityChange = 'msvisibilitychange';\n hidden = 'msHidden';\n } else if (typeof document['webkitHidden'] !== 'undefined') {\n visibilityChange = 'webkitvisibilitychange';\n hidden = 'webkitHidden';\n }\n }\n\n // Initially, we always assume we are visible. This ensures that in browsers\n // without page visibility support or in cases where we are never visible\n // (e.g. chrome extension), we act as if we are visible, i.e. don't delay\n // reconnects\n this.visible_ = true;\n\n if (visibilityChange) {\n document.addEventListener(\n visibilityChange,\n () => {\n const visible = !document[hidden];\n if (visible !== this.visible_) {\n this.visible_ = visible;\n this.trigger('visible', visible);\n }\n },\n false\n );\n }\n }\n\n getInitialEvent(eventType: string): boolean[] {\n assert(eventType === 'visible', 'Unknown event type: ' + eventType);\n return [this.visible_];\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n assert,\n contains,\n Deferred,\n isEmpty,\n isMobileCordova,\n isNodeSdk,\n isReactNative,\n isValidFormat,\n safeGet,\n stringify,\n isAdmin\n} from '@firebase/util';\n\nimport { Connection } from '../realtime/Connection';\n\nimport { AppCheckTokenProvider } from './AppCheckTokenProvider';\nimport { AuthTokenProvider } from './AuthTokenProvider';\nimport { RepoInfo } from './RepoInfo';\nimport { ServerActions } from './ServerActions';\nimport { OnlineMonitor } from './util/OnlineMonitor';\nimport { Path } from './util/Path';\nimport { error, log, logWrapper, warn, ObjectToUniqueKey } from './util/util';\nimport { VisibilityMonitor } from './util/VisibilityMonitor';\nimport { SDK_VERSION } from './version';\nimport { QueryContext } from './view/EventRegistration';\n\nconst RECONNECT_MIN_DELAY = 1000;\nconst RECONNECT_MAX_DELAY_DEFAULT = 60 * 5 * 1000; // 5 minutes in milliseconds (Case: 1858)\nconst RECONNECT_MAX_DELAY_FOR_ADMINS = 30 * 1000; // 30 seconds for admin clients (likely to be a backend server)\nconst RECONNECT_DELAY_MULTIPLIER = 1.3;\nconst RECONNECT_DELAY_RESET_TIMEOUT = 30000; // Reset delay back to MIN_DELAY after being connected for 30sec.\nconst SERVER_KILL_INTERRUPT_REASON = 'server_kill';\n\n// If auth fails repeatedly, we'll assume something is wrong and log a warning / back off.\nconst INVALID_TOKEN_THRESHOLD = 3;\n\ninterface ListenSpec {\n onComplete(s: string, p?: unknown): void;\n\n hashFn(): string;\n\n query: QueryContext;\n tag: number | null;\n}\n\ninterface OnDisconnectRequest {\n pathString: string;\n action: string;\n data: unknown;\n onComplete?: (a: string, b: string) => void;\n}\n\ninterface OutstandingPut {\n action: string;\n request: object;\n queued?: boolean;\n onComplete: (a: string, b?: string) => void;\n}\n\ninterface OutstandingGet {\n request: object;\n onComplete: (response: { [k: string]: unknown }) => void;\n}\n\n/**\n * Firebase connection. Abstracts wire protocol and handles reconnecting.\n *\n * NOTE: All JSON objects sent to the realtime connection must have property names enclosed\n * in quotes to make sure the closure compiler does not minify them.\n */\nexport class PersistentConnection extends ServerActions {\n // Used for diagnostic logging.\n id = PersistentConnection.nextPersistentConnectionId_++;\n private log_ = logWrapper('p:' + this.id + ':');\n\n private interruptReasons_: { [reason: string]: boolean } = {};\n private readonly listens: Map<\n /* path */ string,\n Map</* queryId */ string, ListenSpec>\n > = new Map();\n private outstandingPuts_: OutstandingPut[] = [];\n private outstandingGets_: OutstandingGet[] = [];\n private outstandingPutCount_ = 0;\n private outstandingGetCount_ = 0;\n private onDisconnectRequestQueue_: OnDisconnectRequest[] = [];\n private connected_ = false;\n private reconnectDelay_ = RECONNECT_MIN_DELAY;\n private maxReconnectDelay_ = RECONNECT_MAX_DELAY_DEFAULT;\n private securityDebugCallback_: ((a: object) => void) | null = null;\n lastSessionId: string | null = null;\n\n private establishConnectionTimer_: number | null = null;\n\n private visible_: boolean = false;\n\n // Before we get connected, we keep a queue of pending messages to send.\n private requestCBHash_: { [k: number]: (a: unknown) => void } = {};\n private requestNumber_ = 0;\n\n private realtime_: {\n sendRequest(a: object): void;\n close(): void;\n } | null = null;\n\n private authToken_: string | null = null;\n private appCheckToken_: string | null = null;\n private forceTokenRefresh_ = false;\n private invalidAuthTokenCount_ = 0;\n private invalidAppCheckTokenCount_ = 0;\n\n private firstConnection_ = true;\n private lastConnectionAttemptTime_: number | null = null;\n private lastConnectionEstablishedTime_: number | null = null;\n\n private static nextPersistentConnectionId_ = 0;\n\n /**\n * Counter for number of connections created. Mainly used for tagging in the logs\n */\n private static nextConnectionId_ = 0;\n\n /**\n * @param repoInfo_ - Data about the namespace we are connecting to\n * @param applicationId_ - The Firebase App ID for this project\n * @param onDataUpdate_ - A callback for new data from the server\n */\n constructor(\n private repoInfo_: RepoInfo,\n private applicationId_: string,\n private onDataUpdate_: (\n a: string,\n b: unknown,\n c: boolean,\n d: number | null\n ) => void,\n private onConnectStatus_: (a: boolean) => void,\n private onServerInfoUpdate_: (a: unknown) => void,\n private authTokenProvider_: AuthTokenProvider,\n private appCheckTokenProvider_: AppCheckTokenProvider,\n private authOverride_?: object | null\n ) {\n super();\n\n if (authOverride_ && !isNodeSdk()) {\n throw new Error(\n 'Auth override specified in options, but not supported on non Node.js platforms'\n );\n }\n\n VisibilityMonitor.getInstance().on('visible', this.onVisible_, this);\n\n if (repoInfo_.host.indexOf('fblocal') === -1) {\n OnlineMonitor.getInstance().on('online', this.onOnline_, this);\n }\n }\n\n protected sendRequest(\n action: string,\n body: unknown,\n onResponse?: (a: unknown) => void\n ) {\n const curReqNum = ++this.requestNumber_;\n\n const msg = { r: curReqNum, a: action, b: body };\n this.log_(stringify(msg));\n assert(\n this.connected_,\n \"sendRequest call when we're not connected not allowed.\"\n );\n this.realtime_.sendRequest(msg);\n if (onResponse) {\n this.requestCBHash_[curReqNum] = onResponse;\n }\n }\n\n get(query: QueryContext): Promise<string> {\n this.initConnection_();\n\n const deferred = new Deferred<string>();\n const request = {\n p: query._path.toString(),\n q: query._queryObject\n };\n const outstandingGet = {\n action: 'g',\n request,\n onComplete: (message: { [k: string]: unknown }) => {\n const payload = message['d'] as string;\n if (message['s'] === 'ok') {\n deferred.resolve(payload);\n } else {\n deferred.reject(payload);\n }\n }\n };\n this.outstandingGets_.push(outstandingGet);\n this.outstandingGetCount_++;\n const index = this.outstandingGets_.length - 1;\n\n if (this.connected_) {\n this.sendGet_(index);\n }\n\n return deferred.promise;\n }\n\n listen(\n query: QueryContext,\n currentHashFn: () => string,\n tag: number | null,\n onComplete: (a: string, b: unknown) => void\n ) {\n this.initConnection_();\n\n const queryId = query._queryIdentifier;\n const pathString = query._path.toString();\n this.log_('Listen called for ' + pathString + ' ' + queryId);\n if (!this.listens.has(pathString)) {\n this.listens.set(pathString, new Map());\n }\n assert(\n query._queryParams.isDefault() || !query._queryParams.loadsAllData(),\n 'listen() called for non-default but complete query'\n );\n assert(\n !this.listens.get(pathString)!.has(queryId),\n `listen() called twice for same path/queryId.`\n );\n const listenSpec: ListenSpec = {\n onComplete,\n hashFn: currentHashFn,\n query,\n tag\n };\n this.listens.get(pathString)!.set(queryId, listenSpec);\n\n if (this.connected_) {\n this.sendListen_(listenSpec);\n }\n }\n\n private sendGet_(index: number) {\n const get = this.outstandingGets_[index];\n this.sendRequest('g', get.request, (message: { [k: string]: unknown }) => {\n delete this.outstandingGets_[index];\n this.outstandingGetCount_--;\n if (this.outstandingGetCount_ === 0) {\n this.outstandingGets_ = [];\n }\n if (get.onComplete) {\n get.onComplete(message);\n }\n });\n }\n\n private sendListen_(listenSpec: ListenSpec) {\n const query = listenSpec.query;\n const pathString = query._path.toString();\n const queryId = query._queryIdentifier;\n this.log_('Listen on ' + pathString + ' for ' + queryId);\n const req: { [k: string]: unknown } = { /*path*/ p: pathString };\n\n const action = 'q';\n\n // Only bother to send query if it's non-default.\n if (listenSpec.tag) {\n req['q'] = query._queryObject;\n req['t'] = listenSpec.tag;\n }\n\n req[/*hash*/ 'h'] = listenSpec.hashFn();\n\n this.sendRequest(action, req, (message: { [k: string]: unknown }) => {\n const payload: unknown = message[/*data*/ 'd'];\n const status = message[/*status*/ 's'] as string;\n\n // print warnings in any case...\n PersistentConnection.warnOnListenWarnings_(payload, query);\n\n const currentListenSpec =\n this.listens.get(pathString) &&\n this.listens.get(pathString)!.get(queryId);\n // only trigger actions if the listen hasn't been removed and readded\n if (currentListenSpec === listenSpec) {\n this.log_('listen response', message);\n\n if (status !== 'ok') {\n this.removeListen_(pathString, queryId);\n }\n\n if (listenSpec.onComplete) {\n listenSpec.onComplete(status, payload);\n }\n }\n });\n }\n\n private static warnOnListenWarnings_(payload: unknown, query: QueryContext) {\n if (payload && typeof payload === 'object' && contains(payload, 'w')) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const warnings = safeGet(payload as any, 'w');\n if (Array.isArray(warnings) && ~warnings.indexOf('no_index')) {\n const indexSpec =\n '\".indexOn\": \"' + query._queryParams.getIndex().toString() + '\"';\n const indexPath = query._path.toString();\n warn(\n `Using an unspecified index. Your data will be downloaded and ` +\n `filtered on the client. Consider adding ${indexSpec} at ` +\n `${indexPath} to your security rules for better performance.`\n );\n }\n }\n }\n\n refreshAuthToken(token: string) {\n this.authToken_ = token;\n this.log_('Auth token refreshed');\n if (this.authToken_) {\n this.tryAuth();\n } else {\n //If we're connected we want to let the server know to unauthenticate us. If we're not connected, simply delete\n //the credential so we dont become authenticated next time we connect.\n if (this.connected_) {\n this.sendRequest('unauth', {}, () => {});\n }\n }\n\n this.reduceReconnectDelayIfAdminCredential_(token);\n }\n\n private reduceReconnectDelayIfAdminCredential_(credential: string) {\n // NOTE: This isn't intended to be bulletproof (a malicious developer can always just modify the client).\n // Additionally, we don't bother resetting the max delay back to the default if auth fails / expires.\n const isFirebaseSecret = credential && credential.length === 40;\n if (isFirebaseSecret || isAdmin(credential)) {\n this.log_(\n 'Admin auth credential detected. Reducing max reconnect time.'\n );\n this.maxReconnectDelay_ = RECONNECT_MAX_DELAY_FOR_ADMINS;\n }\n }\n\n refreshAppCheckToken(token: string | null) {\n this.appCheckToken_ = token;\n this.log_('App check token refreshed');\n if (this.appCheckToken_) {\n this.tryAppCheck();\n } else {\n //If we're connected we want to let the server know to unauthenticate us.\n //If we're not connected, simply delete the credential so we dont become\n // authenticated next time we connect.\n if (this.connected_) {\n this.sendRequest('unappeck', {}, () => {});\n }\n }\n }\n\n /**\n * Attempts to authenticate with the given credentials. If the authentication attempt fails, it's triggered like\n * a auth revoked (the connection is closed).\n */\n tryAuth() {\n if (this.connected_ && this.authToken_) {\n const token = this.authToken_;\n const authMethod = isValidFormat(token) ? 'auth' : 'gauth';\n const requestData: { [k: string]: unknown } = { cred: token };\n if (this.authOverride_ === null) {\n requestData['noauth'] = true;\n } else if (typeof this.authOverride_ === 'object') {\n requestData['authvar'] = this.authOverride_;\n }\n this.sendRequest(\n authMethod,\n requestData,\n (res: { [k: string]: unknown }) => {\n const status = res[/*status*/ 's'] as string;\n const data = (res[/*data*/ 'd'] as string) || 'error';\n\n if (this.authToken_ === token) {\n if (status === 'ok') {\n this.invalidAuthTokenCount_ = 0;\n } else {\n // Triggers reconnect and force refresh for auth token\n this.onAuthRevoked_(status, data);\n }\n }\n }\n );\n }\n }\n\n /**\n * Attempts to authenticate with the given token. If the authentication\n * attempt fails, it's triggered like the token was revoked (the connection is\n * closed).\n */\n tryAppCheck() {\n if (this.connected_ && this.appCheckToken_) {\n this.sendRequest(\n 'appcheck',\n { 'token': this.appCheckToken_ },\n (res: { [k: string]: unknown }) => {\n const status = res[/*status*/ 's'] as string;\n const data = (res[/*data*/ 'd'] as string) || 'error';\n if (status === 'ok') {\n this.invalidAppCheckTokenCount_ = 0;\n } else {\n this.onAppCheckRevoked_(status, data);\n }\n }\n );\n }\n }\n\n /**\n * @inheritDoc\n */\n unlisten(query: QueryContext, tag: number | null) {\n const pathString = query._path.toString();\n const queryId = query._queryIdentifier;\n\n this.log_('Unlisten called for ' + pathString + ' ' + queryId);\n\n assert(\n query._queryParams.isDefault() || !query._queryParams.loadsAllData(),\n 'unlisten() called for non-default but complete query'\n );\n const listen = this.removeListen_(pathString, queryId);\n if (listen && this.connected_) {\n this.sendUnlisten_(pathString, queryId, query._queryObject, tag);\n }\n }\n\n private sendUnlisten_(\n pathString: string,\n queryId: string,\n queryObj: object,\n tag: number | null\n ) {\n this.log_('Unlisten on ' + pathString + ' for ' + queryId);\n\n const req: { [k: string]: unknown } = { /*path*/ p: pathString };\n const action = 'n';\n // Only bother sending queryId if it's non-default.\n if (tag) {\n req['q'] = queryObj;\n req['t'] = tag;\n }\n\n this.sendRequest(action, req);\n }\n\n onDisconnectPut(\n pathString: string,\n data: unknown,\n onComplete?: (a: string, b: string) => void\n ) {\n this.initConnection_();\n\n if (this.connected_) {\n this.sendOnDisconnect_('o', pathString, data, onComplete);\n } else {\n this.onDisconnectRequestQueue_.push({\n pathString,\n action: 'o',\n data,\n onComplete\n });\n }\n }\n\n onDisconnectMerge(\n pathString: string,\n data: unknown,\n onComplete?: (a: string, b: string) => void\n ) {\n this.initConnection_();\n\n if (this.connected_) {\n this.sendOnDisconnect_('om', pathString, data, onComplete);\n } else {\n this.onDisconnectRequestQueue_.push({\n pathString,\n action: 'om',\n data,\n onComplete\n });\n }\n }\n\n onDisconnectCancel(\n pathString: string,\n onComplete?: (a: string, b: string) => void\n ) {\n this.initConnection_();\n\n if (this.connected_) {\n this.sendOnDisconnect_('oc', pathString, null, onComplete);\n } else {\n this.onDisconnectRequestQueue_.push({\n pathString,\n action: 'oc',\n data: null,\n onComplete\n });\n }\n }\n\n private sendOnDisconnect_(\n action: string,\n pathString: string,\n data: unknown,\n onComplete: (a: string, b: string) => void\n ) {\n const request = { /*path*/ p: pathString, /*data*/ d: data };\n this.log_('onDisconnect ' + action, request);\n this.sendRequest(action, request, (response: { [k: string]: unknown }) => {\n if (onComplete) {\n setTimeout(() => {\n onComplete(\n response[/*status*/ 's'] as string,\n response[/* data */ 'd'] as string\n );\n }, Math.floor(0));\n }\n });\n }\n\n put(\n pathString: string,\n data: unknown,\n onComplete?: (a: string, b: string) => void,\n hash?: string\n ) {\n this.putInternal('p', pathString, data, onComplete, hash);\n }\n\n merge(\n pathString: string,\n data: unknown,\n onComplete: (a: string, b: string | null) => void,\n hash?: string\n ) {\n this.putInternal('m', pathString, data, onComplete, hash);\n }\n\n putInternal(\n action: string,\n pathString: string,\n data: unknown,\n onComplete: (a: string, b: string | null) => void,\n hash?: string\n ) {\n this.initConnection_();\n\n const request: { [k: string]: unknown } = {\n /*path*/ p: pathString,\n /*data*/ d: data\n };\n\n if (hash !== undefined) {\n request[/*hash*/ 'h'] = hash;\n }\n\n // TODO: Only keep track of the most recent put for a given path?\n this.outstandingPuts_.push({\n action,\n request,\n onComplete\n });\n\n this.outstandingPutCount_++;\n const index = this.outstandingPuts_.length - 1;\n\n if (this.connected_) {\n this.sendPut_(index);\n } else {\n this.log_('Buffering put: ' + pathString);\n }\n }\n\n private sendPut_(index: number) {\n const action = this.outstandingPuts_[index].action;\n const request = this.outstandingPuts_[index].request;\n const onComplete = this.outstandingPuts_[index].onComplete;\n this.outstandingPuts_[index].queued = this.connected_;\n\n this.sendRequest(action, request, (message: { [k: string]: unknown }) => {\n this.log_(action + ' response', message);\n\n delete this.outstandingPuts_[index];\n this.outstandingPutCount_--;\n\n // Clean up array occasionally.\n if (this.outstandingPutCount_ === 0) {\n this.outstandingPuts_ = [];\n }\n\n if (onComplete) {\n onComplete(\n message[/*status*/ 's'] as string,\n message[/* data */ 'd'] as string\n );\n }\n });\n }\n\n reportStats(stats: { [k: string]: unknown }) {\n // If we're not connected, we just drop the stats.\n if (this.connected_) {\n const request = { /*counters*/ c: stats };\n this.log_('reportStats', request);\n\n this.sendRequest(/*stats*/ 's', request, result => {\n const status = result[/*status*/ 's'];\n if (status !== 'ok') {\n const errorReason = result[/* data */ 'd'];\n this.log_('reportStats', 'Error sending stats: ' + errorReason);\n }\n });\n }\n }\n\n private onDataMessage_(message: { [k: string]: unknown }) {\n if ('r' in message) {\n // this is a response\n this.log_('from server: ' + stringify(message));\n const reqNum = message['r'] as string;\n const onResponse = this.requestCBHash_[reqNum];\n if (onResponse) {\n delete this.requestCBHash_[reqNum];\n onResponse(message[/*body*/ 'b']);\n }\n } else if ('error' in message) {\n throw 'A server-side error has occurred: ' + message['error'];\n } else if ('a' in message) {\n // a and b are action and body, respectively\n this.onDataPush_(message['a'] as string, message['b'] as {});\n }\n }\n\n private onDataPush_(action: string, body: { [k: string]: unknown }) {\n this.log_('handleServerMessage', action, body);\n if (action === 'd') {\n this.onDataUpdate_(\n body[/*path*/ 'p'] as string,\n body[/*data*/ 'd'],\n /*isMerge*/ false,\n body['t'] as number\n );\n } else if (action === 'm') {\n this.onDataUpdate_(\n body[/*path*/ 'p'] as string,\n body[/*data*/ 'd'],\n /*isMerge=*/ true,\n body['t'] as number\n );\n } else if (action === 'c') {\n this.onListenRevoked_(\n body[/*path*/ 'p'] as string,\n body[/*query*/ 'q'] as unknown[]\n );\n } else if (action === 'ac') {\n this.onAuthRevoked_(\n body[/*status code*/ 's'] as string,\n body[/* explanation */ 'd'] as string\n );\n } else if (action === 'apc') {\n this.onAppCheckRevoked_(\n body[/*status code*/ 's'] as string,\n body[/* explanation */ 'd'] as string\n );\n } else if (action === 'sd') {\n this.onSecurityDebugPacket_(body);\n } else {\n error(\n 'Unrecognized action received from server: ' +\n stringify(action) +\n '\\nAre you using the latest client?'\n );\n }\n }\n\n private onReady_(timestamp: number, sessionId: string) {\n this.log_('connection ready');\n this.connected_ = true;\n this.lastConnectionEstablishedTime_ = new Date().getTime();\n this.handleTimestamp_(timestamp);\n this.lastSessionId = sessionId;\n if (this.firstConnection_) {\n this.sendConnectStats_();\n }\n this.restoreState_();\n this.firstConnection_ = false;\n this.onConnectStatus_(true);\n }\n\n private scheduleConnect_(timeout: number) {\n assert(\n !this.realtime_,\n \"Scheduling a connect when we're already connected/ing?\"\n );\n\n if (this.establishConnectionTimer_) {\n clearTimeout(this.establishConnectionTimer_);\n }\n\n // NOTE: Even when timeout is 0, it's important to do a setTimeout to work around an infuriating \"Security Error\" in\n // Firefox when trying to write to our long-polling iframe in some scenarios (e.g. Forge or our unit tests).\n\n this.establishConnectionTimer_ = setTimeout(() => {\n this.establishConnectionTimer_ = null;\n this.establishConnection_();\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n }, Math.floor(timeout)) as any;\n }\n\n private initConnection_() {\n if (!this.realtime_ && this.firstConnection_) {\n this.scheduleConnect_(0);\n }\n }\n\n private onVisible_(visible: boolean) {\n // NOTE: Tabbing away and back to a window will defeat our reconnect backoff, but I think that's fine.\n if (\n visible &&\n !this.visible_ &&\n this.reconnectDelay_ === this.maxReconnectDelay_\n ) {\n this.log_('Window became visible. Reducing delay.');\n this.reconnectDelay_ = RECONNECT_MIN_DELAY;\n\n if (!this.realtime_) {\n this.scheduleConnect_(0);\n }\n }\n this.visible_ = visible;\n }\n\n private onOnline_(online: boolean) {\n if (online) {\n this.log_('Browser went online.');\n this.reconnectDelay_ = RECONNECT_MIN_DELAY;\n if (!this.realtime_) {\n this.scheduleConnect_(0);\n }\n } else {\n this.log_('Browser went offline. Killing connection.');\n if (this.realtime_) {\n this.realtime_.close();\n }\n }\n }\n\n private onRealtimeDisconnect_() {\n this.log_('data client disconnected');\n this.connected_ = false;\n this.realtime_ = null;\n\n // Since we don't know if our sent transactions succeeded or not, we need to cancel them.\n this.cancelSentTransactions_();\n\n // Clear out the pending requests.\n this.requestCBHash_ = {};\n\n if (this.shouldReconnect_()) {\n if (!this.visible_) {\n this.log_(\"Window isn't visible. Delaying reconnect.\");\n this.reconnectDelay_ = this.maxReconnectDelay_;\n this.lastConnectionAttemptTime_ = new Date().getTime();\n } else if (this.lastConnectionEstablishedTime_) {\n // If we've been connected long enough, reset reconnect delay to minimum.\n const timeSinceLastConnectSucceeded =\n new Date().getTime() - this.lastConnectionEstablishedTime_;\n if (timeSinceLastConnectSucceeded > RECONNECT_DELAY_RESET_TIMEOUT) {\n this.reconnectDelay_ = RECONNECT_MIN_DELAY;\n }\n this.lastConnectionEstablishedTime_ = null;\n }\n\n const timeSinceLastConnectAttempt =\n new Date().getTime() - this.lastConnectionAttemptTime_;\n let reconnectDelay = Math.max(\n 0,\n this.reconnectDelay_ - timeSinceLastConnectAttempt\n );\n reconnectDelay = Math.random() * reconnectDelay;\n\n this.log_('Trying to reconnect in ' + reconnectDelay + 'ms');\n this.scheduleConnect_(reconnectDelay);\n\n // Adjust reconnect delay for next time.\n this.reconnectDelay_ = Math.min(\n this.maxReconnectDelay_,\n this.reconnectDelay_ * RECONNECT_DELAY_MULTIPLIER\n );\n }\n this.onConnectStatus_(false);\n }\n\n private async establishConnection_() {\n if (this.shouldReconnect_()) {\n this.log_('Making a connection attempt');\n this.lastConnectionAttemptTime_ = new Date().getTime();\n this.lastConnectionEstablishedTime_ = null;\n const onDataMessage = this.onDataMessage_.bind(this);\n const onReady = this.onReady_.bind(this);\n const onDisconnect = this.onRealtimeDisconnect_.bind(this);\n const connId = this.id + ':' + PersistentConnection.nextConnectionId_++;\n const lastSessionId = this.lastSessionId;\n let canceled = false;\n let connection: Connection | null = null;\n const closeFn = function () {\n if (connection) {\n connection.close();\n } else {\n canceled = true;\n onDisconnect();\n }\n };\n const sendRequestFn = function (msg: object) {\n assert(\n connection,\n \"sendRequest call when we're not connected not allowed.\"\n );\n connection.sendRequest(msg);\n };\n\n this.realtime_ = {\n close: closeFn,\n sendRequest: sendRequestFn\n };\n\n const forceRefresh = this.forceTokenRefresh_;\n this.forceTokenRefresh_ = false;\n\n try {\n // First fetch auth and app check token, and establish connection after\n // fetching the token was successful\n const [authToken, appCheckToken] = await Promise.all([\n this.authTokenProvider_.getToken(forceRefresh),\n this.appCheckTokenProvider_.getToken(forceRefresh)\n ]);\n\n if (!canceled) {\n log('getToken() completed. Creating connection.');\n this.authToken_ = authToken && authToken.accessToken;\n this.appCheckToken_ = appCheckToken && appCheckToken.token;\n connection = new Connection(\n connId,\n this.repoInfo_,\n this.applicationId_,\n this.appCheckToken_,\n this.authToken_,\n onDataMessage,\n onReady,\n onDisconnect,\n /* onKill= */ reason => {\n warn(reason + ' (' + this.repoInfo_.toString() + ')');\n this.interrupt(SERVER_KILL_INTERRUPT_REASON);\n },\n lastSessionId\n );\n } else {\n log('getToken() completed but was canceled');\n }\n } catch (error) {\n this.log_('Failed to get token: ' + error);\n if (!canceled) {\n if (this.repoInfo_.nodeAdmin) {\n // This may be a critical error for the Admin Node.js SDK, so log a warning.\n // But getToken() may also just have temporarily failed, so we still want to\n // continue retrying.\n warn(error);\n }\n closeFn();\n }\n }\n }\n }\n\n interrupt(reason: string) {\n log('Interrupting connection for reason: ' + reason);\n this.interruptReasons_[reason] = true;\n if (this.realtime_) {\n this.realtime_.close();\n } else {\n if (this.establishConnectionTimer_) {\n clearTimeout(this.establishConnectionTimer_);\n this.establishConnectionTimer_ = null;\n }\n if (this.connected_) {\n this.onRealtimeDisconnect_();\n }\n }\n }\n\n resume(reason: string) {\n log('Resuming connection for reason: ' + reason);\n delete this.interruptReasons_[reason];\n if (isEmpty(this.interruptReasons_)) {\n this.reconnectDelay_ = RECONNECT_MIN_DELAY;\n if (!this.realtime_) {\n this.scheduleConnect_(0);\n }\n }\n }\n\n private handleTimestamp_(timestamp: number) {\n const delta = timestamp - new Date().getTime();\n this.onServerInfoUpdate_({ serverTimeOffset: delta });\n }\n\n private cancelSentTransactions_() {\n for (let i = 0; i < this.outstandingPuts_.length; i++) {\n const put = this.outstandingPuts_[i];\n if (put && /*hash*/ 'h' in put.request && put.queued) {\n if (put.onComplete) {\n put.onComplete('disconnect');\n }\n\n delete this.outstandingPuts_[i];\n this.outstandingPutCount_--;\n }\n }\n\n // Clean up array occasionally.\n if (this.outstandingPutCount_ === 0) {\n this.outstandingPuts_ = [];\n }\n }\n\n private onListenRevoked_(pathString: string, query?: unknown[]) {\n // Remove the listen and manufacture a \"permission_denied\" error for the failed listen.\n let queryId;\n if (!query) {\n queryId = 'default';\n } else {\n queryId = query.map(q => ObjectToUniqueKey(q)).join('$');\n }\n const listen = this.removeListen_(pathString, queryId);\n if (listen && listen.onComplete) {\n listen.onComplete('permission_denied');\n }\n }\n\n private removeListen_(pathString: string, queryId: string): ListenSpec {\n const normalizedPathString = new Path(pathString).toString(); // normalize path.\n let listen;\n if (this.listens.has(normalizedPathString)) {\n const map = this.listens.get(normalizedPathString)!;\n listen = map.get(queryId);\n map.delete(queryId);\n if (map.size === 0) {\n this.listens.delete(normalizedPathString);\n }\n } else {\n // all listens for this path has already been removed\n listen = undefined;\n }\n return listen;\n }\n\n private onAuthRevoked_(statusCode: string, explanation: string) {\n log('Auth token revoked: ' + statusCode + '/' + explanation);\n this.authToken_ = null;\n this.forceTokenRefresh_ = true;\n this.realtime_.close();\n if (statusCode === 'invalid_token' || statusCode === 'permission_denied') {\n // We'll wait a couple times before logging the warning / increasing the\n // retry period since oauth tokens will report as \"invalid\" if they're\n // just expired. Plus there may be transient issues that resolve themselves.\n this.invalidAuthTokenCount_++;\n if (this.invalidAuthTokenCount_ >= INVALID_TOKEN_THRESHOLD) {\n // Set a long reconnect delay because recovery is unlikely\n this.reconnectDelay_ = RECONNECT_MAX_DELAY_FOR_ADMINS;\n\n // Notify the auth token provider that the token is invalid, which will log\n // a warning\n this.authTokenProvider_.notifyForInvalidToken();\n }\n }\n }\n\n private onAppCheckRevoked_(statusCode: string, explanation: string) {\n log('App check token revoked: ' + statusCode + '/' + explanation);\n this.appCheckToken_ = null;\n this.forceTokenRefresh_ = true;\n // Note: We don't close the connection as the developer may not have\n // enforcement enabled. The backend closes connections with enforcements.\n if (statusCode === 'invalid_token' || statusCode === 'permission_denied') {\n // We'll wait a couple times before logging the warning / increasing the\n // retry period since oauth tokens will report as \"invalid\" if they're\n // just expired. Plus there may be transient issues that resolve themselves.\n this.invalidAppCheckTokenCount_++;\n if (this.invalidAppCheckTokenCount_ >= INVALID_TOKEN_THRESHOLD) {\n this.appCheckTokenProvider_.notifyForInvalidToken();\n }\n }\n }\n\n private onSecurityDebugPacket_(body: { [k: string]: unknown }) {\n if (this.securityDebugCallback_) {\n this.securityDebugCallback_(body);\n } else {\n if ('msg' in body) {\n console.log(\n 'FIREBASE: ' + (body['msg'] as string).replace('\\n', '\\nFIREBASE: ')\n );\n }\n }\n }\n\n private restoreState_() {\n //Re-authenticate ourselves if we have a credential stored.\n this.tryAuth();\n this.tryAppCheck();\n\n // Puts depend on having received the corresponding data update from the server before they complete, so we must\n // make sure to send listens before puts.\n for (const queries of this.listens.values()) {\n for (const listenSpec of queries.values()) {\n this.sendListen_(listenSpec);\n }\n }\n\n for (let i = 0; i < this.outstandingPuts_.length; i++) {\n if (this.outstandingPuts_[i]) {\n this.sendPut_(i);\n }\n }\n\n while (this.onDisconnectRequestQueue_.length) {\n const request = this.onDisconnectRequestQueue_.shift();\n this.sendOnDisconnect_(\n request.action,\n request.pathString,\n request.data,\n request.onComplete\n );\n }\n\n for (let i = 0; i < this.outstandingGets_.length; i++) {\n if (this.outstandingGets_[i]) {\n this.sendGet_(i);\n }\n }\n }\n\n /**\n * Sends client stats for first connection\n */\n private sendConnectStats_() {\n const stats: { [k: string]: number } = {};\n\n let clientName = 'js';\n if (isNodeSdk()) {\n if (this.repoInfo_.nodeAdmin) {\n clientName = 'admin_node';\n } else {\n clientName = 'node';\n }\n }\n\n stats['sdk.' + clientName + '.' + SDK_VERSION.replace(/\\./g, '-')] = 1;\n\n if (isMobileCordova()) {\n stats['framework.cordova'] = 1;\n } else if (isReactNative()) {\n stats['framework.reactnative'] = 1;\n }\n this.reportStats(stats);\n }\n\n private shouldReconnect_(): boolean {\n const online = OnlineMonitor.getInstance().currentlyOnline();\n return isEmpty(this.interruptReasons_) && online;\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Path } from '../util/Path';\n\nimport { Index } from './indexes/Index';\n\n/**\n * Node is an interface defining the common functionality for nodes in\n * a DataSnapshot.\n *\n * @interface\n */\nexport interface Node {\n /**\n * Whether this node is a leaf node.\n * @returns Whether this is a leaf node.\n */\n isLeafNode(): boolean;\n\n /**\n * Gets the priority of the node.\n * @returns The priority of the node.\n */\n getPriority(): Node;\n\n /**\n * Returns a duplicate node with the new priority.\n * @param newPriorityNode - New priority to set for the node.\n * @returns Node with new priority.\n */\n updatePriority(newPriorityNode: Node): Node;\n\n /**\n * Returns the specified immediate child, or null if it doesn't exist.\n * @param childName - The name of the child to retrieve.\n * @returns The retrieved child, or an empty node.\n */\n getImmediateChild(childName: string): Node;\n\n /**\n * Returns a child by path, or null if it doesn't exist.\n * @param path - The path of the child to retrieve.\n * @returns The retrieved child or an empty node.\n */\n getChild(path: Path): Node;\n\n /**\n * Returns the name of the child immediately prior to the specified childNode, or null.\n * @param childName - The name of the child to find the predecessor of.\n * @param childNode - The node to find the predecessor of.\n * @param index - The index to use to determine the predecessor\n * @returns The name of the predecessor child, or null if childNode is the first child.\n */\n getPredecessorChildName(\n childName: string,\n childNode: Node,\n index: Index\n ): string | null;\n\n /**\n * Returns a duplicate node, with the specified immediate child updated.\n * Any value in the node will be removed.\n * @param childName - The name of the child to update.\n * @param newChildNode - The new child node\n * @returns The updated node.\n */\n updateImmediateChild(childName: string, newChildNode: Node): Node;\n\n /**\n * Returns a duplicate node, with the specified child updated. Any value will\n * be removed.\n * @param path - The path of the child to update.\n * @param newChildNode - The new child node, which may be an empty node\n * @returns The updated node.\n */\n updateChild(path: Path, newChildNode: Node): Node;\n\n /**\n * True if the immediate child specified exists\n */\n hasChild(childName: string): boolean;\n\n /**\n * @returns True if this node has no value or children.\n */\n isEmpty(): boolean;\n\n /**\n * @returns The number of children of this node.\n */\n numChildren(): number;\n\n /**\n * Calls action for each child.\n * @param action - Action to be called for\n * each child. It's passed the child name and the child node.\n * @returns The first truthy value return by action, or the last falsey one\n */\n forEachChild(index: Index, action: (a: string, b: Node) => void): unknown;\n\n /**\n * @param exportFormat - True for export format (also wire protocol format).\n * @returns Value of this node as JSON.\n */\n val(exportFormat?: boolean): unknown;\n\n /**\n * @returns hash representing the node contents.\n */\n hash(): string;\n\n /**\n * @param other - Another node\n * @returns -1 for less than, 0 for equal, 1 for greater than other\n */\n compareTo(other: Node): number;\n\n /**\n * @returns Whether or not this snapshot equals other\n */\n equals(other: Node): boolean;\n\n /**\n * @returns This node, with the specified index now available\n */\n withIndex(indexDefinition: Index): Node;\n\n isIndexed(indexDefinition: Index): boolean;\n}\n\nexport class NamedNode {\n constructor(public name: string, public node: Node) {}\n\n static Wrap(name: string, node: Node) {\n return new NamedNode(name, node);\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Comparator } from '../../util/SortedMap';\nimport { MIN_NAME } from '../../util/util';\nimport { Node, NamedNode } from '../Node';\n\nexport abstract class Index {\n abstract compare(a: NamedNode, b: NamedNode): number;\n\n abstract isDefinedOn(node: Node): boolean;\n\n /**\n * @returns A standalone comparison function for\n * this index\n */\n getCompare(): Comparator<NamedNode> {\n return this.compare.bind(this);\n }\n\n /**\n * Given a before and after value for a node, determine if the indexed value has changed. Even if they are different,\n * it's possible that the changes are isolated to parts of the snapshot that are not indexed.\n *\n *\n * @returns True if the portion of the snapshot being indexed changed between oldNode and newNode\n */\n indexedValueChanged(oldNode: Node, newNode: Node): boolean {\n const oldWrapped = new NamedNode(MIN_NAME, oldNode);\n const newWrapped = new NamedNode(MIN_NAME, newNode);\n return this.compare(oldWrapped, newWrapped) !== 0;\n }\n\n /**\n * @returns a node wrapper that will sort equal to or less than\n * any other node wrapper, using this index\n */\n minPost(): NamedNode {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n return (NamedNode as any).MIN;\n }\n\n /**\n * @returns a node wrapper that will sort greater than or equal to\n * any other node wrapper, using this index\n */\n abstract maxPost(): NamedNode;\n\n abstract makePost(indexValue: unknown, name: string): NamedNode;\n\n /**\n * @returns String representation for inclusion in a query spec\n */\n abstract toString(): string;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert, assertionError } from '@firebase/util';\n\nimport { nameCompare, MAX_NAME } from '../../util/util';\nimport { ChildrenNode } from '../ChildrenNode';\nimport { Node, NamedNode } from '../Node';\n\nimport { Index } from './Index';\n\nlet __EMPTY_NODE: ChildrenNode;\n\nexport class KeyIndex extends Index {\n static get __EMPTY_NODE() {\n return __EMPTY_NODE;\n }\n\n static set __EMPTY_NODE(val) {\n __EMPTY_NODE = val;\n }\n compare(a: NamedNode, b: NamedNode): number {\n return nameCompare(a.name, b.name);\n }\n isDefinedOn(node: Node): boolean {\n // We could probably return true here (since every node has a key), but it's never called\n // so just leaving unimplemented for now.\n throw assertionError('KeyIndex.isDefinedOn not expected to be called.');\n }\n indexedValueChanged(oldNode: Node, newNode: Node): boolean {\n return false; // The key for a node never changes.\n }\n minPost() {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n return (NamedNode as any).MIN;\n }\n maxPost(): NamedNode {\n // TODO: This should really be created once and cached in a static property, but\n // NamedNode isn't defined yet, so I can't use it in a static. Bleh.\n return new NamedNode(MAX_NAME, __EMPTY_NODE);\n }\n\n makePost(indexValue: string, name: string): NamedNode {\n assert(\n typeof indexValue === 'string',\n 'KeyIndex indexValue must always be a string.'\n );\n // We just use empty node, but it'll never be compared, since our comparator only looks at name.\n return new NamedNode(indexValue, __EMPTY_NODE);\n }\n\n /**\n * @returns String representation for inclusion in a query spec\n */\n toString(): string {\n return '.key';\n }\n}\n\nexport const KEY_INDEX = new KeyIndex();\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * @fileoverview Implementation of an immutable SortedMap using a Left-leaning\n * Red-Black Tree, adapted from the implementation in Mugs\n * (http://mads379.github.com/mugs/) by Mads Hartmann Jensen\n * (mads379\\@gmail.com).\n *\n * Original paper on Left-leaning Red-Black Trees:\n * http://www.cs.princeton.edu/~rs/talks/LLRB/LLRB.pdf\n *\n * Invariant 1: No red node has a red child\n * Invariant 2: Every leaf path has the same number of black nodes\n * Invariant 3: Only the left child can be red (left leaning)\n */\n\n// TODO: There are some improvements I'd like to make to improve memory / perf:\n// * Create two prototypes, LLRedNode and LLBlackNode, instead of storing a\n// color property in every node.\n// TODO: It would also be good (and possibly necessary) to create a base\n// interface for LLRBNode and LLRBEmptyNode.\n\nexport type Comparator<K> = (key1: K, key2: K) => number;\n\n/**\n * An iterator over an LLRBNode.\n */\nexport class SortedMapIterator<K, V, T> {\n private nodeStack_: Array<LLRBNode<K, V> | LLRBEmptyNode<K, V>> = [];\n\n /**\n * @param node - Node to iterate.\n * @param isReverse_ - Whether or not to iterate in reverse\n */\n constructor(\n node: LLRBNode<K, V> | LLRBEmptyNode<K, V>,\n startKey: K | null,\n comparator: Comparator<K>,\n private isReverse_: boolean,\n private resultGenerator_: ((k: K, v: V) => T) | null = null\n ) {\n let cmp = 1;\n while (!node.isEmpty()) {\n node = node as LLRBNode<K, V>;\n cmp = startKey ? comparator(node.key, startKey) : 1;\n // flip the comparison if we're going in reverse\n if (isReverse_) {\n cmp *= -1;\n }\n\n if (cmp < 0) {\n // This node is less than our start key. ignore it\n if (this.isReverse_) {\n node = node.left;\n } else {\n node = node.right;\n }\n } else if (cmp === 0) {\n // This node is exactly equal to our start key. Push it on the stack, but stop iterating;\n this.nodeStack_.push(node);\n break;\n } else {\n // This node is greater than our start key, add it to the stack and move to the next one\n this.nodeStack_.push(node);\n if (this.isReverse_) {\n node = node.right;\n } else {\n node = node.left;\n }\n }\n }\n }\n\n getNext(): T {\n if (this.nodeStack_.length === 0) {\n return null;\n }\n\n let node = this.nodeStack_.pop();\n let result: T;\n if (this.resultGenerator_) {\n result = this.resultGenerator_(node.key, node.value);\n } else {\n result = { key: node.key, value: node.value } as unknown as T;\n }\n\n if (this.isReverse_) {\n node = node.left;\n while (!node.isEmpty()) {\n this.nodeStack_.push(node);\n node = node.right;\n }\n } else {\n node = node.right;\n while (!node.isEmpty()) {\n this.nodeStack_.push(node);\n node = node.left;\n }\n }\n\n return result;\n }\n\n hasNext(): boolean {\n return this.nodeStack_.length > 0;\n }\n\n peek(): T {\n if (this.nodeStack_.length === 0) {\n return null;\n }\n\n const node = this.nodeStack_[this.nodeStack_.length - 1];\n if (this.resultGenerator_) {\n return this.resultGenerator_(node.key, node.value);\n } else {\n return { key: node.key, value: node.value } as unknown as T;\n }\n }\n}\n\n/**\n * Represents a node in a Left-leaning Red-Black tree.\n */\nexport class LLRBNode<K, V> {\n color: boolean;\n left: LLRBNode<K, V> | LLRBEmptyNode<K, V>;\n right: LLRBNode<K, V> | LLRBEmptyNode<K, V>;\n\n /**\n * @param key - Key associated with this node.\n * @param value - Value associated with this node.\n * @param color - Whether this node is red.\n * @param left - Left child.\n * @param right - Right child.\n */\n constructor(\n public key: K,\n public value: V,\n color: boolean | null,\n left?: LLRBNode<K, V> | LLRBEmptyNode<K, V> | null,\n right?: LLRBNode<K, V> | LLRBEmptyNode<K, V> | null\n ) {\n this.color = color != null ? color : LLRBNode.RED;\n this.left =\n left != null ? left : (SortedMap.EMPTY_NODE as LLRBEmptyNode<K, V>);\n this.right =\n right != null ? right : (SortedMap.EMPTY_NODE as LLRBEmptyNode<K, V>);\n }\n\n static RED = true;\n static BLACK = false;\n\n /**\n * Returns a copy of the current node, optionally replacing pieces of it.\n *\n * @param key - New key for the node, or null.\n * @param value - New value for the node, or null.\n * @param color - New color for the node, or null.\n * @param left - New left child for the node, or null.\n * @param right - New right child for the node, or null.\n * @returns The node copy.\n */\n copy(\n key: K | null,\n value: V | null,\n color: boolean | null,\n left: LLRBNode<K, V> | LLRBEmptyNode<K, V> | null,\n right: LLRBNode<K, V> | LLRBEmptyNode<K, V> | null\n ): LLRBNode<K, V> {\n return new LLRBNode(\n key != null ? key : this.key,\n value != null ? value : this.value,\n color != null ? color : this.color,\n left != null ? left : this.left,\n right != null ? right : this.right\n );\n }\n\n /**\n * @returns The total number of nodes in the tree.\n */\n count(): number {\n return this.left.count() + 1 + this.right.count();\n }\n\n /**\n * @returns True if the tree is empty.\n */\n isEmpty(): boolean {\n return false;\n }\n\n /**\n * Traverses the tree in key order and calls the specified action function\n * for each node.\n *\n * @param action - Callback function to be called for each\n * node. If it returns true, traversal is aborted.\n * @returns The first truthy value returned by action, or the last falsey\n * value returned by action\n */\n inorderTraversal(action: (k: K, v: V) => unknown): boolean {\n return (\n this.left.inorderTraversal(action) ||\n !!action(this.key, this.value) ||\n this.right.inorderTraversal(action)\n );\n }\n\n /**\n * Traverses the tree in reverse key order and calls the specified action function\n * for each node.\n *\n * @param action - Callback function to be called for each\n * node. If it returns true, traversal is aborted.\n * @returns True if traversal was aborted.\n */\n reverseTraversal(action: (k: K, v: V) => void): boolean {\n return (\n this.right.reverseTraversal(action) ||\n action(this.key, this.value) ||\n this.left.reverseTraversal(action)\n );\n }\n\n /**\n * @returns The minimum node in the tree.\n */\n private min_(): LLRBNode<K, V> {\n if (this.left.isEmpty()) {\n return this;\n } else {\n return (this.left as LLRBNode<K, V>).min_();\n }\n }\n\n /**\n * @returns The maximum key in the tree.\n */\n minKey(): K {\n return this.min_().key;\n }\n\n /**\n * @returns The maximum key in the tree.\n */\n maxKey(): K {\n if (this.right.isEmpty()) {\n return this.key;\n } else {\n return this.right.maxKey();\n }\n }\n\n /**\n * @param key - Key to insert.\n * @param value - Value to insert.\n * @param comparator - Comparator.\n * @returns New tree, with the key/value added.\n */\n insert(key: K, value: V, comparator: Comparator<K>): LLRBNode<K, V> {\n let n: LLRBNode<K, V> = this;\n const cmp = comparator(key, n.key);\n if (cmp < 0) {\n n = n.copy(null, null, null, n.left.insert(key, value, comparator), null);\n } else if (cmp === 0) {\n n = n.copy(null, value, null, null, null);\n } else {\n n = n.copy(\n null,\n null,\n null,\n null,\n n.right.insert(key, value, comparator)\n );\n }\n return n.fixUp_();\n }\n\n /**\n * @returns New tree, with the minimum key removed.\n */\n private removeMin_(): LLRBNode<K, V> | LLRBEmptyNode<K, V> {\n if (this.left.isEmpty()) {\n return SortedMap.EMPTY_NODE as LLRBEmptyNode<K, V>;\n }\n let n: LLRBNode<K, V> = this;\n if (!n.left.isRed_() && !n.left.left.isRed_()) {\n n = n.moveRedLeft_();\n }\n n = n.copy(null, null, null, (n.left as LLRBNode<K, V>).removeMin_(), null);\n return n.fixUp_();\n }\n\n /**\n * @param key - The key of the item to remove.\n * @param comparator - Comparator.\n * @returns New tree, with the specified item removed.\n */\n remove(\n key: K,\n comparator: Comparator<K>\n ): LLRBNode<K, V> | LLRBEmptyNode<K, V> {\n let n, smallest;\n n = this;\n if (comparator(key, n.key) < 0) {\n if (!n.left.isEmpty() && !n.left.isRed_() && !n.left.left.isRed_()) {\n n = n.moveRedLeft_();\n }\n n = n.copy(null, null, null, n.left.remove(key, comparator), null);\n } else {\n if (n.left.isRed_()) {\n n = n.rotateRight_();\n }\n if (!n.right.isEmpty() && !n.right.isRed_() && !n.right.left.isRed_()) {\n n = n.moveRedRight_();\n }\n if (comparator(key, n.key) === 0) {\n if (n.right.isEmpty()) {\n return SortedMap.EMPTY_NODE as LLRBEmptyNode<K, V>;\n } else {\n smallest = (n.right as LLRBNode<K, V>).min_();\n n = n.copy(\n smallest.key,\n smallest.value,\n null,\n null,\n (n.right as LLRBNode<K, V>).removeMin_()\n );\n }\n }\n n = n.copy(null, null, null, null, n.right.remove(key, comparator));\n }\n return n.fixUp_();\n }\n\n /**\n * @returns Whether this is a RED node.\n */\n isRed_(): boolean {\n return this.color;\n }\n\n /**\n * @returns New tree after performing any needed rotations.\n */\n private fixUp_(): LLRBNode<K, V> {\n let n: LLRBNode<K, V> = this;\n if (n.right.isRed_() && !n.left.isRed_()) {\n n = n.rotateLeft_();\n }\n if (n.left.isRed_() && n.left.left.isRed_()) {\n n = n.rotateRight_();\n }\n if (n.left.isRed_() && n.right.isRed_()) {\n n = n.colorFlip_();\n }\n return n;\n }\n\n /**\n * @returns New tree, after moveRedLeft.\n */\n private moveRedLeft_(): LLRBNode<K, V> {\n let n = this.colorFlip_();\n if (n.right.left.isRed_()) {\n n = n.copy(\n null,\n null,\n null,\n null,\n (n.right as LLRBNode<K, V>).rotateRight_()\n );\n n = n.rotateLeft_();\n n = n.colorFlip_();\n }\n return n;\n }\n\n /**\n * @returns New tree, after moveRedRight.\n */\n private moveRedRight_(): LLRBNode<K, V> {\n let n = this.colorFlip_();\n if (n.left.left.isRed_()) {\n n = n.rotateRight_();\n n = n.colorFlip_();\n }\n return n;\n }\n\n /**\n * @returns New tree, after rotateLeft.\n */\n private rotateLeft_(): LLRBNode<K, V> {\n const nl = this.copy(null, null, LLRBNode.RED, null, this.right.left);\n return this.right.copy(null, null, this.color, nl, null) as LLRBNode<K, V>;\n }\n\n /**\n * @returns New tree, after rotateRight.\n */\n private rotateRight_(): LLRBNode<K, V> {\n const nr = this.copy(null, null, LLRBNode.RED, this.left.right, null);\n return this.left.copy(null, null, this.color, null, nr) as LLRBNode<K, V>;\n }\n\n /**\n * @returns Newt ree, after colorFlip.\n */\n private colorFlip_(): LLRBNode<K, V> {\n const left = this.left.copy(null, null, !this.left.color, null, null);\n const right = this.right.copy(null, null, !this.right.color, null, null);\n return this.copy(null, null, !this.color, left, right);\n }\n\n /**\n * For testing.\n *\n * @returns True if all is well.\n */\n private checkMaxDepth_(): boolean {\n const blackDepth = this.check_();\n return Math.pow(2.0, blackDepth) <= this.count() + 1;\n }\n\n check_(): number {\n if (this.isRed_() && this.left.isRed_()) {\n throw new Error(\n 'Red node has red child(' + this.key + ',' + this.value + ')'\n );\n }\n if (this.right.isRed_()) {\n throw new Error(\n 'Right child of (' + this.key + ',' + this.value + ') is red'\n );\n }\n const blackDepth = this.left.check_();\n if (blackDepth !== this.right.check_()) {\n throw new Error('Black depths differ');\n } else {\n return blackDepth + (this.isRed_() ? 0 : 1);\n }\n }\n}\n\n/**\n * Represents an empty node (a leaf node in the Red-Black Tree).\n */\nexport class LLRBEmptyNode<K, V> {\n key: K;\n value: V;\n left: LLRBNode<K, V> | LLRBEmptyNode<K, V>;\n right: LLRBNode<K, V> | LLRBEmptyNode<K, V>;\n color: boolean;\n\n /**\n * Returns a copy of the current node.\n *\n * @returns The node copy.\n */\n copy(\n key: K | null,\n value: V | null,\n color: boolean | null,\n left: LLRBNode<K, V> | LLRBEmptyNode<K, V> | null,\n right: LLRBNode<K, V> | LLRBEmptyNode<K, V> | null\n ): LLRBEmptyNode<K, V> {\n return this;\n }\n\n /**\n * Returns a copy of the tree, with the specified key/value added.\n *\n * @param key - Key to be added.\n * @param value - Value to be added.\n * @param comparator - Comparator.\n * @returns New tree, with item added.\n */\n insert(key: K, value: V, comparator: Comparator<K>): LLRBNode<K, V> {\n return new LLRBNode(key, value, null);\n }\n\n /**\n * Returns a copy of the tree, with the specified key removed.\n *\n * @param key - The key to remove.\n * @param comparator - Comparator.\n * @returns New tree, with item removed.\n */\n remove(key: K, comparator: Comparator<K>): LLRBEmptyNode<K, V> {\n return this;\n }\n\n /**\n * @returns The total number of nodes in the tree.\n */\n count(): number {\n return 0;\n }\n\n /**\n * @returns True if the tree is empty.\n */\n isEmpty(): boolean {\n return true;\n }\n\n /**\n * Traverses the tree in key order and calls the specified action function\n * for each node.\n *\n * @param action - Callback function to be called for each\n * node. If it returns true, traversal is aborted.\n * @returns True if traversal was aborted.\n */\n inorderTraversal(action: (k: K, v: V) => unknown): boolean {\n return false;\n }\n\n /**\n * Traverses the tree in reverse key order and calls the specified action function\n * for each node.\n *\n * @param action - Callback function to be called for each\n * node. If it returns true, traversal is aborted.\n * @returns True if traversal was aborted.\n */\n reverseTraversal(action: (k: K, v: V) => void): boolean {\n return false;\n }\n\n minKey(): null {\n return null;\n }\n\n maxKey(): null {\n return null;\n }\n\n check_(): number {\n return 0;\n }\n\n /**\n * @returns Whether this node is red.\n */\n isRed_() {\n return false;\n }\n}\n\n/**\n * An immutable sorted map implementation, based on a Left-leaning Red-Black\n * tree.\n */\nexport class SortedMap<K, V> {\n /**\n * Always use the same empty node, to reduce memory.\n */\n static EMPTY_NODE = new LLRBEmptyNode();\n\n /**\n * @param comparator_ - Key comparator.\n * @param root_ - Optional root node for the map.\n */\n constructor(\n private comparator_: Comparator<K>,\n private root_:\n | LLRBNode<K, V>\n | LLRBEmptyNode<K, V> = SortedMap.EMPTY_NODE as LLRBEmptyNode<K, V>\n ) {}\n\n /**\n * Returns a copy of the map, with the specified key/value added or replaced.\n * (TODO: We should perhaps rename this method to 'put')\n *\n * @param key - Key to be added.\n * @param value - Value to be added.\n * @returns New map, with item added.\n */\n insert(key: K, value: V): SortedMap<K, V> {\n return new SortedMap(\n this.comparator_,\n this.root_\n .insert(key, value, this.comparator_)\n .copy(null, null, LLRBNode.BLACK, null, null)\n );\n }\n\n /**\n * Returns a copy of the map, with the specified key removed.\n *\n * @param key - The key to remove.\n * @returns New map, with item removed.\n */\n remove(key: K): SortedMap<K, V> {\n return new SortedMap(\n this.comparator_,\n this.root_\n .remove(key, this.comparator_)\n .copy(null, null, LLRBNode.BLACK, null, null)\n );\n }\n\n /**\n * Returns the value of the node with the given key, or null.\n *\n * @param key - The key to look up.\n * @returns The value of the node with the given key, or null if the\n * key doesn't exist.\n */\n get(key: K): V | null {\n let cmp;\n let node = this.root_;\n while (!node.isEmpty()) {\n cmp = this.comparator_(key, node.key);\n if (cmp === 0) {\n return node.value;\n } else if (cmp < 0) {\n node = node.left;\n } else if (cmp > 0) {\n node = node.right;\n }\n }\n return null;\n }\n\n /**\n * Returns the key of the item *before* the specified key, or null if key is the first item.\n * @param key - The key to find the predecessor of\n * @returns The predecessor key.\n */\n getPredecessorKey(key: K): K | null {\n let cmp,\n node = this.root_,\n rightParent = null;\n while (!node.isEmpty()) {\n cmp = this.comparator_(key, node.key);\n if (cmp === 0) {\n if (!node.left.isEmpty()) {\n node = node.left;\n while (!node.right.isEmpty()) {\n node = node.right;\n }\n return node.key;\n } else if (rightParent) {\n return rightParent.key;\n } else {\n return null; // first item.\n }\n } else if (cmp < 0) {\n node = node.left;\n } else if (cmp > 0) {\n rightParent = node;\n node = node.right;\n }\n }\n\n throw new Error(\n 'Attempted to find predecessor key for a nonexistent key. What gives?'\n );\n }\n\n /**\n * @returns True if the map is empty.\n */\n isEmpty(): boolean {\n return this.root_.isEmpty();\n }\n\n /**\n * @returns The total number of nodes in the map.\n */\n count(): number {\n return this.root_.count();\n }\n\n /**\n * @returns The minimum key in the map.\n */\n minKey(): K | null {\n return this.root_.minKey();\n }\n\n /**\n * @returns The maximum key in the map.\n */\n maxKey(): K | null {\n return this.root_.maxKey();\n }\n\n /**\n * Traverses the map in key order and calls the specified action function\n * for each key/value pair.\n *\n * @param action - Callback function to be called\n * for each key/value pair. If action returns true, traversal is aborted.\n * @returns The first truthy value returned by action, or the last falsey\n * value returned by action\n */\n inorderTraversal(action: (k: K, v: V) => unknown): boolean {\n return this.root_.inorderTraversal(action);\n }\n\n /**\n * Traverses the map in reverse key order and calls the specified action function\n * for each key/value pair.\n *\n * @param action - Callback function to be called\n * for each key/value pair. If action returns true, traversal is aborted.\n * @returns True if the traversal was aborted.\n */\n reverseTraversal(action: (k: K, v: V) => void): boolean {\n return this.root_.reverseTraversal(action);\n }\n\n /**\n * Returns an iterator over the SortedMap.\n * @returns The iterator.\n */\n getIterator<T>(\n resultGenerator?: (k: K, v: V) => T\n ): SortedMapIterator<K, V, T> {\n return new SortedMapIterator(\n this.root_,\n null,\n this.comparator_,\n false,\n resultGenerator\n );\n }\n\n getIteratorFrom<T>(\n key: K,\n resultGenerator?: (k: K, v: V) => T\n ): SortedMapIterator<K, V, T> {\n return new SortedMapIterator(\n this.root_,\n key,\n this.comparator_,\n false,\n resultGenerator\n );\n }\n\n getReverseIteratorFrom<T>(\n key: K,\n resultGenerator?: (k: K, v: V) => T\n ): SortedMapIterator<K, V, T> {\n return new SortedMapIterator(\n this.root_,\n key,\n this.comparator_,\n true,\n resultGenerator\n );\n }\n\n getReverseIterator<T>(\n resultGenerator?: (k: K, v: V) => T\n ): SortedMapIterator<K, V, T> {\n return new SortedMapIterator(\n this.root_,\n null,\n this.comparator_,\n true,\n resultGenerator\n );\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { nameCompare } from '../util/util';\n\nimport { NamedNode } from './Node';\n\nexport function NAME_ONLY_COMPARATOR(left: NamedNode, right: NamedNode) {\n return nameCompare(left.name, right.name);\n}\n\nexport function NAME_COMPARATOR(left: string, right: string) {\n return nameCompare(left, right);\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert, contains } from '@firebase/util';\n\nimport { Indexable } from '../util/misc';\nimport { doubleToIEEE754String } from '../util/util';\n\nimport { Node } from './Node';\n\nlet MAX_NODE: Node;\n\nexport function setMaxNode(val: Node) {\n MAX_NODE = val;\n}\n\nexport const priorityHashText = function (priority: string | number): string {\n if (typeof priority === 'number') {\n return 'number:' + doubleToIEEE754String(priority);\n } else {\n return 'string:' + priority;\n }\n};\n\n/**\n * Validates that a priority snapshot Node is valid.\n */\nexport const validatePriorityNode = function (priorityNode: Node) {\n if (priorityNode.isLeafNode()) {\n const val = priorityNode.val();\n assert(\n typeof val === 'string' ||\n typeof val === 'number' ||\n (typeof val === 'object' && contains(val as Indexable, '.sv')),\n 'Priority must be a string or number.'\n );\n } else {\n assert(\n priorityNode === MAX_NODE || priorityNode.isEmpty(),\n 'priority of unexpected type.'\n );\n }\n // Don't call getPriority() on MAX_NODE to avoid hitting assertion.\n assert(\n priorityNode === MAX_NODE || priorityNode.getPriority().isEmpty(),\n \"Priority nodes can't have a priority of their own.\"\n );\n};\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert } from '@firebase/util';\n\nimport { Indexable } from '../util/misc';\nimport {\n Path,\n pathGetFront,\n pathGetLength,\n pathIsEmpty,\n pathPopFront\n} from '../util/Path';\nimport { doubleToIEEE754String, sha1 } from '../util/util';\n\nimport { ChildrenNodeConstructor } from './ChildrenNode';\nimport { Index } from './indexes/Index';\nimport { Node } from './Node';\nimport { priorityHashText, validatePriorityNode } from './snap';\n\nlet __childrenNodeConstructor: ChildrenNodeConstructor;\n\n/**\n * LeafNode is a class for storing leaf nodes in a DataSnapshot. It\n * implements Node and stores the value of the node (a string,\n * number, or boolean) accessible via getValue().\n */\nexport class LeafNode implements Node {\n static set __childrenNodeConstructor(val: ChildrenNodeConstructor) {\n __childrenNodeConstructor = val;\n }\n\n static get __childrenNodeConstructor() {\n return __childrenNodeConstructor;\n }\n\n /**\n * The sort order for comparing leaf nodes of different types. If two leaf nodes have\n * the same type, the comparison falls back to their value\n */\n static VALUE_TYPE_ORDER = ['object', 'boolean', 'number', 'string'];\n\n private lazyHash_: string | null = null;\n\n /**\n * @param value_ - The value to store in this leaf node. The object type is\n * possible in the event of a deferred value\n * @param priorityNode_ - The priority of this node.\n */\n constructor(\n private readonly value_: string | number | boolean | Indexable,\n private priorityNode_: Node = LeafNode.__childrenNodeConstructor.EMPTY_NODE\n ) {\n assert(\n this.value_ !== undefined && this.value_ !== null,\n \"LeafNode shouldn't be created with null/undefined value.\"\n );\n\n validatePriorityNode(this.priorityNode_);\n }\n\n /** @inheritDoc */\n isLeafNode(): boolean {\n return true;\n }\n\n /** @inheritDoc */\n getPriority(): Node {\n return this.priorityNode_;\n }\n\n /** @inheritDoc */\n updatePriority(newPriorityNode: Node): Node {\n return new LeafNode(this.value_, newPriorityNode);\n }\n\n /** @inheritDoc */\n getImmediateChild(childName: string): Node {\n // Hack to treat priority as a regular child\n if (childName === '.priority') {\n return this.priorityNode_;\n } else {\n return LeafNode.__childrenNodeConstructor.EMPTY_NODE;\n }\n }\n\n /** @inheritDoc */\n getChild(path: Path): Node {\n if (pathIsEmpty(path)) {\n return this;\n } else if (pathGetFront(path) === '.priority') {\n return this.priorityNode_;\n } else {\n return LeafNode.__childrenNodeConstructor.EMPTY_NODE;\n }\n }\n hasChild(): boolean {\n return false;\n }\n\n /** @inheritDoc */\n getPredecessorChildName(childName: string, childNode: Node): null {\n return null;\n }\n\n /** @inheritDoc */\n updateImmediateChild(childName: string, newChildNode: Node): Node {\n if (childName === '.priority') {\n return this.updatePriority(newChildNode);\n } else if (newChildNode.isEmpty() && childName !== '.priority') {\n return this;\n } else {\n return LeafNode.__childrenNodeConstructor.EMPTY_NODE.updateImmediateChild(\n childName,\n newChildNode\n ).updatePriority(this.priorityNode_);\n }\n }\n\n /** @inheritDoc */\n updateChild(path: Path, newChildNode: Node): Node {\n const front = pathGetFront(path);\n if (front === null) {\n return newChildNode;\n } else if (newChildNode.isEmpty() && front !== '.priority') {\n return this;\n } else {\n assert(\n front !== '.priority' || pathGetLength(path) === 1,\n '.priority must be the last token in a path'\n );\n\n return this.updateImmediateChild(\n front,\n LeafNode.__childrenNodeConstructor.EMPTY_NODE.updateChild(\n pathPopFront(path),\n newChildNode\n )\n );\n }\n }\n\n /** @inheritDoc */\n isEmpty(): boolean {\n return false;\n }\n\n /** @inheritDoc */\n numChildren(): number {\n return 0;\n }\n\n /** @inheritDoc */\n forEachChild(index: Index, action: (s: string, n: Node) => void): boolean {\n return false;\n }\n val(exportFormat?: boolean): {} {\n if (exportFormat && !this.getPriority().isEmpty()) {\n return {\n '.value': this.getValue(),\n '.priority': this.getPriority().val()\n };\n } else {\n return this.getValue();\n }\n }\n\n /** @inheritDoc */\n hash(): string {\n if (this.lazyHash_ === null) {\n let toHash = '';\n if (!this.priorityNode_.isEmpty()) {\n toHash +=\n 'priority:' +\n priorityHashText(this.priorityNode_.val() as number | string) +\n ':';\n }\n\n const type = typeof this.value_;\n toHash += type + ':';\n if (type === 'number') {\n toHash += doubleToIEEE754String(this.value_ as number);\n } else {\n toHash += this.value_;\n }\n this.lazyHash_ = sha1(toHash);\n }\n return this.lazyHash_;\n }\n\n /**\n * Returns the value of the leaf node.\n * @returns The value of the node.\n */\n getValue(): Indexable | string | number | boolean {\n return this.value_;\n }\n compareTo(other: Node): number {\n if (other === LeafNode.__childrenNodeConstructor.EMPTY_NODE) {\n return 1;\n } else if (other instanceof LeafNode.__childrenNodeConstructor) {\n return -1;\n } else {\n assert(other.isLeafNode(), 'Unknown node type');\n return this.compareToLeafNode_(other as LeafNode);\n }\n }\n\n /**\n * Comparison specifically for two leaf nodes\n */\n private compareToLeafNode_(otherLeaf: LeafNode): number {\n const otherLeafType = typeof otherLeaf.value_;\n const thisLeafType = typeof this.value_;\n const otherIndex = LeafNode.VALUE_TYPE_ORDER.indexOf(otherLeafType);\n const thisIndex = LeafNode.VALUE_TYPE_ORDER.indexOf(thisLeafType);\n assert(otherIndex >= 0, 'Unknown leaf type: ' + otherLeafType);\n assert(thisIndex >= 0, 'Unknown leaf type: ' + thisLeafType);\n if (otherIndex === thisIndex) {\n // Same type, compare values\n if (thisLeafType === 'object') {\n // Deferred value nodes are all equal, but we should also never get to this point...\n return 0;\n } else {\n // Note that this works because true > false, all others are number or string comparisons\n if (this.value_ < otherLeaf.value_) {\n return -1;\n } else if (this.value_ === otherLeaf.value_) {\n return 0;\n } else {\n return 1;\n }\n }\n } else {\n return thisIndex - otherIndex;\n }\n }\n withIndex(): Node {\n return this;\n }\n isIndexed(): boolean {\n return true;\n }\n equals(other: Node): boolean {\n if (other === this) {\n return true;\n } else if (other.isLeafNode()) {\n const otherLeaf = other as LeafNode;\n return (\n this.value_ === otherLeaf.value_ &&\n this.priorityNode_.equals(otherLeaf.priorityNode_)\n );\n } else {\n return false;\n }\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { nameCompare, MAX_NAME } from '../../util/util';\nimport { LeafNode } from '../LeafNode';\nimport { NamedNode, Node } from '../Node';\n\nimport { Index } from './Index';\n\nlet nodeFromJSON: (a: unknown) => Node;\nlet MAX_NODE: Node;\n\nexport function setNodeFromJSON(val: (a: unknown) => Node) {\n nodeFromJSON = val;\n}\n\nexport function setMaxNode(val: Node) {\n MAX_NODE = val;\n}\n\nexport class PriorityIndex extends Index {\n compare(a: NamedNode, b: NamedNode): number {\n const aPriority = a.node.getPriority();\n const bPriority = b.node.getPriority();\n const indexCmp = aPriority.compareTo(bPriority);\n if (indexCmp === 0) {\n return nameCompare(a.name, b.name);\n } else {\n return indexCmp;\n }\n }\n isDefinedOn(node: Node): boolean {\n return !node.getPriority().isEmpty();\n }\n indexedValueChanged(oldNode: Node, newNode: Node): boolean {\n return !oldNode.getPriority().equals(newNode.getPriority());\n }\n minPost(): NamedNode {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n return (NamedNode as any).MIN;\n }\n maxPost(): NamedNode {\n return new NamedNode(MAX_NAME, new LeafNode('[PRIORITY-POST]', MAX_NODE));\n }\n\n makePost(indexValue: unknown, name: string): NamedNode {\n const priorityNode = nodeFromJSON(indexValue);\n return new NamedNode(name, new LeafNode('[PRIORITY-POST]', priorityNode));\n }\n\n /**\n * @returns String representation for inclusion in a query spec\n */\n toString(): string {\n return '.priority';\n }\n}\n\nexport const PRIORITY_INDEX = new PriorityIndex();\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { LLRBNode, SortedMap } from '../util/SortedMap';\n\nimport { NamedNode } from './Node';\n\nconst LOG_2 = Math.log(2);\n\nclass Base12Num {\n count: number;\n private current_: number;\n private bits_: number;\n\n constructor(length: number) {\n const logBase2 = (num: number) =>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n parseInt((Math.log(num) / LOG_2) as any, 10);\n const bitMask = (bits: number) => parseInt(Array(bits + 1).join('1'), 2);\n this.count = logBase2(length + 1);\n this.current_ = this.count - 1;\n const mask = bitMask(this.count);\n this.bits_ = (length + 1) & mask;\n }\n\n nextBitIsOne(): boolean {\n //noinspection JSBitwiseOperatorUsage\n const result = !(this.bits_ & (0x1 << this.current_));\n this.current_--;\n return result;\n }\n}\n\n/**\n * Takes a list of child nodes and constructs a SortedSet using the given comparison\n * function\n *\n * Uses the algorithm described in the paper linked here:\n * http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.46.1458\n *\n * @param childList - Unsorted list of children\n * @param cmp - The comparison method to be used\n * @param keyFn - An optional function to extract K from a node wrapper, if K's\n * type is not NamedNode\n * @param mapSortFn - An optional override for comparator used by the generated sorted map\n */\nexport const buildChildSet = function <K, V>(\n childList: NamedNode[],\n cmp: (a: NamedNode, b: NamedNode) => number,\n keyFn?: (a: NamedNode) => K,\n mapSortFn?: (a: K, b: K) => number\n): SortedMap<K, V> {\n childList.sort(cmp);\n\n const buildBalancedTree = function (\n low: number,\n high: number\n ): LLRBNode<K, V> | null {\n const length = high - low;\n let namedNode: NamedNode;\n let key: K;\n if (length === 0) {\n return null;\n } else if (length === 1) {\n namedNode = childList[low];\n key = keyFn ? keyFn(namedNode) : (namedNode as unknown as K);\n return new LLRBNode(\n key,\n namedNode.node as unknown as V,\n LLRBNode.BLACK,\n null,\n null\n );\n } else {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const middle = parseInt((length / 2) as any, 10) + low;\n const left = buildBalancedTree(low, middle);\n const right = buildBalancedTree(middle + 1, high);\n namedNode = childList[middle];\n key = keyFn ? keyFn(namedNode) : (namedNode as unknown as K);\n return new LLRBNode(\n key,\n namedNode.node as unknown as V,\n LLRBNode.BLACK,\n left,\n right\n );\n }\n };\n\n const buildFrom12Array = function (base12: Base12Num): LLRBNode<K, V> {\n let node: LLRBNode<K, V> = null;\n let root = null;\n let index = childList.length;\n\n const buildPennant = function (chunkSize: number, color: boolean) {\n const low = index - chunkSize;\n const high = index;\n index -= chunkSize;\n const childTree = buildBalancedTree(low + 1, high);\n const namedNode = childList[low];\n const key: K = keyFn ? keyFn(namedNode) : (namedNode as unknown as K);\n attachPennant(\n new LLRBNode(\n key,\n namedNode.node as unknown as V,\n color,\n null,\n childTree\n )\n );\n };\n\n const attachPennant = function (pennant: LLRBNode<K, V>) {\n if (node) {\n node.left = pennant;\n node = pennant;\n } else {\n root = pennant;\n node = pennant;\n }\n };\n\n for (let i = 0; i < base12.count; ++i) {\n const isOne = base12.nextBitIsOne();\n // The number of nodes taken in each slice is 2^(arr.length - (i + 1))\n const chunkSize = Math.pow(2, base12.count - (i + 1));\n if (isOne) {\n buildPennant(chunkSize, LLRBNode.BLACK);\n } else {\n // current == 2\n buildPennant(chunkSize, LLRBNode.BLACK);\n buildPennant(chunkSize, LLRBNode.RED);\n }\n }\n return root;\n };\n\n const base12 = new Base12Num(childList.length);\n const root = buildFrom12Array(base12);\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n return new SortedMap<K, V>(mapSortFn || (cmp as any), root);\n};\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert, contains, map, safeGet } from '@firebase/util';\n\nimport { SortedMap } from '../util/SortedMap';\n\nimport { buildChildSet } from './childSet';\nimport { Index } from './indexes/Index';\nimport { KEY_INDEX } from './indexes/KeyIndex';\nimport { PRIORITY_INDEX } from './indexes/PriorityIndex';\nimport { NamedNode, Node } from './Node';\n\nlet _defaultIndexMap: IndexMap;\n\nconst fallbackObject = {};\n\nexport class IndexMap {\n /**\n * The default IndexMap for nodes without a priority\n */\n static get Default(): IndexMap {\n assert(\n fallbackObject && PRIORITY_INDEX,\n 'ChildrenNode.ts has not been loaded'\n );\n _defaultIndexMap =\n _defaultIndexMap ||\n new IndexMap(\n { '.priority': fallbackObject },\n { '.priority': PRIORITY_INDEX }\n );\n return _defaultIndexMap;\n }\n\n constructor(\n private indexes_: {\n [k: string]: SortedMap<NamedNode, Node> | /*FallbackType*/ object;\n },\n private indexSet_: { [k: string]: Index }\n ) {}\n\n get(indexKey: string): SortedMap<NamedNode, Node> | null {\n const sortedMap = safeGet(this.indexes_, indexKey);\n if (!sortedMap) {\n throw new Error('No index defined for ' + indexKey);\n }\n\n if (sortedMap instanceof SortedMap) {\n return sortedMap;\n } else {\n // The index exists, but it falls back to just name comparison. Return null so that the calling code uses the\n // regular child map\n return null;\n }\n }\n\n hasIndex(indexDefinition: Index): boolean {\n return contains(this.indexSet_, indexDefinition.toString());\n }\n\n addIndex(\n indexDefinition: Index,\n existingChildren: SortedMap<string, Node>\n ): IndexMap {\n assert(\n indexDefinition !== KEY_INDEX,\n \"KeyIndex always exists and isn't meant to be added to the IndexMap.\"\n );\n const childList = [];\n let sawIndexedValue = false;\n const iter = existingChildren.getIterator(NamedNode.Wrap);\n let next = iter.getNext();\n while (next) {\n sawIndexedValue =\n sawIndexedValue || indexDefinition.isDefinedOn(next.node);\n childList.push(next);\n next = iter.getNext();\n }\n let newIndex;\n if (sawIndexedValue) {\n newIndex = buildChildSet(childList, indexDefinition.getCompare());\n } else {\n newIndex = fallbackObject;\n }\n const indexName = indexDefinition.toString();\n const newIndexSet = { ...this.indexSet_ };\n newIndexSet[indexName] = indexDefinition;\n const newIndexes = { ...this.indexes_ };\n newIndexes[indexName] = newIndex;\n return new IndexMap(newIndexes, newIndexSet);\n }\n\n /**\n * Ensure that this node is properly tracked in any indexes that we're maintaining\n */\n addToIndexes(\n namedNode: NamedNode,\n existingChildren: SortedMap<string, Node>\n ): IndexMap {\n const newIndexes = map(\n this.indexes_,\n (indexedChildren: SortedMap<NamedNode, Node>, indexName: string) => {\n const index = safeGet(this.indexSet_, indexName);\n assert(index, 'Missing index implementation for ' + indexName);\n if (indexedChildren === fallbackObject) {\n // Check to see if we need to index everything\n if (index.isDefinedOn(namedNode.node)) {\n // We need to build this index\n const childList = [];\n const iter = existingChildren.getIterator(NamedNode.Wrap);\n let next = iter.getNext();\n while (next) {\n if (next.name !== namedNode.name) {\n childList.push(next);\n }\n next = iter.getNext();\n }\n childList.push(namedNode);\n return buildChildSet(childList, index.getCompare());\n } else {\n // No change, this remains a fallback\n return fallbackObject;\n }\n } else {\n const existingSnap = existingChildren.get(namedNode.name);\n let newChildren = indexedChildren;\n if (existingSnap) {\n newChildren = newChildren.remove(\n new NamedNode(namedNode.name, existingSnap)\n );\n }\n return newChildren.insert(namedNode, namedNode.node);\n }\n }\n );\n return new IndexMap(newIndexes, this.indexSet_);\n }\n\n /**\n * Create a new IndexMap instance with the given value removed\n */\n removeFromIndexes(\n namedNode: NamedNode,\n existingChildren: SortedMap<string, Node>\n ): IndexMap {\n const newIndexes = map(\n this.indexes_,\n (indexedChildren: SortedMap<NamedNode, Node>) => {\n if (indexedChildren === fallbackObject) {\n // This is the fallback. Just return it, nothing to do in this case\n return indexedChildren;\n } else {\n const existingSnap = existingChildren.get(namedNode.name);\n if (existingSnap) {\n return indexedChildren.remove(\n new NamedNode(namedNode.name, existingSnap)\n );\n } else {\n // No record of this child\n return indexedChildren;\n }\n }\n }\n );\n return new IndexMap(newIndexes, this.indexSet_);\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert } from '@firebase/util';\n\nimport { Path, pathGetFront, pathGetLength, pathPopFront } from '../util/Path';\nimport { SortedMap, SortedMapIterator } from '../util/SortedMap';\nimport { MAX_NAME, MIN_NAME, sha1 } from '../util/util';\n\nimport { NAME_COMPARATOR } from './comparators';\nimport { Index } from './indexes/Index';\nimport { KEY_INDEX, KeyIndex } from './indexes/KeyIndex';\nimport {\n PRIORITY_INDEX,\n setMaxNode as setPriorityMaxNode\n} from './indexes/PriorityIndex';\nimport { IndexMap } from './IndexMap';\nimport { LeafNode } from './LeafNode';\nimport { NamedNode, Node } from './Node';\nimport { priorityHashText, setMaxNode, validatePriorityNode } from './snap';\n\nexport interface ChildrenNodeConstructor {\n new (\n children_: SortedMap<string, Node>,\n priorityNode_: Node | null,\n indexMap_: IndexMap\n ): ChildrenNode;\n EMPTY_NODE: ChildrenNode;\n}\n\n// TODO: For memory savings, don't store priorityNode_ if it's empty.\n\nlet EMPTY_NODE: ChildrenNode;\n\n/**\n * ChildrenNode is a class for storing internal nodes in a DataSnapshot\n * (i.e. nodes with children). It implements Node and stores the\n * list of children in the children property, sorted by child name.\n */\nexport class ChildrenNode implements Node {\n private lazyHash_: string | null = null;\n\n static get EMPTY_NODE(): ChildrenNode {\n return (\n EMPTY_NODE ||\n (EMPTY_NODE = new ChildrenNode(\n new SortedMap<string, Node>(NAME_COMPARATOR),\n null,\n IndexMap.Default\n ))\n );\n }\n\n /**\n * @param children_ - List of children of this node..\n * @param priorityNode_ - The priority of this node (as a snapshot node).\n */\n constructor(\n private readonly children_: SortedMap<string, Node>,\n private readonly priorityNode_: Node | null,\n private indexMap_: IndexMap\n ) {\n /**\n * Note: The only reason we allow null priority is for EMPTY_NODE, since we can't use\n * EMPTY_NODE as the priority of EMPTY_NODE. We might want to consider making EMPTY_NODE its own\n * class instead of an empty ChildrenNode.\n */\n if (this.priorityNode_) {\n validatePriorityNode(this.priorityNode_);\n }\n\n if (this.children_.isEmpty()) {\n assert(\n !this.priorityNode_ || this.priorityNode_.isEmpty(),\n 'An empty node cannot have a priority'\n );\n }\n }\n\n /** @inheritDoc */\n isLeafNode(): boolean {\n return false;\n }\n\n /** @inheritDoc */\n getPriority(): Node {\n return this.priorityNode_ || EMPTY_NODE;\n }\n\n /** @inheritDoc */\n updatePriority(newPriorityNode: Node): Node {\n if (this.children_.isEmpty()) {\n // Don't allow priorities on empty nodes\n return this;\n } else {\n return new ChildrenNode(this.children_, newPriorityNode, this.indexMap_);\n }\n }\n\n /** @inheritDoc */\n getImmediateChild(childName: string): Node {\n // Hack to treat priority as a regular child\n if (childName === '.priority') {\n return this.getPriority();\n } else {\n const child = this.children_.get(childName);\n return child === null ? EMPTY_NODE : child;\n }\n }\n\n /** @inheritDoc */\n getChild(path: Path): Node {\n const front = pathGetFront(path);\n if (front === null) {\n return this;\n }\n\n return this.getImmediateChild(front).getChild(pathPopFront(path));\n }\n\n /** @inheritDoc */\n hasChild(childName: string): boolean {\n return this.children_.get(childName) !== null;\n }\n\n /** @inheritDoc */\n updateImmediateChild(childName: string, newChildNode: Node): Node {\n assert(newChildNode, 'We should always be passing snapshot nodes');\n if (childName === '.priority') {\n return this.updatePriority(newChildNode);\n } else {\n const namedNode = new NamedNode(childName, newChildNode);\n let newChildren, newIndexMap;\n if (newChildNode.isEmpty()) {\n newChildren = this.children_.remove(childName);\n newIndexMap = this.indexMap_.removeFromIndexes(\n namedNode,\n this.children_\n );\n } else {\n newChildren = this.children_.insert(childName, newChildNode);\n newIndexMap = this.indexMap_.addToIndexes(namedNode, this.children_);\n }\n\n const newPriority = newChildren.isEmpty()\n ? EMPTY_NODE\n : this.priorityNode_;\n return new ChildrenNode(newChildren, newPriority, newIndexMap);\n }\n }\n\n /** @inheritDoc */\n updateChild(path: Path, newChildNode: Node): Node {\n const front = pathGetFront(path);\n if (front === null) {\n return newChildNode;\n } else {\n assert(\n pathGetFront(path) !== '.priority' || pathGetLength(path) === 1,\n '.priority must be the last token in a path'\n );\n const newImmediateChild = this.getImmediateChild(front).updateChild(\n pathPopFront(path),\n newChildNode\n );\n return this.updateImmediateChild(front, newImmediateChild);\n }\n }\n\n /** @inheritDoc */\n isEmpty(): boolean {\n return this.children_.isEmpty();\n }\n\n /** @inheritDoc */\n numChildren(): number {\n return this.children_.count();\n }\n\n private static INTEGER_REGEXP_ = /^(0|[1-9]\\d*)$/;\n\n /** @inheritDoc */\n val(exportFormat?: boolean): object {\n if (this.isEmpty()) {\n return null;\n }\n\n const obj: { [k: string]: unknown } = {};\n let numKeys = 0,\n maxKey = 0,\n allIntegerKeys = true;\n this.forEachChild(PRIORITY_INDEX, (key: string, childNode: Node) => {\n obj[key] = childNode.val(exportFormat);\n\n numKeys++;\n if (allIntegerKeys && ChildrenNode.INTEGER_REGEXP_.test(key)) {\n maxKey = Math.max(maxKey, Number(key));\n } else {\n allIntegerKeys = false;\n }\n });\n\n if (!exportFormat && allIntegerKeys && maxKey < 2 * numKeys) {\n // convert to array.\n const array: unknown[] = [];\n // eslint-disable-next-line guard-for-in\n for (const key in obj) {\n array[key as unknown as number] = obj[key];\n }\n\n return array;\n } else {\n if (exportFormat && !this.getPriority().isEmpty()) {\n obj['.priority'] = this.getPriority().val();\n }\n return obj;\n }\n }\n\n /** @inheritDoc */\n hash(): string {\n if (this.lazyHash_ === null) {\n let toHash = '';\n if (!this.getPriority().isEmpty()) {\n toHash +=\n 'priority:' +\n priorityHashText(this.getPriority().val() as string | number) +\n ':';\n }\n\n this.forEachChild(PRIORITY_INDEX, (key, childNode) => {\n const childHash = childNode.hash();\n if (childHash !== '') {\n toHash += ':' + key + ':' + childHash;\n }\n });\n\n this.lazyHash_ = toHash === '' ? '' : sha1(toHash);\n }\n return this.lazyHash_;\n }\n\n /** @inheritDoc */\n getPredecessorChildName(\n childName: string,\n childNode: Node,\n index: Index\n ): string {\n const idx = this.resolveIndex_(index);\n if (idx) {\n const predecessor = idx.getPredecessorKey(\n new NamedNode(childName, childNode)\n );\n return predecessor ? predecessor.name : null;\n } else {\n return this.children_.getPredecessorKey(childName);\n }\n }\n\n getFirstChildName(indexDefinition: Index): string | null {\n const idx = this.resolveIndex_(indexDefinition);\n if (idx) {\n const minKey = idx.minKey();\n return minKey && minKey.name;\n } else {\n return this.children_.minKey();\n }\n }\n\n getFirstChild(indexDefinition: Index): NamedNode | null {\n const minKey = this.getFirstChildName(indexDefinition);\n if (minKey) {\n return new NamedNode(minKey, this.children_.get(minKey));\n } else {\n return null;\n }\n }\n\n /**\n * Given an index, return the key name of the largest value we have, according to that index\n */\n getLastChildName(indexDefinition: Index): string | null {\n const idx = this.resolveIndex_(indexDefinition);\n if (idx) {\n const maxKey = idx.maxKey();\n return maxKey && maxKey.name;\n } else {\n return this.children_.maxKey();\n }\n }\n\n getLastChild(indexDefinition: Index): NamedNode | null {\n const maxKey = this.getLastChildName(indexDefinition);\n if (maxKey) {\n return new NamedNode(maxKey, this.children_.get(maxKey));\n } else {\n return null;\n }\n }\n forEachChild(\n index: Index,\n action: (key: string, node: Node) => boolean | void\n ): boolean {\n const idx = this.resolveIndex_(index);\n if (idx) {\n return idx.inorderTraversal(wrappedNode => {\n return action(wrappedNode.name, wrappedNode.node);\n });\n } else {\n return this.children_.inorderTraversal(action);\n }\n }\n\n getIterator(\n indexDefinition: Index\n ): SortedMapIterator<string | NamedNode, Node, NamedNode> {\n return this.getIteratorFrom(indexDefinition.minPost(), indexDefinition);\n }\n\n getIteratorFrom(\n startPost: NamedNode,\n indexDefinition: Index\n ): SortedMapIterator<string | NamedNode, Node, NamedNode> {\n const idx = this.resolveIndex_(indexDefinition);\n if (idx) {\n return idx.getIteratorFrom(startPost, key => key);\n } else {\n const iterator = this.children_.getIteratorFrom(\n startPost.name,\n NamedNode.Wrap\n );\n let next = iterator.peek();\n while (next != null && indexDefinition.compare(next, startPost) < 0) {\n iterator.getNext();\n next = iterator.peek();\n }\n return iterator;\n }\n }\n\n getReverseIterator(\n indexDefinition: Index\n ): SortedMapIterator<string | NamedNode, Node, NamedNode> {\n return this.getReverseIteratorFrom(\n indexDefinition.maxPost(),\n indexDefinition\n );\n }\n\n getReverseIteratorFrom(\n endPost: NamedNode,\n indexDefinition: Index\n ): SortedMapIterator<string | NamedNode, Node, NamedNode> {\n const idx = this.resolveIndex_(indexDefinition);\n if (idx) {\n return idx.getReverseIteratorFrom(endPost, key => {\n return key;\n });\n } else {\n const iterator = this.children_.getReverseIteratorFrom(\n endPost.name,\n NamedNode.Wrap\n );\n let next = iterator.peek();\n while (next != null && indexDefinition.compare(next, endPost) > 0) {\n iterator.getNext();\n next = iterator.peek();\n }\n return iterator;\n }\n }\n compareTo(other: ChildrenNode): number {\n if (this.isEmpty()) {\n if (other.isEmpty()) {\n return 0;\n } else {\n return -1;\n }\n } else if (other.isLeafNode() || other.isEmpty()) {\n return 1;\n } else if (other === MAX_NODE) {\n return -1;\n } else {\n // Must be another node with children.\n return 0;\n }\n }\n withIndex(indexDefinition: Index): Node {\n if (\n indexDefinition === KEY_INDEX ||\n this.indexMap_.hasIndex(indexDefinition)\n ) {\n return this;\n } else {\n const newIndexMap = this.indexMap_.addIndex(\n indexDefinition,\n this.children_\n );\n return new ChildrenNode(this.children_, this.priorityNode_, newIndexMap);\n }\n }\n isIndexed(index: Index): boolean {\n return index === KEY_INDEX || this.indexMap_.hasIndex(index);\n }\n equals(other: Node): boolean {\n if (other === this) {\n return true;\n } else if (other.isLeafNode()) {\n return false;\n } else {\n const otherChildrenNode = other as ChildrenNode;\n if (!this.getPriority().equals(otherChildrenNode.getPriority())) {\n return false;\n } else if (\n this.children_.count() === otherChildrenNode.children_.count()\n ) {\n const thisIter = this.getIterator(PRIORITY_INDEX);\n const otherIter = otherChildrenNode.getIterator(PRIORITY_INDEX);\n let thisCurrent = thisIter.getNext();\n let otherCurrent = otherIter.getNext();\n while (thisCurrent && otherCurrent) {\n if (\n thisCurrent.name !== otherCurrent.name ||\n !thisCurrent.node.equals(otherCurrent.node)\n ) {\n return false;\n }\n thisCurrent = thisIter.getNext();\n otherCurrent = otherIter.getNext();\n }\n return thisCurrent === null && otherCurrent === null;\n } else {\n return false;\n }\n }\n }\n\n /**\n * Returns a SortedMap ordered by index, or null if the default (by-key) ordering can be used\n * instead.\n *\n */\n private resolveIndex_(\n indexDefinition: Index\n ): SortedMap<NamedNode, Node> | null {\n if (indexDefinition === KEY_INDEX) {\n return null;\n } else {\n return this.indexMap_.get(indexDefinition.toString());\n }\n }\n}\n\nexport class MaxNode extends ChildrenNode {\n constructor() {\n super(\n new SortedMap<string, Node>(NAME_COMPARATOR),\n ChildrenNode.EMPTY_NODE,\n IndexMap.Default\n );\n }\n\n compareTo(other: Node): number {\n if (other === this) {\n return 0;\n } else {\n return 1;\n }\n }\n\n equals(other: Node): boolean {\n // Not that we every compare it, but MAX_NODE is only ever equal to itself\n return other === this;\n }\n\n getPriority(): MaxNode {\n return this;\n }\n\n getImmediateChild(childName: string): ChildrenNode {\n return ChildrenNode.EMPTY_NODE;\n }\n\n isEmpty(): boolean {\n return false;\n }\n}\n\n/**\n * Marker that will sort higher than any other snapshot.\n */\nexport const MAX_NODE = new MaxNode();\n\n/**\n * Document NamedNode extensions\n */\ndeclare module './Node' {\n interface NamedNode {\n MIN: NamedNode;\n MAX: NamedNode;\n }\n}\n\nObject.defineProperties(NamedNode, {\n MIN: {\n value: new NamedNode(MIN_NAME, ChildrenNode.EMPTY_NODE)\n },\n MAX: {\n value: new NamedNode(MAX_NAME, MAX_NODE)\n }\n});\n\n/**\n * Reference Extensions\n */\nKeyIndex.__EMPTY_NODE = ChildrenNode.EMPTY_NODE;\nLeafNode.__childrenNodeConstructor = ChildrenNode;\nsetMaxNode(MAX_NODE);\nsetPriorityMaxNode(MAX_NODE);\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { contains, assert } from '@firebase/util';\n\nimport { Indexable } from '../util/misc';\nimport { SortedMap } from '../util/SortedMap';\nimport { each } from '../util/util';\n\nimport { ChildrenNode } from './ChildrenNode';\nimport { buildChildSet } from './childSet';\nimport { NAME_COMPARATOR, NAME_ONLY_COMPARATOR } from './comparators';\nimport { PRIORITY_INDEX, setNodeFromJSON } from './indexes/PriorityIndex';\nimport { IndexMap } from './IndexMap';\nimport { LeafNode } from './LeafNode';\nimport { NamedNode, Node } from './Node';\n\nconst USE_HINZE = true;\n\n/**\n * Constructs a snapshot node representing the passed JSON and returns it.\n * @param json - JSON to create a node for.\n * @param priority - Optional priority to use. This will be ignored if the\n * passed JSON contains a .priority property.\n */\nexport function nodeFromJSON(\n json: unknown | null,\n priority: unknown = null\n): Node {\n if (json === null) {\n return ChildrenNode.EMPTY_NODE;\n }\n\n if (typeof json === 'object' && '.priority' in json) {\n priority = json['.priority'];\n }\n\n assert(\n priority === null ||\n typeof priority === 'string' ||\n typeof priority === 'number' ||\n (typeof priority === 'object' && '.sv' in (priority as object)),\n 'Invalid priority type found: ' + typeof priority\n );\n\n if (typeof json === 'object' && '.value' in json && json['.value'] !== null) {\n json = json['.value'];\n }\n\n // Valid leaf nodes include non-objects or server-value wrapper objects\n if (typeof json !== 'object' || '.sv' in json) {\n const jsonLeaf = json as string | number | boolean | Indexable;\n return new LeafNode(jsonLeaf, nodeFromJSON(priority));\n }\n\n if (!(json instanceof Array) && USE_HINZE) {\n const children: NamedNode[] = [];\n let childrenHavePriority = false;\n const hinzeJsonObj = json;\n each(hinzeJsonObj, (key, child) => {\n if (key.substring(0, 1) !== '.') {\n // Ignore metadata nodes\n const childNode = nodeFromJSON(child);\n if (!childNode.isEmpty()) {\n childrenHavePriority =\n childrenHavePriority || !childNode.getPriority().isEmpty();\n children.push(new NamedNode(key, childNode));\n }\n }\n });\n\n if (children.length === 0) {\n return ChildrenNode.EMPTY_NODE;\n }\n\n const childSet = buildChildSet(\n children,\n NAME_ONLY_COMPARATOR,\n namedNode => namedNode.name,\n NAME_COMPARATOR\n ) as SortedMap<string, Node>;\n if (childrenHavePriority) {\n const sortedChildSet = buildChildSet(\n children,\n PRIORITY_INDEX.getCompare()\n );\n return new ChildrenNode(\n childSet,\n nodeFromJSON(priority),\n new IndexMap(\n { '.priority': sortedChildSet },\n { '.priority': PRIORITY_INDEX }\n )\n );\n } else {\n return new ChildrenNode(\n childSet,\n nodeFromJSON(priority),\n IndexMap.Default\n );\n }\n } else {\n let node: Node = ChildrenNode.EMPTY_NODE;\n each(json, (key: string, childData: unknown) => {\n if (contains(json as object, key)) {\n if (key.substring(0, 1) !== '.') {\n // ignore metadata nodes.\n const childNode = nodeFromJSON(childData);\n if (childNode.isLeafNode() || !childNode.isEmpty()) {\n node = node.updateImmediateChild(key, childNode);\n }\n }\n }\n });\n\n return node.updatePriority(nodeFromJSON(priority));\n }\n}\n\nsetNodeFromJSON(nodeFromJSON);\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert } from '@firebase/util';\n\nimport { Path, pathGetFront, pathIsEmpty, pathSlice } from '../../util/Path';\nimport { MAX_NAME, nameCompare } from '../../util/util';\nimport { ChildrenNode, MAX_NODE } from '../ChildrenNode';\nimport { NamedNode, Node } from '../Node';\nimport { nodeFromJSON } from '../nodeFromJSON';\n\nimport { Index } from './Index';\n\nexport class PathIndex extends Index {\n constructor(private indexPath_: Path) {\n super();\n\n assert(\n !pathIsEmpty(indexPath_) && pathGetFront(indexPath_) !== '.priority',\n \"Can't create PathIndex with empty path or .priority key\"\n );\n }\n\n protected extractChild(snap: Node): Node {\n return snap.getChild(this.indexPath_);\n }\n isDefinedOn(node: Node): boolean {\n return !node.getChild(this.indexPath_).isEmpty();\n }\n compare(a: NamedNode, b: NamedNode): number {\n const aChild = this.extractChild(a.node);\n const bChild = this.extractChild(b.node);\n const indexCmp = aChild.compareTo(bChild);\n if (indexCmp === 0) {\n return nameCompare(a.name, b.name);\n } else {\n return indexCmp;\n }\n }\n makePost(indexValue: object, name: string): NamedNode {\n const valueNode = nodeFromJSON(indexValue);\n const node = ChildrenNode.EMPTY_NODE.updateChild(\n this.indexPath_,\n valueNode\n );\n return new NamedNode(name, node);\n }\n maxPost(): NamedNode {\n const node = ChildrenNode.EMPTY_NODE.updateChild(this.indexPath_, MAX_NODE);\n return new NamedNode(MAX_NAME, node);\n }\n toString(): string {\n return pathSlice(this.indexPath_, 0).join('/');\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { nameCompare } from '../../util/util';\nimport { NamedNode, Node } from '../Node';\nimport { nodeFromJSON } from '../nodeFromJSON';\n\nimport { Index } from './Index';\n\nexport class ValueIndex extends Index {\n compare(a: NamedNode, b: NamedNode): number {\n const indexCmp = a.node.compareTo(b.node);\n if (indexCmp === 0) {\n return nameCompare(a.name, b.name);\n } else {\n return indexCmp;\n }\n }\n isDefinedOn(node: Node): boolean {\n return true;\n }\n indexedValueChanged(oldNode: Node, newNode: Node): boolean {\n return !oldNode.equals(newNode);\n }\n minPost(): NamedNode {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n return (NamedNode as any).MIN;\n }\n maxPost(): NamedNode {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n return (NamedNode as any).MAX;\n }\n\n makePost(indexValue: object, name: string): NamedNode {\n const valueNode = nodeFromJSON(indexValue);\n return new NamedNode(name, valueNode);\n }\n\n /**\n * @returns String representation for inclusion in a query spec\n */\n toString(): string {\n return '.value';\n }\n}\n\nexport const VALUE_INDEX = new ValueIndex();\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Node } from '../snap/Node';\n\nexport const enum ChangeType {\n /** Event type for a child added */\n CHILD_ADDED = 'child_added',\n /** Event type for a child removed */\n CHILD_REMOVED = 'child_removed',\n /** Event type for a child changed */\n CHILD_CHANGED = 'child_changed',\n /** Event type for a child moved */\n CHILD_MOVED = 'child_moved',\n /** Event type for a value change */\n VALUE = 'value'\n}\n\nexport interface Change {\n /** @param type - The event type */\n type: ChangeType;\n /** @param snapshotNode - The data */\n snapshotNode: Node;\n /** @param childName - The name for this child, if it's a child even */\n childName?: string;\n /** @param oldSnap - Used for intermediate processing of child changed events */\n oldSnap?: Node;\n /** * @param prevName - The name for the previous child, if applicable */\n prevName?: string | null;\n}\n\nexport function changeValue(snapshotNode: Node): Change {\n return { type: ChangeType.VALUE, snapshotNode };\n}\n\nexport function changeChildAdded(\n childName: string,\n snapshotNode: Node\n): Change {\n return { type: ChangeType.CHILD_ADDED, snapshotNode, childName };\n}\n\nexport function changeChildRemoved(\n childName: string,\n snapshotNode: Node\n): Change {\n return { type: ChangeType.CHILD_REMOVED, snapshotNode, childName };\n}\n\nexport function changeChildChanged(\n childName: string,\n snapshotNode: Node,\n oldSnap: Node\n): Change {\n return {\n type: ChangeType.CHILD_CHANGED,\n snapshotNode,\n childName,\n oldSnap\n };\n}\n\nexport function changeChildMoved(\n childName: string,\n snapshotNode: Node\n): Change {\n return { type: ChangeType.CHILD_MOVED, snapshotNode, childName };\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert } from '@firebase/util';\n\nimport { ChildrenNode } from '../../snap/ChildrenNode';\nimport { Index } from '../../snap/indexes/Index';\nimport { PRIORITY_INDEX } from '../../snap/indexes/PriorityIndex';\nimport { Node } from '../../snap/Node';\nimport { Path } from '../../util/Path';\nimport {\n changeChildAdded,\n changeChildChanged,\n changeChildRemoved\n} from '../Change';\nimport { ChildChangeAccumulator } from '../ChildChangeAccumulator';\nimport { CompleteChildSource } from '../CompleteChildSource';\n\nimport { NodeFilter } from './NodeFilter';\n\n/**\n * Doesn't really filter nodes but applies an index to the node and keeps track of any changes\n */\nexport class IndexedFilter implements NodeFilter {\n constructor(private readonly index_: Index) {}\n\n updateChild(\n snap: Node,\n key: string,\n newChild: Node,\n affectedPath: Path,\n source: CompleteChildSource,\n optChangeAccumulator: ChildChangeAccumulator | null\n ): Node {\n assert(\n snap.isIndexed(this.index_),\n 'A node must be indexed if only a child is updated'\n );\n const oldChild = snap.getImmediateChild(key);\n // Check if anything actually changed.\n if (\n oldChild.getChild(affectedPath).equals(newChild.getChild(affectedPath))\n ) {\n // There's an edge case where a child can enter or leave the view because affectedPath was set to null.\n // In this case, affectedPath will appear null in both the old and new snapshots. So we need\n // to avoid treating these cases as \"nothing changed.\"\n if (oldChild.isEmpty() === newChild.isEmpty()) {\n // Nothing changed.\n\n // This assert should be valid, but it's expensive (can dominate perf testing) so don't actually do it.\n //assert(oldChild.equals(newChild), 'Old and new snapshots should be equal.');\n return snap;\n }\n }\n\n if (optChangeAccumulator != null) {\n if (newChild.isEmpty()) {\n if (snap.hasChild(key)) {\n optChangeAccumulator.trackChildChange(\n changeChildRemoved(key, oldChild)\n );\n } else {\n assert(\n snap.isLeafNode(),\n 'A child remove without an old child only makes sense on a leaf node'\n );\n }\n } else if (oldChild.isEmpty()) {\n optChangeAccumulator.trackChildChange(changeChildAdded(key, newChild));\n } else {\n optChangeAccumulator.trackChildChange(\n changeChildChanged(key, newChild, oldChild)\n );\n }\n }\n if (snap.isLeafNode() && newChild.isEmpty()) {\n return snap;\n } else {\n // Make sure the node is indexed\n return snap.updateImmediateChild(key, newChild).withIndex(this.index_);\n }\n }\n updateFullNode(\n oldSnap: Node,\n newSnap: Node,\n optChangeAccumulator: ChildChangeAccumulator | null\n ): Node {\n if (optChangeAccumulator != null) {\n if (!oldSnap.isLeafNode()) {\n oldSnap.forEachChild(PRIORITY_INDEX, (key, childNode) => {\n if (!newSnap.hasChild(key)) {\n optChangeAccumulator.trackChildChange(\n changeChildRemoved(key, childNode)\n );\n }\n });\n }\n if (!newSnap.isLeafNode()) {\n newSnap.forEachChild(PRIORITY_INDEX, (key, childNode) => {\n if (oldSnap.hasChild(key)) {\n const oldChild = oldSnap.getImmediateChild(key);\n if (!oldChild.equals(childNode)) {\n optChangeAccumulator.trackChildChange(\n changeChildChanged(key, childNode, oldChild)\n );\n }\n } else {\n optChangeAccumulator.trackChildChange(\n changeChildAdded(key, childNode)\n );\n }\n });\n }\n }\n return newSnap.withIndex(this.index_);\n }\n updatePriority(oldSnap: Node, newPriority: Node): Node {\n if (oldSnap.isEmpty()) {\n return ChildrenNode.EMPTY_NODE;\n } else {\n return oldSnap.updatePriority(newPriority);\n }\n }\n filtersNodes(): boolean {\n return false;\n }\n getIndexedFilter(): IndexedFilter {\n return this;\n }\n getIndex(): Index {\n return this.index_;\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { NamedNode, Node } from '../../../core/snap/Node';\nimport { ChildrenNode } from '../../snap/ChildrenNode';\nimport { Index } from '../../snap/indexes/Index';\nimport { PRIORITY_INDEX } from '../../snap/indexes/PriorityIndex';\nimport { Path } from '../../util/Path';\nimport { ChildChangeAccumulator } from '../ChildChangeAccumulator';\nimport { CompleteChildSource } from '../CompleteChildSource';\nimport { QueryParams } from '../QueryParams';\n\nimport { IndexedFilter } from './IndexedFilter';\nimport { NodeFilter } from './NodeFilter';\n\n/**\n * Filters nodes by range and uses an IndexFilter to track any changes after filtering the node\n */\nexport class RangedFilter implements NodeFilter {\n private indexedFilter_: IndexedFilter;\n\n private index_: Index;\n\n private startPost_: NamedNode;\n\n private endPost_: NamedNode;\n\n private startIsInclusive_: boolean;\n\n private endIsInclusive_: boolean;\n\n constructor(params: QueryParams) {\n this.indexedFilter_ = new IndexedFilter(params.getIndex());\n this.index_ = params.getIndex();\n this.startPost_ = RangedFilter.getStartPost_(params);\n this.endPost_ = RangedFilter.getEndPost_(params);\n this.startIsInclusive_ = !params.startAfterSet_;\n this.endIsInclusive_ = !params.endBeforeSet_;\n }\n\n getStartPost(): NamedNode {\n return this.startPost_;\n }\n\n getEndPost(): NamedNode {\n return this.endPost_;\n }\n\n matches(node: NamedNode): boolean {\n const isWithinStart = this.startIsInclusive_\n ? this.index_.compare(this.getStartPost(), node) <= 0\n : this.index_.compare(this.getStartPost(), node) < 0;\n const isWithinEnd = this.endIsInclusive_\n ? this.index_.compare(node, this.getEndPost()) <= 0\n : this.index_.compare(node, this.getEndPost()) < 0;\n return isWithinStart && isWithinEnd;\n }\n updateChild(\n snap: Node,\n key: string,\n newChild: Node,\n affectedPath: Path,\n source: CompleteChildSource,\n optChangeAccumulator: ChildChangeAccumulator | null\n ): Node {\n if (!this.matches(new NamedNode(key, newChild))) {\n newChild = ChildrenNode.EMPTY_NODE;\n }\n return this.indexedFilter_.updateChild(\n snap,\n key,\n newChild,\n affectedPath,\n source,\n optChangeAccumulator\n );\n }\n updateFullNode(\n oldSnap: Node,\n newSnap: Node,\n optChangeAccumulator: ChildChangeAccumulator | null\n ): Node {\n if (newSnap.isLeafNode()) {\n // Make sure we have a children node with the correct index, not a leaf node;\n newSnap = ChildrenNode.EMPTY_NODE;\n }\n let filtered = newSnap.withIndex(this.index_);\n // Don't support priorities on queries\n filtered = filtered.updatePriority(ChildrenNode.EMPTY_NODE);\n const self = this;\n newSnap.forEachChild(PRIORITY_INDEX, (key, childNode) => {\n if (!self.matches(new NamedNode(key, childNode))) {\n filtered = filtered.updateImmediateChild(key, ChildrenNode.EMPTY_NODE);\n }\n });\n return this.indexedFilter_.updateFullNode(\n oldSnap,\n filtered,\n optChangeAccumulator\n );\n }\n updatePriority(oldSnap: Node, newPriority: Node): Node {\n // Don't support priorities on queries\n return oldSnap;\n }\n filtersNodes(): boolean {\n return true;\n }\n getIndexedFilter(): IndexedFilter {\n return this.indexedFilter_;\n }\n getIndex(): Index {\n return this.index_;\n }\n\n private static getStartPost_(params: QueryParams): NamedNode {\n if (params.hasStart()) {\n const startName = params.getIndexStartName();\n return params.getIndex().makePost(params.getIndexStartValue(), startName);\n } else {\n return params.getIndex().minPost();\n }\n }\n\n private static getEndPost_(params: QueryParams): NamedNode {\n if (params.hasEnd()) {\n const endName = params.getIndexEndName();\n return params.getIndex().makePost(params.getIndexEndValue(), endName);\n } else {\n return params.getIndex().maxPost();\n }\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert } from '@firebase/util';\n\nimport { ChildrenNode } from '../../snap/ChildrenNode';\nimport { Index } from '../../snap/indexes/Index';\nimport { NamedNode, Node } from '../../snap/Node';\nimport { Path } from '../../util/Path';\nimport {\n changeChildAdded,\n changeChildChanged,\n changeChildRemoved\n} from '../Change';\nimport { ChildChangeAccumulator } from '../ChildChangeAccumulator';\nimport { CompleteChildSource } from '../CompleteChildSource';\nimport { QueryParams } from '../QueryParams';\n\nimport { IndexedFilter } from './IndexedFilter';\nimport { NodeFilter } from './NodeFilter';\nimport { RangedFilter } from './RangedFilter';\n\n/**\n * Applies a limit and a range to a node and uses RangedFilter to do the heavy lifting where possible\n */\nexport class LimitedFilter implements NodeFilter {\n private readonly rangedFilter_: RangedFilter;\n\n private readonly index_: Index;\n\n private readonly limit_: number;\n\n private readonly reverse_: boolean;\n\n private readonly startIsInclusive_: boolean;\n\n private readonly endIsInclusive_: boolean;\n\n constructor(params: QueryParams) {\n this.rangedFilter_ = new RangedFilter(params);\n this.index_ = params.getIndex();\n this.limit_ = params.getLimit();\n this.reverse_ = !params.isViewFromLeft();\n this.startIsInclusive_ = !params.startAfterSet_;\n this.endIsInclusive_ = !params.endBeforeSet_;\n }\n updateChild(\n snap: Node,\n key: string,\n newChild: Node,\n affectedPath: Path,\n source: CompleteChildSource,\n optChangeAccumulator: ChildChangeAccumulator | null\n ): Node {\n if (!this.rangedFilter_.matches(new NamedNode(key, newChild))) {\n newChild = ChildrenNode.EMPTY_NODE;\n }\n if (snap.getImmediateChild(key).equals(newChild)) {\n // No change\n return snap;\n } else if (snap.numChildren() < this.limit_) {\n return this.rangedFilter_\n .getIndexedFilter()\n .updateChild(\n snap,\n key,\n newChild,\n affectedPath,\n source,\n optChangeAccumulator\n );\n } else {\n return this.fullLimitUpdateChild_(\n snap,\n key,\n newChild,\n source,\n optChangeAccumulator\n );\n }\n }\n updateFullNode(\n oldSnap: Node,\n newSnap: Node,\n optChangeAccumulator: ChildChangeAccumulator | null\n ): Node {\n let filtered;\n if (newSnap.isLeafNode() || newSnap.isEmpty()) {\n // Make sure we have a children node with the correct index, not a leaf node;\n filtered = ChildrenNode.EMPTY_NODE.withIndex(this.index_);\n } else {\n if (\n this.limit_ * 2 < newSnap.numChildren() &&\n newSnap.isIndexed(this.index_)\n ) {\n // Easier to build up a snapshot, since what we're given has more than twice the elements we want\n filtered = ChildrenNode.EMPTY_NODE.withIndex(this.index_);\n // anchor to the startPost, endPost, or last element as appropriate\n let iterator;\n if (this.reverse_) {\n iterator = (newSnap as ChildrenNode).getReverseIteratorFrom(\n this.rangedFilter_.getEndPost(),\n this.index_\n );\n } else {\n iterator = (newSnap as ChildrenNode).getIteratorFrom(\n this.rangedFilter_.getStartPost(),\n this.index_\n );\n }\n let count = 0;\n while (iterator.hasNext() && count < this.limit_) {\n const next = iterator.getNext();\n if (!this.withinDirectionalStart(next)) {\n // if we have not reached the start, skip to the next element\n continue;\n } else if (!this.withinDirectionalEnd(next)) {\n // if we have reached the end, stop adding elements\n break;\n } else {\n filtered = filtered.updateImmediateChild(next.name, next.node);\n count++;\n }\n }\n } else {\n // The snap contains less than twice the limit. Faster to delete from the snap than build up a new one\n filtered = newSnap.withIndex(this.index_);\n // Don't support priorities on queries\n filtered = filtered.updatePriority(\n ChildrenNode.EMPTY_NODE\n ) as ChildrenNode;\n\n let iterator;\n if (this.reverse_) {\n iterator = filtered.getReverseIterator(this.index_);\n } else {\n iterator = filtered.getIterator(this.index_);\n }\n\n let count = 0;\n while (iterator.hasNext()) {\n const next = iterator.getNext();\n const inRange =\n count < this.limit_ &&\n this.withinDirectionalStart(next) &&\n this.withinDirectionalEnd(next);\n if (inRange) {\n count++;\n } else {\n filtered = filtered.updateImmediateChild(\n next.name,\n ChildrenNode.EMPTY_NODE\n );\n }\n }\n }\n }\n return this.rangedFilter_\n .getIndexedFilter()\n .updateFullNode(oldSnap, filtered, optChangeAccumulator);\n }\n updatePriority(oldSnap: Node, newPriority: Node): Node {\n // Don't support priorities on queries\n return oldSnap;\n }\n filtersNodes(): boolean {\n return true;\n }\n getIndexedFilter(): IndexedFilter {\n return this.rangedFilter_.getIndexedFilter();\n }\n getIndex(): Index {\n return this.index_;\n }\n\n private fullLimitUpdateChild_(\n snap: Node,\n childKey: string,\n childSnap: Node,\n source: CompleteChildSource,\n changeAccumulator: ChildChangeAccumulator | null\n ): Node {\n // TODO: rename all cache stuff etc to general snap terminology\n let cmp;\n if (this.reverse_) {\n const indexCmp = this.index_.getCompare();\n cmp = (a: NamedNode, b: NamedNode) => indexCmp(b, a);\n } else {\n cmp = this.index_.getCompare();\n }\n const oldEventCache = snap as ChildrenNode;\n assert(oldEventCache.numChildren() === this.limit_, '');\n const newChildNamedNode = new NamedNode(childKey, childSnap);\n const windowBoundary = this.reverse_\n ? oldEventCache.getFirstChild(this.index_)\n : (oldEventCache.getLastChild(this.index_) as NamedNode);\n const inRange = this.rangedFilter_.matches(newChildNamedNode);\n if (oldEventCache.hasChild(childKey)) {\n const oldChildSnap = oldEventCache.getImmediateChild(childKey);\n let nextChild = source.getChildAfterChild(\n this.index_,\n windowBoundary,\n this.reverse_\n );\n while (\n nextChild != null &&\n (nextChild.name === childKey || oldEventCache.hasChild(nextChild.name))\n ) {\n // There is a weird edge case where a node is updated as part of a merge in the write tree, but hasn't\n // been applied to the limited filter yet. Ignore this next child which will be updated later in\n // the limited filter...\n nextChild = source.getChildAfterChild(\n this.index_,\n nextChild,\n this.reverse_\n );\n }\n const compareNext =\n nextChild == null ? 1 : cmp(nextChild, newChildNamedNode);\n const remainsInWindow =\n inRange && !childSnap.isEmpty() && compareNext >= 0;\n if (remainsInWindow) {\n if (changeAccumulator != null) {\n changeAccumulator.trackChildChange(\n changeChildChanged(childKey, childSnap, oldChildSnap)\n );\n }\n return oldEventCache.updateImmediateChild(childKey, childSnap);\n } else {\n if (changeAccumulator != null) {\n changeAccumulator.trackChildChange(\n changeChildRemoved(childKey, oldChildSnap)\n );\n }\n const newEventCache = oldEventCache.updateImmediateChild(\n childKey,\n ChildrenNode.EMPTY_NODE\n );\n const nextChildInRange =\n nextChild != null && this.rangedFilter_.matches(nextChild);\n if (nextChildInRange) {\n if (changeAccumulator != null) {\n changeAccumulator.trackChildChange(\n changeChildAdded(nextChild.name, nextChild.node)\n );\n }\n return newEventCache.updateImmediateChild(\n nextChild.name,\n nextChild.node\n );\n } else {\n return newEventCache;\n }\n }\n } else if (childSnap.isEmpty()) {\n // we're deleting a node, but it was not in the window, so ignore it\n return snap;\n } else if (inRange) {\n if (cmp(windowBoundary, newChildNamedNode) >= 0) {\n if (changeAccumulator != null) {\n changeAccumulator.trackChildChange(\n changeChildRemoved(windowBoundary.name, windowBoundary.node)\n );\n changeAccumulator.trackChildChange(\n changeChildAdded(childKey, childSnap)\n );\n }\n return oldEventCache\n .updateImmediateChild(childKey, childSnap)\n .updateImmediateChild(windowBoundary.name, ChildrenNode.EMPTY_NODE);\n } else {\n return snap;\n }\n } else {\n return snap;\n }\n }\n\n private withinDirectionalStart = (node: NamedNode) =>\n this.reverse_ ? this.withinEndPost(node) : this.withinStartPost(node);\n\n private withinDirectionalEnd = (node: NamedNode) =>\n this.reverse_ ? this.withinStartPost(node) : this.withinEndPost(node);\n\n private withinStartPost = (node: NamedNode) => {\n const compareRes = this.index_.compare(\n this.rangedFilter_.getStartPost(),\n node\n );\n return this.startIsInclusive_ ? compareRes <= 0 : compareRes < 0;\n };\n\n private withinEndPost = (node: NamedNode) => {\n const compareRes = this.index_.compare(\n node,\n this.rangedFilter_.getEndPost()\n );\n return this.endIsInclusive_ ? compareRes <= 0 : compareRes < 0;\n };\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert, stringify } from '@firebase/util';\n\nimport { Index } from '../snap/indexes/Index';\nimport { KEY_INDEX } from '../snap/indexes/KeyIndex';\nimport { PathIndex } from '../snap/indexes/PathIndex';\nimport { PRIORITY_INDEX, PriorityIndex } from '../snap/indexes/PriorityIndex';\nimport { VALUE_INDEX } from '../snap/indexes/ValueIndex';\nimport { MAX_NAME, MIN_NAME } from '../util/util';\n\nimport { IndexedFilter } from './filter/IndexedFilter';\nimport { LimitedFilter } from './filter/LimitedFilter';\nimport { NodeFilter } from './filter/NodeFilter';\nimport { RangedFilter } from './filter/RangedFilter';\n\n/**\n * Wire Protocol Constants\n */\nconst enum WIRE_PROTOCOL_CONSTANTS {\n INDEX_START_VALUE = 'sp',\n INDEX_START_NAME = 'sn',\n INDEX_START_IS_INCLUSIVE = 'sin',\n INDEX_END_VALUE = 'ep',\n INDEX_END_NAME = 'en',\n INDEX_END_IS_INCLUSIVE = 'ein',\n LIMIT = 'l',\n VIEW_FROM = 'vf',\n VIEW_FROM_LEFT = 'l',\n VIEW_FROM_RIGHT = 'r',\n INDEX = 'i'\n}\n\n/**\n * REST Query Constants\n */\nconst enum REST_QUERY_CONSTANTS {\n ORDER_BY = 'orderBy',\n PRIORITY_INDEX = '$priority',\n VALUE_INDEX = '$value',\n KEY_INDEX = '$key',\n START_AFTER = 'startAfter',\n START_AT = 'startAt',\n END_AT = 'endAt',\n END_BEFORE = 'endBefore',\n LIMIT_TO_FIRST = 'limitToFirst',\n LIMIT_TO_LAST = 'limitToLast'\n}\n\n/**\n * This class is an immutable-from-the-public-api struct containing a set of query parameters defining a\n * range to be returned for a particular location. It is assumed that validation of parameters is done at the\n * user-facing API level, so it is not done here.\n *\n * @internal\n */\nexport class QueryParams {\n limitSet_ = false;\n startSet_ = false;\n startNameSet_ = false;\n startAfterSet_ = false; // can only be true if startSet_ is true\n endSet_ = false;\n endNameSet_ = false;\n endBeforeSet_ = false; // can only be true if endSet_ is true\n limit_ = 0;\n viewFrom_ = '';\n indexStartValue_: unknown | null = null;\n indexStartName_ = '';\n indexEndValue_: unknown | null = null;\n indexEndName_ = '';\n index_: PriorityIndex = PRIORITY_INDEX;\n\n hasStart(): boolean {\n return this.startSet_;\n }\n\n /**\n * @returns True if it would return from left.\n */\n isViewFromLeft(): boolean {\n if (this.viewFrom_ === '') {\n // limit(), rather than limitToFirst or limitToLast was called.\n // This means that only one of startSet_ and endSet_ is true. Use them\n // to calculate which side of the view to anchor to. If neither is set,\n // anchor to the end.\n return this.startSet_;\n } else {\n return this.viewFrom_ === WIRE_PROTOCOL_CONSTANTS.VIEW_FROM_LEFT;\n }\n }\n\n /**\n * Only valid to call if hasStart() returns true\n */\n getIndexStartValue(): unknown {\n assert(this.startSet_, 'Only valid if start has been set');\n return this.indexStartValue_;\n }\n\n /**\n * Only valid to call if hasStart() returns true.\n * Returns the starting key name for the range defined by these query parameters\n */\n getIndexStartName(): string {\n assert(this.startSet_, 'Only valid if start has been set');\n if (this.startNameSet_) {\n return this.indexStartName_;\n } else {\n return MIN_NAME;\n }\n }\n\n hasEnd(): boolean {\n return this.endSet_;\n }\n\n /**\n * Only valid to call if hasEnd() returns true.\n */\n getIndexEndValue(): unknown {\n assert(this.endSet_, 'Only valid if end has been set');\n return this.indexEndValue_;\n }\n\n /**\n * Only valid to call if hasEnd() returns true.\n * Returns the end key name for the range defined by these query parameters\n */\n getIndexEndName(): string {\n assert(this.endSet_, 'Only valid if end has been set');\n if (this.endNameSet_) {\n return this.indexEndName_;\n } else {\n return MAX_NAME;\n }\n }\n\n hasLimit(): boolean {\n return this.limitSet_;\n }\n\n /**\n * @returns True if a limit has been set and it has been explicitly anchored\n */\n hasAnchoredLimit(): boolean {\n return this.limitSet_ && this.viewFrom_ !== '';\n }\n\n /**\n * Only valid to call if hasLimit() returns true\n */\n getLimit(): number {\n assert(this.limitSet_, 'Only valid if limit has been set');\n return this.limit_;\n }\n\n getIndex(): Index {\n return this.index_;\n }\n\n loadsAllData(): boolean {\n return !(this.startSet_ || this.endSet_ || this.limitSet_);\n }\n\n isDefault(): boolean {\n return this.loadsAllData() && this.index_ === PRIORITY_INDEX;\n }\n\n copy(): QueryParams {\n const copy = new QueryParams();\n copy.limitSet_ = this.limitSet_;\n copy.limit_ = this.limit_;\n copy.startSet_ = this.startSet_;\n copy.startAfterSet_ = this.startAfterSet_;\n copy.indexStartValue_ = this.indexStartValue_;\n copy.startNameSet_ = this.startNameSet_;\n copy.indexStartName_ = this.indexStartName_;\n copy.endSet_ = this.endSet_;\n copy.endBeforeSet_ = this.endBeforeSet_;\n copy.indexEndValue_ = this.indexEndValue_;\n copy.endNameSet_ = this.endNameSet_;\n copy.indexEndName_ = this.indexEndName_;\n copy.index_ = this.index_;\n copy.viewFrom_ = this.viewFrom_;\n return copy;\n }\n}\n\nexport function queryParamsGetNodeFilter(queryParams: QueryParams): NodeFilter {\n if (queryParams.loadsAllData()) {\n return new IndexedFilter(queryParams.getIndex());\n } else if (queryParams.hasLimit()) {\n return new LimitedFilter(queryParams);\n } else {\n return new RangedFilter(queryParams);\n }\n}\n\nexport function queryParamsLimit(\n queryParams: QueryParams,\n newLimit: number\n): QueryParams {\n const newParams = queryParams.copy();\n newParams.limitSet_ = true;\n newParams.limit_ = newLimit;\n newParams.viewFrom_ = '';\n return newParams;\n}\n\nexport function queryParamsLimitToFirst(\n queryParams: QueryParams,\n newLimit: number\n): QueryParams {\n const newParams = queryParams.copy();\n newParams.limitSet_ = true;\n newParams.limit_ = newLimit;\n newParams.viewFrom_ = WIRE_PROTOCOL_CONSTANTS.VIEW_FROM_LEFT;\n return newParams;\n}\n\nexport function queryParamsLimitToLast(\n queryParams: QueryParams,\n newLimit: number\n): QueryParams {\n const newParams = queryParams.copy();\n newParams.limitSet_ = true;\n newParams.limit_ = newLimit;\n newParams.viewFrom_ = WIRE_PROTOCOL_CONSTANTS.VIEW_FROM_RIGHT;\n return newParams;\n}\n\nexport function queryParamsStartAt(\n queryParams: QueryParams,\n indexValue: unknown,\n key?: string | null\n): QueryParams {\n const newParams = queryParams.copy();\n newParams.startSet_ = true;\n if (indexValue === undefined) {\n indexValue = null;\n }\n newParams.indexStartValue_ = indexValue;\n if (key != null) {\n newParams.startNameSet_ = true;\n newParams.indexStartName_ = key;\n } else {\n newParams.startNameSet_ = false;\n newParams.indexStartName_ = '';\n }\n return newParams;\n}\n\nexport function queryParamsStartAfter(\n queryParams: QueryParams,\n indexValue: unknown,\n key?: string | null\n): QueryParams {\n let params: QueryParams;\n if (queryParams.index_ === KEY_INDEX || !!key) {\n params = queryParamsStartAt(queryParams, indexValue, key);\n } else {\n params = queryParamsStartAt(queryParams, indexValue, MAX_NAME);\n }\n params.startAfterSet_ = true;\n return params;\n}\n\nexport function queryParamsEndAt(\n queryParams: QueryParams,\n indexValue: unknown,\n key?: string | null\n): QueryParams {\n const newParams = queryParams.copy();\n newParams.endSet_ = true;\n if (indexValue === undefined) {\n indexValue = null;\n }\n newParams.indexEndValue_ = indexValue;\n if (key !== undefined) {\n newParams.endNameSet_ = true;\n newParams.indexEndName_ = key;\n } else {\n newParams.endNameSet_ = false;\n newParams.indexEndName_ = '';\n }\n return newParams;\n}\n\nexport function queryParamsEndBefore(\n queryParams: QueryParams,\n indexValue: unknown,\n key?: string | null\n): QueryParams {\n let params: QueryParams;\n if (queryParams.index_ === KEY_INDEX || !!key) {\n params = queryParamsEndAt(queryParams, indexValue, key);\n } else {\n params = queryParamsEndAt(queryParams, indexValue, MIN_NAME);\n }\n params.endBeforeSet_ = true;\n return params;\n}\n\nexport function queryParamsOrderBy(\n queryParams: QueryParams,\n index: Index\n): QueryParams {\n const newParams = queryParams.copy();\n newParams.index_ = index;\n return newParams;\n}\n\n/**\n * Returns a set of REST query string parameters representing this query.\n *\n * @returns query string parameters\n */\nexport function queryParamsToRestQueryStringParameters(\n queryParams: QueryParams\n): Record<string, string | number> {\n const qs: Record<string, string | number> = {};\n\n if (queryParams.isDefault()) {\n return qs;\n }\n\n let orderBy;\n if (queryParams.index_ === PRIORITY_INDEX) {\n orderBy = REST_QUERY_CONSTANTS.PRIORITY_INDEX;\n } else if (queryParams.index_ === VALUE_INDEX) {\n orderBy = REST_QUERY_CONSTANTS.VALUE_INDEX;\n } else if (queryParams.index_ === KEY_INDEX) {\n orderBy = REST_QUERY_CONSTANTS.KEY_INDEX;\n } else {\n assert(queryParams.index_ instanceof PathIndex, 'Unrecognized index type!');\n orderBy = queryParams.index_.toString();\n }\n qs[REST_QUERY_CONSTANTS.ORDER_BY] = stringify(orderBy);\n\n if (queryParams.startSet_) {\n const startParam = queryParams.startAfterSet_\n ? REST_QUERY_CONSTANTS.START_AFTER\n : REST_QUERY_CONSTANTS.START_AT;\n qs[startParam] = stringify(queryParams.indexStartValue_);\n if (queryParams.startNameSet_) {\n qs[startParam] += ',' + stringify(queryParams.indexStartName_);\n }\n }\n\n if (queryParams.endSet_) {\n const endParam = queryParams.endBeforeSet_\n ? REST_QUERY_CONSTANTS.END_BEFORE\n : REST_QUERY_CONSTANTS.END_AT;\n qs[endParam] = stringify(queryParams.indexEndValue_);\n if (queryParams.endNameSet_) {\n qs[endParam] += ',' + stringify(queryParams.indexEndName_);\n }\n }\n\n if (queryParams.limitSet_) {\n if (queryParams.isViewFromLeft()) {\n qs[REST_QUERY_CONSTANTS.LIMIT_TO_FIRST] = queryParams.limit_;\n } else {\n qs[REST_QUERY_CONSTANTS.LIMIT_TO_LAST] = queryParams.limit_;\n }\n }\n\n return qs;\n}\n\nexport function queryParamsGetQueryObject(\n queryParams: QueryParams\n): Record<string, unknown> {\n const obj: Record<string, unknown> = {};\n if (queryParams.startSet_) {\n obj[WIRE_PROTOCOL_CONSTANTS.INDEX_START_VALUE] =\n queryParams.indexStartValue_;\n if (queryParams.startNameSet_) {\n obj[WIRE_PROTOCOL_CONSTANTS.INDEX_START_NAME] =\n queryParams.indexStartName_;\n }\n obj[WIRE_PROTOCOL_CONSTANTS.INDEX_START_IS_INCLUSIVE] =\n !queryParams.startAfterSet_;\n }\n if (queryParams.endSet_) {\n obj[WIRE_PROTOCOL_CONSTANTS.INDEX_END_VALUE] = queryParams.indexEndValue_;\n if (queryParams.endNameSet_) {\n obj[WIRE_PROTOCOL_CONSTANTS.INDEX_END_NAME] = queryParams.indexEndName_;\n }\n obj[WIRE_PROTOCOL_CONSTANTS.INDEX_END_IS_INCLUSIVE] =\n !queryParams.endBeforeSet_;\n }\n if (queryParams.limitSet_) {\n obj[WIRE_PROTOCOL_CONSTANTS.LIMIT] = queryParams.limit_;\n let viewFrom = queryParams.viewFrom_;\n if (viewFrom === '') {\n if (queryParams.isViewFromLeft()) {\n viewFrom = WIRE_PROTOCOL_CONSTANTS.VIEW_FROM_LEFT;\n } else {\n viewFrom = WIRE_PROTOCOL_CONSTANTS.VIEW_FROM_RIGHT;\n }\n }\n obj[WIRE_PROTOCOL_CONSTANTS.VIEW_FROM] = viewFrom;\n }\n // For now, priority index is the default, so we only specify if it's some other index\n if (queryParams.index_ !== PRIORITY_INDEX) {\n obj[WIRE_PROTOCOL_CONSTANTS.INDEX] = queryParams.index_.toString();\n }\n return obj;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n assert,\n jsonEval,\n safeGet,\n querystring,\n Deferred\n} from '@firebase/util';\n\nimport { AppCheckTokenProvider } from './AppCheckTokenProvider';\nimport { AuthTokenProvider } from './AuthTokenProvider';\nimport { RepoInfo } from './RepoInfo';\nimport { ServerActions } from './ServerActions';\nimport { logWrapper, warn } from './util/util';\nimport { QueryContext } from './view/EventRegistration';\nimport { queryParamsToRestQueryStringParameters } from './view/QueryParams';\n\n/**\n * An implementation of ServerActions that communicates with the server via REST requests.\n * This is mostly useful for compatibility with crawlers, where we don't want to spin up a full\n * persistent connection (using WebSockets or long-polling)\n */\nexport class ReadonlyRestClient extends ServerActions {\n reportStats(stats: { [k: string]: unknown }): void {\n throw new Error('Method not implemented.');\n }\n\n /** @private {function(...[*])} */\n private log_: (...args: unknown[]) => void = logWrapper('p:rest:');\n\n /**\n * We don't actually need to track listens, except to prevent us calling an onComplete for a listen\n * that's been removed. :-/\n */\n private listens_: { [k: string]: object } = {};\n\n static getListenId_(query: QueryContext, tag?: number | null): string {\n if (tag !== undefined) {\n return 'tag$' + tag;\n } else {\n assert(\n query._queryParams.isDefault(),\n \"should have a tag if it's not a default query.\"\n );\n return query._path.toString();\n }\n }\n\n /**\n * @param repoInfo_ - Data about the namespace we are connecting to\n * @param onDataUpdate_ - A callback for new data from the server\n */\n constructor(\n private repoInfo_: RepoInfo,\n private onDataUpdate_: (\n a: string,\n b: unknown,\n c: boolean,\n d: number | null\n ) => void,\n private authTokenProvider_: AuthTokenProvider,\n private appCheckTokenProvider_: AppCheckTokenProvider\n ) {\n super();\n }\n\n /** @inheritDoc */\n listen(\n query: QueryContext,\n currentHashFn: () => string,\n tag: number | null,\n onComplete: (a: string, b: unknown) => void\n ) {\n const pathString = query._path.toString();\n this.log_('Listen called for ' + pathString + ' ' + query._queryIdentifier);\n\n // Mark this listener so we can tell if it's removed.\n const listenId = ReadonlyRestClient.getListenId_(query, tag);\n const thisListen = {};\n this.listens_[listenId] = thisListen;\n\n const queryStringParameters = queryParamsToRestQueryStringParameters(\n query._queryParams\n );\n\n this.restRequest_(\n pathString + '.json',\n queryStringParameters,\n (error, result) => {\n let data = result;\n\n if (error === 404) {\n data = null;\n error = null;\n }\n\n if (error === null) {\n this.onDataUpdate_(pathString, data, /*isMerge=*/ false, tag);\n }\n\n if (safeGet(this.listens_, listenId) === thisListen) {\n let status;\n if (!error) {\n status = 'ok';\n } else if (error === 401) {\n status = 'permission_denied';\n } else {\n status = 'rest_error:' + error;\n }\n\n onComplete(status, null);\n }\n }\n );\n }\n\n /** @inheritDoc */\n unlisten(query: QueryContext, tag: number | null) {\n const listenId = ReadonlyRestClient.getListenId_(query, tag);\n delete this.listens_[listenId];\n }\n\n get(query: QueryContext): Promise<string> {\n const queryStringParameters = queryParamsToRestQueryStringParameters(\n query._queryParams\n );\n\n const pathString = query._path.toString();\n\n const deferred = new Deferred<string>();\n\n this.restRequest_(\n pathString + '.json',\n queryStringParameters,\n (error, result) => {\n let data = result;\n\n if (error === 404) {\n data = null;\n error = null;\n }\n\n if (error === null) {\n this.onDataUpdate_(\n pathString,\n data,\n /*isMerge=*/ false,\n /*tag=*/ null\n );\n deferred.resolve(data as string);\n } else {\n deferred.reject(new Error(data as string));\n }\n }\n );\n return deferred.promise;\n }\n\n /** @inheritDoc */\n refreshAuthToken(token: string) {\n // no-op since we just always call getToken.\n }\n\n /**\n * Performs a REST request to the given path, with the provided query string parameters,\n * and any auth credentials we have.\n */\n private restRequest_(\n pathString: string,\n queryStringParameters: { [k: string]: string | number } = {},\n callback: ((a: number | null, b?: unknown) => void) | null\n ) {\n queryStringParameters['format'] = 'export';\n\n return Promise.all([\n this.authTokenProvider_.getToken(/*forceRefresh=*/ false),\n this.appCheckTokenProvider_.getToken(/*forceRefresh=*/ false)\n ]).then(([authToken, appCheckToken]) => {\n if (authToken && authToken.accessToken) {\n queryStringParameters['auth'] = authToken.accessToken;\n }\n if (appCheckToken && appCheckToken.token) {\n queryStringParameters['ac'] = appCheckToken.token;\n }\n\n const url =\n (this.repoInfo_.secure ? 'https://' : 'http://') +\n this.repoInfo_.host +\n pathString +\n '?' +\n 'ns=' +\n this.repoInfo_.namespace +\n querystring(queryStringParameters);\n\n this.log_('Sending REST request for ' + url);\n const xhr = new XMLHttpRequest();\n xhr.onreadystatechange = () => {\n if (callback && xhr.readyState === 4) {\n this.log_(\n 'REST Response for ' + url + ' received. status:',\n xhr.status,\n 'response:',\n xhr.responseText\n );\n let res = null;\n if (xhr.status >= 200 && xhr.status < 300) {\n try {\n res = jsonEval(xhr.responseText);\n } catch (e) {\n warn(\n 'Failed to parse JSON response for ' +\n url +\n ': ' +\n xhr.responseText\n );\n }\n callback(null, res);\n } else {\n // 401 and 404 are expected.\n if (xhr.status !== 401 && xhr.status !== 404) {\n warn(\n 'Got unsuccessful REST response for ' +\n url +\n ' Status: ' +\n xhr.status\n );\n }\n callback(xhr.status);\n }\n callback = null;\n }\n };\n\n xhr.open('GET', url, /*asynchronous=*/ true);\n xhr.send();\n });\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * Returns a querystring-formatted string (e.g. &arg=val&arg2=val2) from a\n * params object (e.g. {arg: 'val', arg2: 'val2'})\n * Note: You must prepend it with ? when adding it to a URL.\n */\nexport function querystring(querystringParams: {\n [key: string]: string | number;\n}): string {\n const params = [];\n for (const [key, value] of Object.entries(querystringParams)) {\n if (Array.isArray(value)) {\n value.forEach(arrayVal => {\n params.push(\n encodeURIComponent(key) + '=' + encodeURIComponent(arrayVal)\n );\n });\n } else {\n params.push(encodeURIComponent(key) + '=' + encodeURIComponent(value));\n }\n }\n return params.length ? '&' + params.join('&') : '';\n}\n\n/**\n * Decodes a querystring (e.g. ?arg=val&arg2=val2) into a params object\n * (e.g. {arg: 'val', arg2: 'val2'})\n */\nexport function querystringDecode(querystring: string): Record<string, string> {\n const obj: Record<string, string> = {};\n const tokens = querystring.replace(/^\\?/, '').split('&');\n\n tokens.forEach(token => {\n if (token) {\n const [key, value] = token.split('=');\n obj[decodeURIComponent(key)] = decodeURIComponent(value);\n }\n });\n return obj;\n}\n\n/**\n * Extract the query string part of a URL, including the leading question mark (if present).\n */\nexport function extractQuerystring(url: string): string {\n const queryStart = url.indexOf('?');\n if (!queryStart) {\n return '';\n }\n const fragmentStart = url.indexOf('#', queryStart);\n return url.substring(\n queryStart,\n fragmentStart > 0 ? fragmentStart : undefined\n );\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { ChildrenNode } from './snap/ChildrenNode';\nimport { Node } from './snap/Node';\nimport { Path } from './util/Path';\n\n/**\n * Mutable object which basically just stores a reference to the \"latest\" immutable snapshot.\n */\nexport class SnapshotHolder {\n private rootNode_: Node = ChildrenNode.EMPTY_NODE;\n\n getNode(path: Path): Node {\n return this.rootNode_.getChild(path);\n }\n\n updateSnapshot(path: Path, newSnapshotNode: Node) {\n this.rootNode_ = this.rootNode_.updateChild(path, newSnapshotNode);\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { PRIORITY_INDEX } from './snap/indexes/PriorityIndex';\nimport { Node } from './snap/Node';\nimport { Path, pathGetFront, pathIsEmpty, pathPopFront } from './util/Path';\n\n/**\n * Helper class to store a sparse set of snapshots.\n */\nexport interface SparseSnapshotTree {\n value: Node | null;\n readonly children: Map<string, SparseSnapshotTree>;\n}\n\nexport function newSparseSnapshotTree(): SparseSnapshotTree {\n return {\n value: null,\n children: new Map()\n };\n}\n\n/**\n * Gets the node stored at the given path if one exists.\n * Only seems to be used in tests.\n *\n * @param path - Path to look up snapshot for.\n * @returns The retrieved node, or null.\n */\nexport function sparseSnapshotTreeFind(\n sparseSnapshotTree: SparseSnapshotTree,\n path: Path\n): Node | null {\n if (sparseSnapshotTree.value != null) {\n return sparseSnapshotTree.value.getChild(path);\n } else if (!pathIsEmpty(path) && sparseSnapshotTree.children.size > 0) {\n const childKey = pathGetFront(path);\n path = pathPopFront(path);\n if (sparseSnapshotTree.children.has(childKey)) {\n const childTree = sparseSnapshotTree.children.get(childKey);\n return sparseSnapshotTreeFind(childTree, path);\n } else {\n return null;\n }\n } else {\n return null;\n }\n}\n\n/**\n * Stores the given node at the specified path. If there is already a node\n * at a shallower path, it merges the new data into that snapshot node.\n *\n * @param path - Path to look up snapshot for.\n * @param data - The new data, or null.\n */\nexport function sparseSnapshotTreeRemember(\n sparseSnapshotTree: SparseSnapshotTree,\n path: Path,\n data: Node\n): void {\n if (pathIsEmpty(path)) {\n sparseSnapshotTree.value = data;\n sparseSnapshotTree.children.clear();\n } else if (sparseSnapshotTree.value !== null) {\n sparseSnapshotTree.value = sparseSnapshotTree.value.updateChild(path, data);\n } else {\n const childKey = pathGetFront(path);\n if (!sparseSnapshotTree.children.has(childKey)) {\n sparseSnapshotTree.children.set(childKey, newSparseSnapshotTree());\n }\n\n const child = sparseSnapshotTree.children.get(childKey);\n path = pathPopFront(path);\n sparseSnapshotTreeRemember(child, path, data);\n }\n}\n\n/**\n * Purge the data at path from the cache.\n *\n * @param path - Path to look up snapshot for.\n * @returns True if this node should now be removed.\n */\nexport function sparseSnapshotTreeForget(\n sparseSnapshotTree: SparseSnapshotTree,\n path: Path\n): boolean {\n if (pathIsEmpty(path)) {\n sparseSnapshotTree.value = null;\n sparseSnapshotTree.children.clear();\n return true;\n } else {\n if (sparseSnapshotTree.value !== null) {\n if (sparseSnapshotTree.value.isLeafNode()) {\n // We're trying to forget a node that doesn't exist\n return false;\n } else {\n const value = sparseSnapshotTree.value;\n sparseSnapshotTree.value = null;\n\n value.forEachChild(PRIORITY_INDEX, (key, tree) => {\n sparseSnapshotTreeRemember(sparseSnapshotTree, new Path(key), tree);\n });\n\n return sparseSnapshotTreeForget(sparseSnapshotTree, path);\n }\n } else if (sparseSnapshotTree.children.size > 0) {\n const childKey = pathGetFront(path);\n path = pathPopFront(path);\n if (sparseSnapshotTree.children.has(childKey)) {\n const safeToRemove = sparseSnapshotTreeForget(\n sparseSnapshotTree.children.get(childKey),\n path\n );\n if (safeToRemove) {\n sparseSnapshotTree.children.delete(childKey);\n }\n }\n\n return sparseSnapshotTree.children.size === 0;\n } else {\n return true;\n }\n }\n}\n\n/**\n * Recursively iterates through all of the stored tree and calls the\n * callback on each one.\n *\n * @param prefixPath - Path to look up node for.\n * @param func - The function to invoke for each tree.\n */\nexport function sparseSnapshotTreeForEachTree(\n sparseSnapshotTree: SparseSnapshotTree,\n prefixPath: Path,\n func: (a: Path, b: Node) => unknown\n): void {\n if (sparseSnapshotTree.value !== null) {\n func(prefixPath, sparseSnapshotTree.value);\n } else {\n sparseSnapshotTreeForEachChild(sparseSnapshotTree, (key, tree) => {\n const path = new Path(prefixPath.toString() + '/' + key);\n sparseSnapshotTreeForEachTree(tree, path, func);\n });\n }\n}\n\n/**\n * Iterates through each immediate child and triggers the callback.\n * Only seems to be used in tests.\n *\n * @param func - The function to invoke for each child.\n */\nexport function sparseSnapshotTreeForEachChild(\n sparseSnapshotTree: SparseSnapshotTree,\n func: (a: string, b: SparseSnapshotTree) => void\n): void {\n sparseSnapshotTree.children.forEach((tree, key) => {\n func(key, tree);\n });\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { each } from '../util/util';\n\nimport { StatsCollection } from './StatsCollection';\n\n/**\n * Returns the delta from the previous call to get stats.\n *\n * @param collection_ - The collection to \"listen\" to.\n */\nexport class StatsListener {\n private last_: { [k: string]: number } | null = null;\n\n constructor(private collection_: StatsCollection) {}\n\n get(): { [k: string]: number } {\n const newStats = this.collection_.get();\n\n const delta = { ...newStats };\n if (this.last_) {\n each(this.last_, (stat: string, value: number) => {\n delta[stat] = delta[stat] - value;\n });\n }\n this.last_ = newStats;\n\n return delta;\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { contains } from '@firebase/util';\n\nimport { ServerActions } from '../ServerActions';\nimport { setTimeoutNonBlocking, each } from '../util/util';\n\nimport { StatsCollection } from './StatsCollection';\nimport { StatsListener } from './StatsListener';\n\n// Assuming some apps may have a short amount of time on page, and a bulk of firebase operations probably\n// happen on page load, we try to report our first set of stats pretty quickly, but we wait at least 10\n// seconds to try to ensure the Firebase connection is established / settled.\nconst FIRST_STATS_MIN_TIME = 10 * 1000;\nconst FIRST_STATS_MAX_TIME = 30 * 1000;\n\n// We'll continue to report stats on average every 5 minutes.\nconst REPORT_STATS_INTERVAL = 5 * 60 * 1000;\n\nexport class StatsReporter {\n private statsListener_: StatsListener;\n statsToReport_: { [k: string]: boolean } = {};\n\n constructor(collection: StatsCollection, private server_: ServerActions) {\n this.statsListener_ = new StatsListener(collection);\n\n const timeout =\n FIRST_STATS_MIN_TIME +\n (FIRST_STATS_MAX_TIME - FIRST_STATS_MIN_TIME) * Math.random();\n setTimeoutNonBlocking(this.reportStats_.bind(this), Math.floor(timeout));\n }\n\n private reportStats_() {\n const stats = this.statsListener_.get();\n const reportedStats: typeof stats = {};\n let haveStatsToReport = false;\n\n each(stats, (stat: string, value: number) => {\n if (value > 0 && contains(this.statsToReport_, stat)) {\n reportedStats[stat] = value;\n haveStatsToReport = true;\n }\n });\n\n if (haveStatsToReport) {\n this.server_.reportStats(reportedStats);\n }\n\n // queue our next run.\n setTimeoutNonBlocking(\n this.reportStats_.bind(this),\n Math.floor(Math.random() * 2 * REPORT_STATS_INTERVAL)\n );\n }\n}\n\nexport function statsReporterIncludeStat(\n reporter: StatsReporter,\n stat: string\n) {\n reporter.statsToReport_[stat] = true;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Path } from '../util/Path';\n\n/**\n *\n * @enum\n */\nexport enum OperationType {\n OVERWRITE,\n MERGE,\n ACK_USER_WRITE,\n LISTEN_COMPLETE\n}\n\n/**\n * @interface\n */\nexport interface Operation {\n source: OperationSource;\n\n type: OperationType;\n\n path: Path;\n\n operationForChild(childName: string): Operation | null;\n}\n\nexport interface OperationSource {\n fromUser: boolean;\n fromServer: boolean;\n queryId: string | null;\n tagged: boolean;\n}\n\nexport function newOperationSourceUser(): OperationSource {\n return {\n fromUser: true,\n fromServer: false,\n queryId: null,\n tagged: false\n };\n}\n\nexport function newOperationSourceServer(): OperationSource {\n return {\n fromUser: false,\n fromServer: true,\n queryId: null,\n tagged: false\n };\n}\n\nexport function newOperationSourceServerTaggedQuery(\n queryId: string\n): OperationSource {\n return {\n fromUser: false,\n fromServer: true,\n queryId,\n tagged: true\n };\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert } from '@firebase/util';\n\nimport { ImmutableTree } from '../util/ImmutableTree';\nimport {\n newEmptyPath,\n Path,\n pathGetFront,\n pathIsEmpty,\n pathPopFront\n} from '../util/Path';\n\nimport { newOperationSourceUser, Operation, OperationType } from './Operation';\n\nexport class AckUserWrite implements Operation {\n /** @inheritDoc */\n type = OperationType.ACK_USER_WRITE;\n\n /** @inheritDoc */\n source = newOperationSourceUser();\n\n /**\n * @param affectedTree - A tree containing true for each affected path. Affected paths can't overlap.\n */\n constructor(\n /** @inheritDoc */ public path: Path,\n /** @inheritDoc */ public affectedTree: ImmutableTree<boolean>,\n /** @inheritDoc */ public revert: boolean\n ) {}\n operationForChild(childName: string): AckUserWrite {\n if (!pathIsEmpty(this.path)) {\n assert(\n pathGetFront(this.path) === childName,\n 'operationForChild called for unrelated child.'\n );\n return new AckUserWrite(\n pathPopFront(this.path),\n this.affectedTree,\n this.revert\n );\n } else if (this.affectedTree.value != null) {\n assert(\n this.affectedTree.children.isEmpty(),\n 'affectedTree should not have overlapping affected paths.'\n );\n // All child locations are affected as well; just return same operation.\n return this;\n } else {\n const childTree = this.affectedTree.subtree(new Path(childName));\n return new AckUserWrite(newEmptyPath(), childTree, this.revert);\n }\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { newEmptyPath, Path, pathIsEmpty, pathPopFront } from '../util/Path';\n\nimport { Operation, OperationSource, OperationType } from './Operation';\n\nexport class ListenComplete implements Operation {\n /** @inheritDoc */\n type = OperationType.LISTEN_COMPLETE;\n\n constructor(public source: OperationSource, public path: Path) {}\n\n operationForChild(childName: string): ListenComplete {\n if (pathIsEmpty(this.path)) {\n return new ListenComplete(this.source, newEmptyPath());\n } else {\n return new ListenComplete(this.source, pathPopFront(this.path));\n }\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Node } from '../snap/Node';\nimport { newEmptyPath, Path, pathIsEmpty, pathPopFront } from '../util/Path';\n\nimport { Operation, OperationSource, OperationType } from './Operation';\n\nexport class Overwrite implements Operation {\n /** @inheritDoc */\n type = OperationType.OVERWRITE;\n\n constructor(\n public source: OperationSource,\n public path: Path,\n public snap: Node\n ) {}\n\n operationForChild(childName: string): Overwrite {\n if (pathIsEmpty(this.path)) {\n return new Overwrite(\n this.source,\n newEmptyPath(),\n this.snap.getImmediateChild(childName)\n );\n } else {\n return new Overwrite(this.source, pathPopFront(this.path), this.snap);\n }\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert } from '@firebase/util';\n\nimport { Node } from '../snap/Node';\nimport { ImmutableTree } from '../util/ImmutableTree';\nimport {\n newEmptyPath,\n Path,\n pathGetFront,\n pathIsEmpty,\n pathPopFront\n} from '../util/Path';\n\nimport { Operation, OperationSource, OperationType } from './Operation';\nimport { Overwrite } from './Overwrite';\n\nexport class Merge implements Operation {\n /** @inheritDoc */\n type = OperationType.MERGE;\n\n constructor(\n /** @inheritDoc */ public source: OperationSource,\n /** @inheritDoc */ public path: Path,\n /** @inheritDoc */ public children: ImmutableTree<Node>\n ) {}\n operationForChild(childName: string): Operation {\n if (pathIsEmpty(this.path)) {\n const childTree = this.children.subtree(new Path(childName));\n if (childTree.isEmpty()) {\n // This child is unaffected\n return null;\n } else if (childTree.value) {\n // We have a snapshot for the child in question. This becomes an overwrite of the child.\n return new Overwrite(this.source, newEmptyPath(), childTree.value);\n } else {\n // This is a merge at a deeper level\n return new Merge(this.source, newEmptyPath(), childTree);\n }\n } else {\n assert(\n pathGetFront(this.path) === childName,\n \"Can't get a merge for a child not on the path of the operation\"\n );\n return new Merge(this.source, pathPopFront(this.path), this.children);\n }\n }\n toString(): string {\n return (\n 'Operation(' +\n this.path +\n ': ' +\n this.source.toString() +\n ' merge: ' +\n this.children.toString() +\n ')'\n );\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Node } from '../snap/Node';\nimport { Path, pathGetFront, pathIsEmpty } from '../util/Path';\n\n/**\n * A cache node only stores complete children. Additionally it holds a flag whether the node can be considered fully\n * initialized in the sense that we know at one point in time this represented a valid state of the world, e.g.\n * initialized with data from the server, or a complete overwrite by the client. The filtered flag also tracks\n * whether a node potentially had children removed due to a filter.\n */\nexport class CacheNode {\n constructor(\n private node_: Node,\n private fullyInitialized_: boolean,\n private filtered_: boolean\n ) {}\n\n /**\n * Returns whether this node was fully initialized with either server data or a complete overwrite by the client\n */\n isFullyInitialized(): boolean {\n return this.fullyInitialized_;\n }\n\n /**\n * Returns whether this node is potentially missing children due to a filter applied to the node\n */\n isFiltered(): boolean {\n return this.filtered_;\n }\n\n isCompleteForPath(path: Path): boolean {\n if (pathIsEmpty(path)) {\n return this.isFullyInitialized() && !this.filtered_;\n }\n\n const childKey = pathGetFront(path);\n return this.isCompleteForChild(childKey);\n }\n\n isCompleteForChild(key: string): boolean {\n return (\n (this.isFullyInitialized() && !this.filtered_) || this.node_.hasChild(key)\n );\n }\n\n getNode(): Node {\n return this.node_;\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assertionError } from '@firebase/util';\n\nimport { Index } from '../snap/indexes/Index';\nimport { NamedNode, Node } from '../snap/Node';\n\nimport { Change, ChangeType, changeChildMoved } from './Change';\nimport { Event } from './Event';\nimport { EventRegistration, QueryContext } from './EventRegistration';\n\n/**\n * An EventGenerator is used to convert \"raw\" changes (Change) as computed by the\n * CacheDiffer into actual events (Event) that can be raised. See generateEventsForChanges()\n * for details.\n *\n */\nexport class EventGenerator {\n index_: Index;\n\n constructor(public query_: QueryContext) {\n this.index_ = this.query_._queryParams.getIndex();\n }\n}\n\n/**\n * Given a set of raw changes (no moved events and prevName not specified yet), and a set of\n * EventRegistrations that should be notified of these changes, generate the actual events to be raised.\n *\n * Notes:\n * - child_moved events will be synthesized at this time for any child_changed events that affect\n * our index.\n * - prevName will be calculated based on the index ordering.\n */\nexport function eventGeneratorGenerateEventsForChanges(\n eventGenerator: EventGenerator,\n changes: Change[],\n eventCache: Node,\n eventRegistrations: EventRegistration[]\n): Event[] {\n const events: Event[] = [];\n const moves: Change[] = [];\n\n changes.forEach(change => {\n if (\n change.type === ChangeType.CHILD_CHANGED &&\n eventGenerator.index_.indexedValueChanged(\n change.oldSnap as Node,\n change.snapshotNode\n )\n ) {\n moves.push(changeChildMoved(change.childName, change.snapshotNode));\n }\n });\n\n eventGeneratorGenerateEventsForType(\n eventGenerator,\n events,\n ChangeType.CHILD_REMOVED,\n changes,\n eventRegistrations,\n eventCache\n );\n eventGeneratorGenerateEventsForType(\n eventGenerator,\n events,\n ChangeType.CHILD_ADDED,\n changes,\n eventRegistrations,\n eventCache\n );\n eventGeneratorGenerateEventsForType(\n eventGenerator,\n events,\n ChangeType.CHILD_MOVED,\n moves,\n eventRegistrations,\n eventCache\n );\n eventGeneratorGenerateEventsForType(\n eventGenerator,\n events,\n ChangeType.CHILD_CHANGED,\n changes,\n eventRegistrations,\n eventCache\n );\n eventGeneratorGenerateEventsForType(\n eventGenerator,\n events,\n ChangeType.VALUE,\n changes,\n eventRegistrations,\n eventCache\n );\n\n return events;\n}\n\n/**\n * Given changes of a single change type, generate the corresponding events.\n */\nfunction eventGeneratorGenerateEventsForType(\n eventGenerator: EventGenerator,\n events: Event[],\n eventType: string,\n changes: Change[],\n registrations: EventRegistration[],\n eventCache: Node\n) {\n const filteredChanges = changes.filter(change => change.type === eventType);\n\n filteredChanges.sort((a, b) =>\n eventGeneratorCompareChanges(eventGenerator, a, b)\n );\n filteredChanges.forEach(change => {\n const materializedChange = eventGeneratorMaterializeSingleChange(\n eventGenerator,\n change,\n eventCache\n );\n registrations.forEach(registration => {\n if (registration.respondsTo(change.type)) {\n events.push(\n registration.createEvent(materializedChange, eventGenerator.query_)\n );\n }\n });\n });\n}\n\nfunction eventGeneratorMaterializeSingleChange(\n eventGenerator: EventGenerator,\n change: Change,\n eventCache: Node\n): Change {\n if (change.type === 'value' || change.type === 'child_removed') {\n return change;\n } else {\n change.prevName = eventCache.getPredecessorChildName(\n change.childName,\n change.snapshotNode,\n eventGenerator.index_\n );\n return change;\n }\n}\n\nfunction eventGeneratorCompareChanges(\n eventGenerator: EventGenerator,\n a: Change,\n b: Change\n) {\n if (a.childName == null || b.childName == null) {\n throw assertionError('Should only compare child_ events.');\n }\n const aWrapped = new NamedNode(a.childName, a.snapshotNode);\n const bWrapped = new NamedNode(b.childName, b.snapshotNode);\n return eventGenerator.index_.compare(aWrapped, bWrapped);\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Node } from '../snap/Node';\n\nimport { CacheNode } from './CacheNode';\n\n/**\n * Stores the data we have cached for a view.\n *\n * serverSnap is the cached server data, eventSnap is the cached event data (server data plus any local writes).\n */\nexport interface ViewCache {\n readonly eventCache: CacheNode;\n readonly serverCache: CacheNode;\n}\n\nexport function newViewCache(\n eventCache: CacheNode,\n serverCache: CacheNode\n): ViewCache {\n return { eventCache, serverCache };\n}\n\nexport function viewCacheUpdateEventSnap(\n viewCache: ViewCache,\n eventSnap: Node,\n complete: boolean,\n filtered: boolean\n): ViewCache {\n return newViewCache(\n new CacheNode(eventSnap, complete, filtered),\n viewCache.serverCache\n );\n}\n\nexport function viewCacheUpdateServerSnap(\n viewCache: ViewCache,\n serverSnap: Node,\n complete: boolean,\n filtered: boolean\n): ViewCache {\n return newViewCache(\n viewCache.eventCache,\n new CacheNode(serverSnap, complete, filtered)\n );\n}\n\nexport function viewCacheGetCompleteEventSnap(\n viewCache: ViewCache\n): Node | null {\n return viewCache.eventCache.isFullyInitialized()\n ? viewCache.eventCache.getNode()\n : null;\n}\n\nexport function viewCacheGetCompleteServerSnap(\n viewCache: ViewCache\n): Node | null {\n return viewCache.serverCache.isFullyInitialized()\n ? viewCache.serverCache.getNode()\n : null;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n newEmptyPath,\n Path,\n pathChild,\n pathGetFront,\n pathIsEmpty,\n pathPopFront\n} from './Path';\nimport { SortedMap } from './SortedMap';\nimport { each, stringCompare } from './util';\n\nlet emptyChildrenSingleton: SortedMap<string, ImmutableTree<null>>;\n\n/**\n * Singleton empty children collection.\n *\n */\nconst EmptyChildren = (): SortedMap<string, ImmutableTree<null>> => {\n if (!emptyChildrenSingleton) {\n emptyChildrenSingleton = new SortedMap<string, ImmutableTree<null>>(\n stringCompare\n );\n }\n return emptyChildrenSingleton;\n};\n\n/**\n * A tree with immutable elements.\n */\nexport class ImmutableTree<T> {\n static fromObject<T>(obj: { [k: string]: T }): ImmutableTree<T> {\n let tree: ImmutableTree<T> = new ImmutableTree<T>(null);\n each(obj, (childPath: string, childSnap: T) => {\n tree = tree.set(new Path(childPath), childSnap);\n });\n return tree;\n }\n\n constructor(\n public readonly value: T | null,\n public readonly children: SortedMap<\n string,\n ImmutableTree<T>\n > = EmptyChildren()\n ) {}\n\n /**\n * True if the value is empty and there are no children\n */\n isEmpty(): boolean {\n return this.value === null && this.children.isEmpty();\n }\n\n /**\n * Given a path and predicate, return the first node and the path to that node\n * where the predicate returns true.\n *\n * TODO Do a perf test -- If we're creating a bunch of `{path: value:}`\n * objects on the way back out, it may be better to pass down a pathSoFar obj.\n *\n * @param relativePath - The remainder of the path\n * @param predicate - The predicate to satisfy to return a node\n */\n findRootMostMatchingPathAndValue(\n relativePath: Path,\n predicate: (a: T) => boolean\n ): { path: Path; value: T } | null {\n if (this.value != null && predicate(this.value)) {\n return { path: newEmptyPath(), value: this.value };\n } else {\n if (pathIsEmpty(relativePath)) {\n return null;\n } else {\n const front = pathGetFront(relativePath);\n const child = this.children.get(front);\n if (child !== null) {\n const childExistingPathAndValue =\n child.findRootMostMatchingPathAndValue(\n pathPopFront(relativePath),\n predicate\n );\n if (childExistingPathAndValue != null) {\n const fullPath = pathChild(\n new Path(front),\n childExistingPathAndValue.path\n );\n return { path: fullPath, value: childExistingPathAndValue.value };\n } else {\n return null;\n }\n } else {\n return null;\n }\n }\n }\n }\n\n /**\n * Find, if it exists, the shortest subpath of the given path that points a defined\n * value in the tree\n */\n findRootMostValueAndPath(\n relativePath: Path\n ): { path: Path; value: T } | null {\n return this.findRootMostMatchingPathAndValue(relativePath, () => true);\n }\n\n /**\n * @returns The subtree at the given path\n */\n subtree(relativePath: Path): ImmutableTree<T> {\n if (pathIsEmpty(relativePath)) {\n return this;\n } else {\n const front = pathGetFront(relativePath);\n const childTree = this.children.get(front);\n if (childTree !== null) {\n return childTree.subtree(pathPopFront(relativePath));\n } else {\n return new ImmutableTree<T>(null);\n }\n }\n }\n\n /**\n * Sets a value at the specified path.\n *\n * @param relativePath - Path to set value at.\n * @param toSet - Value to set.\n * @returns Resulting tree.\n */\n set(relativePath: Path, toSet: T | null): ImmutableTree<T> {\n if (pathIsEmpty(relativePath)) {\n return new ImmutableTree(toSet, this.children);\n } else {\n const front = pathGetFront(relativePath);\n const child = this.children.get(front) || new ImmutableTree<T>(null);\n const newChild = child.set(pathPopFront(relativePath), toSet);\n const newChildren = this.children.insert(front, newChild);\n return new ImmutableTree(this.value, newChildren);\n }\n }\n\n /**\n * Removes the value at the specified path.\n *\n * @param relativePath - Path to value to remove.\n * @returns Resulting tree.\n */\n remove(relativePath: Path): ImmutableTree<T> {\n if (pathIsEmpty(relativePath)) {\n if (this.children.isEmpty()) {\n return new ImmutableTree<T>(null);\n } else {\n return new ImmutableTree(null, this.children);\n }\n } else {\n const front = pathGetFront(relativePath);\n const child = this.children.get(front);\n if (child) {\n const newChild = child.remove(pathPopFront(relativePath));\n let newChildren;\n if (newChild.isEmpty()) {\n newChildren = this.children.remove(front);\n } else {\n newChildren = this.children.insert(front, newChild);\n }\n if (this.value === null && newChildren.isEmpty()) {\n return new ImmutableTree<T>(null);\n } else {\n return new ImmutableTree(this.value, newChildren);\n }\n } else {\n return this;\n }\n }\n }\n\n /**\n * Gets a value from the tree.\n *\n * @param relativePath - Path to get value for.\n * @returns Value at path, or null.\n */\n get(relativePath: Path): T | null {\n if (pathIsEmpty(relativePath)) {\n return this.value;\n } else {\n const front = pathGetFront(relativePath);\n const child = this.children.get(front);\n if (child) {\n return child.get(pathPopFront(relativePath));\n } else {\n return null;\n }\n }\n }\n\n /**\n * Replace the subtree at the specified path with the given new tree.\n *\n * @param relativePath - Path to replace subtree for.\n * @param newTree - New tree.\n * @returns Resulting tree.\n */\n setTree(relativePath: Path, newTree: ImmutableTree<T>): ImmutableTree<T> {\n if (pathIsEmpty(relativePath)) {\n return newTree;\n } else {\n const front = pathGetFront(relativePath);\n const child = this.children.get(front) || new ImmutableTree<T>(null);\n const newChild = child.setTree(pathPopFront(relativePath), newTree);\n let newChildren;\n if (newChild.isEmpty()) {\n newChildren = this.children.remove(front);\n } else {\n newChildren = this.children.insert(front, newChild);\n }\n return new ImmutableTree(this.value, newChildren);\n }\n }\n\n /**\n * Performs a depth first fold on this tree. Transforms a tree into a single\n * value, given a function that operates on the path to a node, an optional\n * current value, and a map of child names to folded subtrees\n */\n fold<V>(fn: (path: Path, value: T, children: { [k: string]: V }) => V): V {\n return this.fold_(newEmptyPath(), fn);\n }\n\n /**\n * Recursive helper for public-facing fold() method\n */\n private fold_<V>(\n pathSoFar: Path,\n fn: (path: Path, value: T | null, children: { [k: string]: V }) => V\n ): V {\n const accum: { [k: string]: V } = {};\n this.children.inorderTraversal(\n (childKey: string, childTree: ImmutableTree<T>) => {\n accum[childKey] = childTree.fold_(pathChild(pathSoFar, childKey), fn);\n }\n );\n return fn(pathSoFar, this.value, accum);\n }\n\n /**\n * Find the first matching value on the given path. Return the result of applying f to it.\n */\n findOnPath<V>(path: Path, f: (path: Path, value: T) => V | null): V | null {\n return this.findOnPath_(path, newEmptyPath(), f);\n }\n\n private findOnPath_<V>(\n pathToFollow: Path,\n pathSoFar: Path,\n f: (path: Path, value: T) => V | null\n ): V | null {\n const result = this.value ? f(pathSoFar, this.value) : false;\n if (result) {\n return result;\n } else {\n if (pathIsEmpty(pathToFollow)) {\n return null;\n } else {\n const front = pathGetFront(pathToFollow)!;\n const nextChild = this.children.get(front);\n if (nextChild) {\n return nextChild.findOnPath_(\n pathPopFront(pathToFollow),\n pathChild(pathSoFar, front),\n f\n );\n } else {\n return null;\n }\n }\n }\n }\n\n foreachOnPath(\n path: Path,\n f: (path: Path, value: T) => void\n ): ImmutableTree<T> {\n return this.foreachOnPath_(path, newEmptyPath(), f);\n }\n\n private foreachOnPath_(\n pathToFollow: Path,\n currentRelativePath: Path,\n f: (path: Path, value: T) => void\n ): ImmutableTree<T> {\n if (pathIsEmpty(pathToFollow)) {\n return this;\n } else {\n if (this.value) {\n f(currentRelativePath, this.value);\n }\n const front = pathGetFront(pathToFollow);\n const nextChild = this.children.get(front);\n if (nextChild) {\n return nextChild.foreachOnPath_(\n pathPopFront(pathToFollow),\n pathChild(currentRelativePath, front),\n f\n );\n } else {\n return new ImmutableTree<T>(null);\n }\n }\n }\n\n /**\n * Calls the given function for each node in the tree that has a value.\n *\n * @param f - A function to be called with the path from the root of the tree to\n * a node, and the value at that node. Called in depth-first order.\n */\n foreach(f: (path: Path, value: T) => void) {\n this.foreach_(newEmptyPath(), f);\n }\n\n private foreach_(\n currentRelativePath: Path,\n f: (path: Path, value: T) => void\n ) {\n this.children.inorderTraversal((childName, childTree) => {\n childTree.foreach_(pathChild(currentRelativePath, childName), f);\n });\n if (this.value) {\n f(currentRelativePath, this.value);\n }\n }\n\n foreachChild(f: (name: string, value: T) => void) {\n this.children.inorderTraversal(\n (childName: string, childTree: ImmutableTree<T>) => {\n if (childTree.value) {\n f(childName, childTree.value);\n }\n }\n );\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert } from '@firebase/util';\n\nimport { ChildrenNode } from './snap/ChildrenNode';\nimport { PRIORITY_INDEX } from './snap/indexes/PriorityIndex';\nimport { NamedNode, Node } from './snap/Node';\nimport { ImmutableTree } from './util/ImmutableTree';\nimport {\n newEmptyPath,\n newRelativePath,\n Path,\n pathChild,\n pathIsEmpty\n} from './util/Path';\nimport { each } from './util/util';\n\n/**\n * This class holds a collection of writes that can be applied to nodes in unison. It abstracts away the logic with\n * dealing with priority writes and multiple nested writes. At any given path there is only allowed to be one write\n * modifying that path. Any write to an existing path or shadowing an existing path will modify that existing write\n * to reflect the write added.\n */\nexport class CompoundWrite {\n constructor(public writeTree_: ImmutableTree<Node>) {}\n\n static empty(): CompoundWrite {\n return new CompoundWrite(new ImmutableTree(null));\n }\n}\n\nexport function compoundWriteAddWrite(\n compoundWrite: CompoundWrite,\n path: Path,\n node: Node\n): CompoundWrite {\n if (pathIsEmpty(path)) {\n return new CompoundWrite(new ImmutableTree(node));\n } else {\n const rootmost = compoundWrite.writeTree_.findRootMostValueAndPath(path);\n if (rootmost != null) {\n const rootMostPath = rootmost.path;\n let value = rootmost.value;\n const relativePath = newRelativePath(rootMostPath, path);\n value = value.updateChild(relativePath, node);\n return new CompoundWrite(\n compoundWrite.writeTree_.set(rootMostPath, value)\n );\n } else {\n const subtree = new ImmutableTree(node);\n const newWriteTree = compoundWrite.writeTree_.setTree(path, subtree);\n return new CompoundWrite(newWriteTree);\n }\n }\n}\n\nexport function compoundWriteAddWrites(\n compoundWrite: CompoundWrite,\n path: Path,\n updates: { [name: string]: Node }\n): CompoundWrite {\n let newWrite = compoundWrite;\n each(updates, (childKey: string, node: Node) => {\n newWrite = compoundWriteAddWrite(newWrite, pathChild(path, childKey), node);\n });\n return newWrite;\n}\n\n/**\n * Will remove a write at the given path and deeper paths. This will <em>not</em> modify a write at a higher\n * location, which must be removed by calling this method with that path.\n *\n * @param compoundWrite - The CompoundWrite to remove.\n * @param path - The path at which a write and all deeper writes should be removed\n * @returns The new CompoundWrite with the removed path\n */\nexport function compoundWriteRemoveWrite(\n compoundWrite: CompoundWrite,\n path: Path\n): CompoundWrite {\n if (pathIsEmpty(path)) {\n return CompoundWrite.empty();\n } else {\n const newWriteTree = compoundWrite.writeTree_.setTree(\n path,\n new ImmutableTree<Node>(null)\n );\n return new CompoundWrite(newWriteTree);\n }\n}\n\n/**\n * Returns whether this CompoundWrite will fully overwrite a node at a given location and can therefore be\n * considered \"complete\".\n *\n * @param compoundWrite - The CompoundWrite to check.\n * @param path - The path to check for\n * @returns Whether there is a complete write at that path\n */\nexport function compoundWriteHasCompleteWrite(\n compoundWrite: CompoundWrite,\n path: Path\n): boolean {\n return compoundWriteGetCompleteNode(compoundWrite, path) != null;\n}\n\n/**\n * Returns a node for a path if and only if the node is a \"complete\" overwrite at that path. This will not aggregate\n * writes from deeper paths, but will return child nodes from a more shallow path.\n *\n * @param compoundWrite - The CompoundWrite to get the node from.\n * @param path - The path to get a complete write\n * @returns The node if complete at that path, or null otherwise.\n */\nexport function compoundWriteGetCompleteNode(\n compoundWrite: CompoundWrite,\n path: Path\n): Node | null {\n const rootmost = compoundWrite.writeTree_.findRootMostValueAndPath(path);\n if (rootmost != null) {\n return compoundWrite.writeTree_\n .get(rootmost.path)\n .getChild(newRelativePath(rootmost.path, path));\n } else {\n return null;\n }\n}\n\n/**\n * Returns all children that are guaranteed to be a complete overwrite.\n *\n * @param compoundWrite - The CompoundWrite to get children from.\n * @returns A list of all complete children.\n */\nexport function compoundWriteGetCompleteChildren(\n compoundWrite: CompoundWrite\n): NamedNode[] {\n const children: NamedNode[] = [];\n const node = compoundWrite.writeTree_.value;\n if (node != null) {\n // If it's a leaf node, it has no children; so nothing to do.\n if (!node.isLeafNode()) {\n (node as ChildrenNode).forEachChild(\n PRIORITY_INDEX,\n (childName, childNode) => {\n children.push(new NamedNode(childName, childNode));\n }\n );\n }\n } else {\n compoundWrite.writeTree_.children.inorderTraversal(\n (childName, childTree) => {\n if (childTree.value != null) {\n children.push(new NamedNode(childName, childTree.value));\n }\n }\n );\n }\n return children;\n}\n\nexport function compoundWriteChildCompoundWrite(\n compoundWrite: CompoundWrite,\n path: Path\n): CompoundWrite {\n if (pathIsEmpty(path)) {\n return compoundWrite;\n } else {\n const shadowingNode = compoundWriteGetCompleteNode(compoundWrite, path);\n if (shadowingNode != null) {\n return new CompoundWrite(new ImmutableTree(shadowingNode));\n } else {\n return new CompoundWrite(compoundWrite.writeTree_.subtree(path));\n }\n }\n}\n\n/**\n * Returns true if this CompoundWrite is empty and therefore does not modify any nodes.\n * @returns Whether this CompoundWrite is empty\n */\nexport function compoundWriteIsEmpty(compoundWrite: CompoundWrite): boolean {\n return compoundWrite.writeTree_.isEmpty();\n}\n\n/**\n * Applies this CompoundWrite to a node. The node is returned with all writes from this CompoundWrite applied to the\n * node\n * @param node - The node to apply this CompoundWrite to\n * @returns The node with all writes applied\n */\nexport function compoundWriteApply(\n compoundWrite: CompoundWrite,\n node: Node\n): Node {\n return applySubtreeWrite(newEmptyPath(), compoundWrite.writeTree_, node);\n}\n\nfunction applySubtreeWrite(\n relativePath: Path,\n writeTree: ImmutableTree<Node>,\n node: Node\n): Node {\n if (writeTree.value != null) {\n // Since there a write is always a leaf, we're done here\n return node.updateChild(relativePath, writeTree.value);\n } else {\n let priorityWrite = null;\n writeTree.children.inorderTraversal((childKey, childTree) => {\n if (childKey === '.priority') {\n // Apply priorities at the end so we don't update priorities for either empty nodes or forget\n // to apply priorities to empty nodes that are later filled\n assert(\n childTree.value !== null,\n 'Priority writes must always be leaf nodes'\n );\n priorityWrite = childTree.value;\n } else {\n node = applySubtreeWrite(\n pathChild(relativePath, childKey),\n childTree,\n node\n );\n }\n });\n // If there was a priority write, we only apply it if the node is not empty\n if (!node.getChild(relativePath).isEmpty() && priorityWrite !== null) {\n node = node.updateChild(\n pathChild(relativePath, '.priority'),\n priorityWrite\n );\n }\n return node;\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert, assertionError, safeGet } from '@firebase/util';\n\nimport {\n CompoundWrite,\n compoundWriteAddWrite,\n compoundWriteAddWrites,\n compoundWriteApply,\n compoundWriteChildCompoundWrite,\n compoundWriteGetCompleteChildren,\n compoundWriteGetCompleteNode,\n compoundWriteHasCompleteWrite,\n compoundWriteIsEmpty,\n compoundWriteRemoveWrite\n} from './CompoundWrite';\nimport { ChildrenNode } from './snap/ChildrenNode';\nimport { Index } from './snap/indexes/Index';\nimport { PRIORITY_INDEX } from './snap/indexes/PriorityIndex';\nimport { NamedNode, Node } from './snap/Node';\nimport {\n newEmptyPath,\n newRelativePath,\n Path,\n pathChild,\n pathContains,\n pathGetFront,\n pathIsEmpty,\n pathPopFront\n} from './util/Path';\nimport { each } from './util/util';\nimport { CacheNode } from './view/CacheNode';\n\n/**\n * Defines a single user-initiated write operation. May be the result of a set(), transaction(), or update() call. In\n * the case of a set() or transaction, snap wil be non-null. In the case of an update(), children will be non-null.\n */\nexport interface WriteRecord {\n writeId: number;\n path: Path;\n snap?: Node | null;\n children?: { [k: string]: Node } | null;\n visible: boolean;\n}\n\n/**\n * Create a new WriteTreeRef for the given path. For use with a new sync point at the given path.\n *\n */\nexport function writeTreeChildWrites(\n writeTree: WriteTree,\n path: Path\n): WriteTreeRef {\n return newWriteTreeRef(path, writeTree);\n}\n\n/**\n * Record a new overwrite from user code.\n *\n * @param visible - This is set to false by some transactions. It should be excluded from event caches\n */\nexport function writeTreeAddOverwrite(\n writeTree: WriteTree,\n path: Path,\n snap: Node,\n writeId: number,\n visible?: boolean\n) {\n assert(\n writeId > writeTree.lastWriteId,\n 'Stacking an older write on top of newer ones'\n );\n if (visible === undefined) {\n visible = true;\n }\n writeTree.allWrites.push({\n path,\n snap,\n writeId,\n visible\n });\n\n if (visible) {\n writeTree.visibleWrites = compoundWriteAddWrite(\n writeTree.visibleWrites,\n path,\n snap\n );\n }\n writeTree.lastWriteId = writeId;\n}\n\n/**\n * Record a new merge from user code.\n */\nexport function writeTreeAddMerge(\n writeTree: WriteTree,\n path: Path,\n changedChildren: { [k: string]: Node },\n writeId: number\n) {\n assert(\n writeId > writeTree.lastWriteId,\n 'Stacking an older merge on top of newer ones'\n );\n writeTree.allWrites.push({\n path,\n children: changedChildren,\n writeId,\n visible: true\n });\n\n writeTree.visibleWrites = compoundWriteAddWrites(\n writeTree.visibleWrites,\n path,\n changedChildren\n );\n writeTree.lastWriteId = writeId;\n}\n\nexport function writeTreeGetWrite(\n writeTree: WriteTree,\n writeId: number\n): WriteRecord | null {\n for (let i = 0; i < writeTree.allWrites.length; i++) {\n const record = writeTree.allWrites[i];\n if (record.writeId === writeId) {\n return record;\n }\n }\n return null;\n}\n\n/**\n * Remove a write (either an overwrite or merge) that has been successfully acknowledge by the server. Recalculates\n * the tree if necessary. We return true if it may have been visible, meaning views need to reevaluate.\n *\n * @returns true if the write may have been visible (meaning we'll need to reevaluate / raise\n * events as a result).\n */\nexport function writeTreeRemoveWrite(\n writeTree: WriteTree,\n writeId: number\n): boolean {\n // Note: disabling this check. It could be a transaction that preempted another transaction, and thus was applied\n // out of order.\n //const validClear = revert || this.allWrites_.length === 0 || writeId <= this.allWrites_[0].writeId;\n //assert(validClear, \"Either we don't have this write, or it's the first one in the queue\");\n\n const idx = writeTree.allWrites.findIndex(s => {\n return s.writeId === writeId;\n });\n assert(idx >= 0, 'removeWrite called with nonexistent writeId.');\n const writeToRemove = writeTree.allWrites[idx];\n writeTree.allWrites.splice(idx, 1);\n\n let removedWriteWasVisible = writeToRemove.visible;\n let removedWriteOverlapsWithOtherWrites = false;\n\n let i = writeTree.allWrites.length - 1;\n\n while (removedWriteWasVisible && i >= 0) {\n const currentWrite = writeTree.allWrites[i];\n if (currentWrite.visible) {\n if (\n i >= idx &&\n writeTreeRecordContainsPath_(currentWrite, writeToRemove.path)\n ) {\n // The removed write was completely shadowed by a subsequent write.\n removedWriteWasVisible = false;\n } else if (pathContains(writeToRemove.path, currentWrite.path)) {\n // Either we're covering some writes or they're covering part of us (depending on which came first).\n removedWriteOverlapsWithOtherWrites = true;\n }\n }\n i--;\n }\n\n if (!removedWriteWasVisible) {\n return false;\n } else if (removedWriteOverlapsWithOtherWrites) {\n // There's some shadowing going on. Just rebuild the visible writes from scratch.\n writeTreeResetTree_(writeTree);\n return true;\n } else {\n // There's no shadowing. We can safely just remove the write(s) from visibleWrites.\n if (writeToRemove.snap) {\n writeTree.visibleWrites = compoundWriteRemoveWrite(\n writeTree.visibleWrites,\n writeToRemove.path\n );\n } else {\n const children = writeToRemove.children;\n each(children, (childName: string) => {\n writeTree.visibleWrites = compoundWriteRemoveWrite(\n writeTree.visibleWrites,\n pathChild(writeToRemove.path, childName)\n );\n });\n }\n return true;\n }\n}\n\nfunction writeTreeRecordContainsPath_(\n writeRecord: WriteRecord,\n path: Path\n): boolean {\n if (writeRecord.snap) {\n return pathContains(writeRecord.path, path);\n } else {\n for (const childName in writeRecord.children) {\n if (\n writeRecord.children.hasOwnProperty(childName) &&\n pathContains(pathChild(writeRecord.path, childName), path)\n ) {\n return true;\n }\n }\n return false;\n }\n}\n\n/**\n * Re-layer the writes and merges into a tree so we can efficiently calculate event snapshots\n */\nfunction writeTreeResetTree_(writeTree: WriteTree) {\n writeTree.visibleWrites = writeTreeLayerTree_(\n writeTree.allWrites,\n writeTreeDefaultFilter_,\n newEmptyPath()\n );\n if (writeTree.allWrites.length > 0) {\n writeTree.lastWriteId =\n writeTree.allWrites[writeTree.allWrites.length - 1].writeId;\n } else {\n writeTree.lastWriteId = -1;\n }\n}\n\n/**\n * The default filter used when constructing the tree. Keep everything that's visible.\n */\nfunction writeTreeDefaultFilter_(write: WriteRecord) {\n return write.visible;\n}\n\n/**\n * Static method. Given an array of WriteRecords, a filter for which ones to include, and a path, construct the tree of\n * event data at that path.\n */\nfunction writeTreeLayerTree_(\n writes: WriteRecord[],\n filter: (w: WriteRecord) => boolean,\n treeRoot: Path\n): CompoundWrite {\n let compoundWrite = CompoundWrite.empty();\n for (let i = 0; i < writes.length; ++i) {\n const write = writes[i];\n // Theory, a later set will either:\n // a) abort a relevant transaction, so no need to worry about excluding it from calculating that transaction\n // b) not be relevant to a transaction (separate branch), so again will not affect the data for that transaction\n if (filter(write)) {\n const writePath = write.path;\n let relativePath: Path;\n if (write.snap) {\n if (pathContains(treeRoot, writePath)) {\n relativePath = newRelativePath(treeRoot, writePath);\n compoundWrite = compoundWriteAddWrite(\n compoundWrite,\n relativePath,\n write.snap\n );\n } else if (pathContains(writePath, treeRoot)) {\n relativePath = newRelativePath(writePath, treeRoot);\n compoundWrite = compoundWriteAddWrite(\n compoundWrite,\n newEmptyPath(),\n write.snap.getChild(relativePath)\n );\n } else {\n // There is no overlap between root path and write path, ignore write\n }\n } else if (write.children) {\n if (pathContains(treeRoot, writePath)) {\n relativePath = newRelativePath(treeRoot, writePath);\n compoundWrite = compoundWriteAddWrites(\n compoundWrite,\n relativePath,\n write.children\n );\n } else if (pathContains(writePath, treeRoot)) {\n relativePath = newRelativePath(writePath, treeRoot);\n if (pathIsEmpty(relativePath)) {\n compoundWrite = compoundWriteAddWrites(\n compoundWrite,\n newEmptyPath(),\n write.children\n );\n } else {\n const child = safeGet(write.children, pathGetFront(relativePath));\n if (child) {\n // There exists a child in this node that matches the root path\n const deepNode = child.getChild(pathPopFront(relativePath));\n compoundWrite = compoundWriteAddWrite(\n compoundWrite,\n newEmptyPath(),\n deepNode\n );\n }\n }\n } else {\n // There is no overlap between root path and write path, ignore write\n }\n } else {\n throw assertionError('WriteRecord should have .snap or .children');\n }\n }\n }\n return compoundWrite;\n}\n\n/**\n * Return a complete snapshot for the given path if there's visible write data at that path, else null.\n * No server data is considered.\n *\n */\nexport function writeTreeGetCompleteWriteData(\n writeTree: WriteTree,\n path: Path\n): Node | null {\n return compoundWriteGetCompleteNode(writeTree.visibleWrites, path);\n}\n\n/**\n * Given optional, underlying server data, and an optional set of constraints (exclude some sets, include hidden\n * writes), attempt to calculate a complete snapshot for the given path\n *\n * @param writeIdsToExclude - An optional set to be excluded\n * @param includeHiddenWrites - Defaults to false, whether or not to layer on writes with visible set to false\n */\nexport function writeTreeCalcCompleteEventCache(\n writeTree: WriteTree,\n treePath: Path,\n completeServerCache: Node | null,\n writeIdsToExclude?: number[],\n includeHiddenWrites?: boolean\n): Node | null {\n if (!writeIdsToExclude && !includeHiddenWrites) {\n const shadowingNode = compoundWriteGetCompleteNode(\n writeTree.visibleWrites,\n treePath\n );\n if (shadowingNode != null) {\n return shadowingNode;\n } else {\n const subMerge = compoundWriteChildCompoundWrite(\n writeTree.visibleWrites,\n treePath\n );\n if (compoundWriteIsEmpty(subMerge)) {\n return completeServerCache;\n } else if (\n completeServerCache == null &&\n !compoundWriteHasCompleteWrite(subMerge, newEmptyPath())\n ) {\n // We wouldn't have a complete snapshot, since there's no underlying data and no complete shadow\n return null;\n } else {\n const layeredCache = completeServerCache || ChildrenNode.EMPTY_NODE;\n return compoundWriteApply(subMerge, layeredCache);\n }\n }\n } else {\n const merge = compoundWriteChildCompoundWrite(\n writeTree.visibleWrites,\n treePath\n );\n if (!includeHiddenWrites && compoundWriteIsEmpty(merge)) {\n return completeServerCache;\n } else {\n // If the server cache is null, and we don't have a complete cache, we need to return null\n if (\n !includeHiddenWrites &&\n completeServerCache == null &&\n !compoundWriteHasCompleteWrite(merge, newEmptyPath())\n ) {\n return null;\n } else {\n const filter = function (write: WriteRecord) {\n return (\n (write.visible || includeHiddenWrites) &&\n (!writeIdsToExclude ||\n !~writeIdsToExclude.indexOf(write.writeId)) &&\n (pathContains(write.path, treePath) ||\n pathContains(treePath, write.path))\n );\n };\n const mergeAtPath = writeTreeLayerTree_(\n writeTree.allWrites,\n filter,\n treePath\n );\n const layeredCache = completeServerCache || ChildrenNode.EMPTY_NODE;\n return compoundWriteApply(mergeAtPath, layeredCache);\n }\n }\n }\n}\n\n/**\n * With optional, underlying server data, attempt to return a children node of children that we have complete data for.\n * Used when creating new views, to pre-fill their complete event children snapshot.\n */\nexport function writeTreeCalcCompleteEventChildren(\n writeTree: WriteTree,\n treePath: Path,\n completeServerChildren: ChildrenNode | null\n) {\n let completeChildren = ChildrenNode.EMPTY_NODE as Node;\n const topLevelSet = compoundWriteGetCompleteNode(\n writeTree.visibleWrites,\n treePath\n );\n if (topLevelSet) {\n if (!topLevelSet.isLeafNode()) {\n // we're shadowing everything. Return the children.\n topLevelSet.forEachChild(PRIORITY_INDEX, (childName, childSnap) => {\n completeChildren = completeChildren.updateImmediateChild(\n childName,\n childSnap\n );\n });\n }\n return completeChildren;\n } else if (completeServerChildren) {\n // Layer any children we have on top of this\n // We know we don't have a top-level set, so just enumerate existing children\n const merge = compoundWriteChildCompoundWrite(\n writeTree.visibleWrites,\n treePath\n );\n completeServerChildren.forEachChild(\n PRIORITY_INDEX,\n (childName, childNode) => {\n const node = compoundWriteApply(\n compoundWriteChildCompoundWrite(merge, new Path(childName)),\n childNode\n );\n completeChildren = completeChildren.updateImmediateChild(\n childName,\n node\n );\n }\n );\n // Add any complete children we have from the set\n compoundWriteGetCompleteChildren(merge).forEach(namedNode => {\n completeChildren = completeChildren.updateImmediateChild(\n namedNode.name,\n namedNode.node\n );\n });\n return completeChildren;\n } else {\n // We don't have anything to layer on top of. Layer on any children we have\n // Note that we can return an empty snap if we have a defined delete\n const merge = compoundWriteChildCompoundWrite(\n writeTree.visibleWrites,\n treePath\n );\n compoundWriteGetCompleteChildren(merge).forEach(namedNode => {\n completeChildren = completeChildren.updateImmediateChild(\n namedNode.name,\n namedNode.node\n );\n });\n return completeChildren;\n }\n}\n\n/**\n * Given that the underlying server data has updated, determine what, if anything, needs to be\n * applied to the event cache.\n *\n * Possibilities:\n *\n * 1. No writes are shadowing. Events should be raised, the snap to be applied comes from the server data\n *\n * 2. Some write is completely shadowing. No events to be raised\n *\n * 3. Is partially shadowed. Events\n *\n * Either existingEventSnap or existingServerSnap must exist\n */\nexport function writeTreeCalcEventCacheAfterServerOverwrite(\n writeTree: WriteTree,\n treePath: Path,\n childPath: Path,\n existingEventSnap: Node | null,\n existingServerSnap: Node | null\n): Node | null {\n assert(\n existingEventSnap || existingServerSnap,\n 'Either existingEventSnap or existingServerSnap must exist'\n );\n const path = pathChild(treePath, childPath);\n if (compoundWriteHasCompleteWrite(writeTree.visibleWrites, path)) {\n // At this point we can probably guarantee that we're in case 2, meaning no events\n // May need to check visibility while doing the findRootMostValueAndPath call\n return null;\n } else {\n // No complete shadowing. We're either partially shadowing or not shadowing at all.\n const childMerge = compoundWriteChildCompoundWrite(\n writeTree.visibleWrites,\n path\n );\n if (compoundWriteIsEmpty(childMerge)) {\n // We're not shadowing at all. Case 1\n return existingServerSnap.getChild(childPath);\n } else {\n // This could be more efficient if the serverNode + updates doesn't change the eventSnap\n // However this is tricky to find out, since user updates don't necessary change the server\n // snap, e.g. priority updates on empty nodes, or deep deletes. Another special case is if the server\n // adds nodes, but doesn't change any existing writes. It is therefore not enough to\n // only check if the updates change the serverNode.\n // Maybe check if the merge tree contains these special cases and only do a full overwrite in that case?\n return compoundWriteApply(\n childMerge,\n existingServerSnap.getChild(childPath)\n );\n }\n }\n}\n\n/**\n * Returns a complete child for a given server snap after applying all user writes or null if there is no\n * complete child for this ChildKey.\n */\nexport function writeTreeCalcCompleteChild(\n writeTree: WriteTree,\n treePath: Path,\n childKey: string,\n existingServerSnap: CacheNode\n): Node | null {\n const path = pathChild(treePath, childKey);\n const shadowingNode = compoundWriteGetCompleteNode(\n writeTree.visibleWrites,\n path\n );\n if (shadowingNode != null) {\n return shadowingNode;\n } else {\n if (existingServerSnap.isCompleteForChild(childKey)) {\n const childMerge = compoundWriteChildCompoundWrite(\n writeTree.visibleWrites,\n path\n );\n return compoundWriteApply(\n childMerge,\n existingServerSnap.getNode().getImmediateChild(childKey)\n );\n } else {\n return null;\n }\n }\n}\n\n/**\n * Returns a node if there is a complete overwrite for this path. More specifically, if there is a write at\n * a higher path, this will return the child of that write relative to the write and this path.\n * Returns null if there is no write at this path.\n */\nexport function writeTreeShadowingWrite(\n writeTree: WriteTree,\n path: Path\n): Node | null {\n return compoundWriteGetCompleteNode(writeTree.visibleWrites, path);\n}\n\n/**\n * This method is used when processing child remove events on a query. If we can, we pull in children that were outside\n * the window, but may now be in the window.\n */\nexport function writeTreeCalcIndexedSlice(\n writeTree: WriteTree,\n treePath: Path,\n completeServerData: Node | null,\n startPost: NamedNode,\n count: number,\n reverse: boolean,\n index: Index\n): NamedNode[] {\n let toIterate: Node;\n const merge = compoundWriteChildCompoundWrite(\n writeTree.visibleWrites,\n treePath\n );\n const shadowingNode = compoundWriteGetCompleteNode(merge, newEmptyPath());\n if (shadowingNode != null) {\n toIterate = shadowingNode;\n } else if (completeServerData != null) {\n toIterate = compoundWriteApply(merge, completeServerData);\n } else {\n // no children to iterate on\n return [];\n }\n toIterate = toIterate.withIndex(index);\n if (!toIterate.isEmpty() && !toIterate.isLeafNode()) {\n const nodes = [];\n const cmp = index.getCompare();\n const iter = reverse\n ? (toIterate as ChildrenNode).getReverseIteratorFrom(startPost, index)\n : (toIterate as ChildrenNode).getIteratorFrom(startPost, index);\n let next = iter.getNext();\n while (next && nodes.length < count) {\n if (cmp(next, startPost) !== 0) {\n nodes.push(next);\n }\n next = iter.getNext();\n }\n return nodes;\n } else {\n return [];\n }\n}\n\nexport function newWriteTree(): WriteTree {\n return {\n visibleWrites: CompoundWrite.empty(),\n allWrites: [],\n lastWriteId: -1\n };\n}\n\n/**\n * WriteTree tracks all pending user-initiated writes and has methods to calculate the result of merging them\n * with underlying server data (to create \"event cache\" data). Pending writes are added with addOverwrite()\n * and addMerge(), and removed with removeWrite().\n */\nexport interface WriteTree {\n /**\n * A tree tracking the result of applying all visible writes. This does not include transactions with\n * applyLocally=false or writes that are completely shadowed by other writes.\n */\n visibleWrites: CompoundWrite;\n\n /**\n * A list of all pending writes, regardless of visibility and shadowed-ness. Used to calculate arbitrary\n * sets of the changed data, such as hidden writes (from transactions) or changes with certain writes excluded (also\n * used by transactions).\n */\n allWrites: WriteRecord[];\n\n lastWriteId: number;\n}\n\n/**\n * If possible, returns a complete event cache, using the underlying server data if possible. In addition, can be used\n * to get a cache that includes hidden writes, and excludes arbitrary writes. Note that customizing the returned node\n * can lead to a more expensive calculation.\n *\n * @param writeIdsToExclude - Optional writes to exclude.\n * @param includeHiddenWrites - Defaults to false, whether or not to layer on writes with visible set to false\n */\nexport function writeTreeRefCalcCompleteEventCache(\n writeTreeRef: WriteTreeRef,\n completeServerCache: Node | null,\n writeIdsToExclude?: number[],\n includeHiddenWrites?: boolean\n): Node | null {\n return writeTreeCalcCompleteEventCache(\n writeTreeRef.writeTree,\n writeTreeRef.treePath,\n completeServerCache,\n writeIdsToExclude,\n includeHiddenWrites\n );\n}\n\n/**\n * If possible, returns a children node containing all of the complete children we have data for. The returned data is a\n * mix of the given server data and write data.\n *\n */\nexport function writeTreeRefCalcCompleteEventChildren(\n writeTreeRef: WriteTreeRef,\n completeServerChildren: ChildrenNode | null\n): ChildrenNode {\n return writeTreeCalcCompleteEventChildren(\n writeTreeRef.writeTree,\n writeTreeRef.treePath,\n completeServerChildren\n ) as ChildrenNode;\n}\n\n/**\n * Given that either the underlying server data has updated or the outstanding writes have updated, determine what,\n * if anything, needs to be applied to the event cache.\n *\n * Possibilities:\n *\n * 1. No writes are shadowing. Events should be raised, the snap to be applied comes from the server data\n *\n * 2. Some write is completely shadowing. No events to be raised\n *\n * 3. Is partially shadowed. Events should be raised\n *\n * Either existingEventSnap or existingServerSnap must exist, this is validated via an assert\n *\n *\n */\nexport function writeTreeRefCalcEventCacheAfterServerOverwrite(\n writeTreeRef: WriteTreeRef,\n path: Path,\n existingEventSnap: Node | null,\n existingServerSnap: Node | null\n): Node | null {\n return writeTreeCalcEventCacheAfterServerOverwrite(\n writeTreeRef.writeTree,\n writeTreeRef.treePath,\n path,\n existingEventSnap,\n existingServerSnap\n );\n}\n\n/**\n * Returns a node if there is a complete overwrite for this path. More specifically, if there is a write at\n * a higher path, this will return the child of that write relative to the write and this path.\n * Returns null if there is no write at this path.\n *\n */\nexport function writeTreeRefShadowingWrite(\n writeTreeRef: WriteTreeRef,\n path: Path\n): Node | null {\n return writeTreeShadowingWrite(\n writeTreeRef.writeTree,\n pathChild(writeTreeRef.treePath, path)\n );\n}\n\n/**\n * This method is used when processing child remove events on a query. If we can, we pull in children that were outside\n * the window, but may now be in the window\n */\nexport function writeTreeRefCalcIndexedSlice(\n writeTreeRef: WriteTreeRef,\n completeServerData: Node | null,\n startPost: NamedNode,\n count: number,\n reverse: boolean,\n index: Index\n): NamedNode[] {\n return writeTreeCalcIndexedSlice(\n writeTreeRef.writeTree,\n writeTreeRef.treePath,\n completeServerData,\n startPost,\n count,\n reverse,\n index\n );\n}\n\n/**\n * Returns a complete child for a given server snap after applying all user writes or null if there is no\n * complete child for this ChildKey.\n */\nexport function writeTreeRefCalcCompleteChild(\n writeTreeRef: WriteTreeRef,\n childKey: string,\n existingServerCache: CacheNode\n): Node | null {\n return writeTreeCalcCompleteChild(\n writeTreeRef.writeTree,\n writeTreeRef.treePath,\n childKey,\n existingServerCache\n );\n}\n\n/**\n * Return a WriteTreeRef for a child.\n */\nexport function writeTreeRefChild(\n writeTreeRef: WriteTreeRef,\n childName: string\n): WriteTreeRef {\n return newWriteTreeRef(\n pathChild(writeTreeRef.treePath, childName),\n writeTreeRef.writeTree\n );\n}\n\nexport function newWriteTreeRef(\n path: Path,\n writeTree: WriteTree\n): WriteTreeRef {\n return {\n treePath: path,\n writeTree\n };\n}\n\n/**\n * A WriteTreeRef wraps a WriteTree and a path, for convenient access to a particular subtree. All of the methods\n * just proxy to the underlying WriteTree.\n *\n */\nexport interface WriteTreeRef {\n /**\n * The path to this particular write tree ref. Used for calling methods on writeTree_ while exposing a simpler\n * interface to callers.\n */\n readonly treePath: Path;\n\n /**\n * * A reference to the actual tree of write data. All methods are pass-through to the tree, but with the appropriate\n * path prefixed.\n *\n * This lets us make cheap references to points in the tree for sync points without having to copy and maintain all of\n * the data.\n */\n readonly writeTree: WriteTree;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert, assertionError } from '@firebase/util';\n\nimport {\n Change,\n ChangeType,\n changeChildAdded,\n changeChildChanged,\n changeChildRemoved\n} from './Change';\n\nexport class ChildChangeAccumulator {\n private readonly changeMap: Map<string, Change> = new Map();\n\n trackChildChange(change: Change) {\n const type = change.type;\n const childKey = change.childName!;\n assert(\n type === ChangeType.CHILD_ADDED ||\n type === ChangeType.CHILD_CHANGED ||\n type === ChangeType.CHILD_REMOVED,\n 'Only child changes supported for tracking'\n );\n assert(\n childKey !== '.priority',\n 'Only non-priority child changes can be tracked.'\n );\n const oldChange = this.changeMap.get(childKey);\n if (oldChange) {\n const oldType = oldChange.type;\n if (\n type === ChangeType.CHILD_ADDED &&\n oldType === ChangeType.CHILD_REMOVED\n ) {\n this.changeMap.set(\n childKey,\n changeChildChanged(\n childKey,\n change.snapshotNode,\n oldChange.snapshotNode\n )\n );\n } else if (\n type === ChangeType.CHILD_REMOVED &&\n oldType === ChangeType.CHILD_ADDED\n ) {\n this.changeMap.delete(childKey);\n } else if (\n type === ChangeType.CHILD_REMOVED &&\n oldType === ChangeType.CHILD_CHANGED\n ) {\n this.changeMap.set(\n childKey,\n changeChildRemoved(childKey, oldChange.oldSnap)\n );\n } else if (\n type === ChangeType.CHILD_CHANGED &&\n oldType === ChangeType.CHILD_ADDED\n ) {\n this.changeMap.set(\n childKey,\n changeChildAdded(childKey, change.snapshotNode)\n );\n } else if (\n type === ChangeType.CHILD_CHANGED &&\n oldType === ChangeType.CHILD_CHANGED\n ) {\n this.changeMap.set(\n childKey,\n changeChildChanged(childKey, change.snapshotNode, oldChange.oldSnap)\n );\n } else {\n throw assertionError(\n 'Illegal combination of changes: ' +\n change +\n ' occurred after ' +\n oldChange\n );\n }\n } else {\n this.changeMap.set(childKey, change);\n }\n }\n\n getChanges(): Change[] {\n return Array.from(this.changeMap.values());\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Index } from '../snap/indexes/Index';\nimport { NamedNode, Node } from '../snap/Node';\nimport {\n WriteTreeRef,\n writeTreeRefCalcCompleteChild,\n writeTreeRefCalcIndexedSlice\n} from '../WriteTree';\n\nimport { CacheNode } from './CacheNode';\nimport { ViewCache, viewCacheGetCompleteServerSnap } from './ViewCache';\n\n/**\n * Since updates to filtered nodes might require nodes to be pulled in from \"outside\" the node, this interface\n * can help to get complete children that can be pulled in.\n * A class implementing this interface takes potentially multiple sources (e.g. user writes, server data from\n * other views etc.) to try it's best to get a complete child that might be useful in pulling into the view.\n *\n * @interface\n */\nexport interface CompleteChildSource {\n getCompleteChild(childKey: string): Node | null;\n\n getChildAfterChild(\n index: Index,\n child: NamedNode,\n reverse: boolean\n ): NamedNode | null;\n}\n\n/**\n * An implementation of CompleteChildSource that never returns any additional children\n */\n// eslint-disable-next-line @typescript-eslint/naming-convention\nexport class NoCompleteChildSource_ implements CompleteChildSource {\n getCompleteChild(childKey?: string): Node | null {\n return null;\n }\n getChildAfterChild(\n index?: Index,\n child?: NamedNode,\n reverse?: boolean\n ): NamedNode | null {\n return null;\n }\n}\n\n/**\n * Singleton instance.\n */\nexport const NO_COMPLETE_CHILD_SOURCE = new NoCompleteChildSource_();\n\n/**\n * An implementation of CompleteChildSource that uses a WriteTree in addition to any other server data or\n * old event caches available to calculate complete children.\n */\nexport class WriteTreeCompleteChildSource implements CompleteChildSource {\n constructor(\n private writes_: WriteTreeRef,\n private viewCache_: ViewCache,\n private optCompleteServerCache_: Node | null = null\n ) {}\n getCompleteChild(childKey: string): Node | null {\n const node = this.viewCache_.eventCache;\n if (node.isCompleteForChild(childKey)) {\n return node.getNode().getImmediateChild(childKey);\n } else {\n const serverNode =\n this.optCompleteServerCache_ != null\n ? new CacheNode(this.optCompleteServerCache_, true, false)\n : this.viewCache_.serverCache;\n return writeTreeRefCalcCompleteChild(this.writes_, childKey, serverNode);\n }\n }\n getChildAfterChild(\n index: Index,\n child: NamedNode,\n reverse: boolean\n ): NamedNode | null {\n const completeServerData =\n this.optCompleteServerCache_ != null\n ? this.optCompleteServerCache_\n : viewCacheGetCompleteServerSnap(this.viewCache_);\n const nodes = writeTreeRefCalcIndexedSlice(\n this.writes_,\n completeServerData,\n child,\n 1,\n reverse,\n index\n );\n if (nodes.length === 0) {\n return null;\n } else {\n return nodes[0];\n }\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert, assertionError } from '@firebase/util';\n\nimport { AckUserWrite } from '../operation/AckUserWrite';\nimport { Merge } from '../operation/Merge';\nimport { Operation, OperationType } from '../operation/Operation';\nimport { Overwrite } from '../operation/Overwrite';\nimport { ChildrenNode } from '../snap/ChildrenNode';\nimport { KEY_INDEX } from '../snap/indexes/KeyIndex';\nimport { Node } from '../snap/Node';\nimport { ImmutableTree } from '../util/ImmutableTree';\nimport {\n newEmptyPath,\n Path,\n pathChild,\n pathGetBack,\n pathGetFront,\n pathGetLength,\n pathIsEmpty,\n pathParent,\n pathPopFront\n} from '../util/Path';\nimport {\n WriteTreeRef,\n writeTreeRefCalcCompleteChild,\n writeTreeRefCalcCompleteEventCache,\n writeTreeRefCalcCompleteEventChildren,\n writeTreeRefCalcEventCacheAfterServerOverwrite,\n writeTreeRefShadowingWrite\n} from '../WriteTree';\n\nimport { Change, changeValue } from './Change';\nimport { ChildChangeAccumulator } from './ChildChangeAccumulator';\nimport {\n CompleteChildSource,\n NO_COMPLETE_CHILD_SOURCE,\n WriteTreeCompleteChildSource\n} from './CompleteChildSource';\nimport { NodeFilter } from './filter/NodeFilter';\nimport {\n ViewCache,\n viewCacheGetCompleteEventSnap,\n viewCacheGetCompleteServerSnap,\n viewCacheUpdateEventSnap,\n viewCacheUpdateServerSnap\n} from './ViewCache';\n\nexport interface ProcessorResult {\n readonly viewCache: ViewCache;\n readonly changes: Change[];\n}\n\nexport interface ViewProcessor {\n readonly filter: NodeFilter;\n}\n\nexport function newViewProcessor(filter: NodeFilter): ViewProcessor {\n return { filter };\n}\n\nexport function viewProcessorAssertIndexed(\n viewProcessor: ViewProcessor,\n viewCache: ViewCache\n): void {\n assert(\n viewCache.eventCache.getNode().isIndexed(viewProcessor.filter.getIndex()),\n 'Event snap not indexed'\n );\n assert(\n viewCache.serverCache.getNode().isIndexed(viewProcessor.filter.getIndex()),\n 'Server snap not indexed'\n );\n}\n\nexport function viewProcessorApplyOperation(\n viewProcessor: ViewProcessor,\n oldViewCache: ViewCache,\n operation: Operation,\n writesCache: WriteTreeRef,\n completeCache: Node | null\n): ProcessorResult {\n const accumulator = new ChildChangeAccumulator();\n let newViewCache, filterServerNode;\n if (operation.type === OperationType.OVERWRITE) {\n const overwrite = operation as Overwrite;\n if (overwrite.source.fromUser) {\n newViewCache = viewProcessorApplyUserOverwrite(\n viewProcessor,\n oldViewCache,\n overwrite.path,\n overwrite.snap,\n writesCache,\n completeCache,\n accumulator\n );\n } else {\n assert(overwrite.source.fromServer, 'Unknown source.');\n // We filter the node if it's a tagged update or the node has been previously filtered and the\n // update is not at the root in which case it is ok (and necessary) to mark the node unfiltered\n // again\n filterServerNode =\n overwrite.source.tagged ||\n (oldViewCache.serverCache.isFiltered() && !pathIsEmpty(overwrite.path));\n newViewCache = viewProcessorApplyServerOverwrite(\n viewProcessor,\n oldViewCache,\n overwrite.path,\n overwrite.snap,\n writesCache,\n completeCache,\n filterServerNode,\n accumulator\n );\n }\n } else if (operation.type === OperationType.MERGE) {\n const merge = operation as Merge;\n if (merge.source.fromUser) {\n newViewCache = viewProcessorApplyUserMerge(\n viewProcessor,\n oldViewCache,\n merge.path,\n merge.children,\n writesCache,\n completeCache,\n accumulator\n );\n } else {\n assert(merge.source.fromServer, 'Unknown source.');\n // We filter the node if it's a tagged update or the node has been previously filtered\n filterServerNode =\n merge.source.tagged || oldViewCache.serverCache.isFiltered();\n newViewCache = viewProcessorApplyServerMerge(\n viewProcessor,\n oldViewCache,\n merge.path,\n merge.children,\n writesCache,\n completeCache,\n filterServerNode,\n accumulator\n );\n }\n } else if (operation.type === OperationType.ACK_USER_WRITE) {\n const ackUserWrite = operation as AckUserWrite;\n if (!ackUserWrite.revert) {\n newViewCache = viewProcessorAckUserWrite(\n viewProcessor,\n oldViewCache,\n ackUserWrite.path,\n ackUserWrite.affectedTree,\n writesCache,\n completeCache,\n accumulator\n );\n } else {\n newViewCache = viewProcessorRevertUserWrite(\n viewProcessor,\n oldViewCache,\n ackUserWrite.path,\n writesCache,\n completeCache,\n accumulator\n );\n }\n } else if (operation.type === OperationType.LISTEN_COMPLETE) {\n newViewCache = viewProcessorListenComplete(\n viewProcessor,\n oldViewCache,\n operation.path,\n writesCache,\n accumulator\n );\n } else {\n throw assertionError('Unknown operation type: ' + operation.type);\n }\n const changes = accumulator.getChanges();\n viewProcessorMaybeAddValueEvent(oldViewCache, newViewCache, changes);\n return { viewCache: newViewCache, changes };\n}\n\nfunction viewProcessorMaybeAddValueEvent(\n oldViewCache: ViewCache,\n newViewCache: ViewCache,\n accumulator: Change[]\n): void {\n const eventSnap = newViewCache.eventCache;\n if (eventSnap.isFullyInitialized()) {\n const isLeafOrEmpty =\n eventSnap.getNode().isLeafNode() || eventSnap.getNode().isEmpty();\n const oldCompleteSnap = viewCacheGetCompleteEventSnap(oldViewCache);\n if (\n accumulator.length > 0 ||\n !oldViewCache.eventCache.isFullyInitialized() ||\n (isLeafOrEmpty && !eventSnap.getNode().equals(oldCompleteSnap)) ||\n !eventSnap.getNode().getPriority().equals(oldCompleteSnap.getPriority())\n ) {\n accumulator.push(\n changeValue(viewCacheGetCompleteEventSnap(newViewCache))\n );\n }\n }\n}\n\nfunction viewProcessorGenerateEventCacheAfterServerEvent(\n viewProcessor: ViewProcessor,\n viewCache: ViewCache,\n changePath: Path,\n writesCache: WriteTreeRef,\n source: CompleteChildSource,\n accumulator: ChildChangeAccumulator\n): ViewCache {\n const oldEventSnap = viewCache.eventCache;\n if (writeTreeRefShadowingWrite(writesCache, changePath) != null) {\n // we have a shadowing write, ignore changes\n return viewCache;\n } else {\n let newEventCache, serverNode;\n if (pathIsEmpty(changePath)) {\n // TODO: figure out how this plays with \"sliding ack windows\"\n assert(\n viewCache.serverCache.isFullyInitialized(),\n 'If change path is empty, we must have complete server data'\n );\n if (viewCache.serverCache.isFiltered()) {\n // We need to special case this, because we need to only apply writes to complete children, or\n // we might end up raising events for incomplete children. If the server data is filtered deep\n // writes cannot be guaranteed to be complete\n const serverCache = viewCacheGetCompleteServerSnap(viewCache);\n const completeChildren =\n serverCache instanceof ChildrenNode\n ? serverCache\n : ChildrenNode.EMPTY_NODE;\n const completeEventChildren = writeTreeRefCalcCompleteEventChildren(\n writesCache,\n completeChildren\n );\n newEventCache = viewProcessor.filter.updateFullNode(\n viewCache.eventCache.getNode(),\n completeEventChildren,\n accumulator\n );\n } else {\n const completeNode = writeTreeRefCalcCompleteEventCache(\n writesCache,\n viewCacheGetCompleteServerSnap(viewCache)\n );\n newEventCache = viewProcessor.filter.updateFullNode(\n viewCache.eventCache.getNode(),\n completeNode,\n accumulator\n );\n }\n } else {\n const childKey = pathGetFront(changePath);\n if (childKey === '.priority') {\n assert(\n pathGetLength(changePath) === 1,\n \"Can't have a priority with additional path components\"\n );\n const oldEventNode = oldEventSnap.getNode();\n serverNode = viewCache.serverCache.getNode();\n // we might have overwrites for this priority\n const updatedPriority = writeTreeRefCalcEventCacheAfterServerOverwrite(\n writesCache,\n changePath,\n oldEventNode,\n serverNode\n );\n if (updatedPriority != null) {\n newEventCache = viewProcessor.filter.updatePriority(\n oldEventNode,\n updatedPriority\n );\n } else {\n // priority didn't change, keep old node\n newEventCache = oldEventSnap.getNode();\n }\n } else {\n const childChangePath = pathPopFront(changePath);\n // update child\n let newEventChild;\n if (oldEventSnap.isCompleteForChild(childKey)) {\n serverNode = viewCache.serverCache.getNode();\n const eventChildUpdate =\n writeTreeRefCalcEventCacheAfterServerOverwrite(\n writesCache,\n changePath,\n oldEventSnap.getNode(),\n serverNode\n );\n if (eventChildUpdate != null) {\n newEventChild = oldEventSnap\n .getNode()\n .getImmediateChild(childKey)\n .updateChild(childChangePath, eventChildUpdate);\n } else {\n // Nothing changed, just keep the old child\n newEventChild = oldEventSnap.getNode().getImmediateChild(childKey);\n }\n } else {\n newEventChild = writeTreeRefCalcCompleteChild(\n writesCache,\n childKey,\n viewCache.serverCache\n );\n }\n if (newEventChild != null) {\n newEventCache = viewProcessor.filter.updateChild(\n oldEventSnap.getNode(),\n childKey,\n newEventChild,\n childChangePath,\n source,\n accumulator\n );\n } else {\n // no complete child available or no change\n newEventCache = oldEventSnap.getNode();\n }\n }\n }\n return viewCacheUpdateEventSnap(\n viewCache,\n newEventCache,\n oldEventSnap.isFullyInitialized() || pathIsEmpty(changePath),\n viewProcessor.filter.filtersNodes()\n );\n }\n}\n\nfunction viewProcessorApplyServerOverwrite(\n viewProcessor: ViewProcessor,\n oldViewCache: ViewCache,\n changePath: Path,\n changedSnap: Node,\n writesCache: WriteTreeRef,\n completeCache: Node | null,\n filterServerNode: boolean,\n accumulator: ChildChangeAccumulator\n): ViewCache {\n const oldServerSnap = oldViewCache.serverCache;\n let newServerCache;\n const serverFilter = filterServerNode\n ? viewProcessor.filter\n : viewProcessor.filter.getIndexedFilter();\n if (pathIsEmpty(changePath)) {\n newServerCache = serverFilter.updateFullNode(\n oldServerSnap.getNode(),\n changedSnap,\n null\n );\n } else if (serverFilter.filtersNodes() && !oldServerSnap.isFiltered()) {\n // we want to filter the server node, but we didn't filter the server node yet, so simulate a full update\n const newServerNode = oldServerSnap\n .getNode()\n .updateChild(changePath, changedSnap);\n newServerCache = serverFilter.updateFullNode(\n oldServerSnap.getNode(),\n newServerNode,\n null\n );\n } else {\n const childKey = pathGetFront(changePath);\n if (\n !oldServerSnap.isCompleteForPath(changePath) &&\n pathGetLength(changePath) > 1\n ) {\n // We don't update incomplete nodes with updates intended for other listeners\n return oldViewCache;\n }\n const childChangePath = pathPopFront(changePath);\n const childNode = oldServerSnap.getNode().getImmediateChild(childKey);\n const newChildNode = childNode.updateChild(childChangePath, changedSnap);\n if (childKey === '.priority') {\n newServerCache = serverFilter.updatePriority(\n oldServerSnap.getNode(),\n newChildNode\n );\n } else {\n newServerCache = serverFilter.updateChild(\n oldServerSnap.getNode(),\n childKey,\n newChildNode,\n childChangePath,\n NO_COMPLETE_CHILD_SOURCE,\n null\n );\n }\n }\n const newViewCache = viewCacheUpdateServerSnap(\n oldViewCache,\n newServerCache,\n oldServerSnap.isFullyInitialized() || pathIsEmpty(changePath),\n serverFilter.filtersNodes()\n );\n const source = new WriteTreeCompleteChildSource(\n writesCache,\n newViewCache,\n completeCache\n );\n return viewProcessorGenerateEventCacheAfterServerEvent(\n viewProcessor,\n newViewCache,\n changePath,\n writesCache,\n source,\n accumulator\n );\n}\n\nfunction viewProcessorApplyUserOverwrite(\n viewProcessor: ViewProcessor,\n oldViewCache: ViewCache,\n changePath: Path,\n changedSnap: Node,\n writesCache: WriteTreeRef,\n completeCache: Node | null,\n accumulator: ChildChangeAccumulator\n): ViewCache {\n const oldEventSnap = oldViewCache.eventCache;\n let newViewCache, newEventCache;\n const source = new WriteTreeCompleteChildSource(\n writesCache,\n oldViewCache,\n completeCache\n );\n if (pathIsEmpty(changePath)) {\n newEventCache = viewProcessor.filter.updateFullNode(\n oldViewCache.eventCache.getNode(),\n changedSnap,\n accumulator\n );\n newViewCache = viewCacheUpdateEventSnap(\n oldViewCache,\n newEventCache,\n true,\n viewProcessor.filter.filtersNodes()\n );\n } else {\n const childKey = pathGetFront(changePath);\n if (childKey === '.priority') {\n newEventCache = viewProcessor.filter.updatePriority(\n oldViewCache.eventCache.getNode(),\n changedSnap\n );\n newViewCache = viewCacheUpdateEventSnap(\n oldViewCache,\n newEventCache,\n oldEventSnap.isFullyInitialized(),\n oldEventSnap.isFiltered()\n );\n } else {\n const childChangePath = pathPopFront(changePath);\n const oldChild = oldEventSnap.getNode().getImmediateChild(childKey);\n let newChild;\n if (pathIsEmpty(childChangePath)) {\n // Child overwrite, we can replace the child\n newChild = changedSnap;\n } else {\n const childNode = source.getCompleteChild(childKey);\n if (childNode != null) {\n if (\n pathGetBack(childChangePath) === '.priority' &&\n childNode.getChild(pathParent(childChangePath)).isEmpty()\n ) {\n // This is a priority update on an empty node. If this node exists on the server, the\n // server will send down the priority in the update, so ignore for now\n newChild = childNode;\n } else {\n newChild = childNode.updateChild(childChangePath, changedSnap);\n }\n } else {\n // There is no complete child node available\n newChild = ChildrenNode.EMPTY_NODE;\n }\n }\n if (!oldChild.equals(newChild)) {\n const newEventSnap = viewProcessor.filter.updateChild(\n oldEventSnap.getNode(),\n childKey,\n newChild,\n childChangePath,\n source,\n accumulator\n );\n newViewCache = viewCacheUpdateEventSnap(\n oldViewCache,\n newEventSnap,\n oldEventSnap.isFullyInitialized(),\n viewProcessor.filter.filtersNodes()\n );\n } else {\n newViewCache = oldViewCache;\n }\n }\n }\n return newViewCache;\n}\n\nfunction viewProcessorCacheHasChild(\n viewCache: ViewCache,\n childKey: string\n): boolean {\n return viewCache.eventCache.isCompleteForChild(childKey);\n}\n\nfunction viewProcessorApplyUserMerge(\n viewProcessor: ViewProcessor,\n viewCache: ViewCache,\n path: Path,\n changedChildren: ImmutableTree<Node>,\n writesCache: WriteTreeRef,\n serverCache: Node | null,\n accumulator: ChildChangeAccumulator\n): ViewCache {\n // HACK: In the case of a limit query, there may be some changes that bump things out of the\n // window leaving room for new items. It's important we process these changes first, so we\n // iterate the changes twice, first processing any that affect items currently in view.\n // TODO: I consider an item \"in view\" if cacheHasChild is true, which checks both the server\n // and event snap. I'm not sure if this will result in edge cases when a child is in one but\n // not the other.\n let curViewCache = viewCache;\n changedChildren.foreach((relativePath, childNode) => {\n const writePath = pathChild(path, relativePath);\n if (viewProcessorCacheHasChild(viewCache, pathGetFront(writePath))) {\n curViewCache = viewProcessorApplyUserOverwrite(\n viewProcessor,\n curViewCache,\n writePath,\n childNode,\n writesCache,\n serverCache,\n accumulator\n );\n }\n });\n\n changedChildren.foreach((relativePath, childNode) => {\n const writePath = pathChild(path, relativePath);\n if (!viewProcessorCacheHasChild(viewCache, pathGetFront(writePath))) {\n curViewCache = viewProcessorApplyUserOverwrite(\n viewProcessor,\n curViewCache,\n writePath,\n childNode,\n writesCache,\n serverCache,\n accumulator\n );\n }\n });\n\n return curViewCache;\n}\n\nfunction viewProcessorApplyMerge(\n viewProcessor: ViewProcessor,\n node: Node,\n merge: ImmutableTree<Node>\n): Node {\n merge.foreach((relativePath, childNode) => {\n node = node.updateChild(relativePath, childNode);\n });\n return node;\n}\n\nfunction viewProcessorApplyServerMerge(\n viewProcessor: ViewProcessor,\n viewCache: ViewCache,\n path: Path,\n changedChildren: ImmutableTree<Node>,\n writesCache: WriteTreeRef,\n serverCache: Node | null,\n filterServerNode: boolean,\n accumulator: ChildChangeAccumulator\n): ViewCache {\n // If we don't have a cache yet, this merge was intended for a previously listen in the same location. Ignore it and\n // wait for the complete data update coming soon.\n if (\n viewCache.serverCache.getNode().isEmpty() &&\n !viewCache.serverCache.isFullyInitialized()\n ) {\n return viewCache;\n }\n\n // HACK: In the case of a limit query, there may be some changes that bump things out of the\n // window leaving room for new items. It's important we process these changes first, so we\n // iterate the changes twice, first processing any that affect items currently in view.\n // TODO: I consider an item \"in view\" if cacheHasChild is true, which checks both the server\n // and event snap. I'm not sure if this will result in edge cases when a child is in one but\n // not the other.\n let curViewCache = viewCache;\n let viewMergeTree: ImmutableTree<Node>;\n if (pathIsEmpty(path)) {\n viewMergeTree = changedChildren;\n } else {\n viewMergeTree = new ImmutableTree<Node>(null).setTree(\n path,\n changedChildren\n );\n }\n const serverNode = viewCache.serverCache.getNode();\n viewMergeTree.children.inorderTraversal((childKey, childTree) => {\n if (serverNode.hasChild(childKey)) {\n const serverChild = viewCache.serverCache\n .getNode()\n .getImmediateChild(childKey);\n const newChild = viewProcessorApplyMerge(\n viewProcessor,\n serverChild,\n childTree\n );\n curViewCache = viewProcessorApplyServerOverwrite(\n viewProcessor,\n curViewCache,\n new Path(childKey),\n newChild,\n writesCache,\n serverCache,\n filterServerNode,\n accumulator\n );\n }\n });\n viewMergeTree.children.inorderTraversal((childKey, childMergeTree) => {\n const isUnknownDeepMerge =\n !viewCache.serverCache.isCompleteForChild(childKey) &&\n childMergeTree.value === null;\n if (!serverNode.hasChild(childKey) && !isUnknownDeepMerge) {\n const serverChild = viewCache.serverCache\n .getNode()\n .getImmediateChild(childKey);\n const newChild = viewProcessorApplyMerge(\n viewProcessor,\n serverChild,\n childMergeTree\n );\n curViewCache = viewProcessorApplyServerOverwrite(\n viewProcessor,\n curViewCache,\n new Path(childKey),\n newChild,\n writesCache,\n serverCache,\n filterServerNode,\n accumulator\n );\n }\n });\n\n return curViewCache;\n}\n\nfunction viewProcessorAckUserWrite(\n viewProcessor: ViewProcessor,\n viewCache: ViewCache,\n ackPath: Path,\n affectedTree: ImmutableTree<boolean>,\n writesCache: WriteTreeRef,\n completeCache: Node | null,\n accumulator: ChildChangeAccumulator\n): ViewCache {\n if (writeTreeRefShadowingWrite(writesCache, ackPath) != null) {\n return viewCache;\n }\n\n // Only filter server node if it is currently filtered\n const filterServerNode = viewCache.serverCache.isFiltered();\n\n // Essentially we'll just get our existing server cache for the affected paths and re-apply it as a server update\n // now that it won't be shadowed.\n const serverCache = viewCache.serverCache;\n if (affectedTree.value != null) {\n // This is an overwrite.\n if (\n (pathIsEmpty(ackPath) && serverCache.isFullyInitialized()) ||\n serverCache.isCompleteForPath(ackPath)\n ) {\n return viewProcessorApplyServerOverwrite(\n viewProcessor,\n viewCache,\n ackPath,\n serverCache.getNode().getChild(ackPath),\n writesCache,\n completeCache,\n filterServerNode,\n accumulator\n );\n } else if (pathIsEmpty(ackPath)) {\n // This is a goofy edge case where we are acking data at this location but don't have full data. We\n // should just re-apply whatever we have in our cache as a merge.\n let changedChildren = new ImmutableTree<Node>(null);\n serverCache.getNode().forEachChild(KEY_INDEX, (name, node) => {\n changedChildren = changedChildren.set(new Path(name), node);\n });\n return viewProcessorApplyServerMerge(\n viewProcessor,\n viewCache,\n ackPath,\n changedChildren,\n writesCache,\n completeCache,\n filterServerNode,\n accumulator\n );\n } else {\n return viewCache;\n }\n } else {\n // This is a merge.\n let changedChildren = new ImmutableTree<Node>(null);\n affectedTree.foreach((mergePath, value) => {\n const serverCachePath = pathChild(ackPath, mergePath);\n if (serverCache.isCompleteForPath(serverCachePath)) {\n changedChildren = changedChildren.set(\n mergePath,\n serverCache.getNode().getChild(serverCachePath)\n );\n }\n });\n return viewProcessorApplyServerMerge(\n viewProcessor,\n viewCache,\n ackPath,\n changedChildren,\n writesCache,\n completeCache,\n filterServerNode,\n accumulator\n );\n }\n}\n\nfunction viewProcessorListenComplete(\n viewProcessor: ViewProcessor,\n viewCache: ViewCache,\n path: Path,\n writesCache: WriteTreeRef,\n accumulator: ChildChangeAccumulator\n): ViewCache {\n const oldServerNode = viewCache.serverCache;\n const newViewCache = viewCacheUpdateServerSnap(\n viewCache,\n oldServerNode.getNode(),\n oldServerNode.isFullyInitialized() || pathIsEmpty(path),\n oldServerNode.isFiltered()\n );\n return viewProcessorGenerateEventCacheAfterServerEvent(\n viewProcessor,\n newViewCache,\n path,\n writesCache,\n NO_COMPLETE_CHILD_SOURCE,\n accumulator\n );\n}\n\nfunction viewProcessorRevertUserWrite(\n viewProcessor: ViewProcessor,\n viewCache: ViewCache,\n path: Path,\n writesCache: WriteTreeRef,\n completeServerCache: Node | null,\n accumulator: ChildChangeAccumulator\n): ViewCache {\n let complete;\n if (writeTreeRefShadowingWrite(writesCache, path) != null) {\n return viewCache;\n } else {\n const source = new WriteTreeCompleteChildSource(\n writesCache,\n viewCache,\n completeServerCache\n );\n const oldEventCache = viewCache.eventCache.getNode();\n let newEventCache;\n if (pathIsEmpty(path) || pathGetFront(path) === '.priority') {\n let newNode;\n if (viewCache.serverCache.isFullyInitialized()) {\n newNode = writeTreeRefCalcCompleteEventCache(\n writesCache,\n viewCacheGetCompleteServerSnap(viewCache)\n );\n } else {\n const serverChildren = viewCache.serverCache.getNode();\n assert(\n serverChildren instanceof ChildrenNode,\n 'serverChildren would be complete if leaf node'\n );\n newNode = writeTreeRefCalcCompleteEventChildren(\n writesCache,\n serverChildren as ChildrenNode\n );\n }\n newNode = newNode as Node;\n newEventCache = viewProcessor.filter.updateFullNode(\n oldEventCache,\n newNode,\n accumulator\n );\n } else {\n const childKey = pathGetFront(path);\n let newChild = writeTreeRefCalcCompleteChild(\n writesCache,\n childKey,\n viewCache.serverCache\n );\n if (\n newChild == null &&\n viewCache.serverCache.isCompleteForChild(childKey)\n ) {\n newChild = oldEventCache.getImmediateChild(childKey);\n }\n if (newChild != null) {\n newEventCache = viewProcessor.filter.updateChild(\n oldEventCache,\n childKey,\n newChild,\n pathPopFront(path),\n source,\n accumulator\n );\n } else if (viewCache.eventCache.getNode().hasChild(childKey)) {\n // No complete child available, delete the existing one, if any\n newEventCache = viewProcessor.filter.updateChild(\n oldEventCache,\n childKey,\n ChildrenNode.EMPTY_NODE,\n pathPopFront(path),\n source,\n accumulator\n );\n } else {\n newEventCache = oldEventCache;\n }\n if (\n newEventCache.isEmpty() &&\n viewCache.serverCache.isFullyInitialized()\n ) {\n // We might have reverted all child writes. Maybe the old event was a leaf node\n complete = writeTreeRefCalcCompleteEventCache(\n writesCache,\n viewCacheGetCompleteServerSnap(viewCache)\n );\n if (complete.isLeafNode()) {\n newEventCache = viewProcessor.filter.updateFullNode(\n newEventCache,\n complete,\n accumulator\n );\n }\n }\n }\n complete =\n viewCache.serverCache.isFullyInitialized() ||\n writeTreeRefShadowingWrite(writesCache, newEmptyPath()) != null;\n return viewCacheUpdateEventSnap(\n viewCache,\n newEventCache,\n complete,\n viewProcessor.filter.filtersNodes()\n );\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert } from '@firebase/util';\n\nimport { Operation, OperationType } from '../operation/Operation';\nimport { ChildrenNode } from '../snap/ChildrenNode';\nimport { PRIORITY_INDEX } from '../snap/indexes/PriorityIndex';\nimport { Node } from '../snap/Node';\nimport { Path, pathGetFront, pathIsEmpty } from '../util/Path';\nimport { WriteTreeRef } from '../WriteTree';\n\nimport { CacheNode } from './CacheNode';\nimport { Change, changeChildAdded, changeValue } from './Change';\nimport { CancelEvent, Event } from './Event';\nimport {\n EventGenerator,\n eventGeneratorGenerateEventsForChanges\n} from './EventGenerator';\nimport { EventRegistration, QueryContext } from './EventRegistration';\nimport { IndexedFilter } from './filter/IndexedFilter';\nimport { queryParamsGetNodeFilter } from './QueryParams';\nimport {\n newViewCache,\n ViewCache,\n viewCacheGetCompleteEventSnap,\n viewCacheGetCompleteServerSnap\n} from './ViewCache';\nimport {\n newViewProcessor,\n ViewProcessor,\n viewProcessorApplyOperation,\n viewProcessorAssertIndexed\n} from './ViewProcessor';\n\n/**\n * A view represents a specific location and query that has 1 or more event registrations.\n *\n * It does several things:\n * - Maintains the list of event registrations for this location/query.\n * - Maintains a cache of the data visible for this location/query.\n * - Applies new operations (via applyOperation), updates the cache, and based on the event\n * registrations returns the set of events to be raised.\n */\nexport class View {\n processor_: ViewProcessor;\n viewCache_: ViewCache;\n eventRegistrations_: EventRegistration[] = [];\n eventGenerator_: EventGenerator;\n\n constructor(private query_: QueryContext, initialViewCache: ViewCache) {\n const params = this.query_._queryParams;\n\n const indexFilter = new IndexedFilter(params.getIndex());\n const filter = queryParamsGetNodeFilter(params);\n\n this.processor_ = newViewProcessor(filter);\n\n const initialServerCache = initialViewCache.serverCache;\n const initialEventCache = initialViewCache.eventCache;\n\n // Don't filter server node with other filter than index, wait for tagged listen\n const serverSnap = indexFilter.updateFullNode(\n ChildrenNode.EMPTY_NODE,\n initialServerCache.getNode(),\n null\n );\n const eventSnap = filter.updateFullNode(\n ChildrenNode.EMPTY_NODE,\n initialEventCache.getNode(),\n null\n );\n const newServerCache = new CacheNode(\n serverSnap,\n initialServerCache.isFullyInitialized(),\n indexFilter.filtersNodes()\n );\n const newEventCache = new CacheNode(\n eventSnap,\n initialEventCache.isFullyInitialized(),\n filter.filtersNodes()\n );\n\n this.viewCache_ = newViewCache(newEventCache, newServerCache);\n this.eventGenerator_ = new EventGenerator(this.query_);\n }\n\n get query(): QueryContext {\n return this.query_;\n }\n}\n\nexport function viewGetServerCache(view: View): Node | null {\n return view.viewCache_.serverCache.getNode();\n}\n\nexport function viewGetCompleteNode(view: View): Node | null {\n return viewCacheGetCompleteEventSnap(view.viewCache_);\n}\n\nexport function viewGetCompleteServerCache(\n view: View,\n path: Path\n): Node | null {\n const cache = viewCacheGetCompleteServerSnap(view.viewCache_);\n if (cache) {\n // If this isn't a \"loadsAllData\" view, then cache isn't actually a complete cache and\n // we need to see if it contains the child we're interested in.\n if (\n view.query._queryParams.loadsAllData() ||\n (!pathIsEmpty(path) &&\n !cache.getImmediateChild(pathGetFront(path)).isEmpty())\n ) {\n return cache.getChild(path);\n }\n }\n return null;\n}\n\nexport function viewIsEmpty(view: View): boolean {\n return view.eventRegistrations_.length === 0;\n}\n\nexport function viewAddEventRegistration(\n view: View,\n eventRegistration: EventRegistration\n) {\n view.eventRegistrations_.push(eventRegistration);\n}\n\n/**\n * @param eventRegistration - If null, remove all callbacks.\n * @param cancelError - If a cancelError is provided, appropriate cancel events will be returned.\n * @returns Cancel events, if cancelError was provided.\n */\nexport function viewRemoveEventRegistration(\n view: View,\n eventRegistration: EventRegistration | null,\n cancelError?: Error\n): Event[] {\n const cancelEvents: CancelEvent[] = [];\n if (cancelError) {\n assert(\n eventRegistration == null,\n 'A cancel should cancel all event registrations.'\n );\n const path = view.query._path;\n view.eventRegistrations_.forEach(registration => {\n const maybeEvent = registration.createCancelEvent(cancelError, path);\n if (maybeEvent) {\n cancelEvents.push(maybeEvent);\n }\n });\n }\n\n if (eventRegistration) {\n let remaining = [];\n for (let i = 0; i < view.eventRegistrations_.length; ++i) {\n const existing = view.eventRegistrations_[i];\n if (!existing.matches(eventRegistration)) {\n remaining.push(existing);\n } else if (eventRegistration.hasAnyCallback()) {\n // We're removing just this one\n remaining = remaining.concat(view.eventRegistrations_.slice(i + 1));\n break;\n }\n }\n view.eventRegistrations_ = remaining;\n } else {\n view.eventRegistrations_ = [];\n }\n return cancelEvents;\n}\n\n/**\n * Applies the given Operation, updates our cache, and returns the appropriate events.\n */\nexport function viewApplyOperation(\n view: View,\n operation: Operation,\n writesCache: WriteTreeRef,\n completeServerCache: Node | null\n): Event[] {\n if (\n operation.type === OperationType.MERGE &&\n operation.source.queryId !== null\n ) {\n assert(\n viewCacheGetCompleteServerSnap(view.viewCache_),\n 'We should always have a full cache before handling merges'\n );\n assert(\n viewCacheGetCompleteEventSnap(view.viewCache_),\n 'Missing event cache, even though we have a server cache'\n );\n }\n\n const oldViewCache = view.viewCache_;\n const result = viewProcessorApplyOperation(\n view.processor_,\n oldViewCache,\n operation,\n writesCache,\n completeServerCache\n );\n viewProcessorAssertIndexed(view.processor_, result.viewCache);\n\n assert(\n result.viewCache.serverCache.isFullyInitialized() ||\n !oldViewCache.serverCache.isFullyInitialized(),\n 'Once a server snap is complete, it should never go back'\n );\n\n view.viewCache_ = result.viewCache;\n\n return viewGenerateEventsForChanges_(\n view,\n result.changes,\n result.viewCache.eventCache.getNode(),\n null\n );\n}\n\nexport function viewGetInitialEvents(\n view: View,\n registration: EventRegistration\n): Event[] {\n const eventSnap = view.viewCache_.eventCache;\n const initialChanges: Change[] = [];\n if (!eventSnap.getNode().isLeafNode()) {\n const eventNode = eventSnap.getNode() as ChildrenNode;\n eventNode.forEachChild(PRIORITY_INDEX, (key, childNode) => {\n initialChanges.push(changeChildAdded(key, childNode));\n });\n }\n if (eventSnap.isFullyInitialized()) {\n initialChanges.push(changeValue(eventSnap.getNode()));\n }\n return viewGenerateEventsForChanges_(\n view,\n initialChanges,\n eventSnap.getNode(),\n registration\n );\n}\n\nfunction viewGenerateEventsForChanges_(\n view: View,\n changes: Change[],\n eventCache: Node,\n eventRegistration?: EventRegistration\n): Event[] {\n const registrations = eventRegistration\n ? [eventRegistration]\n : view.eventRegistrations_;\n return eventGeneratorGenerateEventsForChanges(\n view.eventGenerator_,\n changes,\n eventCache,\n registrations\n );\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert } from '@firebase/util';\n\nimport { ReferenceConstructor } from '../api/Reference';\n\nimport { Operation } from './operation/Operation';\nimport { ChildrenNode } from './snap/ChildrenNode';\nimport { Node } from './snap/Node';\nimport { Path } from './util/Path';\nimport { CacheNode } from './view/CacheNode';\nimport { Event } from './view/Event';\nimport { EventRegistration, QueryContext } from './view/EventRegistration';\nimport {\n View,\n viewAddEventRegistration,\n viewApplyOperation,\n viewGetCompleteServerCache,\n viewGetInitialEvents,\n viewIsEmpty,\n viewRemoveEventRegistration\n} from './view/View';\nimport { newViewCache } from './view/ViewCache';\nimport {\n WriteTreeRef,\n writeTreeRefCalcCompleteEventCache,\n writeTreeRefCalcCompleteEventChildren\n} from './WriteTree';\n\nlet referenceConstructor: ReferenceConstructor;\n\n/**\n * SyncPoint represents a single location in a SyncTree with 1 or more event registrations, meaning we need to\n * maintain 1 or more Views at this location to cache server data and raise appropriate events for server changes\n * and user writes (set, transaction, update).\n *\n * It's responsible for:\n * - Maintaining the set of 1 or more views necessary at this location (a SyncPoint with 0 views should be removed).\n * - Proxying user / server operations to the views as appropriate (i.e. applyServerOverwrite,\n * applyUserOverwrite, etc.)\n */\nexport class SyncPoint {\n /**\n * The Views being tracked at this location in the tree, stored as a map where the key is a\n * queryId and the value is the View for that query.\n *\n * NOTE: This list will be quite small (usually 1, but perhaps 2 or 3; any more is an odd use case).\n */\n readonly views: Map<string, View> = new Map();\n}\n\nexport function syncPointSetReferenceConstructor(\n val: ReferenceConstructor\n): void {\n assert(\n !referenceConstructor,\n '__referenceConstructor has already been defined'\n );\n referenceConstructor = val;\n}\n\nfunction syncPointGetReferenceConstructor(): ReferenceConstructor {\n assert(referenceConstructor, 'Reference.ts has not been loaded');\n return referenceConstructor;\n}\n\nexport function syncPointIsEmpty(syncPoint: SyncPoint): boolean {\n return syncPoint.views.size === 0;\n}\n\nexport function syncPointApplyOperation(\n syncPoint: SyncPoint,\n operation: Operation,\n writesCache: WriteTreeRef,\n optCompleteServerCache: Node | null\n): Event[] {\n const queryId = operation.source.queryId;\n if (queryId !== null) {\n const view = syncPoint.views.get(queryId);\n assert(view != null, 'SyncTree gave us an op for an invalid query.');\n return viewApplyOperation(\n view,\n operation,\n writesCache,\n optCompleteServerCache\n );\n } else {\n let events: Event[] = [];\n\n for (const view of syncPoint.views.values()) {\n events = events.concat(\n viewApplyOperation(view, operation, writesCache, optCompleteServerCache)\n );\n }\n\n return events;\n }\n}\n\n/**\n * Get a view for the specified query.\n *\n * @param query - The query to return a view for\n * @param writesCache\n * @param serverCache\n * @param serverCacheComplete\n * @returns Events to raise.\n */\nexport function syncPointGetView(\n syncPoint: SyncPoint,\n query: QueryContext,\n writesCache: WriteTreeRef,\n serverCache: Node | null,\n serverCacheComplete: boolean\n): View {\n const queryId = query._queryIdentifier;\n const view = syncPoint.views.get(queryId);\n if (!view) {\n // TODO: make writesCache take flag for complete server node\n let eventCache = writeTreeRefCalcCompleteEventCache(\n writesCache,\n serverCacheComplete ? serverCache : null\n );\n let eventCacheComplete = false;\n if (eventCache) {\n eventCacheComplete = true;\n } else if (serverCache instanceof ChildrenNode) {\n eventCache = writeTreeRefCalcCompleteEventChildren(\n writesCache,\n serverCache\n );\n eventCacheComplete = false;\n } else {\n eventCache = ChildrenNode.EMPTY_NODE;\n eventCacheComplete = false;\n }\n const viewCache = newViewCache(\n new CacheNode(eventCache, eventCacheComplete, false),\n new CacheNode(serverCache, serverCacheComplete, false)\n );\n return new View(query, viewCache);\n }\n return view;\n}\n\n/**\n * Add an event callback for the specified query.\n *\n * @param query\n * @param eventRegistration\n * @param writesCache\n * @param serverCache - Complete server cache, if we have it.\n * @param serverCacheComplete\n * @returns Events to raise.\n */\nexport function syncPointAddEventRegistration(\n syncPoint: SyncPoint,\n query: QueryContext,\n eventRegistration: EventRegistration,\n writesCache: WriteTreeRef,\n serverCache: Node | null,\n serverCacheComplete: boolean\n): Event[] {\n const view = syncPointGetView(\n syncPoint,\n query,\n writesCache,\n serverCache,\n serverCacheComplete\n );\n if (!syncPoint.views.has(query._queryIdentifier)) {\n syncPoint.views.set(query._queryIdentifier, view);\n }\n // This is guaranteed to exist now, we just created anything that was missing\n viewAddEventRegistration(view, eventRegistration);\n return viewGetInitialEvents(view, eventRegistration);\n}\n\n/**\n * Remove event callback(s). Return cancelEvents if a cancelError is specified.\n *\n * If query is the default query, we'll check all views for the specified eventRegistration.\n * If eventRegistration is null, we'll remove all callbacks for the specified view(s).\n *\n * @param eventRegistration - If null, remove all callbacks.\n * @param cancelError - If a cancelError is provided, appropriate cancel events will be returned.\n * @returns removed queries and any cancel events\n */\nexport function syncPointRemoveEventRegistration(\n syncPoint: SyncPoint,\n query: QueryContext,\n eventRegistration: EventRegistration | null,\n cancelError?: Error\n): { removed: QueryContext[]; events: Event[] } {\n const queryId = query._queryIdentifier;\n const removed: QueryContext[] = [];\n let cancelEvents: Event[] = [];\n const hadCompleteView = syncPointHasCompleteView(syncPoint);\n if (queryId === 'default') {\n // When you do ref.off(...), we search all views for the registration to remove.\n for (const [viewQueryId, view] of syncPoint.views.entries()) {\n cancelEvents = cancelEvents.concat(\n viewRemoveEventRegistration(view, eventRegistration, cancelError)\n );\n if (viewIsEmpty(view)) {\n syncPoint.views.delete(viewQueryId);\n\n // We'll deal with complete views later.\n if (!view.query._queryParams.loadsAllData()) {\n removed.push(view.query);\n }\n }\n }\n } else {\n // remove the callback from the specific view.\n const view = syncPoint.views.get(queryId);\n if (view) {\n cancelEvents = cancelEvents.concat(\n viewRemoveEventRegistration(view, eventRegistration, cancelError)\n );\n if (viewIsEmpty(view)) {\n syncPoint.views.delete(queryId);\n\n // We'll deal with complete views later.\n if (!view.query._queryParams.loadsAllData()) {\n removed.push(view.query);\n }\n }\n }\n }\n\n if (hadCompleteView && !syncPointHasCompleteView(syncPoint)) {\n // We removed our last complete view.\n removed.push(\n new (syncPointGetReferenceConstructor())(query._repo, query._path)\n );\n }\n\n return { removed, events: cancelEvents };\n}\n\nexport function syncPointGetQueryViews(syncPoint: SyncPoint): View[] {\n const result = [];\n for (const view of syncPoint.views.values()) {\n if (!view.query._queryParams.loadsAllData()) {\n result.push(view);\n }\n }\n return result;\n}\n\n/**\n * @param path - The path to the desired complete snapshot\n * @returns A complete cache, if it exists\n */\nexport function syncPointGetCompleteServerCache(\n syncPoint: SyncPoint,\n path: Path\n): Node | null {\n let serverCache: Node | null = null;\n for (const view of syncPoint.views.values()) {\n serverCache = serverCache || viewGetCompleteServerCache(view, path);\n }\n return serverCache;\n}\n\nexport function syncPointViewForQuery(\n syncPoint: SyncPoint,\n query: QueryContext\n): View | null {\n const params = query._queryParams;\n if (params.loadsAllData()) {\n return syncPointGetCompleteView(syncPoint);\n } else {\n const queryId = query._queryIdentifier;\n return syncPoint.views.get(queryId);\n }\n}\n\nexport function syncPointViewExistsForQuery(\n syncPoint: SyncPoint,\n query: QueryContext\n): boolean {\n return syncPointViewForQuery(syncPoint, query) != null;\n}\n\nexport function syncPointHasCompleteView(syncPoint: SyncPoint): boolean {\n return syncPointGetCompleteView(syncPoint) != null;\n}\n\nexport function syncPointGetCompleteView(syncPoint: SyncPoint): View | null {\n for (const view of syncPoint.views.values()) {\n if (view.query._queryParams.loadsAllData()) {\n return view;\n }\n }\n return null;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert } from '@firebase/util';\n\nimport { ReferenceConstructor } from '../api/Reference';\n\nimport { AckUserWrite } from './operation/AckUserWrite';\nimport { ListenComplete } from './operation/ListenComplete';\nimport { Merge } from './operation/Merge';\nimport {\n newOperationSourceServer,\n newOperationSourceServerTaggedQuery,\n newOperationSourceUser,\n Operation\n} from './operation/Operation';\nimport { Overwrite } from './operation/Overwrite';\nimport { ChildrenNode } from './snap/ChildrenNode';\nimport { Node } from './snap/Node';\nimport {\n SyncPoint,\n syncPointAddEventRegistration,\n syncPointApplyOperation,\n syncPointGetCompleteServerCache,\n syncPointGetCompleteView,\n syncPointGetQueryViews,\n syncPointGetView,\n syncPointHasCompleteView,\n syncPointIsEmpty,\n syncPointRemoveEventRegistration,\n syncPointViewExistsForQuery,\n syncPointViewForQuery\n} from './SyncPoint';\nimport { ImmutableTree } from './util/ImmutableTree';\nimport {\n newEmptyPath,\n newRelativePath,\n Path,\n pathGetFront,\n pathIsEmpty\n} from './util/Path';\nimport { each, errorForServerCode } from './util/util';\nimport { CacheNode } from './view/CacheNode';\nimport { Event } from './view/Event';\nimport { EventRegistration, QueryContext } from './view/EventRegistration';\nimport { View, viewGetCompleteNode, viewGetServerCache } from './view/View';\nimport {\n newWriteTree,\n WriteTree,\n writeTreeAddMerge,\n writeTreeAddOverwrite,\n writeTreeCalcCompleteEventCache,\n writeTreeChildWrites,\n writeTreeGetWrite,\n WriteTreeRef,\n writeTreeRefChild,\n writeTreeRemoveWrite\n} from './WriteTree';\n\nlet referenceConstructor: ReferenceConstructor;\n\nexport function syncTreeSetReferenceConstructor(\n val: ReferenceConstructor\n): void {\n assert(\n !referenceConstructor,\n '__referenceConstructor has already been defined'\n );\n referenceConstructor = val;\n}\n\nfunction syncTreeGetReferenceConstructor(): ReferenceConstructor {\n assert(referenceConstructor, 'Reference.ts has not been loaded');\n return referenceConstructor;\n}\n\nexport interface ListenProvider {\n startListening(\n query: QueryContext,\n tag: number | null,\n hashFn: () => string,\n onComplete: (a: string, b?: unknown) => Event[]\n ): Event[];\n\n stopListening(a: QueryContext, b: number | null): void;\n}\n\n/**\n * Static tracker for next query tag.\n */\nlet syncTreeNextQueryTag_ = 1;\n\nexport function resetSyncTreeTag() {\n syncTreeNextQueryTag_ = 1;\n}\n\n/**\n * SyncTree is the central class for managing event callback registration, data caching, views\n * (query processing), and event generation. There are typically two SyncTree instances for\n * each Repo, one for the normal Firebase data, and one for the .info data.\n *\n * It has a number of responsibilities, including:\n * - Tracking all user event callbacks (registered via addEventRegistration() and removeEventRegistration()).\n * - Applying and caching data changes for user set(), transaction(), and update() calls\n * (applyUserOverwrite(), applyUserMerge()).\n * - Applying and caching data changes for server data changes (applyServerOverwrite(),\n * applyServerMerge()).\n * - Generating user-facing events for server and user changes (all of the apply* methods\n * return the set of events that need to be raised as a result).\n * - Maintaining the appropriate set of server listens to ensure we are always subscribed\n * to the correct set of paths and queries to satisfy the current set of user event\n * callbacks (listens are started/stopped using the provided listenProvider).\n *\n * NOTE: Although SyncTree tracks event callbacks and calculates events to raise, the actual\n * events are returned to the caller rather than raised synchronously.\n *\n */\nexport class SyncTree {\n /**\n * Tree of SyncPoints. There's a SyncPoint at any location that has 1 or more views.\n */\n syncPointTree_: ImmutableTree<SyncPoint> = new ImmutableTree<SyncPoint>(null);\n\n /**\n * A tree of all pending user writes (user-initiated set()'s, transaction()'s, update()'s, etc.).\n */\n pendingWriteTree_: WriteTree = newWriteTree();\n\n readonly tagToQueryMap: Map<number, string> = new Map();\n readonly queryToTagMap: Map<string, number> = new Map();\n\n /**\n * @param listenProvider_ - Used by SyncTree to start / stop listening\n * to server data.\n */\n constructor(public listenProvider_: ListenProvider) {}\n}\n\n/**\n * Apply the data changes for a user-generated set() or transaction() call.\n *\n * @returns Events to raise.\n */\nexport function syncTreeApplyUserOverwrite(\n syncTree: SyncTree,\n path: Path,\n newData: Node,\n writeId: number,\n visible?: boolean\n): Event[] {\n // Record pending write.\n writeTreeAddOverwrite(\n syncTree.pendingWriteTree_,\n path,\n newData,\n writeId,\n visible\n );\n\n if (!visible) {\n return [];\n } else {\n return syncTreeApplyOperationToSyncPoints_(\n syncTree,\n new Overwrite(newOperationSourceUser(), path, newData)\n );\n }\n}\n\n/**\n * Apply the data from a user-generated update() call\n *\n * @returns Events to raise.\n */\nexport function syncTreeApplyUserMerge(\n syncTree: SyncTree,\n path: Path,\n changedChildren: { [k: string]: Node },\n writeId: number\n): Event[] {\n // Record pending merge.\n writeTreeAddMerge(syncTree.pendingWriteTree_, path, changedChildren, writeId);\n\n const changeTree = ImmutableTree.fromObject(changedChildren);\n\n return syncTreeApplyOperationToSyncPoints_(\n syncTree,\n new Merge(newOperationSourceUser(), path, changeTree)\n );\n}\n\n/**\n * Acknowledge a pending user write that was previously registered with applyUserOverwrite() or applyUserMerge().\n *\n * @param revert - True if the given write failed and needs to be reverted\n * @returns Events to raise.\n */\nexport function syncTreeAckUserWrite(\n syncTree: SyncTree,\n writeId: number,\n revert: boolean = false\n) {\n const write = writeTreeGetWrite(syncTree.pendingWriteTree_, writeId);\n const needToReevaluate = writeTreeRemoveWrite(\n syncTree.pendingWriteTree_,\n writeId\n );\n if (!needToReevaluate) {\n return [];\n } else {\n let affectedTree = new ImmutableTree<boolean>(null);\n if (write.snap != null) {\n // overwrite\n affectedTree = affectedTree.set(newEmptyPath(), true);\n } else {\n each(write.children, (pathString: string) => {\n affectedTree = affectedTree.set(new Path(pathString), true);\n });\n }\n return syncTreeApplyOperationToSyncPoints_(\n syncTree,\n new AckUserWrite(write.path, affectedTree, revert)\n );\n }\n}\n\n/**\n * Apply new server data for the specified path..\n *\n * @returns Events to raise.\n */\nexport function syncTreeApplyServerOverwrite(\n syncTree: SyncTree,\n path: Path,\n newData: Node\n): Event[] {\n return syncTreeApplyOperationToSyncPoints_(\n syncTree,\n new Overwrite(newOperationSourceServer(), path, newData)\n );\n}\n\n/**\n * Apply new server data to be merged in at the specified path.\n *\n * @returns Events to raise.\n */\nexport function syncTreeApplyServerMerge(\n syncTree: SyncTree,\n path: Path,\n changedChildren: { [k: string]: Node }\n): Event[] {\n const changeTree = ImmutableTree.fromObject(changedChildren);\n\n return syncTreeApplyOperationToSyncPoints_(\n syncTree,\n new Merge(newOperationSourceServer(), path, changeTree)\n );\n}\n\n/**\n * Apply a listen complete for a query\n *\n * @returns Events to raise.\n */\nexport function syncTreeApplyListenComplete(\n syncTree: SyncTree,\n path: Path\n): Event[] {\n return syncTreeApplyOperationToSyncPoints_(\n syncTree,\n new ListenComplete(newOperationSourceServer(), path)\n );\n}\n\n/**\n * Apply a listen complete for a tagged query\n *\n * @returns Events to raise.\n */\nexport function syncTreeApplyTaggedListenComplete(\n syncTree: SyncTree,\n path: Path,\n tag: number\n): Event[] {\n const queryKey = syncTreeQueryKeyForTag_(syncTree, tag);\n if (queryKey) {\n const r = syncTreeParseQueryKey_(queryKey);\n const queryPath = r.path,\n queryId = r.queryId;\n const relativePath = newRelativePath(queryPath, path);\n const op = new ListenComplete(\n newOperationSourceServerTaggedQuery(queryId),\n relativePath\n );\n return syncTreeApplyTaggedOperation_(syncTree, queryPath, op);\n } else {\n // We've already removed the query. No big deal, ignore the update\n return [];\n }\n}\n\n/**\n * Remove event callback(s).\n *\n * If query is the default query, we'll check all queries for the specified eventRegistration.\n * If eventRegistration is null, we'll remove all callbacks for the specified query/queries.\n *\n * @param eventRegistration - If null, all callbacks are removed.\n * @param cancelError - If a cancelError is provided, appropriate cancel events will be returned.\n * @param skipListenerDedup - When performing a `get()`, we don't add any new listeners, so no\n * deduping needs to take place. This flag allows toggling of that behavior\n * @returns Cancel events, if cancelError was provided.\n */\nexport function syncTreeRemoveEventRegistration(\n syncTree: SyncTree,\n query: QueryContext,\n eventRegistration: EventRegistration | null,\n cancelError?: Error,\n skipListenerDedup = false\n): Event[] {\n // Find the syncPoint first. Then deal with whether or not it has matching listeners\n const path = query._path;\n const maybeSyncPoint = syncTree.syncPointTree_.get(path);\n let cancelEvents: Event[] = [];\n // A removal on a default query affects all queries at that location. A removal on an indexed query, even one without\n // other query constraints, does *not* affect all queries at that location. So this check must be for 'default', and\n // not loadsAllData().\n if (\n maybeSyncPoint &&\n (query._queryIdentifier === 'default' ||\n syncPointViewExistsForQuery(maybeSyncPoint, query))\n ) {\n const removedAndEvents = syncPointRemoveEventRegistration(\n maybeSyncPoint,\n query,\n eventRegistration,\n cancelError\n );\n if (syncPointIsEmpty(maybeSyncPoint)) {\n syncTree.syncPointTree_ = syncTree.syncPointTree_.remove(path);\n }\n\n const removed = removedAndEvents.removed;\n cancelEvents = removedAndEvents.events;\n\n if (!skipListenerDedup) {\n /**\n * We may have just removed one of many listeners and can short-circuit this whole process\n * We may also not have removed a default listener, in which case all of the descendant listeners should already be\n * properly set up.\n */\n\n // Since indexed queries can shadow if they don't have other query constraints, check for loadsAllData(), instead of\n // queryId === 'default'\n const removingDefault =\n -1 !==\n removed.findIndex(query => {\n return query._queryParams.loadsAllData();\n });\n const covered = syncTree.syncPointTree_.findOnPath(\n path,\n (relativePath, parentSyncPoint) =>\n syncPointHasCompleteView(parentSyncPoint)\n );\n\n if (removingDefault && !covered) {\n const subtree = syncTree.syncPointTree_.subtree(path);\n // There are potentially child listeners. Determine what if any listens we need to send before executing the\n // removal\n if (!subtree.isEmpty()) {\n // We need to fold over our subtree and collect the listeners to send\n const newViews = syncTreeCollectDistinctViewsForSubTree_(subtree);\n\n // Ok, we've collected all the listens we need. Set them up.\n for (let i = 0; i < newViews.length; ++i) {\n const view = newViews[i],\n newQuery = view.query;\n const listener = syncTreeCreateListenerForView_(syncTree, view);\n syncTree.listenProvider_.startListening(\n syncTreeQueryForListening_(newQuery),\n syncTreeTagForQuery(syncTree, newQuery),\n listener.hashFn,\n listener.onComplete\n );\n }\n }\n // Otherwise there's nothing below us, so nothing we need to start listening on\n }\n // If we removed anything and we're not covered by a higher up listen, we need to stop listening on this query\n // The above block has us covered in terms of making sure we're set up on listens lower in the tree.\n // Also, note that if we have a cancelError, it's already been removed at the provider level.\n if (!covered && removed.length > 0 && !cancelError) {\n // If we removed a default, then we weren't listening on any of the other queries here. Just cancel the one\n // default. Otherwise, we need to iterate through and cancel each individual query\n if (removingDefault) {\n // We don't tag default listeners\n const defaultTag: number | null = null;\n syncTree.listenProvider_.stopListening(\n syncTreeQueryForListening_(query),\n defaultTag\n );\n } else {\n removed.forEach((queryToRemove: QueryContext) => {\n const tagToRemove = syncTree.queryToTagMap.get(\n syncTreeMakeQueryKey_(queryToRemove)\n );\n syncTree.listenProvider_.stopListening(\n syncTreeQueryForListening_(queryToRemove),\n tagToRemove\n );\n });\n }\n }\n }\n // Now, clear all of the tags we're tracking for the removed listens\n syncTreeRemoveTags_(syncTree, removed);\n } else {\n // No-op, this listener must've been already removed\n }\n return cancelEvents;\n}\n\n/**\n * Apply new server data for the specified tagged query.\n *\n * @returns Events to raise.\n */\nexport function syncTreeApplyTaggedQueryOverwrite(\n syncTree: SyncTree,\n path: Path,\n snap: Node,\n tag: number\n): Event[] {\n const queryKey = syncTreeQueryKeyForTag_(syncTree, tag);\n if (queryKey != null) {\n const r = syncTreeParseQueryKey_(queryKey);\n const queryPath = r.path,\n queryId = r.queryId;\n const relativePath = newRelativePath(queryPath, path);\n const op = new Overwrite(\n newOperationSourceServerTaggedQuery(queryId),\n relativePath,\n snap\n );\n return syncTreeApplyTaggedOperation_(syncTree, queryPath, op);\n } else {\n // Query must have been removed already\n return [];\n }\n}\n\n/**\n * Apply server data to be merged in for the specified tagged query.\n *\n * @returns Events to raise.\n */\nexport function syncTreeApplyTaggedQueryMerge(\n syncTree: SyncTree,\n path: Path,\n changedChildren: { [k: string]: Node },\n tag: number\n): Event[] {\n const queryKey = syncTreeQueryKeyForTag_(syncTree, tag);\n if (queryKey) {\n const r = syncTreeParseQueryKey_(queryKey);\n const queryPath = r.path,\n queryId = r.queryId;\n const relativePath = newRelativePath(queryPath, path);\n const changeTree = ImmutableTree.fromObject(changedChildren);\n const op = new Merge(\n newOperationSourceServerTaggedQuery(queryId),\n relativePath,\n changeTree\n );\n return syncTreeApplyTaggedOperation_(syncTree, queryPath, op);\n } else {\n // We've already removed the query. No big deal, ignore the update\n return [];\n }\n}\n\n/**\n * Add an event callback for the specified query.\n *\n * @returns Events to raise.\n */\nexport function syncTreeAddEventRegistration(\n syncTree: SyncTree,\n query: QueryContext,\n eventRegistration: EventRegistration,\n skipSetupListener = false\n): Event[] {\n const path = query._path;\n\n let serverCache: Node | null = null;\n let foundAncestorDefaultView = false;\n // Any covering writes will necessarily be at the root, so really all we need to find is the server cache.\n // Consider optimizing this once there's a better understanding of what actual behavior will be.\n syncTree.syncPointTree_.foreachOnPath(path, (pathToSyncPoint, sp) => {\n const relativePath = newRelativePath(pathToSyncPoint, path);\n serverCache =\n serverCache || syncPointGetCompleteServerCache(sp, relativePath);\n foundAncestorDefaultView =\n foundAncestorDefaultView || syncPointHasCompleteView(sp);\n });\n let syncPoint = syncTree.syncPointTree_.get(path);\n if (!syncPoint) {\n syncPoint = new SyncPoint();\n syncTree.syncPointTree_ = syncTree.syncPointTree_.set(path, syncPoint);\n } else {\n foundAncestorDefaultView =\n foundAncestorDefaultView || syncPointHasCompleteView(syncPoint);\n serverCache =\n serverCache || syncPointGetCompleteServerCache(syncPoint, newEmptyPath());\n }\n\n let serverCacheComplete;\n if (serverCache != null) {\n serverCacheComplete = true;\n } else {\n serverCacheComplete = false;\n serverCache = ChildrenNode.EMPTY_NODE;\n const subtree = syncTree.syncPointTree_.subtree(path);\n subtree.foreachChild((childName, childSyncPoint) => {\n const completeCache = syncPointGetCompleteServerCache(\n childSyncPoint,\n newEmptyPath()\n );\n if (completeCache) {\n serverCache = serverCache.updateImmediateChild(\n childName,\n completeCache\n );\n }\n });\n }\n\n const viewAlreadyExists = syncPointViewExistsForQuery(syncPoint, query);\n if (!viewAlreadyExists && !query._queryParams.loadsAllData()) {\n // We need to track a tag for this query\n const queryKey = syncTreeMakeQueryKey_(query);\n assert(\n !syncTree.queryToTagMap.has(queryKey),\n 'View does not exist, but we have a tag'\n );\n const tag = syncTreeGetNextQueryTag_();\n syncTree.queryToTagMap.set(queryKey, tag);\n syncTree.tagToQueryMap.set(tag, queryKey);\n }\n const writesCache = writeTreeChildWrites(syncTree.pendingWriteTree_, path);\n let events = syncPointAddEventRegistration(\n syncPoint,\n query,\n eventRegistration,\n writesCache,\n serverCache,\n serverCacheComplete\n );\n if (!viewAlreadyExists && !foundAncestorDefaultView && !skipSetupListener) {\n const view = syncPointViewForQuery(syncPoint, query);\n events = events.concat(syncTreeSetupListener_(syncTree, query, view));\n }\n return events;\n}\n\n/**\n * Returns a complete cache, if we have one, of the data at a particular path. If the location does not have a\n * listener above it, we will get a false \"null\". This shouldn't be a problem because transactions will always\n * have a listener above, and atomic operations would correctly show a jitter of <increment value> ->\n * <incremented total> as the write is applied locally and then acknowledged at the server.\n *\n * Note: this method will *include* hidden writes from transaction with applyLocally set to false.\n *\n * @param path - The path to the data we want\n * @param writeIdsToExclude - A specific set to be excluded\n */\nexport function syncTreeCalcCompleteEventCache(\n syncTree: SyncTree,\n path: Path,\n writeIdsToExclude?: number[]\n): Node {\n const includeHiddenSets = true;\n const writeTree = syncTree.pendingWriteTree_;\n const serverCache = syncTree.syncPointTree_.findOnPath(\n path,\n (pathSoFar, syncPoint) => {\n const relativePath = newRelativePath(pathSoFar, path);\n const serverCache = syncPointGetCompleteServerCache(\n syncPoint,\n relativePath\n );\n if (serverCache) {\n return serverCache;\n }\n }\n );\n return writeTreeCalcCompleteEventCache(\n writeTree,\n path,\n serverCache,\n writeIdsToExclude,\n includeHiddenSets\n );\n}\n\nexport function syncTreeGetServerValue(\n syncTree: SyncTree,\n query: QueryContext\n): Node | null {\n const path = query._path;\n let serverCache: Node | null = null;\n // Any covering writes will necessarily be at the root, so really all we need to find is the server cache.\n // Consider optimizing this once there's a better understanding of what actual behavior will be.\n syncTree.syncPointTree_.foreachOnPath(path, (pathToSyncPoint, sp) => {\n const relativePath = newRelativePath(pathToSyncPoint, path);\n serverCache =\n serverCache || syncPointGetCompleteServerCache(sp, relativePath);\n });\n let syncPoint = syncTree.syncPointTree_.get(path);\n if (!syncPoint) {\n syncPoint = new SyncPoint();\n syncTree.syncPointTree_ = syncTree.syncPointTree_.set(path, syncPoint);\n } else {\n serverCache =\n serverCache || syncPointGetCompleteServerCache(syncPoint, newEmptyPath());\n }\n const serverCacheComplete = serverCache != null;\n const serverCacheNode: CacheNode | null = serverCacheComplete\n ? new CacheNode(serverCache, true, false)\n : null;\n const writesCache: WriteTreeRef | null = writeTreeChildWrites(\n syncTree.pendingWriteTree_,\n query._path\n );\n const view: View = syncPointGetView(\n syncPoint,\n query,\n writesCache,\n serverCacheComplete ? serverCacheNode.getNode() : ChildrenNode.EMPTY_NODE,\n serverCacheComplete\n );\n return viewGetCompleteNode(view);\n}\n\n/**\n * A helper method that visits all descendant and ancestor SyncPoints, applying the operation.\n *\n * NOTES:\n * - Descendant SyncPoints will be visited first (since we raise events depth-first).\n *\n * - We call applyOperation() on each SyncPoint passing three things:\n * 1. A version of the Operation that has been made relative to the SyncPoint location.\n * 2. A WriteTreeRef of any writes we have cached at the SyncPoint location.\n * 3. A snapshot Node with cached server data, if we have it.\n *\n * - We concatenate all of the events returned by each SyncPoint and return the result.\n */\nfunction syncTreeApplyOperationToSyncPoints_(\n syncTree: SyncTree,\n operation: Operation\n): Event[] {\n return syncTreeApplyOperationHelper_(\n operation,\n syncTree.syncPointTree_,\n /*serverCache=*/ null,\n writeTreeChildWrites(syncTree.pendingWriteTree_, newEmptyPath())\n );\n}\n\n/**\n * Recursive helper for applyOperationToSyncPoints_\n */\nfunction syncTreeApplyOperationHelper_(\n operation: Operation,\n syncPointTree: ImmutableTree<SyncPoint>,\n serverCache: Node | null,\n writesCache: WriteTreeRef\n): Event[] {\n if (pathIsEmpty(operation.path)) {\n return syncTreeApplyOperationDescendantsHelper_(\n operation,\n syncPointTree,\n serverCache,\n writesCache\n );\n } else {\n const syncPoint = syncPointTree.get(newEmptyPath());\n\n // If we don't have cached server data, see if we can get it from this SyncPoint.\n if (serverCache == null && syncPoint != null) {\n serverCache = syncPointGetCompleteServerCache(syncPoint, newEmptyPath());\n }\n\n let events: Event[] = [];\n const childName = pathGetFront(operation.path);\n const childOperation = operation.operationForChild(childName);\n const childTree = syncPointTree.children.get(childName);\n if (childTree && childOperation) {\n const childServerCache = serverCache\n ? serverCache.getImmediateChild(childName)\n : null;\n const childWritesCache = writeTreeRefChild(writesCache, childName);\n events = events.concat(\n syncTreeApplyOperationHelper_(\n childOperation,\n childTree,\n childServerCache,\n childWritesCache\n )\n );\n }\n\n if (syncPoint) {\n events = events.concat(\n syncPointApplyOperation(syncPoint, operation, writesCache, serverCache)\n );\n }\n\n return events;\n }\n}\n\n/**\n * Recursive helper for applyOperationToSyncPoints_\n */\nfunction syncTreeApplyOperationDescendantsHelper_(\n operation: Operation,\n syncPointTree: ImmutableTree<SyncPoint>,\n serverCache: Node | null,\n writesCache: WriteTreeRef\n): Event[] {\n const syncPoint = syncPointTree.get(newEmptyPath());\n\n // If we don't have cached server data, see if we can get it from this SyncPoint.\n if (serverCache == null && syncPoint != null) {\n serverCache = syncPointGetCompleteServerCache(syncPoint, newEmptyPath());\n }\n\n let events: Event[] = [];\n syncPointTree.children.inorderTraversal((childName, childTree) => {\n const childServerCache = serverCache\n ? serverCache.getImmediateChild(childName)\n : null;\n const childWritesCache = writeTreeRefChild(writesCache, childName);\n const childOperation = operation.operationForChild(childName);\n if (childOperation) {\n events = events.concat(\n syncTreeApplyOperationDescendantsHelper_(\n childOperation,\n childTree,\n childServerCache,\n childWritesCache\n )\n );\n }\n });\n\n if (syncPoint) {\n events = events.concat(\n syncPointApplyOperation(syncPoint, operation, writesCache, serverCache)\n );\n }\n\n return events;\n}\n\nfunction syncTreeCreateListenerForView_(\n syncTree: SyncTree,\n view: View\n): { hashFn(): string; onComplete(a: string, b?: unknown): Event[] } {\n const query = view.query;\n const tag = syncTreeTagForQuery(syncTree, query);\n\n return {\n hashFn: () => {\n const cache = viewGetServerCache(view) || ChildrenNode.EMPTY_NODE;\n return cache.hash();\n },\n onComplete: (status: string): Event[] => {\n if (status === 'ok') {\n if (tag) {\n return syncTreeApplyTaggedListenComplete(syncTree, query._path, tag);\n } else {\n return syncTreeApplyListenComplete(syncTree, query._path);\n }\n } else {\n // If a listen failed, kill all of the listeners here, not just the one that triggered the error.\n // Note that this may need to be scoped to just this listener if we change permissions on filtered children\n const error = errorForServerCode(status, query);\n return syncTreeRemoveEventRegistration(\n syncTree,\n query,\n /*eventRegistration*/ null,\n error\n );\n }\n }\n };\n}\n\n/**\n * Return the tag associated with the given query.\n */\nexport function syncTreeTagForQuery(\n syncTree: SyncTree,\n query: QueryContext\n): number | null {\n const queryKey = syncTreeMakeQueryKey_(query);\n return syncTree.queryToTagMap.get(queryKey);\n}\n\n/**\n * Given a query, computes a \"queryKey\" suitable for use in our queryToTagMap_.\n */\nfunction syncTreeMakeQueryKey_(query: QueryContext): string {\n return query._path.toString() + '$' + query._queryIdentifier;\n}\n\n/**\n * Return the query associated with the given tag, if we have one\n */\nfunction syncTreeQueryKeyForTag_(\n syncTree: SyncTree,\n tag: number\n): string | null {\n return syncTree.tagToQueryMap.get(tag);\n}\n\n/**\n * Given a queryKey (created by makeQueryKey), parse it back into a path and queryId.\n */\nfunction syncTreeParseQueryKey_(queryKey: string): {\n queryId: string;\n path: Path;\n} {\n const splitIndex = queryKey.indexOf('$');\n assert(\n splitIndex !== -1 && splitIndex < queryKey.length - 1,\n 'Bad queryKey.'\n );\n return {\n queryId: queryKey.substr(splitIndex + 1),\n path: new Path(queryKey.substr(0, splitIndex))\n };\n}\n\n/**\n * A helper method to apply tagged operations\n */\nfunction syncTreeApplyTaggedOperation_(\n syncTree: SyncTree,\n queryPath: Path,\n operation: Operation\n): Event[] {\n const syncPoint = syncTree.syncPointTree_.get(queryPath);\n assert(syncPoint, \"Missing sync point for query tag that we're tracking\");\n const writesCache = writeTreeChildWrites(\n syncTree.pendingWriteTree_,\n queryPath\n );\n return syncPointApplyOperation(syncPoint, operation, writesCache, null);\n}\n\n/**\n * This collapses multiple unfiltered views into a single view, since we only need a single\n * listener for them.\n */\nfunction syncTreeCollectDistinctViewsForSubTree_(\n subtree: ImmutableTree<SyncPoint>\n): View[] {\n return subtree.fold<View[]>((relativePath, maybeChildSyncPoint, childMap) => {\n if (maybeChildSyncPoint && syncPointHasCompleteView(maybeChildSyncPoint)) {\n const completeView = syncPointGetCompleteView(maybeChildSyncPoint);\n return [completeView];\n } else {\n // No complete view here, flatten any deeper listens into an array\n let views: View[] = [];\n if (maybeChildSyncPoint) {\n views = syncPointGetQueryViews(maybeChildSyncPoint);\n }\n each(childMap, (_key: string, childViews: View[]) => {\n views = views.concat(childViews);\n });\n return views;\n }\n });\n}\n\n/**\n * Normalizes a query to a query we send the server for listening\n *\n * @returns The normalized query\n */\nfunction syncTreeQueryForListening_(query: QueryContext): QueryContext {\n if (query._queryParams.loadsAllData() && !query._queryParams.isDefault()) {\n // We treat queries that load all data as default queries\n // Cast is necessary because ref() technically returns Firebase which is actually fb.api.Firebase which inherits\n // from Query\n return new (syncTreeGetReferenceConstructor())(query._repo, query._path);\n } else {\n return query;\n }\n}\n\nfunction syncTreeRemoveTags_(syncTree: SyncTree, queries: QueryContext[]) {\n for (let j = 0; j < queries.length; ++j) {\n const removedQuery = queries[j];\n if (!removedQuery._queryParams.loadsAllData()) {\n // We should have a tag for this\n const removedQueryKey = syncTreeMakeQueryKey_(removedQuery);\n const removedQueryTag = syncTree.queryToTagMap.get(removedQueryKey);\n syncTree.queryToTagMap.delete(removedQueryKey);\n syncTree.tagToQueryMap.delete(removedQueryTag);\n }\n }\n}\n\n/**\n * Static accessor for query tags.\n */\nfunction syncTreeGetNextQueryTag_(): number {\n return syncTreeNextQueryTag_++;\n}\n\n/**\n * For a given new listen, manage the de-duplication of outstanding subscriptions.\n *\n * @returns This method can return events to support synchronous data sources\n */\nfunction syncTreeSetupListener_(\n syncTree: SyncTree,\n query: QueryContext,\n view: View\n): Event[] {\n const path = query._path;\n const tag = syncTreeTagForQuery(syncTree, query);\n const listener = syncTreeCreateListenerForView_(syncTree, view);\n\n const events = syncTree.listenProvider_.startListening(\n syncTreeQueryForListening_(query),\n tag,\n listener.hashFn,\n listener.onComplete\n );\n\n const subtree = syncTree.syncPointTree_.subtree(path);\n // The root of this subtree has our query. We're here because we definitely need to send a listen for that, but we\n // may need to shadow other listens as well.\n if (tag) {\n assert(\n !syncPointHasCompleteView(subtree.value),\n \"If we're adding a query, it shouldn't be shadowed\"\n );\n } else {\n // Shadow everything at or below this location, this is a default listener.\n const queriesToStop = subtree.fold<QueryContext[]>(\n (relativePath, maybeChildSyncPoint, childMap) => {\n if (\n !pathIsEmpty(relativePath) &&\n maybeChildSyncPoint &&\n syncPointHasCompleteView(maybeChildSyncPoint)\n ) {\n return [syncPointGetCompleteView(maybeChildSyncPoint).query];\n } else {\n // No default listener here, flatten any deeper queries into an array\n let queries: QueryContext[] = [];\n if (maybeChildSyncPoint) {\n queries = queries.concat(\n syncPointGetQueryViews(maybeChildSyncPoint).map(\n view => view.query\n )\n );\n }\n each(childMap, (_key: string, childQueries: QueryContext[]) => {\n queries = queries.concat(childQueries);\n });\n return queries;\n }\n }\n );\n for (let i = 0; i < queriesToStop.length; ++i) {\n const queryToStop = queriesToStop[i];\n syncTree.listenProvider_.stopListening(\n syncTreeQueryForListening_(queryToStop),\n syncTreeTagForQuery(syncTree, queryToStop)\n );\n }\n }\n return events;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { assert } from '@firebase/util';\n\nimport { ChildrenNode } from '../snap/ChildrenNode';\nimport { PRIORITY_INDEX } from '../snap/indexes/PriorityIndex';\nimport { LeafNode } from '../snap/LeafNode';\nimport { Node } from '../snap/Node';\nimport { nodeFromJSON } from '../snap/nodeFromJSON';\nimport { SyncTree, syncTreeCalcCompleteEventCache } from '../SyncTree';\n\nimport { Indexable } from './misc';\nimport { Path, pathChild } from './Path';\n\n/* It's critical for performance that we do not calculate actual values from a SyncTree\n * unless and until the value is needed. Because we expose both a SyncTree and Node\n * version of deferred value resolution, we ned a wrapper class that will let us share\n * code.\n *\n * @see https://github.com/firebase/firebase-js-sdk/issues/2487\n */\ninterface ValueProvider {\n getImmediateChild(childName: string): ValueProvider;\n node(): Node;\n}\n\nclass ExistingValueProvider implements ValueProvider {\n constructor(readonly node_: Node) {}\n\n getImmediateChild(childName: string): ValueProvider {\n const child = this.node_.getImmediateChild(childName);\n return new ExistingValueProvider(child);\n }\n\n node(): Node {\n return this.node_;\n }\n}\n\nclass DeferredValueProvider implements ValueProvider {\n private syncTree_: SyncTree;\n private path_: Path;\n\n constructor(syncTree: SyncTree, path: Path) {\n this.syncTree_ = syncTree;\n this.path_ = path;\n }\n\n getImmediateChild(childName: string): ValueProvider {\n const childPath = pathChild(this.path_, childName);\n return new DeferredValueProvider(this.syncTree_, childPath);\n }\n\n node(): Node {\n return syncTreeCalcCompleteEventCache(this.syncTree_, this.path_);\n }\n}\n\n/**\n * Generate placeholders for deferred values.\n */\nexport const generateWithValues = function (\n values: {\n [k: string]: unknown;\n } | null\n): { [k: string]: unknown } {\n values = values || {};\n values['timestamp'] = values['timestamp'] || new Date().getTime();\n return values;\n};\n\n/**\n * Value to use when firing local events. When writing server values, fire\n * local events with an approximate value, otherwise return value as-is.\n */\nexport const resolveDeferredLeafValue = function (\n value: { [k: string]: unknown } | string | number | boolean,\n existingVal: ValueProvider,\n serverValues: { [k: string]: unknown }\n): string | number | boolean {\n if (!value || typeof value !== 'object') {\n return value as string | number | boolean;\n }\n assert('.sv' in value, 'Unexpected leaf node or priority contents');\n\n if (typeof value['.sv'] === 'string') {\n return resolveScalarDeferredValue(value['.sv'], existingVal, serverValues);\n } else if (typeof value['.sv'] === 'object') {\n return resolveComplexDeferredValue(value['.sv'], existingVal, serverValues);\n } else {\n assert(false, 'Unexpected server value: ' + JSON.stringify(value, null, 2));\n }\n};\n\nconst resolveScalarDeferredValue = function (\n op: string,\n existing: ValueProvider,\n serverValues: { [k: string]: unknown }\n): string | number | boolean {\n switch (op) {\n case 'timestamp':\n return serverValues['timestamp'] as string | number | boolean;\n default:\n assert(false, 'Unexpected server value: ' + op);\n }\n};\n\nconst resolveComplexDeferredValue = function (\n op: object,\n existing: ValueProvider,\n unused: { [k: string]: unknown }\n): string | number | boolean {\n if (!op.hasOwnProperty('increment')) {\n assert(false, 'Unexpected server value: ' + JSON.stringify(op, null, 2));\n }\n const delta = op['increment'];\n if (typeof delta !== 'number') {\n assert(false, 'Unexpected increment value: ' + delta);\n }\n\n const existingNode = existing.node();\n assert(\n existingNode !== null && typeof existingNode !== 'undefined',\n 'Expected ChildrenNode.EMPTY_NODE for nulls'\n );\n\n // Incrementing a non-number sets the value to the incremented amount\n if (!existingNode.isLeafNode()) {\n return delta;\n }\n\n const leaf = existingNode as LeafNode;\n const existingVal = leaf.getValue();\n if (typeof existingVal !== 'number') {\n return delta;\n }\n\n // No need to do over/underflow arithmetic here because JS only handles floats under the covers\n return existingVal + delta;\n};\n\n/**\n * Recursively replace all deferred values and priorities in the tree with the\n * specified generated replacement values.\n * @param path - path to which write is relative\n * @param node - new data written at path\n * @param syncTree - current data\n */\nexport const resolveDeferredValueTree = function (\n path: Path,\n node: Node,\n syncTree: SyncTree,\n serverValues: Indexable\n): Node {\n return resolveDeferredValue(\n node,\n new DeferredValueProvider(syncTree, path),\n serverValues\n );\n};\n\n/**\n * Recursively replace all deferred values and priorities in the node with the\n * specified generated replacement values. If there are no server values in the node,\n * it'll be returned as-is.\n */\nexport const resolveDeferredValueSnapshot = function (\n node: Node,\n existing: Node,\n serverValues: Indexable\n): Node {\n return resolveDeferredValue(\n node,\n new ExistingValueProvider(existing),\n serverValues\n );\n};\n\nfunction resolveDeferredValue(\n node: Node,\n existingVal: ValueProvider,\n serverValues: Indexable\n): Node {\n const rawPri = node.getPriority().val() as\n | Indexable\n | boolean\n | null\n | number\n | string;\n const priority = resolveDeferredLeafValue(\n rawPri,\n existingVal.getImmediateChild('.priority'),\n serverValues\n );\n let newNode: Node;\n\n if (node.isLeafNode()) {\n const leafNode = node as LeafNode;\n const value = resolveDeferredLeafValue(\n leafNode.getValue(),\n existingVal,\n serverValues\n );\n if (\n value !== leafNode.getValue() ||\n priority !== leafNode.getPriority().val()\n ) {\n return new LeafNode(value, nodeFromJSON(priority));\n } else {\n return node;\n }\n } else {\n const childrenNode = node as ChildrenNode;\n newNode = childrenNode;\n if (priority !== childrenNode.getPriority().val()) {\n newNode = newNode.updatePriority(new LeafNode(priority));\n }\n childrenNode.forEachChild(PRIORITY_INDEX, (childName, childNode) => {\n const newChildNode = resolveDeferredValue(\n childNode,\n existingVal.getImmediateChild(childName),\n serverValues\n );\n if (newChildNode !== childNode) {\n newNode = newNode.updateImmediateChild(childName, newChildNode);\n }\n });\n return newNode;\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { contains, safeGet } from '@firebase/util';\n\nimport { Path, pathGetFront, pathPopFront } from './Path';\nimport { each } from './util';\n\n/**\n * Node in a Tree.\n */\nexport interface TreeNode<T> {\n // TODO: Consider making accessors that create children and value lazily or\n // separate Internal / Leaf 'types'.\n children: Record<string, TreeNode<T>>;\n childCount: number;\n value?: T;\n}\n\n/**\n * A light-weight tree, traversable by path. Nodes can have both values and children.\n * Nodes are not enumerated (by forEachChild) unless they have a value or non-empty\n * children.\n */\nexport class Tree<T> {\n /**\n * @param name - Optional name of the node.\n * @param parent - Optional parent node.\n * @param node - Optional node to wrap.\n */\n constructor(\n readonly name: string = '',\n readonly parent: Tree<T> | null = null,\n public node: TreeNode<T> = { children: {}, childCount: 0 }\n ) {}\n}\n\n/**\n * Returns a sub-Tree for the given path.\n *\n * @param pathObj - Path to look up.\n * @returns Tree for path.\n */\nexport function treeSubTree<T>(tree: Tree<T>, pathObj: string | Path): Tree<T> {\n // TODO: Require pathObj to be Path?\n let path = pathObj instanceof Path ? pathObj : new Path(pathObj);\n let child = tree,\n next = pathGetFront(path);\n while (next !== null) {\n const childNode = safeGet(child.node.children, next) || {\n children: {},\n childCount: 0\n };\n child = new Tree<T>(next, child, childNode);\n path = pathPopFront(path);\n next = pathGetFront(path);\n }\n\n return child;\n}\n\n/**\n * Returns the data associated with this tree node.\n *\n * @returns The data or null if no data exists.\n */\nexport function treeGetValue<T>(tree: Tree<T>): T | undefined {\n return tree.node.value;\n}\n\n/**\n * Sets data to this tree node.\n *\n * @param value - Value to set.\n */\nexport function treeSetValue<T>(tree: Tree<T>, value: T | undefined): void {\n tree.node.value = value;\n treeUpdateParents(tree);\n}\n\n/**\n * @returns Whether the tree has any children.\n */\nexport function treeHasChildren<T>(tree: Tree<T>): boolean {\n return tree.node.childCount > 0;\n}\n\n/**\n * @returns Whethe rthe tree is empty (no value or children).\n */\nexport function treeIsEmpty<T>(tree: Tree<T>): boolean {\n return treeGetValue(tree) === undefined && !treeHasChildren(tree);\n}\n\n/**\n * Calls action for each child of this tree node.\n *\n * @param action - Action to be called for each child.\n */\nexport function treeForEachChild<T>(\n tree: Tree<T>,\n action: (tree: Tree<T>) => void\n): void {\n each(tree.node.children, (child: string, childTree: TreeNode<T>) => {\n action(new Tree<T>(child, tree, childTree));\n });\n}\n\n/**\n * Does a depth-first traversal of this node's descendants, calling action for each one.\n *\n * @param action - Action to be called for each child.\n * @param includeSelf - Whether to call action on this node as well. Defaults to\n * false.\n * @param childrenFirst - Whether to call action on children before calling it on\n * parent.\n */\nexport function treeForEachDescendant<T>(\n tree: Tree<T>,\n action: (tree: Tree<T>) => void,\n includeSelf?: boolean,\n childrenFirst?: boolean\n): void {\n if (includeSelf && !childrenFirst) {\n action(tree);\n }\n\n treeForEachChild(tree, child => {\n treeForEachDescendant(child, action, true, childrenFirst);\n });\n\n if (includeSelf && childrenFirst) {\n action(tree);\n }\n}\n\n/**\n * Calls action on each ancestor node.\n *\n * @param action - Action to be called on each parent; return\n * true to abort.\n * @param includeSelf - Whether to call action on this node as well.\n * @returns true if the action callback returned true.\n */\nexport function treeForEachAncestor<T>(\n tree: Tree<T>,\n action: (tree: Tree<T>) => unknown,\n includeSelf?: boolean\n): boolean {\n let node = includeSelf ? tree : tree.parent;\n while (node !== null) {\n if (action(node)) {\n return true;\n }\n node = node.parent;\n }\n return false;\n}\n\n/**\n * Does a depth-first traversal of this node's descendants. When a descendant with a value\n * is found, action is called on it and traversal does not continue inside the node.\n * Action is *not* called on this node.\n *\n * @param action - Action to be called for each child.\n */\nexport function treeForEachImmediateDescendantWithValue<T>(\n tree: Tree<T>,\n action: (tree: Tree<T>) => void\n): void {\n treeForEachChild(tree, child => {\n if (treeGetValue(child) !== undefined) {\n action(child);\n } else {\n treeForEachImmediateDescendantWithValue(child, action);\n }\n });\n}\n\n/**\n * @returns The path of this tree node, as a Path.\n */\nexport function treeGetPath<T>(tree: Tree<T>) {\n return new Path(\n tree.parent === null\n ? tree.name\n : treeGetPath(tree.parent) + '/' + tree.name\n );\n}\n\n/**\n * Adds or removes this child from its parent based on whether it's empty or not.\n */\nfunction treeUpdateParents<T>(tree: Tree<T>) {\n if (tree.parent !== null) {\n treeUpdateChild(tree.parent, tree.name, tree);\n }\n}\n\n/**\n * Adds or removes the passed child to this tree node, depending on whether it's empty.\n *\n * @param childName - The name of the child to update.\n * @param child - The child to update.\n */\nfunction treeUpdateChild<T>(tree: Tree<T>, childName: string, child: Tree<T>) {\n const childEmpty = treeIsEmpty(child);\n const childExists = contains(tree.node.children, childName);\n if (childEmpty && childExists) {\n delete tree.node.children[childName];\n tree.node.childCount--;\n treeUpdateParents(tree);\n } else if (!childEmpty && !childExists) {\n tree.node.children[childName] = child.node;\n tree.node.childCount++;\n treeUpdateParents(tree);\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n contains,\n errorPrefix as errorPrefixFxn,\n safeGet,\n stringLength\n} from '@firebase/util';\n\nimport { RepoInfo } from '../RepoInfo';\n\nimport {\n Path,\n pathChild,\n pathCompare,\n pathContains,\n pathGetBack,\n pathGetFront,\n pathSlice,\n ValidationPath,\n validationPathPop,\n validationPathPush,\n validationPathToErrorString\n} from './Path';\nimport { each, isInvalidJSONNumber } from './util';\n\n/**\n * True for invalid Firebase keys\n */\nexport const INVALID_KEY_REGEX_ = /[\\[\\].#$\\/\\u0000-\\u001F\\u007F]/;\n\n/**\n * True for invalid Firebase paths.\n * Allows '/' in paths.\n */\nexport const INVALID_PATH_REGEX_ = /[\\[\\].#$\\u0000-\\u001F\\u007F]/;\n\n/**\n * Maximum number of characters to allow in leaf value\n */\nexport const MAX_LEAF_SIZE_ = 10 * 1024 * 1024;\n\nexport const isValidKey = function (key: unknown): boolean {\n return (\n typeof key === 'string' && key.length !== 0 && !INVALID_KEY_REGEX_.test(key)\n );\n};\n\nexport const isValidPathString = function (pathString: string): boolean {\n return (\n typeof pathString === 'string' &&\n pathString.length !== 0 &&\n !INVALID_PATH_REGEX_.test(pathString)\n );\n};\n\nexport const isValidRootPathString = function (pathString: string): boolean {\n if (pathString) {\n // Allow '/.info/' at the beginning.\n pathString = pathString.replace(/^\\/*\\.info(\\/|$)/, '/');\n }\n\n return isValidPathString(pathString);\n};\n\nexport const isValidPriority = function (priority: unknown): boolean {\n return (\n priority === null ||\n typeof priority === 'string' ||\n (typeof priority === 'number' && !isInvalidJSONNumber(priority)) ||\n (priority &&\n typeof priority === 'object' &&\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n contains(priority as any, '.sv'))\n );\n};\n\n/**\n * Pre-validate a datum passed as an argument to Firebase function.\n */\nexport const validateFirebaseDataArg = function (\n fnName: string,\n value: unknown,\n path: Path,\n optional: boolean\n) {\n if (optional && value === undefined) {\n return;\n }\n\n validateFirebaseData(errorPrefixFxn(fnName, 'value'), value, path);\n};\n\n/**\n * Validate a data object client-side before sending to server.\n */\nexport const validateFirebaseData = function (\n errorPrefix: string,\n data: unknown,\n path_: Path | ValidationPath\n) {\n const path =\n path_ instanceof Path ? new ValidationPath(path_, errorPrefix) : path_;\n\n if (data === undefined) {\n throw new Error(\n errorPrefix + 'contains undefined ' + validationPathToErrorString(path)\n );\n }\n if (typeof data === 'function') {\n throw new Error(\n errorPrefix +\n 'contains a function ' +\n validationPathToErrorString(path) +\n ' with contents = ' +\n data.toString()\n );\n }\n if (isInvalidJSONNumber(data)) {\n throw new Error(\n errorPrefix +\n 'contains ' +\n data.toString() +\n ' ' +\n validationPathToErrorString(path)\n );\n }\n\n // Check max leaf size, but try to avoid the utf8 conversion if we can.\n if (\n typeof data === 'string' &&\n data.length > MAX_LEAF_SIZE_ / 3 &&\n stringLength(data) > MAX_LEAF_SIZE_\n ) {\n throw new Error(\n errorPrefix +\n 'contains a string greater than ' +\n MAX_LEAF_SIZE_ +\n ' utf8 bytes ' +\n validationPathToErrorString(path) +\n \" ('\" +\n data.substring(0, 50) +\n \"...')\"\n );\n }\n\n // TODO = Perf = Consider combining the recursive validation of keys into NodeFromJSON\n // to save extra walking of large objects.\n if (data && typeof data === 'object') {\n let hasDotValue = false;\n let hasActualChild = false;\n each(data, (key: string, value: unknown) => {\n if (key === '.value') {\n hasDotValue = true;\n } else if (key !== '.priority' && key !== '.sv') {\n hasActualChild = true;\n if (!isValidKey(key)) {\n throw new Error(\n errorPrefix +\n ' contains an invalid key (' +\n key +\n ') ' +\n validationPathToErrorString(path) +\n '. Keys must be non-empty strings ' +\n 'and can\\'t contain \".\", \"#\", \"$\", \"/\", \"[\", or \"]\"'\n );\n }\n }\n\n validationPathPush(path, key);\n validateFirebaseData(errorPrefix, value, path);\n validationPathPop(path);\n });\n\n if (hasDotValue && hasActualChild) {\n throw new Error(\n errorPrefix +\n ' contains \".value\" child ' +\n validationPathToErrorString(path) +\n ' in addition to actual children.'\n );\n }\n }\n};\n\n/**\n * Pre-validate paths passed in the firebase function.\n */\nexport const validateFirebaseMergePaths = function (\n errorPrefix: string,\n mergePaths: Path[]\n) {\n let i, curPath: Path;\n for (i = 0; i < mergePaths.length; i++) {\n curPath = mergePaths[i];\n const keys = pathSlice(curPath);\n for (let j = 0; j < keys.length; j++) {\n if (keys[j] === '.priority' && j === keys.length - 1) {\n // .priority is OK\n } else if (!isValidKey(keys[j])) {\n throw new Error(\n errorPrefix +\n 'contains an invalid key (' +\n keys[j] +\n ') in path ' +\n curPath.toString() +\n '. Keys must be non-empty strings ' +\n 'and can\\'t contain \".\", \"#\", \"$\", \"/\", \"[\", or \"]\"'\n );\n }\n }\n }\n\n // Check that update keys are not descendants of each other.\n // We rely on the property that sorting guarantees that ancestors come\n // right before descendants.\n mergePaths.sort(pathCompare);\n let prevPath: Path | null = null;\n for (i = 0; i < mergePaths.length; i++) {\n curPath = mergePaths[i];\n if (prevPath !== null && pathContains(prevPath, curPath)) {\n throw new Error(\n errorPrefix +\n 'contains a path ' +\n prevPath.toString() +\n ' that is ancestor of another path ' +\n curPath.toString()\n );\n }\n prevPath = curPath;\n }\n};\n\n/**\n * pre-validate an object passed as an argument to firebase function (\n * must be an object - e.g. for firebase.update()).\n */\nexport const validateFirebaseMergeDataArg = function (\n fnName: string,\n data: unknown,\n path: Path,\n optional: boolean\n) {\n if (optional && data === undefined) {\n return;\n }\n\n const errorPrefix = errorPrefixFxn(fnName, 'values');\n\n if (!(data && typeof data === 'object') || Array.isArray(data)) {\n throw new Error(\n errorPrefix + ' must be an object containing the children to replace.'\n );\n }\n\n const mergePaths: Path[] = [];\n each(data, (key: string, value: unknown) => {\n const curPath = new Path(key);\n validateFirebaseData(errorPrefix, value, pathChild(path, curPath));\n if (pathGetBack(curPath) === '.priority') {\n if (!isValidPriority(value)) {\n throw new Error(\n errorPrefix +\n \"contains an invalid value for '\" +\n curPath.toString() +\n \"', which must be a valid \" +\n 'Firebase priority (a string, finite number, server value, or null).'\n );\n }\n }\n mergePaths.push(curPath);\n });\n validateFirebaseMergePaths(errorPrefix, mergePaths);\n};\n\nexport const validatePriority = function (\n fnName: string,\n priority: unknown,\n optional: boolean\n) {\n if (optional && priority === undefined) {\n return;\n }\n if (isInvalidJSONNumber(priority)) {\n throw new Error(\n errorPrefixFxn(fnName, 'priority') +\n 'is ' +\n priority.toString() +\n ', but must be a valid Firebase priority (a string, finite number, ' +\n 'server value, or null).'\n );\n }\n // Special case to allow importing data with a .sv.\n if (!isValidPriority(priority)) {\n throw new Error(\n errorPrefixFxn(fnName, 'priority') +\n 'must be a valid Firebase priority ' +\n '(a string, finite number, server value, or null).'\n );\n }\n};\n\nexport const validateKey = function (\n fnName: string,\n argumentName: string,\n key: string,\n optional: boolean\n) {\n if (optional && key === undefined) {\n return;\n }\n if (!isValidKey(key)) {\n throw new Error(\n errorPrefixFxn(fnName, argumentName) +\n 'was an invalid key = \"' +\n key +\n '\". Firebase keys must be non-empty strings and ' +\n 'can\\'t contain \".\", \"#\", \"$\", \"/\", \"[\", or \"]\").'\n );\n }\n};\n\n/**\n * @internal\n */\nexport const validatePathString = function (\n fnName: string,\n argumentName: string,\n pathString: string,\n optional: boolean\n) {\n if (optional && pathString === undefined) {\n return;\n }\n\n if (!isValidPathString(pathString)) {\n throw new Error(\n errorPrefixFxn(fnName, argumentName) +\n 'was an invalid path = \"' +\n pathString +\n '\". Paths must be non-empty strings and ' +\n 'can\\'t contain \".\", \"#\", \"$\", \"[\", or \"]\"'\n );\n }\n};\n\nexport const validateRootPathString = function (\n fnName: string,\n argumentName: string,\n pathString: string,\n optional: boolean\n) {\n if (pathString) {\n // Allow '/.info/' at the beginning.\n pathString = pathString.replace(/^\\/*\\.info(\\/|$)/, '/');\n }\n\n validatePathString(fnName, argumentName, pathString, optional);\n};\n\n/**\n * @internal\n */\nexport const validateWritablePath = function (fnName: string, path: Path) {\n if (pathGetFront(path) === '.info') {\n throw new Error(fnName + \" failed = Can't modify data under /.info/\");\n }\n};\n\nexport const validateUrl = function (\n fnName: string,\n parsedUrl: { repoInfo: RepoInfo; path: Path }\n) {\n // TODO = Validate server better.\n const pathString = parsedUrl.path.toString();\n if (\n !(typeof parsedUrl.repoInfo.host === 'string') ||\n parsedUrl.repoInfo.host.length === 0 ||\n (!isValidKey(parsedUrl.repoInfo.namespace) &&\n parsedUrl.repoInfo.host.split(':')[0] !== 'localhost') ||\n (pathString.length !== 0 && !isValidRootPathString(pathString))\n ) {\n throw new Error(\n errorPrefixFxn(fnName, 'url') +\n 'must be a valid firebase URL and ' +\n 'the path can\\'t contain \".\", \"#\", \"$\", \"[\", or \"]\".'\n );\n }\n};\n\nexport const validateString = function (\n fnName: string,\n argumentName: string,\n string: unknown,\n optional: boolean\n) {\n if (optional && string === undefined) {\n return;\n }\n if (!(typeof string === 'string')) {\n throw new Error(\n errorPrefixFxn(fnName, argumentName) + 'must be a valid string.'\n );\n }\n};\n\nexport const validateObject = function (\n fnName: string,\n argumentName: string,\n obj: unknown,\n optional: boolean\n) {\n if (optional && obj === undefined) {\n return;\n }\n if (!(obj && typeof obj === 'object') || obj === null) {\n throw new Error(\n errorPrefixFxn(fnName, argumentName) + 'must be a valid object.'\n );\n }\n};\n\nexport const validateObjectContainsKey = function (\n fnName: string,\n argumentName: string,\n obj: unknown,\n key: string,\n optional: boolean,\n optType?: string\n) {\n const objectContainsKey =\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n obj && typeof obj === 'object' && contains(obj as any, key);\n\n if (!objectContainsKey) {\n if (optional) {\n return;\n } else {\n throw new Error(\n errorPrefixFxn(fnName, argumentName) +\n 'must contain the key \"' +\n key +\n '\"'\n );\n }\n }\n\n if (optType) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const val = safeGet(obj as any, key);\n if (\n (optType === 'number' && !(typeof val === 'number')) ||\n (optType === 'string' && !(typeof val === 'string')) ||\n (optType === 'boolean' && !(typeof val === 'boolean')) ||\n (optType === 'function' && !(typeof val === 'function')) ||\n (optType === 'object' && !(typeof val === 'object') && val)\n ) {\n if (optional) {\n throw new Error(\n errorPrefixFxn(fnName, argumentName) +\n 'contains invalid value for key \"' +\n key +\n '\" (must be of type \"' +\n optType +\n '\")'\n );\n } else {\n throw new Error(\n errorPrefixFxn(fnName, argumentName) +\n 'must contain the key \"' +\n key +\n '\" with type \"' +\n optType +\n '\"'\n );\n }\n }\n }\n};\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Path, pathContains, pathEquals } from '../util/Path';\nimport { exceptionGuard, log, logger } from '../util/util';\n\nimport { Event } from './Event';\n\n/**\n * The event queue serves a few purposes:\n * 1. It ensures we maintain event order in the face of event callbacks doing operations that result in more\n * events being queued.\n * 2. raiseQueuedEvents() handles being called reentrantly nicely. That is, if in the course of raising events,\n * raiseQueuedEvents() is called again, the \"inner\" call will pick up raising events where the \"outer\" call\n * left off, ensuring that the events are still raised synchronously and in order.\n * 3. You can use raiseEventsAtPath and raiseEventsForChangedPath to ensure only relevant previously-queued\n * events are raised synchronously.\n *\n * NOTE: This can all go away if/when we move to async events.\n *\n */\nexport class EventQueue {\n eventLists_: EventList[] = [];\n\n /**\n * Tracks recursion depth of raiseQueuedEvents_, for debugging purposes.\n */\n recursionDepth_ = 0;\n}\n\n/**\n * @param eventDataList - The new events to queue.\n */\nexport function eventQueueQueueEvents(\n eventQueue: EventQueue,\n eventDataList: Event[]\n) {\n // We group events by path, storing them in a single EventList, to make it easier to skip over them quickly.\n let currList: EventList | null = null;\n for (let i = 0; i < eventDataList.length; i++) {\n const data = eventDataList[i];\n const path = data.getPath();\n if (currList !== null && !pathEquals(path, currList.path)) {\n eventQueue.eventLists_.push(currList);\n currList = null;\n }\n\n if (currList === null) {\n currList = { events: [], path };\n }\n\n currList.events.push(data);\n }\n if (currList) {\n eventQueue.eventLists_.push(currList);\n }\n}\n\n/**\n * Queues the specified events and synchronously raises all events (including previously queued ones)\n * for the specified path.\n *\n * It is assumed that the new events are all for the specified path.\n *\n * @param path - The path to raise events for.\n * @param eventDataList - The new events to raise.\n */\nexport function eventQueueRaiseEventsAtPath(\n eventQueue: EventQueue,\n path: Path,\n eventDataList: Event[]\n) {\n eventQueueQueueEvents(eventQueue, eventDataList);\n eventQueueRaiseQueuedEventsMatchingPredicate(eventQueue, eventPath =>\n pathEquals(eventPath, path)\n );\n}\n\n/**\n * Queues the specified events and synchronously raises all events (including previously queued ones) for\n * locations related to the specified change path (i.e. all ancestors and descendants).\n *\n * It is assumed that the new events are all related (ancestor or descendant) to the specified path.\n *\n * @param changedPath - The path to raise events for.\n * @param eventDataList - The events to raise\n */\nexport function eventQueueRaiseEventsForChangedPath(\n eventQueue: EventQueue,\n changedPath: Path,\n eventDataList: Event[]\n) {\n eventQueueQueueEvents(eventQueue, eventDataList);\n eventQueueRaiseQueuedEventsMatchingPredicate(\n eventQueue,\n eventPath =>\n pathContains(eventPath, changedPath) ||\n pathContains(changedPath, eventPath)\n );\n}\n\nfunction eventQueueRaiseQueuedEventsMatchingPredicate(\n eventQueue: EventQueue,\n predicate: (path: Path) => boolean\n) {\n eventQueue.recursionDepth_++;\n\n let sentAll = true;\n for (let i = 0; i < eventQueue.eventLists_.length; i++) {\n const eventList = eventQueue.eventLists_[i];\n if (eventList) {\n const eventPath = eventList.path;\n if (predicate(eventPath)) {\n eventListRaise(eventQueue.eventLists_[i]);\n eventQueue.eventLists_[i] = null;\n } else {\n sentAll = false;\n }\n }\n }\n\n if (sentAll) {\n eventQueue.eventLists_ = [];\n }\n\n eventQueue.recursionDepth_--;\n}\n\ninterface EventList {\n events: Event[];\n path: Path;\n}\n\n/**\n * Iterates through the list and raises each event\n */\nfunction eventListRaise(eventList: EventList) {\n for (let i = 0; i < eventList.events.length; i++) {\n const eventData = eventList.events[i];\n if (eventData !== null) {\n eventList.events[i] = null;\n const eventFn = eventData.getEventRunner();\n if (logger) {\n log('event: ' + eventData.toString());\n }\n exceptionGuard(eventFn);\n }\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n assert,\n contains,\n isEmpty,\n map,\n safeGet,\n stringify\n} from '@firebase/util';\n\nimport { ValueEventRegistration } from '../api/Reference_impl';\n\nimport { AppCheckTokenProvider } from './AppCheckTokenProvider';\nimport { AuthTokenProvider } from './AuthTokenProvider';\nimport { PersistentConnection } from './PersistentConnection';\nimport { ReadonlyRestClient } from './ReadonlyRestClient';\nimport { RepoInfo } from './RepoInfo';\nimport { ServerActions } from './ServerActions';\nimport { ChildrenNode } from './snap/ChildrenNode';\nimport { Node } from './snap/Node';\nimport { nodeFromJSON } from './snap/nodeFromJSON';\nimport { SnapshotHolder } from './SnapshotHolder';\nimport {\n newSparseSnapshotTree,\n SparseSnapshotTree,\n sparseSnapshotTreeForEachTree,\n sparseSnapshotTreeForget,\n sparseSnapshotTreeRemember\n} from './SparseSnapshotTree';\nimport { StatsCollection } from './stats/StatsCollection';\nimport { StatsListener } from './stats/StatsListener';\nimport {\n statsManagerGetCollection,\n statsManagerGetOrCreateReporter\n} from './stats/StatsManager';\nimport { StatsReporter, statsReporterIncludeStat } from './stats/StatsReporter';\nimport {\n SyncTree,\n syncTreeAckUserWrite,\n syncTreeAddEventRegistration,\n syncTreeApplyServerMerge,\n syncTreeApplyServerOverwrite,\n syncTreeApplyTaggedQueryMerge,\n syncTreeApplyTaggedQueryOverwrite,\n syncTreeApplyUserMerge,\n syncTreeApplyUserOverwrite,\n syncTreeCalcCompleteEventCache,\n syncTreeGetServerValue,\n syncTreeRemoveEventRegistration,\n syncTreeTagForQuery\n} from './SyncTree';\nimport { Indexable } from './util/misc';\nimport {\n newEmptyPath,\n newRelativePath,\n Path,\n pathChild,\n pathGetFront,\n pathPopFront\n} from './util/Path';\nimport {\n generateWithValues,\n resolveDeferredValueSnapshot,\n resolveDeferredValueTree\n} from './util/ServerValues';\nimport {\n Tree,\n treeForEachAncestor,\n treeForEachChild,\n treeForEachDescendant,\n treeGetPath,\n treeGetValue,\n treeHasChildren,\n treeSetValue,\n treeSubTree\n} from './util/Tree';\nimport {\n beingCrawled,\n each,\n exceptionGuard,\n log,\n LUIDGenerator,\n warn\n} from './util/util';\nimport { isValidPriority, validateFirebaseData } from './util/validation';\nimport { Event } from './view/Event';\nimport {\n EventQueue,\n eventQueueQueueEvents,\n eventQueueRaiseEventsAtPath,\n eventQueueRaiseEventsForChangedPath\n} from './view/EventQueue';\nimport { EventRegistration, QueryContext } from './view/EventRegistration';\n\nconst INTERRUPT_REASON = 'repo_interrupt';\n\n/**\n * If a transaction does not succeed after 25 retries, we abort it. Among other\n * things this ensure that if there's ever a bug causing a mismatch between\n * client / server hashes for some data, we won't retry indefinitely.\n */\nconst MAX_TRANSACTION_RETRIES = 25;\n\nconst enum TransactionStatus {\n // We've run the transaction and updated transactionResultData_ with the result, but it isn't currently sent to the\n // server. A transaction will go from RUN -> SENT -> RUN if it comes back from the server as rejected due to\n
|