matrix_sdk_indexeddb/crypto_store/migrations/
mod.rs

1// Copyright 2023 The Matrix.org Foundation C.I.C.
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15use std::ops::Deref;
16
17use indexed_db_futures::{
18    database::Database,
19    error::{Error, OpenDbError},
20    index::Index,
21    internals::SystemRepr,
22    object_store::ObjectStore,
23    prelude::*,
24    transaction::Transaction,
25};
26use tracing::info;
27
28use crate::{crypto_store::Result, serializer::SafeEncodeSerializer, IndexeddbCryptoStoreError};
29
30mod old_keys;
31mod v0_to_v5;
32mod v101_to_v102;
33mod v10_to_v11;
34mod v11_to_v12;
35mod v12_to_v13;
36mod v13_to_v14;
37mod v14_to_v101;
38mod v5_to_v7;
39mod v7;
40mod v7_to_v8;
41mod v8_to_v10;
42
/// Wrapper around an open [`Database`] used while running a data migration.
///
/// Logs a "starting" message when created (see [`MigrationDb::new`]) and a
/// "finished" message when dropped, closing the database on drop.
struct MigrationDb {
    // The open database handle; closed in `Drop`.
    db: Database,
    // The schema version this migration is heading towards; only used for
    // the log messages.
    next_version: u32,
}
47
48impl MigrationDb {
49    /// Create an Indexed DB wrapper that manages a database migration,
50    /// logging messages before and after the migration, and automatically
51    /// closing the DB when this object is dropped.
52    async fn new(name: &str, next_version: u32) -> Result<Self> {
53        info!("IndexeddbCryptoStore migrate data before v{next_version} starting");
54        Ok(Self { db: Database::open(name).await?, next_version })
55    }
56}
57
58impl Deref for MigrationDb {
59    type Target = Database;
60
61    fn deref(&self) -> &Self::Target {
62        &self.db
63    }
64}
65
66impl Drop for MigrationDb {
67    fn drop(&mut self) {
68        let version = self.next_version;
69        info!("IndexeddbCryptoStore migrate data before v{version} finished");
70        self.db.as_sys().close();
71    }
72}
73
/// The latest version of the schema we can support. If we encounter a database
/// version with a higher schema version, we will return an error.
///
/// A note on how this works.
///
/// Normally, when you open an indexeddb database, you tell it the "schema
/// version" that you know about. If the existing database is older than
/// that, it lets you run a migration. If the existing database is newer, then
/// it assumes that there have been incompatible schema changes and complains
/// with an error ("The requested version (10) is less than the existing version
/// (11)").
///
/// The problem with this is that, if someone upgrades their installed
/// application, then realises it was a terrible mistake and tries to roll
/// back, then suddenly every user's session is completely hosed. (They see
/// an "unable to restore session" dialog.) Often, schema updates aren't
/// actually backwards-incompatible — for example, existing code will work just
/// fine if someone adds a new store or a new index — so this approach is too
/// heavy-handed.
///
/// The solution we take here is to say "any schema changes up to
/// [`MAX_SUPPORTED_SCHEMA_VERSION`] will be backwards-compatible". If, at some
/// point, we do make a breaking change, we will give that schema version a
/// higher number. Then, rather than using the implicit version check that comes
/// with `indexedDB.open(name, version)`, we explicitly check the version
/// ourselves.
///
/// It is expected that we will use version numbers that are multiples of 100 to
/// represent breaking changes — for example, version 100 is a breaking change,
/// as is version 200, but versions 101-199 are all backwards compatible with
/// version 100. In other words, if you divide by 100, you get something
/// approaching semver: version 200 is major version 2, minor version 0.
///
/// The current value, 199, therefore accepts any minor revision of major
/// version 1.
const MAX_SUPPORTED_SCHEMA_VERSION: u32 = 199;
107
/// Open the indexeddb with the given name, upgrading it to the latest version
/// of the schema if necessary.
///
/// # Arguments
///
/// * `name` - name of the indexeddb database to open.
/// * `serializer` - serializer used by the data-migration steps to re-encode
///   existing records.
///
/// Returns the open [`Database`], or an error if the existing database's
/// schema version is newer than [`MAX_SUPPORTED_SCHEMA_VERSION`].
pub async fn open_and_upgrade_db(
    name: &str,
    serializer: &SafeEncodeSerializer,
) -> Result<Database, IndexeddbCryptoStoreError> {
    // Move the DB version up from where it is to the latest version.
    //
    // Schema changes need to be separate from data migrations, so we often
    // have a pattern of:
    //
    // 1. schema_add - create new object stores, indices etc.
    // 2. data_migrate - move data from the old stores to the new ones
    // 3. schema_delete - delete any now-unused stores etc.
    //
    // Migrations like these require the schema version to be bumped twice,
    // because of the separate "add" and "delete" stages.

    let old_version = db_version(name).await?;

    // If the database version is too new, bail out. We assume that schema updates
    // all the way up to `MAX_SUPPORTED_SCHEMA_VERSION` will be
    // backwards-compatible.
    if old_version > MAX_SUPPORTED_SCHEMA_VERSION {
        return Err(IndexeddbCryptoStoreError::SchemaTooNewError {
            max_supported_version: MAX_SUPPORTED_SCHEMA_VERSION,
            current_version: old_version,
        });
    }

    // Each step below checks against `old_version` (the version we started
    // at), not the current version: each migration function bumps the stored
    // schema version itself, so the guards only decide where to *start*.
    if old_version < 5 {
        v0_to_v5::schema_add(name).await?;
    }

    if old_version < 6 {
        v5_to_v7::schema_add(name).await?;
    }
    if old_version < 7 {
        v5_to_v7::data_migrate(name, serializer).await?;
        v5_to_v7::schema_delete(name).await?;
    }

    if old_version < 8 {
        v7_to_v8::data_migrate(name, serializer).await?;
        v7_to_v8::schema_bump(name).await?;
    }

    if old_version < 9 {
        v8_to_v10::schema_add(name).await?;
    }
    if old_version < 10 {
        v8_to_v10::data_migrate(name, serializer).await?;
        v8_to_v10::schema_delete(name).await?;
    }

    if old_version < 11 {
        v10_to_v11::data_migrate(name, serializer).await?;
        v10_to_v11::schema_bump(name).await?;
    }

    if old_version < 12 {
        v11_to_v12::schema_add(name).await?;
    }

    if old_version < 13 {
        v12_to_v13::schema_add(name).await?;
    }

    if old_version < 14 {
        v13_to_v14::data_migrate(name, serializer).await?;
        v13_to_v14::schema_bump(name).await?;
    }

    if old_version < 100 {
        v14_to_v101::schema_add(name).await?;
    }

    if old_version < 101 {
        v14_to_v101::data_migrate(name, serializer).await?;
        v14_to_v101::schema_delete(name).await?;
    }

    if old_version < 102 {
        v101_to_v102::schema_add(name).await?;
    }

    // If you add more migrations here, you'll need to update
    // `tests::EXPECTED_SCHEMA_VERSION`.

    // NOTE: IF YOU MAKE A BREAKING CHANGE TO THE SCHEMA, BUMP THE SCHEMA VERSION TO
    // SOMETHING HIGHER THAN `MAX_SUPPORTED_SCHEMA_VERSION`! (And then bump
    // `MAX_SUPPORTED_SCHEMA_VERSION` itself so it covers the new major version:
    // e.g., once the schema version is 200, set it to 299.)

    // Open and return the DB (we know it's at the latest version)
    Ok(Database::open(name).await?)
}
204
205async fn db_version(name: &str) -> Result<u32, IndexeddbCryptoStoreError> {
206    let db = Database::open(name).await?;
207    let old_version = db.version() as u32;
208    db.close();
209    Ok(old_version)
210}
211
/// The schema version of a database before an upgrade, as passed to the
/// closure supplied to [`do_schema_upgrade`].
type OldVersion = u32;
213
/// Run a database schema upgrade operation
///
/// Opens the database at `version`, which causes the browser to fire an
/// "upgradeneeded" event if the existing database is older; `f` is invoked
/// from that event to apply the schema changes. The handle is closed again
/// before returning.
///
/// # Arguments
///
/// * `name` - name of the indexeddb database to be upgraded.
/// * `version` - version we are upgrading to.
/// * `f` - closure which will be called if the database is below the version
///   given. It will be called with two arguments `(txn, oldver)`, where:
///   * `txn` - the database transaction: a [`Transaction`]
///   * `oldver` - the version number before the upgrade.
async fn do_schema_upgrade<F>(name: &str, version: u32, f: F) -> Result<(), OpenDbError>
where
    F: Fn(&Transaction<'_>, OldVersion) -> Result<(), Error> + 'static,
{
    info!("IndexeddbCryptoStore upgrade schema -> v{version} starting");
    let db = Database::open(name)
        .with_version(version)
        .with_on_upgrade_needed(move |evt, tx| {
            // Even if the web-sys bindings expose the version as a f64, the IndexedDB API
            // works with an unsigned integer.
            // See <https://github.com/rustwasm/wasm-bindgen/issues/1149>
            let old_version = evt.old_version() as u32;

            // Run the upgrade code we were supplied
            f(tx, old_version)
        })
        .await?;
    // Close the handle we used for the upgrade; callers re-open the database
    // themselves when they need it.
    db.close();
    info!("IndexeddbCryptoStore upgrade schema -> v{version} complete");
    Ok(())
}
246
247fn add_nonunique_index<'a>(
248    object_store: &'a ObjectStore<'a>,
249    name: &str,
250    key_path: &str,
251) -> Result<Index<'a>, Error> {
252    object_store.create_index(name, key_path.into()).with_unique(false).build()
253}
254
255fn add_unique_index<'a>(
256    object_store: &'a ObjectStore<'a>,
257    name: &str,
258    key_path: &str,
259) -> Result<Index<'a>, Error> {
260    object_store.create_index(name, key_path.into()).with_unique(true).build()
261}
262
263#[cfg(all(test, target_family = "wasm"))]
264mod tests {
265    use std::{cell::Cell, future::Future, rc::Rc, sync::Arc};
266
267    use assert_matches::assert_matches;
268    use gloo_utils::format::JsValueSerdeExt;
269    use indexed_db_futures::{
270        database::VersionChangeEvent, prelude::*, transaction::TransactionMode,
271    };
272    use matrix_sdk_common::{
273        deserialized_responses::WithheldCode, js_tracing::make_tracing_subscriber,
274    };
275    use matrix_sdk_crypto::{
276        olm::{InboundGroupSession, SenderData, SessionKey},
277        store::{types::RoomKeyWithheldEntry, CryptoStore},
278        types::{events::room_key_withheld::RoomKeyWithheldContent, EventEncryptionAlgorithm},
279        vodozemac::{Curve25519PublicKey, Curve25519SecretKey, Ed25519PublicKey, Ed25519SecretKey},
280    };
281    use matrix_sdk_store_encryption::StoreCipher;
282    use matrix_sdk_test::async_test;
283    use ruma::{device_id, owned_user_id, room_id, OwnedRoomId, RoomId};
284    use serde::Serialize;
285    use tracing_subscriber::util::SubscriberInitExt;
286    use wasm_bindgen::JsValue;
287    use web_sys::console;
288
289    use super::{v0_to_v5, v7::InboundGroupSessionIndexedDbObject2};
290    use crate::{
291        crypto_store::{keys, migrations::*, InboundGroupSessionIndexedDbObject},
292        IndexeddbCryptoStore,
293    };
294
    wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);

    /// The schema version we expect after we open the store.
    ///
    /// Must be kept in sync with the migration chain in `open_and_upgrade_db`.
    const EXPECTED_SCHEMA_VERSION: u32 = 102;

    /// Number of records the perf tests below create.
    /// Adjust this to do a more comprehensive perf test.
    const NUM_RECORDS_FOR_PERF: usize = 2_000;
302
    /// Make lots of sessions and see how long it takes to count them in v8
    #[async_test]
    async fn test_count_lots_of_sessions_v8() {
        let cipher = Arc::new(StoreCipher::new().unwrap());
        let serializer = SafeEncodeSerializer::new(Some(cipher.clone()));
        // Session keys are slow to create, so make one upfront and use it for every
        // session
        let session_key = create_session_key();

        // Create lots of InboundGroupSessionIndexedDbObject2 objects, as
        // (key, value) pairs ready to insert
        let mut objects = Vec::with_capacity(NUM_RECORDS_FOR_PERF);
        for i in 0..NUM_RECORDS_FOR_PERF {
            objects.push(
                create_inbound_group_sessions2_record(i, &session_key, &cipher, &serializer).await,
            );
        }

        // Create a DB with an inbound_group_sessions2 store
        let db_prefix = "count_lots_of_sessions_v8";
        let db = create_db(db_prefix).await;
        let transaction = create_transaction(&db, db_prefix).await;
        let store = create_store(&transaction, db_prefix).await;

        // Check how long it takes to insert these records
        measure_performance("Inserting", "v8", NUM_RECORDS_FOR_PERF, || async {
            for (key, session_js) in objects.iter() {
                store
                    .add(session_js)
                    .with_key(key)
                    .without_key_type()
                    .build()
                    .unwrap()
                    .await
                    .unwrap();
            }
        })
        .await;

        // Check how long it takes to count these records
        measure_performance("Counting", "v8", NUM_RECORDS_FOR_PERF, || async {
            store.count().await.unwrap();
        })
        .await;
    }
347
348    /// Make lots of sessions and see how long it takes to count them in v10
349    #[async_test]
350    async fn test_count_lots_of_sessions_v10() {
351        let serializer = SafeEncodeSerializer::new(Some(Arc::new(StoreCipher::new().unwrap())));
352
353        // Session keys are slow to create, so make one upfront and use it for every
354        // session
355        let session_key = create_session_key();
356
357        // Create lots of InboundGroupSessionIndexedDbObject objects
358        let mut objects = Vec::with_capacity(NUM_RECORDS_FOR_PERF);
359        for i in 0..NUM_RECORDS_FOR_PERF {
360            objects.push(create_inbound_group_sessions3_record(i, &session_key, &serializer).await);
361        }
362
363        // Create a DB with an inbound_group_sessions3 store
364        let db_prefix = "count_lots_of_sessions_v8";
365        let db = create_db(db_prefix).await;
366        let transaction = create_transaction(&db, db_prefix).await;
367        let store = create_store(&transaction, db_prefix).await;
368
369        // Check how long it takes to insert these records
370        measure_performance("Inserting", "v10", NUM_RECORDS_FOR_PERF, || async {
371            for (key, session_js) in objects.iter() {
372                store
373                    .add(session_js)
374                    .with_key(key)
375                    .without_key_type()
376                    .build()
377                    .unwrap()
378                    .await
379                    .unwrap();
380            }
381        })
382        .await;
383
384        // Check how long it takes to count these records
385        measure_performance("Counting", "v10", NUM_RECORDS_FOR_PERF, || async {
386            store.count().await.unwrap();
387        })
388        .await;
389    }
390
    /// Create a fresh database named `{db_prefix}::matrix-sdk-crypto` at
    /// version 1, containing a single object store named `{db_prefix}_store`.
    async fn create_db(db_prefix: &str) -> Database {
        let db_name = format!("{db_prefix}::matrix-sdk-crypto");
        let store_name = format!("{db_prefix}_store");
        Database::open(&db_name)
            .with_version(1u32)
            .with_on_upgrade_needed(
                // Runs on first open (version 0 -> 1) to create the store
                move |_: VersionChangeEvent, tx: &Transaction<'_>| -> Result<(), Error> {
                    tx.db().create_object_store(&store_name).build()?;
                    Ok(())
                },
            )
            .build()
            .unwrap()
            .await
            .unwrap()
    }
407
408    async fn create_transaction<'a>(db: &'a Database, db_prefix: &str) -> Transaction<'a> {
409        let store_name = format!("{db_prefix}_store");
410        db.transaction(&store_name).with_mode(TransactionMode::Readwrite).build().unwrap()
411    }
412
413    async fn create_store<'a>(
414        transaction: &'a Transaction<'a>,
415        db_prefix: &str,
416    ) -> ObjectStore<'a> {
417        let store_name = format!("{db_prefix}_store");
418        transaction.object_store(&store_name).unwrap()
419    }
420
421    fn create_session_key() -> SessionKey {
422        SessionKey::from_base64(
423            "\
424            AgAAAADBy9+YIYTIqBjFT67nyi31gIOypZQl8day2hkhRDCZaHoG+cZh4tZLQIAZimJail0\
425            0zq4DVJVljO6cZ2t8kIto/QVk+7p20Fcf2nvqZyL2ZCda2Ei7VsqWZHTM/gqa2IU9+ktkwz\
426            +KFhENnHvDhG9f+hjsAPZd5mTTpdO+tVcqtdWhX4dymaJ/2UpAAjuPXQW+nXhQWQhXgXOUa\
427            JCYurJtvbCbqZGeDMmVIoqukBs2KugNJ6j5WlTPoeFnMl6Guy9uH2iWWxGg8ZgT2xspqVl5\
428            CwujjC+m7Dh1toVkvu+bAw\
429            ",
430        )
431        .unwrap()
432    }
433
    /// Build a `(key, value)` pair for an [`InboundGroupSessionIndexedDbObject2`]
    /// (the v8 `inbound_group_sessions2` format), ready for insertion into an
    /// object store.
    async fn create_inbound_group_sessions2_record(
        i: usize,
        session_key: &SessionKey,
        cipher: &Arc<StoreCipher>,
        serializer: &SafeEncodeSerializer,
    ) -> (JsValue, JsValue) {
        let session = create_inbound_group_session(i, session_key);
        let pickled_session = session.pickle().await;
        // The v8 format stored the pickled session encrypted with the store cipher
        let session_dbo = InboundGroupSessionIndexedDbObject2 {
            pickled_session: cipher.encrypt_value(&pickled_session).unwrap(),
            needs_backup: false,
        };
        let session_js: JsValue = serde_wasm_bindgen::to_value(&session_dbo).unwrap();

        let key = serializer.encode_key(
            old_keys::INBOUND_GROUP_SESSIONS_V2,
            (&session.room_id, session.session_id()),
        );

        (key, session_js)
    }
455
    /// Build a `(key, value)` pair for an [`InboundGroupSessionIndexedDbObject`]
    /// (the v10 `inbound_group_sessions3` format), ready for insertion into an
    /// object store.
    async fn create_inbound_group_sessions3_record(
        i: usize,
        session_key: &SessionKey,
        serializer: &SafeEncodeSerializer,
    ) -> (JsValue, JsValue) {
        let session = create_inbound_group_session(i, session_key);
        let pickled_session = session.pickle().await;

        let session_dbo = InboundGroupSessionIndexedDbObject {
            pickled_session: serializer.maybe_encrypt_value(pickled_session).unwrap(),
            session_id: None,
            needs_backup: false,
            backed_up_to: -1,
            sender_key: None,
            sender_data_type: None,
        };
        let session_js: JsValue = serde_wasm_bindgen::to_value(&session_dbo).unwrap();

        // NOTE(review): this derives the key with the *V2* table name even
        // though the record is in the v10 (sessions3) format. Probably harmless
        // for a perf test against a synthetic store, but confirm it is
        // intentional.
        let key = serializer.encode_key(
            old_keys::INBOUND_GROUP_SESSIONS_V2,
            (&session.room_id, session.session_id()),
        );

        (key, session_js)
    }
481
482    async fn measure_performance<Fut, R>(
483        name: &str,
484        schema: &str,
485        num_records: usize,
486        f: impl Fn() -> Fut,
487    ) -> R
488    where
489        Fut: Future<Output = R>,
490    {
491        let window = web_sys::window().expect("should have a window in this context");
492        let performance = window.performance().expect("performance should be available");
493        let start = performance.now();
494
495        let ret = f().await;
496
497        let elapsed = performance.now() - start;
498        console::log_1(
499            &format!("{name} {num_records} records with {schema} schema took {elapsed:.2}ms.")
500                .into(),
501        );
502
503        ret
504    }
505
506    /// Create an example InboundGroupSession of known size
507    fn create_inbound_group_session(i: usize, session_key: &SessionKey) -> InboundGroupSession {
508        let sender_key = Curve25519PublicKey::from_bytes([
509            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
510            24, 25, 26, 27, 28, 29, 30, 31,
511        ]);
512        let signing_key = Ed25519PublicKey::from_slice(&[
513            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
514            24, 25, 26, 27, 28, 29, 30, 31,
515        ])
516        .unwrap();
517        let room_id: OwnedRoomId = format!("!a{i}:b.co").try_into().unwrap();
518        let encryption_algorithm = EventEncryptionAlgorithm::MegolmV1AesSha2;
519        let history_visibility = None;
520
521        InboundGroupSession::new(
522            sender_key,
523            signing_key,
524            &room_id,
525            session_key,
526            SenderData::unknown(),
527            encryption_algorithm,
528            history_visibility,
529            false,
530        )
531        .unwrap()
532    }
533
    /// Test migrating `inbound_group_sessions` data from store v5 to latest,
    /// on a store with encryption disabled.
    ///
    /// See [`test_v8_v10_v12_migration_with_cipher`] for the actual test body.
    #[async_test]
    async fn test_v8_v10_v12_migration_unencrypted() {
        test_v8_v10_v12_migration_with_cipher("test_v8_migration_unencrypted", None).await
    }
540
    /// Test migrating `inbound_group_sessions` data from store v5 to latest,
    /// on a store with encryption enabled.
    ///
    /// See [`test_v8_v10_v12_migration_with_cipher`] for the actual test body.
    #[async_test]
    async fn test_v8_v10_v12_migration_encrypted() {
        let cipher = StoreCipher::new().unwrap();
        test_v8_v10_v12_migration_with_cipher(
            "test_v8_migration_encrypted",
            Some(Arc::new(cipher)),
        )
        .await;
    }
552
553    /// Helper function for `test_v8_v10_v12_migration_{un,}encrypted`: test
554    /// migrating `inbound_group_sessions` data from store v5 to store v12.
555    async fn test_v8_v10_v12_migration_with_cipher(
556        db_prefix: &str,
557        store_cipher: Option<Arc<StoreCipher>>,
558    ) {
559        let _ = make_tracing_subscriber(None).try_init();
560        let db_name = format!("{db_prefix:0}::matrix-sdk-crypto");
561
562        // delete the db in case it was used in a previous run
563        let _ = Database::delete_by_name(&db_name);
564
565        // Given a DB with data in it as it was at v5
566        let room_id = room_id!("!test:localhost");
567        let (backed_up_session, not_backed_up_session) = create_sessions(&room_id);
568        populate_v5_db(
569            &db_name,
570            store_cipher.clone(),
571            &[&backed_up_session, &not_backed_up_session],
572        )
573        .await;
574
575        // When I open a store based on that DB, triggering an upgrade
576        let store =
577            IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, store_cipher).await.unwrap();
578
579        // Then I can find the sessions using their keys and their info is correct
580        let fetched_backed_up_session = store
581            .get_inbound_group_session(room_id, backed_up_session.session_id())
582            .await
583            .unwrap()
584            .unwrap();
585        assert_eq!(fetched_backed_up_session.session_id(), backed_up_session.session_id());
586
587        let fetched_not_backed_up_session = store
588            .get_inbound_group_session(room_id, not_backed_up_session.session_id())
589            .await
590            .unwrap()
591            .unwrap();
592        assert_eq!(fetched_not_backed_up_session.session_id(), not_backed_up_session.session_id());
593
594        // For v8: the backed_up info is preserved
595        assert!(fetched_backed_up_session.backed_up());
596        assert!(!fetched_not_backed_up_session.backed_up());
597
598        // For v10: they have the backed_up_to property and it is indexed
599        assert_matches_v10_schema(&db_name, &store, &fetched_backed_up_session).await;
600
601        // For v12: they have the session_id, sender_key and sender_data_type properties
602        // and they are indexed
603        assert_matches_v12_schema(&db_name, &store, &fetched_backed_up_session).await;
604    }
605
606    async fn assert_matches_v10_schema(
607        db_name: &str,
608        store: &IndexeddbCryptoStore,
609        fetched_backed_up_session: &InboundGroupSession,
610    ) {
611        let db = Database::open(&db_name).build().unwrap().await.unwrap();
612        assert!(db.version() >= 10.0);
613        let transaction = db.transaction("inbound_group_sessions3").build().unwrap();
614        let raw_store = transaction.object_store("inbound_group_sessions3").unwrap();
615        let key = store.serializer.encode_key(
616            keys::INBOUND_GROUP_SESSIONS_V3,
617            (fetched_backed_up_session.room_id(), fetched_backed_up_session.session_id()),
618        );
619        let idb_object: InboundGroupSessionIndexedDbObject =
620            serde_wasm_bindgen::from_value(raw_store.get(&key).await.unwrap().unwrap()).unwrap();
621
622        assert_eq!(idb_object.backed_up_to, -1);
623        assert!(raw_store.index_names().find(|idx| idx == "backed_up_to").is_some());
624
625        transaction.commit().await.unwrap();
626        db.close();
627    }
628
629    async fn assert_matches_v12_schema(
630        db_name: &str,
631        store: &IndexeddbCryptoStore,
632        session: &InboundGroupSession,
633    ) {
634        let db = Database::open(&db_name).build().unwrap().await.unwrap();
635        assert!(db.version() >= 12.0);
636        let transaction = db.transaction("inbound_group_sessions3").build().unwrap();
637        let raw_store = transaction.object_store("inbound_group_sessions3").unwrap();
638        let key = store
639            .serializer
640            .encode_key(keys::INBOUND_GROUP_SESSIONS_V3, (session.room_id(), session.session_id()));
641        let idb_object: InboundGroupSessionIndexedDbObject =
642            serde_wasm_bindgen::from_value(raw_store.get(&key).await.unwrap().unwrap()).unwrap();
643
644        assert_eq!(
645            idb_object.session_id,
646            Some(
647                store
648                    .serializer
649                    .encode_key_as_string(keys::INBOUND_GROUP_SESSIONS_V3, session.session_id())
650            )
651        );
652        assert_eq!(
653            idb_object.sender_key,
654            Some(store.serializer.encode_key_as_string(
655                keys::INBOUND_GROUP_SESSIONS_V3,
656                session.sender_key().to_base64()
657            ))
658        );
659        assert_eq!(idb_object.sender_data_type, Some(session.sender_data_type() as u8));
660        assert!(raw_store
661            .index_names()
662            .find(|idx| idx == "inbound_group_session_sender_key_sender_data_type_idx")
663            .is_some());
664
665        transaction.commit().await.unwrap();
666        db.close();
667    }
668
    /// Create a pair of [`InboundGroupSession`]s in the given room, the first
    /// of which is marked as backed up.
    fn create_sessions(room_id: &RoomId) -> (InboundGroupSession, InboundGroupSession) {
        let curve_key = Curve25519PublicKey::from(&Curve25519SecretKey::new());
        let ed_key = Ed25519SecretKey::new().public_key();

        let backed_up_session = InboundGroupSession::new(
            curve_key,
            ed_key,
            room_id,
            &SessionKey::from_base64(
                "AgAAAABTyn3CR8mzAxhsHH88td5DrRqfipJCnNbZeMrfzhON6O1Cyr9ewx/sDFLO6\
                 +NvyW92yGvMub7nuAEQb+SgnZLm7nwvuVvJgSZKpoJMVliwg8iY9TXKFT286oBtT2\
                 /8idy6TcpKax4foSHdMYlZXu5zOsGDdd9eYnYHpUEyDT0utuiaakZM3XBMNLEVDj9\
                 Ps929j1FGgne1bDeFVoty2UAOQK8s/0JJigbKSu6wQ/SzaCYpE/LD4Egk2Nxs1JE2\
                 33ii9J8RGPYOp7QWl0kTEc8mAlqZL7mKppo9AwgtmYweAg",
            )
            .unwrap(),
            SenderData::legacy(),
            EventEncryptionAlgorithm::MegolmV1AesSha2,
            None,
            false,
        )
        .unwrap();
        backed_up_session.mark_as_backed_up();

        let not_backed_up_session = InboundGroupSession::new(
            curve_key,
            ed_key,
            room_id,
            &SessionKey::from_base64(
                "AgAAAACO1PjBdqucFUcNFU6JgXYAi7KMeeUqUibaLm6CkHJcMiDTFWq/K5SFAukJc\
                 WjeyOpnZr4vpezRlbvNaQpNPMub2Cs2u14fHj9OpKFD7c4hFS4j94q4pTLZly3qEV\
                 BIjWdOpcIVfN7QVGVIxYiI6KHEddCHrNCo9fc8GUdfzrMnmUooQr/m4ZAkRdErzUH\
                 uUAlUBwOKcPi7Cs/KrMw/sHCRDkTntHZ3BOrzJsAVbHUgq+8/Sqy3YE+CX6uEnig+\
                 1NWjZD9f1vvXnSKKDdHj1927WFMFZ/yYc24607zEVUaODQ",
            )
            .unwrap(),
            SenderData::legacy(),
            EventEncryptionAlgorithm::MegolmV1AesSha2,
            None,
            false,
        )
        .unwrap();

        (backed_up_session, not_backed_up_session)
    }
714
    /// Create a database called `db_name` at schema v5 and populate its
    /// `inbound_group_sessions` store with the given sessions, serialized in
    /// the legacy (pre-v7) format.
    async fn populate_v5_db(
        db_name: &str,
        store_cipher: Option<Arc<StoreCipher>>,
        session_entries: &[&InboundGroupSession],
    ) {
        // Schema V7 migrated the inbound group sessions to a new format.
        // To test, first create a database and populate it with the *old* style of
        // entry.
        let db = create_v5_db(&db_name).await.unwrap();

        let serializer = SafeEncodeSerializer::new(store_cipher.clone());

        let txn = db
            .transaction(old_keys::INBOUND_GROUP_SESSIONS_V1)
            .with_mode(TransactionMode::Readwrite)
            .build()
            .unwrap();
        let sessions = txn.object_store(old_keys::INBOUND_GROUP_SESSIONS_V1).unwrap();
        for session in session_entries {
            let room_id = session.room_id();
            let session_id = session.session_id();
            let key =
                serializer.encode_key(old_keys::INBOUND_GROUP_SESSIONS_V1, (room_id, session_id));
            let pickle = session.pickle().await;

            // Serialize the session with the old style of serialization, since that's what
            // we used at the time.
            let serialized_session = serialize_value_as_legacy(&store_cipher, &pickle);
            // NOTE(review): the put request is not awaited here; the
            // `commit().await` below appears to drive it — confirm.
            sessions.put(&serialized_session).with_key(key).build().unwrap();
        }
        txn.commit().await.unwrap();

        // Close the DB; the caller is expected to reopen it via the crypto
        // store, triggering the migrations, and check it can read the data.
        db.close();
    }
751
    /// Test migrating `backup_keys` data from store v10 to latest,
    /// on a store with encryption disabled.
    ///
    /// See [`test_v10_v11_migration_with_cipher`] for the actual test body.
    #[async_test]
    async fn test_v10_v11_migration_unencrypted() {
        test_v10_v11_migration_with_cipher("test_v10_migration_unencrypted", None).await
    }
758
759    /// Test migrating `backup_keys` data from store v10 to latest,
760    /// on a store with encryption enabled.
761    #[async_test]
762    async fn test_v10_v11_migration_encrypted() {
763        let cipher = StoreCipher::new().unwrap();
764        test_v10_v11_migration_with_cipher("test_v10_migration_encrypted", Some(Arc::new(cipher)))
765            .await;
766    }
767
    /// Helper function for `test_v10_v11_migration_{un,}encrypted`: test
    /// migrating `backup_keys` data from store v10 to store v11.
    async fn test_v10_v11_migration_with_cipher(
        db_prefix: &str,
        store_cipher: Option<Arc<StoreCipher>>,
    ) {
        let _ = make_tracing_subscriber(None).try_init();
        // NOTE(review): `{db_prefix:0}` is `{db_prefix}` with a minimum width
        // of zero — almost certainly a typo for `{db_prefix}`, but harmless.
        let db_name = format!("{db_prefix:0}::matrix-sdk-crypto");

        // delete the db in case it was used in a previous run
        let _ = Database::delete_by_name(&db_name).unwrap().await.unwrap();

        // Given a DB with data in it as it was at v5
        let db = create_v5_db(&db_name).await.unwrap();

        let txn = db
            .transaction(keys::BACKUP_KEYS)
            .with_mode(TransactionMode::Readwrite)
            .build()
            .unwrap();
        let store = txn.object_store(keys::BACKUP_KEYS).unwrap();
        // Store the backup version under the old (v1) key, in the legacy
        // serialization format
        store
            .put(&serialize_value_as_legacy(&store_cipher, &"1".to_owned()))
            .with_key(JsValue::from_str(old_keys::BACKUP_KEY_V1))
            .build()
            .unwrap();
        txn.commit().await.unwrap();
        db.close();

        // When I open a store based on that DB, triggering an upgrade
        let store =
            IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, store_cipher).await.unwrap();

        // Then I can read the backup settings
        let backup_data = store.load_backup_keys().await.unwrap();
        assert_eq!(backup_data.backup_version, Some("1".to_owned()));
    }
805
806    /// Test migrating `withheld_sessions` data from store v14 to latest,
807    /// on a store with encryption disabled.
808    #[async_test]
809    async fn test_v14_v101_migration_unencrypted() {
810        test_v14_v101_migration_with_cipher("test_v101_migration_unencrypted", None).await
811    }
812
813    /// Test migrating `withheld_sessions` data from store v14 to latest,
814    /// on a store with encryption enabled.
815    #[async_test]
816    async fn test_v14_v101_migration_encrypted() {
817        let cipher = StoreCipher::new().unwrap();
818        test_v14_v101_migration_with_cipher(
819            "test_v101_migration_encrypted",
820            Some(Arc::new(cipher)),
821        )
822        .await;
823    }
824
825    /// Helper function for `test_v14_v101_migration_{un,}encrypted`: test
826    /// migrating `withheld_sessions` data from store v14 to store v101.
827    async fn test_v14_v101_migration_with_cipher(
828        db_prefix: &str,
829        store_cipher: Option<Arc<StoreCipher>>,
830    ) {
831        let serializer = SafeEncodeSerializer::new(store_cipher.clone());
832
833        let _ = make_tracing_subscriber(None).try_init();
834        let db_name = format!("{db_prefix:0}::matrix-sdk-crypto");
835
836        // delete the db in case it was used in a previous run
837        let _ = Database::delete_by_name(&db_name).unwrap().await.unwrap();
838
839        let room_id = room_id!("!test:example.com");
840        let session_id = "12345";
841
842        // Given a DB with data in it as it was at v5
843        {
844            let db = create_v5_db(&db_name).await.unwrap();
845
846            let txn = db
847                .transaction(old_keys::DIRECT_WITHHELD_INFO)
848                .with_mode(TransactionMode::Readwrite)
849                .build()
850                .unwrap();
851            let store = txn.object_store(old_keys::DIRECT_WITHHELD_INFO).unwrap();
852
853            let sender_key =
854                Curve25519PublicKey::from_base64("9n7mdWKOjr9c4NTlG6zV8dbFtNK79q9vZADoh7nMUwA")
855                    .unwrap();
856
857            let withheld_entry = RoomKeyWithheldEntry {
858                sender: owned_user_id!("@alice:example.com"),
859                content: RoomKeyWithheldContent::new(
860                    EventEncryptionAlgorithm::MegolmV1AesSha2,
861                    WithheldCode::Blacklisted,
862                    room_id.to_owned(),
863                    session_id.to_owned(),
864                    sender_key,
865                    device_id!("ABC").to_owned(),
866                ),
867            };
868
869            let key = serializer.encode_key(old_keys::DIRECT_WITHHELD_INFO, (room_id, session_id));
870            let value = serializer.serialize_value(&withheld_entry).unwrap();
871            store.add(value).with_key(key).build().unwrap();
872            txn.commit().await.unwrap();
873            db.close();
874        }
875
876        // When I open a store based on that DB, triggering an upgrade
877        let store =
878            IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, store_cipher).await.unwrap();
879
880        // Then I can read the withheld session settings
881        let withheld_entry = store
882            .get_withheld_info(room_id, session_id)
883            .await
884            .unwrap()
885            .expect("Should find a withheld entry in migrated data");
886        assert_eq!(withheld_entry.content.withheld_code(), WithheldCode::Blacklisted)
887    }
888
889    async fn create_v5_db(name: &str) -> std::result::Result<Database, OpenDbError> {
890        v0_to_v5::schema_add(name).await?;
891        Database::open(name).with_version(5u32).build()?.await
892    }
893
894    /// Opening a db that has been upgraded to MAX_SUPPORTED_SCHEMA_VERSION
895    /// should be ok
896    #[async_test]
897    async fn test_can_open_max_supported_schema_version() {
898        let _ = make_tracing_subscriber(None).try_init();
899
900        let db_prefix = "test_can_open_max_supported_schema_version";
901        // Create a database at MAX_SUPPORTED_SCHEMA_VERSION
902        create_future_schema_db(db_prefix, MAX_SUPPORTED_SCHEMA_VERSION).await;
903
904        // Now, try opening it again
905        IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, None).await.unwrap();
906    }
907
908    /// Opening a db that has been upgraded beyond MAX_SUPPORTED_SCHEMA_VERSION
909    /// should throw an error
910    #[async_test]
911    async fn test_can_not_open_too_new_db() {
912        let _ = make_tracing_subscriber(None).try_init();
913
914        let db_prefix = "test_can_not_open_too_new_db";
915        // Create a database at MAX_SUPPORTED_SCHEMA_VERSION+1
916        create_future_schema_db(db_prefix, MAX_SUPPORTED_SCHEMA_VERSION + 1).await;
917
918        // Now, try opening it again
919        let result = IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, None).await;
920        assert_matches!(
921            result,
922            Err(IndexeddbCryptoStoreError::SchemaTooNewError {
923                max_supported_version,
924                current_version
925            }) => {
926                assert_eq!(max_supported_version, MAX_SUPPORTED_SCHEMA_VERSION);
927                assert_eq!(current_version, MAX_SUPPORTED_SCHEMA_VERSION + 1);
928            }
929        );
930    }
931
932    // Create a database, and increase its schema version to the given version
933    // number.
934    async fn create_future_schema_db(db_prefix: &str, version: u32) {
935        let db_name = format!("{db_prefix}::matrix-sdk-crypto");
936
937        // delete the db in case it was used in a previous run
938        let _ = Database::delete_by_name(&db_name);
939
940        // Open, and close, the store at the regular version.
941        IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, None).await.unwrap();
942
943        // Now upgrade to the given version, keeping a record of the previous version so
944        // that we can double-check it.
945        let old_version: Rc<Cell<Option<u32>>> = Rc::new(Cell::new(None));
946        let old_version2 = old_version.clone();
947
948        let db = Database::open(&db_name)
949            .with_version(version)
950            .with_on_upgrade_needed(move |evt: VersionChangeEvent, _: &Transaction<'_>| {
951                old_version2.set(Some(evt.old_version() as u32));
952                Ok(())
953            })
954            .build()
955            .unwrap()
956            .await
957            .unwrap();
958
959        assert_eq!(
960            old_version.get(),
961            Some(EXPECTED_SCHEMA_VERSION),
962            "Existing store had unexpected version number"
963        );
964        db.close();
965    }
966
967    /// Emulate the old behaviour of [`IndexeddbSerializer::serialize_value`].
968    ///
969    /// We used to use an inefficient format for serializing objects in the
970    /// indexeddb store. This replicates that old behaviour, for testing
971    /// purposes.
972    fn serialize_value_as_legacy<T: Serialize>(
973        store_cipher: &Option<Arc<StoreCipher>>,
974        value: &T,
975    ) -> JsValue {
976        if let Some(cipher) = &store_cipher {
977            // Old-style serialization/encryption. First JSON-serialize into a byte array...
978            let data = serde_json::to_vec(&value).unwrap();
979            // ... then encrypt...
980            let encrypted = cipher.encrypt_value_data(data).unwrap();
981            // ... then JSON-serialize into another byte array ...
982            let value = serde_json::to_vec(&encrypted).unwrap();
983            // and finally, turn it into a javascript array.
984            JsValue::from_serde(&value).unwrap()
985        } else {
986            JsValue::from_serde(&value).unwrap()
987        }
988    }
989}