matrix_sdk_indexeddb/crypto_store/migrations/mod.rs

// Copyright 2023 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::ops::Deref;

use indexed_db_futures::{prelude::*, web_sys::DomException};
use tracing::info;
use wasm_bindgen::JsValue;

use crate::{
    crypto_store::{indexeddb_serializer::IndexeddbSerializer, Result},
    IndexeddbCryptoStoreError,
};

mod old_keys;
mod v0_to_v5;
mod v10_to_v11;
mod v11_to_v12;
mod v5_to_v7;
mod v7;
mod v7_to_v8;
mod v8_to_v10;

struct MigrationDb {
    db: IdbDatabase,
    next_version: u32,
}

impl MigrationDb {
    /// Create an Indexed DB wrapper that manages a database migration,
    /// logging messages before and after the migration, and automatically
    /// closing the DB when this object is dropped.
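    ///
    /// A sketch of how a data-migration step might use this wrapper (the
    /// store name `"some_store"` is hypothetical; the real callers live in
    /// the `vX_to_vY` submodules):
    ///
    /// ```ignore
    /// let db = MigrationDb::new(name, 7).await?;
    /// let txn = db.transaction_on_one_with_mode(
    ///     "some_store",
    ///     IdbTransactionMode::Readwrite,
    /// )?;
    /// // ... move entries between stores here ...
    /// txn.await.into_result()?;
    /// ```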
    async fn new(name: &str, next_version: u32) -> Result<Self> {
        info!("IndexeddbCryptoStore migrate data before v{next_version} starting");
        Ok(Self { db: IdbDatabase::open(name)?.await?, next_version })
    }
}

impl Deref for MigrationDb {
    type Target = IdbDatabase;

    fn deref(&self) -> &Self::Target {
        &self.db
    }
}

impl Drop for MigrationDb {
    fn drop(&mut self) {
        let version = self.next_version;
        info!("IndexeddbCryptoStore migrate data before v{version} finished");
        self.db.close();
    }
}

/// The latest version of the schema we can support. If we encounter a database
/// version with a higher schema version, we will return an error.
///
/// A note on how this works.
///
/// Normally, when you open an indexeddb database, you tell it the "schema
/// version" that you know about. If the existing database is older than
/// that, it lets you run a migration. If the existing database is newer, then
/// it assumes that there have been incompatible schema changes and complains
/// with an error ("The requested version (10) is less than the existing version
/// (11)").
///
/// The problem with this is that, if someone upgrades their installed
/// application, then realises it was a terrible mistake and tries to roll
/// back, then suddenly every user's session is completely hosed. (They see
/// an "unable to restore session" dialog.) Often, schema updates aren't
/// actually backwards-incompatible — for example, existing code will work just
/// fine if someone adds a new store or a new index — so this approach is too
/// heavy-handed.
///
/// The solution we take here is to say "any schema changes up to
/// [`MAX_SUPPORTED_SCHEMA_VERSION`] will be backwards-compatible". If, at some
/// point, we do make a breaking change, we will give that schema version a
/// higher number. Then, rather than using the implicit version check that comes
/// with `indexedDB.open(name, version)`, we explicitly check the version
/// ourselves.
///
/// It is expected that we will use version numbers that are multiples of 100 to
/// represent breaking changes — for example, version 100 is a breaking change,
/// as is version 200, but versions 101-199 are all backwards compatible with
/// version 100. In other words, if you divide by 100, you get something
/// approaching semver: version 200 is major version 2, minor version 0.
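///
/// For example, reading the schema versions this way (an illustration of the
/// convention, not a list of real versions):
///
/// ```text
/// schema version  12 -> "0.12": compatible with any other 0.x version
/// schema version 100 -> "1.0":  a breaking change
/// schema version 101 -> "1.1":  compatible with 1.x, incompatible with 0.x
/// ```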
const MAX_SUPPORTED_SCHEMA_VERSION: u32 = 99;

/// Open the indexeddb with the given name, upgrading it to the latest version
/// of the schema if necessary.
pub async fn open_and_upgrade_db(
    name: &str,
    serializer: &IndexeddbSerializer,
) -> Result<IdbDatabase, IndexeddbCryptoStoreError> {
    // Move the DB version up from where it is to the latest version.
    //
    // Schema changes need to be separate from data migrations, so we often
    // have a pattern of:
    //
    // 1. schema_add - create new object stores, indices etc.
    // 2. data_migrate - move data from the old stores to the new ones
    // 3. schema_delete - delete any now-unused stores etc.
    //
    // Migrations like these require the schema version to be bumped twice,
    // because of the separate "add" and "delete" stages.
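    //
    // Schematically, for a hypothetical migration module `foo` taking the
    // schema from vN to vN+2 (the real cases follow below, e.g. v5 -> v7):
    //
    //     if old_version < N + 1 {
    //         foo::schema_add(name).await?;               // schema version N + 1
    //     }
    //     if old_version < N + 2 {
    //         foo::data_migrate(name, serializer).await?;
    //         foo::schema_delete(name).await?;            // schema version N + 2
    //     }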

    let old_version = db_version(name).await?;

    // If the database version is too new, bail out. We assume that schema updates
    // all the way up to `MAX_SUPPORTED_SCHEMA_VERSION` will be
    // backwards-compatible.
    if old_version > MAX_SUPPORTED_SCHEMA_VERSION {
        return Err(IndexeddbCryptoStoreError::SchemaTooNewError {
            max_supported_version: MAX_SUPPORTED_SCHEMA_VERSION,
            current_version: old_version,
        });
    }

    if old_version < 5 {
        v0_to_v5::schema_add(name).await?;
    }

    if old_version < 6 {
        v5_to_v7::schema_add(name).await?;
    }
    if old_version < 7 {
        v5_to_v7::data_migrate(name, serializer).await?;
        v5_to_v7::schema_delete(name).await?;
    }

    if old_version < 8 {
        v7_to_v8::data_migrate(name, serializer).await?;
        v7_to_v8::schema_bump(name).await?;
    }

    if old_version < 9 {
        v8_to_v10::schema_add(name).await?;
    }
    if old_version < 10 {
        v8_to_v10::data_migrate(name, serializer).await?;
        v8_to_v10::schema_delete(name).await?;
    }

    if old_version < 11 {
        v10_to_v11::data_migrate(name, serializer).await?;
        v10_to_v11::schema_bump(name).await?;
    }

    if old_version < 12 {
        v11_to_v12::schema_add(name).await?;
    }

    // If you add more migrations here, you'll need to update
    // `tests::EXPECTED_SCHEMA_VERSION`.

    // NOTE: IF YOU MAKE A BREAKING CHANGE TO THE SCHEMA, BUMP THE SCHEMA VERSION TO
    // SOMETHING HIGHER THAN `MAX_SUPPORTED_SCHEMA_VERSION`! (And then bump
    // `MAX_SUPPORTED_SCHEMA_VERSION` itself to one less than the next multiple of 100.)

    // Open and return the DB (we know it's at the latest version)
    Ok(IdbDatabase::open(name)?.await?)
}

async fn db_version(name: &str) -> Result<u32, IndexeddbCryptoStoreError> {
    let db = IdbDatabase::open(name)?.await?;
    let old_version = db.version() as u32;
    db.close();
    Ok(old_version)
}

type OldVersion = u32;

/// Run a database schema upgrade operation
///
/// # Arguments
///
/// * `name` - name of the indexeddb database to be upgraded.
/// * `version` - version we are upgrading to.
/// * `f` - closure which will be called if the database is below the version
///   given. It will be called with three arguments `(db, txn, oldver)`, where:
///   * `db` - the [`IdbDatabase`]
///   * `txn` - the database transaction: an [`IdbTransaction`]
///   * `oldver` - the version number before the upgrade.
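///
/// # Example
///
/// A sketch of how a migration submodule might call this helper (the store
/// name `"new_store"`, index `"new_store_idx"`, key path `"some_field"`, and
/// target version `13` are hypothetical):
///
/// ```ignore
/// async fn schema_add(name: &str) -> Result<(), DomException> {
///     do_schema_upgrade(name, 13, |db, _txn, _old_version| {
///         let store = db.create_object_store("new_store")?;
///         add_nonunique_index(&store, "new_store_idx", "some_field")?;
///         Ok(())
///     })
///     .await
/// }
/// ```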
async fn do_schema_upgrade<F>(name: &str, version: u32, f: F) -> Result<(), DomException>
where
    F: Fn(&IdbDatabase, IdbTransaction<'_>, OldVersion) -> Result<(), JsValue> + 'static,
{
    info!("IndexeddbCryptoStore upgrade schema -> v{version} starting");
    let mut db_req: OpenDbRequest = IdbDatabase::open_u32(name, version)?;

    db_req.set_on_upgrade_needed(Some(move |evt: &IdbVersionChangeEvent| {
        // Even though the web-sys bindings expose the version as an f64, the IndexedDB
        // API works with unsigned integers.
        // See <https://github.com/rustwasm/wasm-bindgen/issues/1149>
        let old_version = evt.old_version() as u32;

        // Run the upgrade code we were supplied
        f(evt.db(), evt.transaction(), old_version)
    }));

    let db = db_req.await?;
    db.close();
    info!("IndexeddbCryptoStore upgrade schema -> v{version} complete");
    Ok(())
}

fn add_nonunique_index<'a>(
    object_store: &'a IdbObjectStore<'a>,
    name: &str,
    key_path: &str,
) -> Result<IdbIndex<'a>, DomException> {
    let mut params = IdbIndexParameters::new();
    params.unique(false);
    object_store.create_index_with_params(name, &IdbKeyPath::str(key_path), &params)
}

fn add_unique_index<'a>(
    object_store: &'a IdbObjectStore<'a>,
    name: &str,
    key_path: &str,
) -> Result<IdbIndex<'a>, DomException> {
    let mut params = IdbIndexParameters::new();
    params.unique(true);
    object_store.create_index_with_params(name, &IdbKeyPath::str(key_path), &params)
}

#[cfg(all(test, target_arch = "wasm32"))]
mod tests {
    use std::{cell::Cell, future::Future, rc::Rc, sync::Arc};

    use assert_matches::assert_matches;
    use gloo_utils::format::JsValueSerdeExt;
    use indexed_db_futures::prelude::*;
    use matrix_sdk_common::js_tracing::make_tracing_subscriber;
    use matrix_sdk_crypto::{
        olm::{InboundGroupSession, SenderData, SessionKey},
        store::CryptoStore,
        types::EventEncryptionAlgorithm,
        vodozemac::{Curve25519PublicKey, Curve25519SecretKey, Ed25519PublicKey, Ed25519SecretKey},
    };
    use matrix_sdk_store_encryption::StoreCipher;
    use matrix_sdk_test::async_test;
    use ruma::{room_id, OwnedRoomId, RoomId};
    use serde::Serialize;
    use tracing_subscriber::util::SubscriberInitExt;
    use web_sys::console;

    use super::{v0_to_v5, v7::InboundGroupSessionIndexedDbObject2};
    use crate::{
        crypto_store::{keys, migrations::*, InboundGroupSessionIndexedDbObject},
        IndexeddbCryptoStore,
    };

    wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);

    /// The schema version we expect after we open the store.
    const EXPECTED_SCHEMA_VERSION: u32 = 12;

    /// Adjust this to run a more comprehensive perf test
    const NUM_RECORDS_FOR_PERF: usize = 2_000;

    /// Make lots of sessions and see how long it takes to count them in v8
    #[async_test]
    async fn test_count_lots_of_sessions_v8() {
        let cipher = Arc::new(StoreCipher::new().unwrap());
        let serializer = IndexeddbSerializer::new(Some(cipher.clone()));
        // Session keys are slow to create, so make one upfront and use it for every
        // session
        let session_key = create_session_key();

        // Create lots of InboundGroupSessionIndexedDbObject2 objects
        let mut objects = Vec::with_capacity(NUM_RECORDS_FOR_PERF);
        for i in 0..NUM_RECORDS_FOR_PERF {
            objects.push(
                create_inbound_group_sessions2_record(i, &session_key, &cipher, &serializer).await,
            );
        }

        // Create a DB with an inbound_group_sessions2 store
        let db_prefix = "count_lots_of_sessions_v8";
        let db = create_db(db_prefix).await;
        let transaction = create_transaction(&db, db_prefix).await;
        let store = create_store(&transaction, db_prefix).await;

        // Check how long it takes to insert these records
        measure_performance("Inserting", "v8", NUM_RECORDS_FOR_PERF, || async {
            for (key, session_js) in objects.iter() {
                store.add_key_val(key, session_js).unwrap().await.unwrap();
            }
        })
        .await;

        // Check how long it takes to count these records
        measure_performance("Counting", "v8", NUM_RECORDS_FOR_PERF, || async {
            store.count().unwrap().await.unwrap();
        })
        .await;
    }

    /// Make lots of sessions and see how long it takes to count them in v10
    #[async_test]
    async fn test_count_lots_of_sessions_v10() {
        let serializer = IndexeddbSerializer::new(Some(Arc::new(StoreCipher::new().unwrap())));

        // Session keys are slow to create, so make one upfront and use it for every
        // session
        let session_key = create_session_key();

        // Create lots of InboundGroupSessionIndexedDbObject objects
        let mut objects = Vec::with_capacity(NUM_RECORDS_FOR_PERF);
        for i in 0..NUM_RECORDS_FOR_PERF {
            objects.push(create_inbound_group_sessions3_record(i, &session_key, &serializer).await);
        }

        // Create a DB with an inbound_group_sessions3 store
        let db_prefix = "count_lots_of_sessions_v10";
        let db = create_db(db_prefix).await;
        let transaction = create_transaction(&db, db_prefix).await;
        let store = create_store(&transaction, db_prefix).await;

        // Check how long it takes to insert these records
        measure_performance("Inserting", "v10", NUM_RECORDS_FOR_PERF, || async {
            for (key, session_js) in objects.iter() {
                store.add_key_val(key, session_js).unwrap().await.unwrap();
            }
        })
        .await;

        // Check how long it takes to count these records
        measure_performance("Counting", "v10", NUM_RECORDS_FOR_PERF, || async {
            store.count().unwrap().await.unwrap();
        })
        .await;
    }

    async fn create_db(db_prefix: &str) -> IdbDatabase {
        let db_name = format!("{db_prefix}::matrix-sdk-crypto");
        let store_name = format!("{db_prefix}_store");
        let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&db_name, 1).unwrap();
        db_req.set_on_upgrade_needed(Some(
            move |evt: &IdbVersionChangeEvent| -> Result<(), JsValue> {
                evt.db().create_object_store(&store_name)?;
                Ok(())
            },
        ));
        db_req.await.unwrap()
    }

    async fn create_transaction<'a>(db: &'a IdbDatabase, db_prefix: &str) -> IdbTransaction<'a> {
        let store_name = format!("{db_prefix}_store");
        db.transaction_on_one_with_mode(&store_name, IdbTransactionMode::Readwrite).unwrap()
    }

    async fn create_store<'a>(
        transaction: &'a IdbTransaction<'a>,
        db_prefix: &str,
    ) -> IdbObjectStore<'a> {
        let store_name = format!("{db_prefix}_store");
        transaction.object_store(&store_name).unwrap()
    }

    fn create_session_key() -> SessionKey {
        SessionKey::from_base64(
            "\
            AgAAAADBy9+YIYTIqBjFT67nyi31gIOypZQl8day2hkhRDCZaHoG+cZh4tZLQIAZimJail0\
            0zq4DVJVljO6cZ2t8kIto/QVk+7p20Fcf2nvqZyL2ZCda2Ei7VsqWZHTM/gqa2IU9+ktkwz\
            +KFhENnHvDhG9f+hjsAPZd5mTTpdO+tVcqtdWhX4dymaJ/2UpAAjuPXQW+nXhQWQhXgXOUa\
            JCYurJtvbCbqZGeDMmVIoqukBs2KugNJ6j5WlTPoeFnMl6Guy9uH2iWWxGg8ZgT2xspqVl5\
            CwujjC+m7Dh1toVkvu+bAw\
            ",
        )
        .unwrap()
    }

    async fn create_inbound_group_sessions2_record(
        i: usize,
        session_key: &SessionKey,
        cipher: &Arc<StoreCipher>,
        serializer: &IndexeddbSerializer,
    ) -> (JsValue, JsValue) {
        let session = create_inbound_group_session(i, session_key);
        let pickled_session = session.pickle().await;
        let session_dbo = InboundGroupSessionIndexedDbObject2 {
            pickled_session: cipher.encrypt_value(&pickled_session).unwrap(),
            needs_backup: false,
        };
        let session_js: JsValue = serde_wasm_bindgen::to_value(&session_dbo).unwrap();

        let key = serializer.encode_key(
            old_keys::INBOUND_GROUP_SESSIONS_V2,
            (&session.room_id, session.session_id()),
        );

        (key, session_js)
    }

    async fn create_inbound_group_sessions3_record(
        i: usize,
        session_key: &SessionKey,
        serializer: &IndexeddbSerializer,
    ) -> (JsValue, JsValue) {
        let session = create_inbound_group_session(i, session_key);
        let pickled_session = session.pickle().await;

        let session_dbo = InboundGroupSessionIndexedDbObject {
            pickled_session: serializer.maybe_encrypt_value(pickled_session).unwrap(),
            session_id: None,
            needs_backup: false,
            backed_up_to: -1,
            sender_key: None,
            sender_data_type: None,
        };
        let session_js: JsValue = serde_wasm_bindgen::to_value(&session_dbo).unwrap();

        let key = serializer.encode_key(
            old_keys::INBOUND_GROUP_SESSIONS_V2,
            (&session.room_id, session.session_id()),
        );

        (key, session_js)
    }

    async fn measure_performance<Fut, R>(
        name: &str,
        schema: &str,
        num_records: usize,
        f: impl Fn() -> Fut,
    ) -> R
    where
        Fut: Future<Output = R>,
    {
        let window = web_sys::window().expect("should have a window in this context");
        let performance = window.performance().expect("performance should be available");
        let start = performance.now();

        let ret = f().await;

        let elapsed = performance.now() - start;
        console::log_1(
            &format!("{name} {num_records} records with {schema} schema took {elapsed:.2}ms.")
                .into(),
        );

        ret
    }

    /// Create an example InboundGroupSession of known size
    fn create_inbound_group_session(i: usize, session_key: &SessionKey) -> InboundGroupSession {
        let sender_key = Curve25519PublicKey::from_bytes([
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
            24, 25, 26, 27, 28, 29, 30, 31,
        ]);
        let signing_key = Ed25519PublicKey::from_slice(&[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
            24, 25, 26, 27, 28, 29, 30, 31,
        ])
        .unwrap();
        let room_id: OwnedRoomId = format!("!a{i}:b.co").try_into().unwrap();
        let encryption_algorithm = EventEncryptionAlgorithm::MegolmV1AesSha2;
        let history_visibility = None;

        InboundGroupSession::new(
            sender_key,
            signing_key,
            &room_id,
            session_key,
            SenderData::unknown(),
            encryption_algorithm,
            history_visibility,
        )
        .unwrap()
    }

    /// Test migrating `inbound_group_sessions` data from store v5 to latest,
    /// on a store with encryption disabled.
    #[async_test]
    async fn test_v8_v10_v12_migration_unencrypted() {
        test_v8_v10_v12_migration_with_cipher("test_v8_migration_unencrypted", None).await
    }

    /// Test migrating `inbound_group_sessions` data from store v5 to latest,
    /// on a store with encryption enabled.
    #[async_test]
    async fn test_v8_v10_v12_migration_encrypted() {
        let cipher = StoreCipher::new().unwrap();
        test_v8_v10_v12_migration_with_cipher(
            "test_v8_migration_encrypted",
            Some(Arc::new(cipher)),
        )
        .await;
    }

    /// Helper function for `test_v8_v10_v12_migration_{un,}encrypted`: test
    /// migrating `inbound_group_sessions` data from store v5 to store v12.
    async fn test_v8_v10_v12_migration_with_cipher(
        db_prefix: &str,
        store_cipher: Option<Arc<StoreCipher>>,
    ) {
        let _ = make_tracing_subscriber(None).try_init();
        let db_name = format!("{db_prefix}::matrix-sdk-crypto");

        // delete the db in case it was used in a previous run
        let _ = IdbDatabase::delete_by_name(&db_name);

        // Given a DB with data in it as it was at v5
        let room_id = room_id!("!test:localhost");
        let (backed_up_session, not_backed_up_session) = create_sessions(&room_id);
        populate_v5_db(
            &db_name,
            store_cipher.clone(),
            &[&backed_up_session, &not_backed_up_session],
        )
        .await;

        // When I open a store based on that DB, triggering an upgrade
        let store =
            IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, store_cipher).await.unwrap();

        // Then I can find the sessions using their keys and their info is correct
        let fetched_backed_up_session = store
            .get_inbound_group_session(room_id, backed_up_session.session_id())
            .await
            .unwrap()
            .unwrap();
        assert_eq!(fetched_backed_up_session.session_id(), backed_up_session.session_id());

        let fetched_not_backed_up_session = store
            .get_inbound_group_session(room_id, not_backed_up_session.session_id())
            .await
            .unwrap()
            .unwrap();
        assert_eq!(fetched_not_backed_up_session.session_id(), not_backed_up_session.session_id());

        // For v8: the backed_up info is preserved
        assert!(fetched_backed_up_session.backed_up());
        assert!(!fetched_not_backed_up_session.backed_up());

        // For v10: they have the backed_up_to property and it is indexed
        assert_matches_v10_schema(&db_name, &store, &fetched_backed_up_session).await;

        // For v12: they have the session_id, sender_key and sender_data_type properties
        // and they are indexed
        assert_matches_v12_schema(&db_name, &store, &fetched_backed_up_session).await;
    }

    async fn assert_matches_v10_schema(
        db_name: &str,
        store: &IndexeddbCryptoStore,
        fetched_backed_up_session: &InboundGroupSession,
    ) {
        let db = IdbDatabase::open(&db_name).unwrap().await.unwrap();
        assert!(db.version() >= 10.0);
        let transaction = db.transaction_on_one("inbound_group_sessions3").unwrap();
        let raw_store = transaction.object_store("inbound_group_sessions3").unwrap();
        let key = store.serializer.encode_key(
            keys::INBOUND_GROUP_SESSIONS_V3,
            (fetched_backed_up_session.room_id(), fetched_backed_up_session.session_id()),
        );
        let idb_object: InboundGroupSessionIndexedDbObject =
            serde_wasm_bindgen::from_value(raw_store.get(&key).unwrap().await.unwrap().unwrap())
                .unwrap();

        assert_eq!(idb_object.backed_up_to, -1);
        assert!(raw_store.index_names().find(|idx| idx == "backed_up_to").is_some());

        db.close();
    }

    async fn assert_matches_v12_schema(
        db_name: &str,
        store: &IndexeddbCryptoStore,
        session: &InboundGroupSession,
    ) {
        let db = IdbDatabase::open(&db_name).unwrap().await.unwrap();
        assert!(db.version() >= 12.0);
        let transaction = db.transaction_on_one("inbound_group_sessions3").unwrap();
        let raw_store = transaction.object_store("inbound_group_sessions3").unwrap();
        let key = store
            .serializer
            .encode_key(keys::INBOUND_GROUP_SESSIONS_V3, (session.room_id(), session.session_id()));
        let idb_object: InboundGroupSessionIndexedDbObject =
            serde_wasm_bindgen::from_value(raw_store.get(&key).unwrap().await.unwrap().unwrap())
                .unwrap();

        assert_eq!(
            idb_object.session_id,
            Some(
                store
                    .serializer
                    .encode_key_as_string(keys::INBOUND_GROUP_SESSIONS_V3, session.session_id())
            )
        );
        assert_eq!(
            idb_object.sender_key,
            Some(store.serializer.encode_key_as_string(
                keys::INBOUND_GROUP_SESSIONS_V3,
                session.sender_key().to_base64()
            ))
        );
        assert_eq!(idb_object.sender_data_type, Some(session.sender_data_type() as u8));
        assert!(raw_store
            .index_names()
            .find(|idx| idx == "inbound_group_session_sender_key_sender_data_type_idx")
            .is_some());

        db.close();
    }

    fn create_sessions(room_id: &RoomId) -> (InboundGroupSession, InboundGroupSession) {
        let curve_key = Curve25519PublicKey::from(&Curve25519SecretKey::new());
        let ed_key = Ed25519SecretKey::new().public_key();

        let backed_up_session = InboundGroupSession::new(
            curve_key,
            ed_key,
            room_id,
            &SessionKey::from_base64(
                "AgAAAABTyn3CR8mzAxhsHH88td5DrRqfipJCnNbZeMrfzhON6O1Cyr9ewx/sDFLO6\
                 +NvyW92yGvMub7nuAEQb+SgnZLm7nwvuVvJgSZKpoJMVliwg8iY9TXKFT286oBtT2\
                 /8idy6TcpKax4foSHdMYlZXu5zOsGDdd9eYnYHpUEyDT0utuiaakZM3XBMNLEVDj9\
                 Ps929j1FGgne1bDeFVoty2UAOQK8s/0JJigbKSu6wQ/SzaCYpE/LD4Egk2Nxs1JE2\
                 33ii9J8RGPYOp7QWl0kTEc8mAlqZL7mKppo9AwgtmYweAg",
            )
            .unwrap(),
            SenderData::legacy(),
            EventEncryptionAlgorithm::MegolmV1AesSha2,
            None,
        )
        .unwrap();
        backed_up_session.mark_as_backed_up();

        let not_backed_up_session = InboundGroupSession::new(
            curve_key,
            ed_key,
            room_id,
            &SessionKey::from_base64(
                "AgAAAACO1PjBdqucFUcNFU6JgXYAi7KMeeUqUibaLm6CkHJcMiDTFWq/K5SFAukJc\
                 WjeyOpnZr4vpezRlbvNaQpNPMub2Cs2u14fHj9OpKFD7c4hFS4j94q4pTLZly3qEV\
                 BIjWdOpcIVfN7QVGVIxYiI6KHEddCHrNCo9fc8GUdfzrMnmUooQr/m4ZAkRdErzUH\
                 uUAlUBwOKcPi7Cs/KrMw/sHCRDkTntHZ3BOrzJsAVbHUgq+8/Sqy3YE+CX6uEnig+\
                 1NWjZD9f1vvXnSKKDdHj1927WFMFZ/yYc24607zEVUaODQ",
            )
            .unwrap(),
            SenderData::legacy(),
            EventEncryptionAlgorithm::MegolmV1AesSha2,
            None,
        )
        .unwrap();

        (backed_up_session, not_backed_up_session)
    }

    async fn populate_v5_db(
        db_name: &str,
        store_cipher: Option<Arc<StoreCipher>>,
        session_entries: &[&InboundGroupSession],
    ) {
        // Schema V7 migrated the inbound group sessions to a new format.
        // To test, first create a database and populate it with the *old* style of
        // entry.
        let db = create_v5_db(&db_name).await.unwrap();

        let serializer = IndexeddbSerializer::new(store_cipher.clone());

        let txn = db
            .transaction_on_one_with_mode(
                old_keys::INBOUND_GROUP_SESSIONS_V1,
                IdbTransactionMode::Readwrite,
            )
            .unwrap();
        let sessions = txn.object_store(old_keys::INBOUND_GROUP_SESSIONS_V1).unwrap();
        for session in session_entries {
            let room_id = session.room_id();
            let session_id = session.session_id();
            let key =
                serializer.encode_key(old_keys::INBOUND_GROUP_SESSIONS_V1, (room_id, session_id));
            let pickle = session.pickle().await;

            // Serialize the session with the old style of serialization, since that's what
            // we used at the time.
            let serialized_session = serialize_value_as_legacy(&store_cipher, &pickle);
            sessions.put_key_val(&key, &serialized_session).unwrap();
        }
        txn.await.into_result().unwrap();

        // Now close our DB; the caller will reopen it properly and check that the
        // data can still be read.
        db.close();
    }

    /// Test migrating `backup_keys` data from store v10 to latest,
    /// on a store with encryption disabled.
    #[async_test]
    async fn test_v10_v11_migration_unencrypted() {
        test_v10_v11_migration_with_cipher("test_v10_migration_unencrypted", None).await
    }

    /// Test migrating `backup_keys` data from store v10 to latest,
    /// on a store with encryption enabled.
    #[async_test]
    async fn test_v10_v11_migration_encrypted() {
        let cipher = StoreCipher::new().unwrap();
        test_v10_v11_migration_with_cipher("test_v10_migration_encrypted", Some(Arc::new(cipher)))
            .await;
    }

    /// Helper function for `test_v10_v11_migration_{un,}encrypted`: test
    /// migrating `backup_keys` data from store v10 to store v11.
    async fn test_v10_v11_migration_with_cipher(
        db_prefix: &str,
        store_cipher: Option<Arc<StoreCipher>>,
    ) {
        let _ = make_tracing_subscriber(None).try_init();
        let db_name = format!("{db_prefix}::matrix-sdk-crypto");

        // delete the db in case it was used in a previous run
        let _ = IdbDatabase::delete_by_name(&db_name);

        // Given a DB with data in it as it was at v5
        let db = create_v5_db(&db_name).await.unwrap();

        let txn = db
            .transaction_on_one_with_mode(keys::BACKUP_KEYS, IdbTransactionMode::Readwrite)
            .unwrap();
        let store = txn.object_store(keys::BACKUP_KEYS).unwrap();
        store
            .put_key_val(
                &JsValue::from_str(old_keys::BACKUP_KEY_V1),
                &serialize_value_as_legacy(&store_cipher, &"1".to_owned()),
            )
            .unwrap();
        db.close();

        // When I open a store based on that DB, triggering an upgrade
        let store =
            IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, store_cipher).await.unwrap();

        // Then I can read the backup settings
        let backup_data = store.load_backup_keys().await.unwrap();
        assert_eq!(backup_data.backup_version, Some("1".to_owned()));
    }

    async fn create_v5_db(name: &str) -> std::result::Result<IdbDatabase, DomException> {
        v0_to_v5::schema_add(name).await?;
        IdbDatabase::open_u32(name, 5)?.await
    }

    /// Opening a db that has been upgraded to MAX_SUPPORTED_SCHEMA_VERSION
    /// should be ok
    #[async_test]
    async fn test_can_open_max_supported_schema_version() {
        let _ = make_tracing_subscriber(None).try_init();

        let db_prefix = "test_can_open_max_supported_schema_version";
        // Create a database at MAX_SUPPORTED_SCHEMA_VERSION
        create_future_schema_db(db_prefix, MAX_SUPPORTED_SCHEMA_VERSION).await;

        // Now, try opening it again
        IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, None).await.unwrap();
    }

    /// Opening a db that has been upgraded beyond MAX_SUPPORTED_SCHEMA_VERSION
    /// should throw an error
    #[async_test]
    async fn test_can_not_open_too_new_db() {
        let _ = make_tracing_subscriber(None).try_init();

        let db_prefix = "test_can_not_open_too_new_db";
        // Create a database at MAX_SUPPORTED_SCHEMA_VERSION+1
        create_future_schema_db(db_prefix, MAX_SUPPORTED_SCHEMA_VERSION + 1).await;

        // Now, try opening it again
        let result = IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, None).await;
        assert_matches!(
            result,
            Err(IndexeddbCryptoStoreError::SchemaTooNewError {
                max_supported_version,
                current_version
            }) => {
                assert_eq!(max_supported_version, MAX_SUPPORTED_SCHEMA_VERSION);
                assert_eq!(current_version, MAX_SUPPORTED_SCHEMA_VERSION + 1);
            }
        );
    }

    // Create a database, and increase its schema version to the given version
    // number.
    async fn create_future_schema_db(db_prefix: &str, version: u32) {
        let db_name = format!("{db_prefix}::matrix-sdk-crypto");

        // delete the db in case it was used in a previous run
        let _ = IdbDatabase::delete_by_name(&db_name);

        // Open, and close, the store at the regular version.
        IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, None).await.unwrap();

        // Now upgrade to the given version, keeping a record of the previous version so
        // that we can double-check it.
        let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&db_name, version).unwrap();

        let old_version: Rc<Cell<Option<u32>>> = Rc::new(Cell::new(None));
        let old_version2 = old_version.clone();
        db_req.set_on_upgrade_needed(Some(move |evt: &IdbVersionChangeEvent| {
            old_version2.set(Some(evt.old_version() as u32));
            Ok(())
        }));

        let db = db_req.await.unwrap();
        assert_eq!(
            old_version.get(),
            Some(EXPECTED_SCHEMA_VERSION),
            "Existing store had unexpected version number"
        );
        db.close();
    }

    /// Emulate the old behaviour of [`IndexeddbSerializer::serialize_value`].
    ///
    /// We used to use an inefficient format for serializing objects in the
    /// indexeddb store. This replicates that old behaviour, for testing
    /// purposes.
    fn serialize_value_as_legacy<T: Serialize>(
        store_cipher: &Option<Arc<StoreCipher>>,
        value: &T,
    ) -> JsValue {
        if let Some(cipher) = &store_cipher {
            // Old-style serialization/encryption. First JSON-serialize into a byte array...
            let data = serde_json::to_vec(&value).unwrap();
            // ... then encrypt...
            let encrypted = cipher.encrypt_value_data(data).unwrap();
            // ... then JSON-serialize into another byte array ...
            let value = serde_json::to_vec(&encrypted).unwrap();
            // and finally, turn it into a javascript array.
            JsValue::from_serde(&value).unwrap()
        } else {
            JsValue::from_serde(&value).unwrap()
        }
    }
}