matrix_sdk_indexeddb/crypto_store/migrations/mod.rs

// Copyright 2023 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::ops::Deref;

use indexed_db_futures::{prelude::*, web_sys::DomException};
use tracing::info;
use wasm_bindgen::JsValue;

use crate::{
    crypto_store::{indexeddb_serializer::IndexeddbSerializer, Result},
    IndexeddbCryptoStoreError,
};

mod old_keys;
mod v0_to_v5;
mod v10_to_v11;
mod v11_to_v12;
mod v5_to_v7;
mod v7;
mod v7_to_v8;
mod v8_to_v10;

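/// A wrapper around an open IndexedDB database, used while migrating data to
/// a new schema version. See [`MigrationDb::new`] and the [`Drop`] impl for
/// the logging and cleanup behaviour.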
struct MigrationDb {
    db: IdbDatabase,
    next_version: u32,
}

impl MigrationDb {
    /// Create an Indexed DB wrapper that manages a database migration,
    /// logging messages before and after the migration, and automatically
    /// closing the DB when this object is dropped.
    async fn new(name: &str, next_version: u32) -> Result<Self> {
        info!("IndexeddbCryptoStore migrate data before v{next_version} starting");
        Ok(Self { db: IdbDatabase::open(name)?.await?, next_version })
    }
}

impl Deref for MigrationDb {
    type Target = IdbDatabase;

    fn deref(&self) -> &Self::Target {
        &self.db
    }
}

impl Drop for MigrationDb {
    fn drop(&mut self) {
        let version = self.next_version;
        info!("IndexeddbCryptoStore migrate data before v{version} finished");
        self.db.close();
    }
}

/// The latest version of the schema we can support. If we encounter a database
/// version with a higher schema version, we will return an error.
///
/// A note on how this works.
///
/// Normally, when you open an indexeddb database, you tell it the "schema
/// version" that you know about. If the existing database is older than
/// that, it lets you run a migration. If the existing database is newer, then
/// it assumes that there have been incompatible schema changes and complains
/// with an error ("The requested version (10) is less than the existing version
/// (11)").
///
/// The problem with this is that, if someone upgrades their installed
/// application, then realises it was a terrible mistake and tries to roll
/// back, then suddenly every user's session is completely hosed. (They see
/// an "unable to restore session" dialog.) Often, schema updates aren't
/// actually backwards-incompatible — for example, existing code will work just
/// fine if someone adds a new store or a new index — so this approach is too
/// heavy-handed.
///
/// The solution we take here is to say "any schema changes up to
/// [`MAX_SUPPORTED_SCHEMA_VERSION`] will be backwards-compatible". If, at some
/// point, we do make a breaking change, we will give that schema version a
/// higher number. Then, rather than using the implicit version check that comes
/// with `indexedDB.open(name, version)`, we explicitly check the version
/// ourselves.
///
/// It is expected that we will use version numbers that are multiples of 100 to
/// represent breaking changes — for example, version 100 is a breaking change,
/// as is version 200, but versions 101-199 are all backwards compatible with
/// version 100. In other words, if you divide by 100, you get something
/// approaching semver: version 200 is major version 2, minor version 0.
const MAX_SUPPORTED_SCHEMA_VERSION: u32 = 99;
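
// A minimal illustration of the convention described above. This helper is not
// used by the migration code itself and its name is ours, purely for the
// example: dividing a schema version by 100 gives the "major" (breaking)
// component, and the remainder gives the backwards-compatible "minor" one.
#[allow(dead_code)]
fn schema_version_as_semver(version: u32) -> (u32, u32) {
    // e.g. 200 -> (2, 0), 101 -> (1, 1), 99 -> (0, 99)
    (version / 100, version % 100)
}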

/// Open the indexeddb with the given name, upgrading it to the latest version
/// of the schema if necessary.
pub async fn open_and_upgrade_db(
    name: &str,
    serializer: &IndexeddbSerializer,
) -> Result<IdbDatabase, IndexeddbCryptoStoreError> {
    // Move the DB version up from where it is to the latest version.
    //
    // Schema changes need to be separate from data migrations, so we often
    // have a pattern of:
    //
    // 1. schema_add - create new object stores, indices etc.
    // 2. data_migrate - move data from the old stores to the new ones
    // 3. schema_delete - delete any now-unused stores etc.
    //
    // Migrations like these require the schema version to be bumped twice,
    // because of the separate "add" and "delete" stages.

    let old_version = db_version(name).await?;

    // If the database version is too new, bail out. We assume that schema updates
    // all the way up to `MAX_SUPPORTED_SCHEMA_VERSION` will be
    // backwards-compatible.
    if old_version > MAX_SUPPORTED_SCHEMA_VERSION {
        return Err(IndexeddbCryptoStoreError::SchemaTooNewError {
            max_supported_version: MAX_SUPPORTED_SCHEMA_VERSION,
            current_version: old_version,
        });
    }

    if old_version < 5 {
        v0_to_v5::schema_add(name).await?;
    }

    if old_version < 6 {
        v5_to_v7::schema_add(name).await?;
    }
    if old_version < 7 {
        v5_to_v7::data_migrate(name, serializer).await?;
        v5_to_v7::schema_delete(name).await?;
    }

    if old_version < 8 {
        v7_to_v8::data_migrate(name, serializer).await?;
        v7_to_v8::schema_bump(name).await?;
    }

    if old_version < 9 {
        v8_to_v10::schema_add(name).await?;
    }
    if old_version < 10 {
        v8_to_v10::data_migrate(name, serializer).await?;
        v8_to_v10::schema_delete(name).await?;
    }

    if old_version < 11 {
        v10_to_v11::data_migrate(name, serializer).await?;
        v10_to_v11::schema_bump(name).await?;
    }

    if old_version < 12 {
        v11_to_v12::schema_add(name).await?;
    }

    // If you add more migrations here, you'll need to update
    // `tests::EXPECTED_SCHEMA_VERSION`.

    // NOTE: IF YOU MAKE A BREAKING CHANGE TO THE SCHEMA, BUMP THE SCHEMA VERSION TO
    // SOMETHING HIGHER THAN `MAX_SUPPORTED_SCHEMA_VERSION`! (And then bump
    // `MAX_SUPPORTED_SCHEMA_VERSION` itself to one less than the next multiple of 100.)

    // Open and return the DB (we know it's at the latest version)
    Ok(IdbDatabase::open(name)?.await?)
}

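/// Open the database with the given name, purely to read its current schema
/// version, and close it again.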
async fn db_version(name: &str) -> Result<u32, IndexeddbCryptoStoreError> {
    let db = IdbDatabase::open(name)?.await?;
    let old_version = db.version() as u32;
    db.close();
    Ok(old_version)
}

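/// The schema version of a database before an upgrade, as passed to the
/// closure given to [`do_schema_upgrade`].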
type OldVersion = u32;

/// Run a database schema upgrade operation.
///
/// # Arguments
///
/// * `name` - name of the indexeddb database to be upgraded.
/// * `version` - version we are upgrading to.
/// * `f` - closure which will be called if the database is below the version
///   given. It will be called with three arguments `(db, txn, oldver)`, where:
///   * `db` - the [`IdbDatabase`]
///   * `txn` - the database transaction: an [`IdbTransaction`]
///   * `oldver` - the version number before the upgrade.
async fn do_schema_upgrade<F>(name: &str, version: u32, f: F) -> Result<(), DomException>
where
    F: Fn(&IdbDatabase, IdbTransaction<'_>, OldVersion) -> Result<(), JsValue> + 'static,
{
    info!("IndexeddbCryptoStore upgrade schema -> v{version} starting");
    let mut db_req: OpenDbRequest = IdbDatabase::open_u32(name, version)?;

    db_req.set_on_upgrade_needed(Some(move |evt: &IdbVersionChangeEvent| {
        // Even if the web-sys bindings expose the version as a f64, the IndexedDB API
        // works with an unsigned integer.
        // See <https://github.com/rustwasm/wasm-bindgen/issues/1149>
        let old_version = evt.old_version() as u32;

        // Run the upgrade code we were supplied
        f(evt.db(), evt.transaction(), old_version)
    }));

    let db = db_req.await?;
    db.close();
    info!("IndexeddbCryptoStore upgrade schema -> v{version} complete");
    Ok(())
}
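
// Purely illustrative sketch (not one of the real migrations, which live in
// the `vN_to_vM` submodules): a hypothetical `schema_add` step built on
// `do_schema_upgrade` and the index helpers defined below. The store name,
// index name, key path and target version here are all made up.
#[allow(dead_code)]
async fn example_schema_add(name: &str) -> Result<(), DomException> {
    do_schema_upgrade(name, 13, |db, _txn, _old_version| {
        // Create a new object store and give it a non-unique index.
        let store = db.create_object_store("example_store")?;
        add_nonunique_index(&store, "example_index", "example_key_path")?;
        Ok(())
    })
    .await
}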

/// Add a non-unique index, with the given name and key path, to the given
/// object store.
fn add_nonunique_index<'a>(
    object_store: &'a IdbObjectStore<'a>,
    name: &str,
    key_path: &str,
) -> Result<IdbIndex<'a>, DomException> {
    let mut params = IdbIndexParameters::new();
    params.unique(false);
    object_store.create_index_with_params(name, &IdbKeyPath::str(key_path), &params)
}

/// Add a unique index, with the given name and key path, to the given object
/// store.
fn add_unique_index<'a>(
    object_store: &'a IdbObjectStore<'a>,
    name: &str,
    key_path: &str,
) -> Result<IdbIndex<'a>, DomException> {
    let mut params = IdbIndexParameters::new();
    params.unique(true);
    object_store.create_index_with_params(name, &IdbKeyPath::str(key_path), &params)
}

#[cfg(all(test, target_arch = "wasm32"))]
mod tests {
    use std::{cell::Cell, future::Future, rc::Rc, sync::Arc};

    use assert_matches::assert_matches;
    use gloo_utils::format::JsValueSerdeExt;
    use indexed_db_futures::prelude::*;
    use matrix_sdk_common::js_tracing::make_tracing_subscriber;
    use matrix_sdk_crypto::{
        olm::{InboundGroupSession, SenderData, SessionKey},
        store::CryptoStore,
        types::EventEncryptionAlgorithm,
        vodozemac::{Curve25519PublicKey, Curve25519SecretKey, Ed25519PublicKey, Ed25519SecretKey},
    };
    use matrix_sdk_store_encryption::StoreCipher;
    use matrix_sdk_test::async_test;
    use ruma::{room_id, OwnedRoomId, RoomId};
    use serde::Serialize;
    use tracing_subscriber::util::SubscriberInitExt;
    use web_sys::console;

    use super::{v0_to_v5, v7::InboundGroupSessionIndexedDbObject2};
    use crate::{
        crypto_store::{keys, migrations::*, InboundGroupSessionIndexedDbObject},
        IndexeddbCryptoStore,
    };

    wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);

    /// The schema version we expect after we open the store.
    const EXPECTED_SCHEMA_VERSION: u32 = 12;

    /// Adjust this to run a more comprehensive perf test
    const NUM_RECORDS_FOR_PERF: usize = 2_000;

    /// Make lots of sessions and see how long it takes to count them in v8
    #[async_test]
    async fn test_count_lots_of_sessions_v8() {
        let cipher = Arc::new(StoreCipher::new().unwrap());
        let serializer = IndexeddbSerializer::new(Some(cipher.clone()));
        // Session keys are slow to create, so make one upfront and use it for every
        // session
        let session_key = create_session_key();

        // Create lots of InboundGroupSessionIndexedDbObject2 objects
        let mut objects = Vec::with_capacity(NUM_RECORDS_FOR_PERF);
        for i in 0..NUM_RECORDS_FOR_PERF {
            objects.push(
                create_inbound_group_sessions2_record(i, &session_key, &cipher, &serializer).await,
            );
        }

        // Create a DB with an inbound_group_sessions2 store
        let db_prefix = "count_lots_of_sessions_v8";
        let db = create_db(db_prefix).await;
        let transaction = create_transaction(&db, db_prefix).await;
        let store = create_store(&transaction, db_prefix).await;

        // Check how long it takes to insert these records
        measure_performance("Inserting", "v8", NUM_RECORDS_FOR_PERF, || async {
            for (key, session_js) in objects.iter() {
                store.add_key_val(key, session_js).unwrap().await.unwrap();
            }
        })
        .await;

        // Check how long it takes to count these records
        measure_performance("Counting", "v8", NUM_RECORDS_FOR_PERF, || async {
            store.count().unwrap().await.unwrap();
        })
        .await;
    }

    /// Make lots of sessions and see how long it takes to count them in v10
    #[async_test]
    async fn test_count_lots_of_sessions_v10() {
        let serializer = IndexeddbSerializer::new(Some(Arc::new(StoreCipher::new().unwrap())));

        // Session keys are slow to create, so make one upfront and use it for every
        // session
        let session_key = create_session_key();

        // Create lots of InboundGroupSessionIndexedDbObject objects
        let mut objects = Vec::with_capacity(NUM_RECORDS_FOR_PERF);
        for i in 0..NUM_RECORDS_FOR_PERF {
            objects.push(create_inbound_group_sessions3_record(i, &session_key, &serializer).await);
        }

        // Create a DB with an inbound_group_sessions3 store
        let db_prefix = "count_lots_of_sessions_v10";
        let db = create_db(db_prefix).await;
        let transaction = create_transaction(&db, db_prefix).await;
        let store = create_store(&transaction, db_prefix).await;

        // Check how long it takes to insert these records
        measure_performance("Inserting", "v10", NUM_RECORDS_FOR_PERF, || async {
            for (key, session_js) in objects.iter() {
                store.add_key_val(key, session_js).unwrap().await.unwrap();
            }
        })
        .await;

        // Check how long it takes to count these records
        measure_performance("Counting", "v10", NUM_RECORDS_FOR_PERF, || async {
            store.count().unwrap().await.unwrap();
        })
        .await;
    }

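    /// Create a test IndexedDB database named `{db_prefix}::matrix-sdk-crypto`
    /// containing a single object store named `{db_prefix}_store`.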
    async fn create_db(db_prefix: &str) -> IdbDatabase {
        let db_name = format!("{db_prefix}::matrix-sdk-crypto");
        let store_name = format!("{db_prefix}_store");
        let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&db_name, 1).unwrap();
        db_req.set_on_upgrade_needed(Some(
            move |evt: &IdbVersionChangeEvent| -> Result<(), JsValue> {
                evt.db().create_object_store(&store_name)?;
                Ok(())
            },
        ));
        db_req.await.unwrap()
    }

    async fn create_transaction<'a>(db: &'a IdbDatabase, db_prefix: &str) -> IdbTransaction<'a> {
        let store_name = format!("{db_prefix}_store");
        db.transaction_on_one_with_mode(&store_name, IdbTransactionMode::Readwrite).unwrap()
    }

    async fn create_store<'a>(
        transaction: &'a IdbTransaction<'a>,
        db_prefix: &str,
    ) -> IdbObjectStore<'a> {
        let store_name = format!("{db_prefix}_store");
        transaction.object_store(&store_name).unwrap()
    }

    fn create_session_key() -> SessionKey {
        SessionKey::from_base64(
            "\
            AgAAAADBy9+YIYTIqBjFT67nyi31gIOypZQl8day2hkhRDCZaHoG+cZh4tZLQIAZimJail0\
            0zq4DVJVljO6cZ2t8kIto/QVk+7p20Fcf2nvqZyL2ZCda2Ei7VsqWZHTM/gqa2IU9+ktkwz\
            +KFhENnHvDhG9f+hjsAPZd5mTTpdO+tVcqtdWhX4dymaJ/2UpAAjuPXQW+nXhQWQhXgXOUa\
            JCYurJtvbCbqZGeDMmVIoqukBs2KugNJ6j5WlTPoeFnMl6Guy9uH2iWWxGg8ZgT2xspqVl5\
            CwujjC+m7Dh1toVkvu+bAw\
            ",
        )
        .unwrap()
    }

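    /// Build a `(key, value)` pair in the old `inbound_group_sessions2` format,
    /// encrypting the pickled session with the supplied cipher.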
    async fn create_inbound_group_sessions2_record(
        i: usize,
        session_key: &SessionKey,
        cipher: &Arc<StoreCipher>,
        serializer: &IndexeddbSerializer,
    ) -> (JsValue, JsValue) {
        let session = create_inbound_group_session(i, session_key);
        let pickled_session = session.pickle().await;
        let session_dbo = InboundGroupSessionIndexedDbObject2 {
            pickled_session: cipher.encrypt_value(&pickled_session).unwrap(),
            needs_backup: false,
        };
        let session_js: JsValue = serde_wasm_bindgen::to_value(&session_dbo).unwrap();

        let key = serializer.encode_key(
            old_keys::INBOUND_GROUP_SESSIONS_V2,
            (&session.room_id, session.session_id()),
        );

        (key, session_js)
    }

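    /// Build a `(key, value)` pair in the current `inbound_group_sessions3`
    /// format, using the serializer to (maybe) encrypt the pickled session.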
    async fn create_inbound_group_sessions3_record(
        i: usize,
        session_key: &SessionKey,
        serializer: &IndexeddbSerializer,
    ) -> (JsValue, JsValue) {
        let session = create_inbound_group_session(i, session_key);
        let pickled_session = session.pickle().await;

        let session_dbo = InboundGroupSessionIndexedDbObject {
            pickled_session: serializer.maybe_encrypt_value(pickled_session).unwrap(),
            session_id: None,
            needs_backup: false,
            backed_up_to: -1,
            sender_key: None,
            sender_data_type: None,
        };
        let session_js: JsValue = serde_wasm_bindgen::to_value(&session_dbo).unwrap();

        let key = serializer.encode_key(
            old_keys::INBOUND_GROUP_SESSIONS_V2,
            (&session.room_id, session.session_id()),
        );

        (key, session_js)
    }

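    /// Run `f`, logging to the browser console how long it took to process
    /// `num_records` records with the given schema.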
    async fn measure_performance<Fut, R>(
        name: &str,
        schema: &str,
        num_records: usize,
        f: impl Fn() -> Fut,
    ) -> R
    where
        Fut: Future<Output = R>,
    {
        let window = web_sys::window().expect("should have a window in this context");
        let performance = window.performance().expect("performance should be available");
        let start = performance.now();

        let ret = f().await;

        let elapsed = performance.now() - start;
        console::log_1(
            &format!("{name} {num_records} records with {schema} schema took {elapsed:.2}ms.")
                .into(),
        );

        ret
    }

    /// Create an example InboundGroupSession of known size
    fn create_inbound_group_session(i: usize, session_key: &SessionKey) -> InboundGroupSession {
        let sender_key = Curve25519PublicKey::from_bytes([
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
            24, 25, 26, 27, 28, 29, 30, 31,
        ]);
        let signing_key = Ed25519PublicKey::from_slice(&[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
            24, 25, 26, 27, 28, 29, 30, 31,
        ])
        .unwrap();
        let room_id: OwnedRoomId = format!("!a{i}:b.co").try_into().unwrap();
        let encryption_algorithm = EventEncryptionAlgorithm::MegolmV1AesSha2;
        let history_visibility = None;

        InboundGroupSession::new(
            sender_key,
            signing_key,
            &room_id,
            session_key,
            SenderData::unknown(),
            encryption_algorithm,
            history_visibility,
            false,
        )
        .unwrap()
    }

    /// Test migrating `inbound_group_sessions` data from store v5 to latest,
    /// on a store with encryption disabled.
    #[async_test]
    async fn test_v8_v10_v12_migration_unencrypted() {
        test_v8_v10_v12_migration_with_cipher("test_v8_migration_unencrypted", None).await
    }

    /// Test migrating `inbound_group_sessions` data from store v5 to latest,
    /// on a store with encryption enabled.
    #[async_test]
    async fn test_v8_v10_v12_migration_encrypted() {
        let cipher = StoreCipher::new().unwrap();
        test_v8_v10_v12_migration_with_cipher(
            "test_v8_migration_encrypted",
            Some(Arc::new(cipher)),
        )
        .await;
    }

    /// Helper function for `test_v8_v10_v12_migration_{un,}encrypted`: test
    /// migrating `inbound_group_sessions` data from store v5 to store v12.
    async fn test_v8_v10_v12_migration_with_cipher(
        db_prefix: &str,
        store_cipher: Option<Arc<StoreCipher>>,
    ) {
        let _ = make_tracing_subscriber(None).try_init();
        let db_name = format!("{db_prefix}::matrix-sdk-crypto");

        // delete the db in case it was used in a previous run
        let _ = IdbDatabase::delete_by_name(&db_name);

        // Given a DB with data in it as it was at v5
        let room_id = room_id!("!test:localhost");
        let (backed_up_session, not_backed_up_session) = create_sessions(&room_id);
        populate_v5_db(
            &db_name,
            store_cipher.clone(),
            &[&backed_up_session, &not_backed_up_session],
        )
        .await;

        // When I open a store based on that DB, triggering an upgrade
        let store =
            IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, store_cipher).await.unwrap();

        // Then I can find the sessions using their keys and their info is correct
        let fetched_backed_up_session = store
            .get_inbound_group_session(room_id, backed_up_session.session_id())
            .await
            .unwrap()
            .unwrap();
        assert_eq!(fetched_backed_up_session.session_id(), backed_up_session.session_id());

        let fetched_not_backed_up_session = store
            .get_inbound_group_session(room_id, not_backed_up_session.session_id())
            .await
            .unwrap()
            .unwrap();
        assert_eq!(fetched_not_backed_up_session.session_id(), not_backed_up_session.session_id());

        // For v8: the backed_up info is preserved
        assert!(fetched_backed_up_session.backed_up());
        assert!(!fetched_not_backed_up_session.backed_up());

        // For v10: they have the backed_up_to property and it is indexed
        assert_matches_v10_schema(&db_name, &store, &fetched_backed_up_session).await;

        // For v12: they have the session_id, sender_key and sender_data_type properties
        // and they are indexed
        assert_matches_v12_schema(&db_name, &store, &fetched_backed_up_session).await;
    }

    async fn assert_matches_v10_schema(
        db_name: &str,
        store: &IndexeddbCryptoStore,
        fetched_backed_up_session: &InboundGroupSession,
    ) {
        let db = IdbDatabase::open(&db_name).unwrap().await.unwrap();
        assert!(db.version() >= 10.0);
        let transaction = db.transaction_on_one("inbound_group_sessions3").unwrap();
        let raw_store = transaction.object_store("inbound_group_sessions3").unwrap();
        let key = store.serializer.encode_key(
            keys::INBOUND_GROUP_SESSIONS_V3,
            (fetched_backed_up_session.room_id(), fetched_backed_up_session.session_id()),
        );
        let idb_object: InboundGroupSessionIndexedDbObject =
            serde_wasm_bindgen::from_value(raw_store.get(&key).unwrap().await.unwrap().unwrap())
                .unwrap();

        assert_eq!(idb_object.backed_up_to, -1);
        assert!(raw_store.index_names().find(|idx| idx == "backed_up_to").is_some());

        db.close();
    }

    async fn assert_matches_v12_schema(
        db_name: &str,
        store: &IndexeddbCryptoStore,
        session: &InboundGroupSession,
    ) {
        let db = IdbDatabase::open(&db_name).unwrap().await.unwrap();
        assert!(db.version() >= 12.0);
        let transaction = db.transaction_on_one("inbound_group_sessions3").unwrap();
        let raw_store = transaction.object_store("inbound_group_sessions3").unwrap();
        let key = store
            .serializer
            .encode_key(keys::INBOUND_GROUP_SESSIONS_V3, (session.room_id(), session.session_id()));
        let idb_object: InboundGroupSessionIndexedDbObject =
            serde_wasm_bindgen::from_value(raw_store.get(&key).unwrap().await.unwrap().unwrap())
                .unwrap();

        assert_eq!(
            idb_object.session_id,
            Some(
                store
                    .serializer
                    .encode_key_as_string(keys::INBOUND_GROUP_SESSIONS_V3, session.session_id())
            )
        );
        assert_eq!(
            idb_object.sender_key,
            Some(store.serializer.encode_key_as_string(
                keys::INBOUND_GROUP_SESSIONS_V3,
                session.sender_key().to_base64()
            ))
        );
        assert_eq!(idb_object.sender_data_type, Some(session.sender_data_type() as u8));
        assert!(raw_store
            .index_names()
            .find(|idx| idx == "inbound_group_session_sender_key_sender_data_type_idx")
            .is_some());

        db.close();
    }

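    /// Create a pair of `InboundGroupSession`s for the given room, marking the
    /// first one as backed up and leaving the second one not backed up.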
    fn create_sessions(room_id: &RoomId) -> (InboundGroupSession, InboundGroupSession) {
        let curve_key = Curve25519PublicKey::from(&Curve25519SecretKey::new());
        let ed_key = Ed25519SecretKey::new().public_key();

        let backed_up_session = InboundGroupSession::new(
            curve_key,
            ed_key,
            room_id,
            &SessionKey::from_base64(
                "AgAAAABTyn3CR8mzAxhsHH88td5DrRqfipJCnNbZeMrfzhON6O1Cyr9ewx/sDFLO6\
                 +NvyW92yGvMub7nuAEQb+SgnZLm7nwvuVvJgSZKpoJMVliwg8iY9TXKFT286oBtT2\
                 /8idy6TcpKax4foSHdMYlZXu5zOsGDdd9eYnYHpUEyDT0utuiaakZM3XBMNLEVDj9\
                 Ps929j1FGgne1bDeFVoty2UAOQK8s/0JJigbKSu6wQ/SzaCYpE/LD4Egk2Nxs1JE2\
                 33ii9J8RGPYOp7QWl0kTEc8mAlqZL7mKppo9AwgtmYweAg",
            )
            .unwrap(),
            SenderData::legacy(),
            EventEncryptionAlgorithm::MegolmV1AesSha2,
            None,
            false,
        )
        .unwrap();
        backed_up_session.mark_as_backed_up();

        let not_backed_up_session = InboundGroupSession::new(
            curve_key,
            ed_key,
            room_id,
            &SessionKey::from_base64(
                "AgAAAACO1PjBdqucFUcNFU6JgXYAi7KMeeUqUibaLm6CkHJcMiDTFWq/K5SFAukJc\
                 WjeyOpnZr4vpezRlbvNaQpNPMub2Cs2u14fHj9OpKFD7c4hFS4j94q4pTLZly3qEV\
                 BIjWdOpcIVfN7QVGVIxYiI6KHEddCHrNCo9fc8GUdfzrMnmUooQr/m4ZAkRdErzUH\
                 uUAlUBwOKcPi7Cs/KrMw/sHCRDkTntHZ3BOrzJsAVbHUgq+8/Sqy3YE+CX6uEnig+\
                 1NWjZD9f1vvXnSKKDdHj1927WFMFZ/yYc24607zEVUaODQ",
            )
            .unwrap(),
            SenderData::legacy(),
            EventEncryptionAlgorithm::MegolmV1AesSha2,
            None,
            false,
        )
        .unwrap();

        (backed_up_session, not_backed_up_session)
    }

    async fn populate_v5_db(
        db_name: &str,
        store_cipher: Option<Arc<StoreCipher>>,
        session_entries: &[&InboundGroupSession],
    ) {
        // Schema V7 migrated the inbound group sessions to a new format.
        // To test, first create a database and populate it with the *old* style of
        // entry.
        let db = create_v5_db(&db_name).await.unwrap();

        let serializer = IndexeddbSerializer::new(store_cipher.clone());

        let txn = db
            .transaction_on_one_with_mode(
                old_keys::INBOUND_GROUP_SESSIONS_V1,
                IdbTransactionMode::Readwrite,
            )
            .unwrap();
        let sessions = txn.object_store(old_keys::INBOUND_GROUP_SESSIONS_V1).unwrap();
        for session in session_entries {
            let room_id = session.room_id();
            let session_id = session.session_id();
            let key =
                serializer.encode_key(old_keys::INBOUND_GROUP_SESSIONS_V1, (room_id, session_id));
            let pickle = session.pickle().await;

            // Serialize the session with the old style of serialization, since that's what
            // we used at the time.
            let serialized_session = serialize_value_as_legacy(&store_cipher, &pickle);
            sessions.put_key_val(&key, &serialized_session).unwrap();
        }
        txn.await.into_result().unwrap();

        // Now close our DB; the test then reopens it properly and checks that
        // the data can still be read.
        db.close();
    }

    /// Test migrating `backup_keys` data from store v10 to latest,
    /// on a store with encryption disabled.
    #[async_test]
    async fn test_v10_v11_migration_unencrypted() {
        test_v10_v11_migration_with_cipher("test_v10_migration_unencrypted", None).await
    }

    /// Test migrating `backup_keys` data from store v10 to latest,
    /// on a store with encryption enabled.
    #[async_test]
    async fn test_v10_v11_migration_encrypted() {
        let cipher = StoreCipher::new().unwrap();
        test_v10_v11_migration_with_cipher("test_v10_migration_encrypted", Some(Arc::new(cipher)))
            .await;
    }

    /// Helper function for `test_v10_v11_migration_{un,}encrypted`: test
    /// migrating `backup_keys` data from store v10 to store v11.
    async fn test_v10_v11_migration_with_cipher(
        db_prefix: &str,
        store_cipher: Option<Arc<StoreCipher>>,
    ) {
        let _ = make_tracing_subscriber(None).try_init();
        let db_name = format!("{db_prefix}::matrix-sdk-crypto");

        // delete the db in case it was used in a previous run
        let _ = IdbDatabase::delete_by_name(&db_name);

        // Given a DB with data in it as it was at v5
        let db = create_v5_db(&db_name).await.unwrap();

        let txn = db
            .transaction_on_one_with_mode(keys::BACKUP_KEYS, IdbTransactionMode::Readwrite)
            .unwrap();
        let store = txn.object_store(keys::BACKUP_KEYS).unwrap();
        store
            .put_key_val(
                &JsValue::from_str(old_keys::BACKUP_KEY_V1),
                &serialize_value_as_legacy(&store_cipher, &"1".to_owned()),
            )
            .unwrap();
        db.close();

        // When I open a store based on that DB, triggering an upgrade
        let store =
            IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, store_cipher).await.unwrap();

        // Then I can read the backup settings
        let backup_data = store.load_backup_keys().await.unwrap();
        assert_eq!(backup_data.backup_version, Some("1".to_owned()));
    }

    async fn create_v5_db(name: &str) -> std::result::Result<IdbDatabase, DomException> {
        v0_to_v5::schema_add(name).await?;
        IdbDatabase::open_u32(name, 5)?.await
    }

    /// Opening a db that has been upgraded to MAX_SUPPORTED_SCHEMA_VERSION
    /// should be ok
    #[async_test]
    async fn test_can_open_max_supported_schema_version() {
        let _ = make_tracing_subscriber(None).try_init();

        let db_prefix = "test_can_open_max_supported_schema_version";
        // Create a database at MAX_SUPPORTED_SCHEMA_VERSION
        create_future_schema_db(db_prefix, MAX_SUPPORTED_SCHEMA_VERSION).await;

        // Now, try opening it again
        IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, None).await.unwrap();
    }

    /// Opening a db that has been upgraded beyond MAX_SUPPORTED_SCHEMA_VERSION
    /// should throw an error
    #[async_test]
    async fn test_can_not_open_too_new_db() {
        let _ = make_tracing_subscriber(None).try_init();

        let db_prefix = "test_can_not_open_too_new_db";
        // Create a database at MAX_SUPPORTED_SCHEMA_VERSION+1
        create_future_schema_db(db_prefix, MAX_SUPPORTED_SCHEMA_VERSION + 1).await;

        // Now, try opening it again
        let result = IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, None).await;
        assert_matches!(
            result,
            Err(IndexeddbCryptoStoreError::SchemaTooNewError {
                max_supported_version,
                current_version
            }) => {
                assert_eq!(max_supported_version, MAX_SUPPORTED_SCHEMA_VERSION);
                assert_eq!(current_version, MAX_SUPPORTED_SCHEMA_VERSION + 1);
            }
        );
    }

    // Create a database, and increase its schema version to the given version
    // number.
    async fn create_future_schema_db(db_prefix: &str, version: u32) {
        let db_name = format!("{db_prefix}::matrix-sdk-crypto");

        // delete the db in case it was used in a previous run
        let _ = IdbDatabase::delete_by_name(&db_name);

        // Open, and close, the store at the regular version.
        IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, None).await.unwrap();

        // Now upgrade to the given version, keeping a record of the previous version so
        // that we can double-check it.
        let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&db_name, version).unwrap();

        let old_version: Rc<Cell<Option<u32>>> = Rc::new(Cell::new(None));
        let old_version2 = old_version.clone();
        db_req.set_on_upgrade_needed(Some(move |evt: &IdbVersionChangeEvent| {
            old_version2.set(Some(evt.old_version() as u32));
            Ok(())
        }));

        let db = db_req.await.unwrap();
        assert_eq!(
            old_version.get(),
            Some(EXPECTED_SCHEMA_VERSION),
            "Existing store had unexpected version number"
        );
        db.close();
    }

    /// Emulate the old behaviour of [`IndexeddbSerializer::serialize_value`].
    ///
    /// We used to use an inefficient format for serializing objects in the
    /// indexeddb store. This replicates that old behaviour, for testing
    /// purposes.
    fn serialize_value_as_legacy<T: Serialize>(
        store_cipher: &Option<Arc<StoreCipher>>,
        value: &T,
    ) -> JsValue {
        if let Some(cipher) = &store_cipher {
            // Old-style serialization/encryption. First JSON-serialize into a byte array...
            let data = serde_json::to_vec(&value).unwrap();
            // ... then encrypt...
            let encrypted = cipher.encrypt_value_data(data).unwrap();
            // ... then JSON-serialize into another byte array ...
            let value = serde_json::to_vec(&encrypted).unwrap();
            // and finally, turn it into a javascript array.
            JsValue::from_serde(&value).unwrap()
        } else {
            JsValue::from_serde(&value).unwrap()
        }
    }
}