matrix_sdk_crypto/store/
memorystore.rs

// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{
    collections::{BTreeMap, HashMap, HashSet},
    convert::Infallible,
    sync::Arc,
};

use async_trait::async_trait;
use matrix_sdk_common::{
    locks::RwLock as StdRwLock, store_locks::memory_store_helper::try_take_leased_lock,
};
use ruma::{
    events::secret::request::SecretName, time::Instant, DeviceId, OwnedDeviceId, OwnedRoomId,
    OwnedTransactionId, OwnedUserId, RoomId, TransactionId, UserId,
};
use tokio::sync::{Mutex, RwLock};
use tracing::warn;
use vodozemac::Curve25519PublicKey;

use super::{
    caches::DeviceStore, Account, BackupKeys, Changes, CryptoStore, DehydratedDeviceKey,
    InboundGroupSession, PendingChanges, RoomKeyCounts, RoomSettings, Session,
};
use crate::{
    gossiping::{GossipRequest, GossippedSecret, SecretInfo},
    identities::{DeviceData, UserIdentityData},
    olm::{
        OutboundGroupSession, PickledAccount, PickledInboundGroupSession, PickledSession,
        PrivateCrossSigningIdentity, SenderDataType, StaticAccountData,
    },
    types::events::room_key_withheld::RoomKeyWithheldEvent,
    TrackedUser,
};

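/// Encode a [`SecretInfo`] into a flat string, usable as a map key.
///
/// For a room key request this concatenates the room id, algorithm and
/// session id; for a secret request it is the secret name itself.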
fn encode_key_info(info: &SecretInfo) -> String {
    match info {
        SecretInfo::KeyRequest(info) => {
            format!("{}{}{}", info.room_id(), info.algorithm(), info.session_id())
        }
        SecretInfo::SecretRequest(i) => i.as_ref().to_owned(),
    }
}

type SessionId = String;

/// The "version" of a backup - newtype wrapper around a String.
#[derive(Clone, Debug, PartialEq)]
struct BackupVersion(String);

impl BackupVersion {
    fn from(s: &str) -> Self {
        Self(s.to_owned())
    }

    fn as_str(&self) -> &str {
        &self.0
    }
}

/// An in-memory-only store that will forget all the E2EE keys once it's
/// dropped.
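///
/// A minimal usage sketch (test-style; it assumes `MemoryStore`, `Changes`
/// and the `CryptoStore` trait are reachable under
/// `matrix_sdk_crypto::store`, as the tests below suggest):
///
/// ```no_run
/// # use matrix_sdk_crypto::store::{Changes, CryptoStore, MemoryStore};
/// # async fn example() -> Result<(), std::convert::Infallible> {
/// let store = MemoryStore::new();
/// // Everything is held in in-memory maps, so all data is lost on drop.
/// store.save_changes(Changes::default()).await?;
/// assert!(store.load_account().await?.is_none());
/// # Ok(()) }
/// ```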
#[derive(Debug)]
pub struct MemoryStore {
    static_account: Arc<StdRwLock<Option<StaticAccountData>>>,

    account: StdRwLock<Option<String>>,
    // Map of sender_key to map of session_id to serialized pickle
    sessions: StdRwLock<BTreeMap<String, BTreeMap<String, String>>>,
    inbound_group_sessions: StdRwLock<BTreeMap<OwnedRoomId, HashMap<String, String>>>,

    /// Map of room id -> session id -> the version of the latest backup
    /// in which this session is stored. Equivalent to `backed_up_to` in
    /// [`IndexedDbCryptoStore`].
    inbound_group_sessions_backed_up_to:
        StdRwLock<HashMap<OwnedRoomId, HashMap<SessionId, BackupVersion>>>,

    outbound_group_sessions: StdRwLock<BTreeMap<OwnedRoomId, OutboundGroupSession>>,
    private_identity: StdRwLock<Option<PrivateCrossSigningIdentity>>,
    tracked_users: StdRwLock<HashMap<OwnedUserId, TrackedUser>>,
    olm_hashes: StdRwLock<HashMap<String, HashSet<String>>>,
    devices: DeviceStore,
    identities: StdRwLock<HashMap<OwnedUserId, String>>,
    outgoing_key_requests: StdRwLock<HashMap<OwnedTransactionId, GossipRequest>>,
    key_requests_by_info: StdRwLock<HashMap<String, OwnedTransactionId>>,
    direct_withheld_info: StdRwLock<HashMap<OwnedRoomId, HashMap<String, RoomKeyWithheldEvent>>>,
    custom_values: StdRwLock<HashMap<String, Vec<u8>>>,
    leases: StdRwLock<HashMap<String, (String, Instant)>>,
    secret_inbox: StdRwLock<HashMap<String, Vec<GossippedSecret>>>,
    backup_keys: RwLock<BackupKeys>,
    dehydrated_device_pickle_key: RwLock<Option<DehydratedDeviceKey>>,
    next_batch_token: RwLock<Option<String>>,
    room_settings: StdRwLock<HashMap<OwnedRoomId, RoomSettings>>,
    save_changes_lock: Arc<Mutex<()>>,
}

impl Default for MemoryStore {
    fn default() -> Self {
        MemoryStore {
            static_account: Default::default(),
            account: Default::default(),
            sessions: Default::default(),
            inbound_group_sessions: Default::default(),
            inbound_group_sessions_backed_up_to: Default::default(),
            outbound_group_sessions: Default::default(),
            private_identity: Default::default(),
            tracked_users: Default::default(),
            olm_hashes: Default::default(),
            devices: DeviceStore::new(),
            identities: Default::default(),
            outgoing_key_requests: Default::default(),
            key_requests_by_info: Default::default(),
            direct_withheld_info: Default::default(),
            custom_values: Default::default(),
            leases: Default::default(),
            backup_keys: Default::default(),
            dehydrated_device_pickle_key: Default::default(),
            secret_inbox: Default::default(),
            next_batch_token: Default::default(),
            room_settings: Default::default(),
            save_changes_lock: Default::default(),
        }
    }
}

impl MemoryStore {
    /// Create a new empty `MemoryStore`.
    pub fn new() -> Self {
        Self::default()
    }

    fn get_static_account(&self) -> Option<StaticAccountData> {
        self.static_account.read().clone()
    }

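    /// Add the given devices to the in-memory device cache, keyed by user
    /// id and device id.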
    pub(crate) fn save_devices(&self, devices: Vec<DeviceData>) {
        for device in devices {
            let _ = self.devices.add(device);
        }
    }

    fn delete_devices(&self, devices: Vec<DeviceData>) {
        for device in devices {
            let _ = self.devices.remove(device.user_id(), device.device_id());
        }
    }

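    /// Serialize the given pickled Olm sessions to JSON and store them,
    /// grouped by sender key and keyed by session id.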
    fn save_sessions(&self, sessions: Vec<(String, PickledSession)>) {
        let mut session_store = self.sessions.write();

        for (session_id, pickle) in sessions {
            let entry = session_store.entry(pickle.sender_key.to_base64()).or_default();

            // insert or replace if exists
            entry.insert(
                session_id,
                serde_json::to_string(&pickle).expect("Failed to serialize olm session"),
            );
        }
    }

    fn save_outbound_group_sessions(&self, sessions: Vec<OutboundGroupSession>) {
        self.outbound_group_sessions
            .write()
            .extend(sessions.into_iter().map(|s| (s.room_id().to_owned(), s)));
    }

    fn save_private_identity(&self, private_identity: Option<PrivateCrossSigningIdentity>) {
        *self.private_identity.write() = private_identity;
    }

    /// Return all the [`InboundGroupSession`]s we have, paired with the
    /// `backed_up_to` version for each one (`None` if it has not been
    /// backed up).
    async fn get_inbound_group_sessions_and_backed_up_to(
        &self,
    ) -> Result<Vec<(InboundGroupSession, Option<BackupVersion>)>> {
        let lookup = |s: &InboundGroupSession| {
            self.inbound_group_sessions_backed_up_to
                .read()
                .get(&s.room_id)?
                .get(s.session_id())
                .cloned()
        };

        Ok(self
            .get_inbound_group_sessions()
            .await?
            .into_iter()
            .map(|s| {
                let v = lookup(&s);
                (s, v)
            })
            .collect())
    }
}

type Result<T> = std::result::Result<T, Infallible>;

#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
impl CryptoStore for MemoryStore {
    type Error = Infallible;

    async fn load_account(&self) -> Result<Option<Account>> {
        let pickled_account: Option<PickledAccount> = self.account.read().as_ref().map(|acc| {
            serde_json::from_str(acc)
                .expect("Deserialization failed: invalid pickled account JSON format")
        });

        if let Some(pickle) = pickled_account {
            let account =
                Account::from_pickle(pickle).expect("From pickle failed: invalid pickle format");

            *self.static_account.write() = Some(account.static_data().clone());

            Ok(Some(account))
        } else {
            Ok(None)
        }
    }

    async fn load_identity(&self) -> Result<Option<PrivateCrossSigningIdentity>> {
        Ok(self.private_identity.read().clone())
    }

    async fn next_batch_token(&self) -> Result<Option<String>> {
        Ok(self.next_batch_token.read().await.clone())
    }

    async fn save_pending_changes(&self, changes: PendingChanges) -> Result<()> {
        let _guard = self.save_changes_lock.lock().await;

        let pickled_account = if let Some(account) = changes.account {
            *self.static_account.write() = Some(account.static_data().clone());
            Some(account.pickle())
        } else {
            None
        };

        *self.account.write() = pickled_account.map(|pickle| {
            serde_json::to_string(&pickle)
                .expect("Serialization failed: invalid pickled account JSON format")
        });

        Ok(())
    }

    async fn save_changes(&self, changes: Changes) -> Result<()> {
        let _guard = self.save_changes_lock.lock().await;

        let mut pickled_sessions: Vec<(String, PickledSession)> = Vec::new();
        for session in changes.sessions {
            let session_id = session.session_id().to_owned();
            let pickle = session.pickle().await;
            pickled_sessions.push((session_id, pickle));
        }
        self.save_sessions(pickled_sessions);

        self.save_inbound_group_sessions(changes.inbound_group_sessions, None).await?;
        self.save_outbound_group_sessions(changes.outbound_group_sessions);
        self.save_private_identity(changes.private_identity);

        self.save_devices(changes.devices.new);
        self.save_devices(changes.devices.changed);
        self.delete_devices(changes.devices.deleted);

        {
            let mut identities = self.identities.write();
            for identity in changes.identities.new.into_iter().chain(changes.identities.changed) {
                identities.insert(
                    identity.user_id().to_owned(),
                    serde_json::to_string(&identity)
                        .expect("UserIdentityData should always serialize to json"),
                );
            }
        }

        {
            let mut olm_hashes = self.olm_hashes.write();
            for hash in changes.message_hashes {
                olm_hashes.entry(hash.sender_key.to_owned()).or_default().insert(hash.hash.clone());
            }
        }

        {
            let mut outgoing_key_requests = self.outgoing_key_requests.write();
            let mut key_requests_by_info = self.key_requests_by_info.write();

            for key_request in changes.key_requests {
                let id = key_request.request_id.clone();
                let info_string = encode_key_info(&key_request.info);

                outgoing_key_requests.insert(id.clone(), key_request);
                key_requests_by_info.insert(info_string, id);
            }
        }

        if let Some(key) = changes.backup_decryption_key {
            self.backup_keys.write().await.decryption_key = Some(key);
        }

        if let Some(version) = changes.backup_version {
            self.backup_keys.write().await.backup_version = Some(version);
        }

        if let Some(pickle_key) = changes.dehydrated_device_pickle_key {
            let mut lock = self.dehydrated_device_pickle_key.write().await;
            *lock = Some(pickle_key);
        }

        {
            let mut secret_inbox = self.secret_inbox.write();
            for secret in changes.secrets {
                secret_inbox.entry(secret.secret_name.to_string()).or_default().push(secret);
            }
        }

        {
            let mut direct_withheld_info = self.direct_withheld_info.write();
            for (room_id, data) in changes.withheld_session_info {
                for (session_id, event) in data {
                    direct_withheld_info
                        .entry(room_id.to_owned())
                        .or_default()
                        .insert(session_id, event);
                }
            }
        }

        if let Some(next_batch_token) = changes.next_batch_token {
            *self.next_batch_token.write().await = Some(next_batch_token);
        }

        if !changes.room_settings.is_empty() {
            let mut settings = self.room_settings.write();
            settings.extend(changes.room_settings);
        }

        Ok(())
    }

    async fn save_inbound_group_sessions(
        &self,
        sessions: Vec<InboundGroupSession>,
        backed_up_to_version: Option<&str>,
    ) -> Result<()> {
        for session in sessions {
            let room_id = session.room_id();
            let session_id = session.session_id();

            // Sanity-check that the data in the sessions corresponds to
            // `backed_up_to_version`.
            let backed_up = session.backed_up();
            if backed_up != backed_up_to_version.is_some() {
                warn!(
                    backed_up,
                    backed_up_to_version,
                    "Session backed-up flag does not correspond to backup version setting",
                );
            }

            if let Some(backup_version) = backed_up_to_version {
                self.inbound_group_sessions_backed_up_to
                    .write()
                    .entry(room_id.to_owned())
                    .or_default()
                    .insert(session_id.to_owned(), BackupVersion::from(backup_version));
            }

            let pickle = session.pickle().await;
            self.inbound_group_sessions
                .write()
                .entry(session.room_id().to_owned())
                .or_default()
                .insert(
                    session.session_id().to_owned(),
                    serde_json::to_string(&pickle)
                        .expect("Pickled session data should serialize to json"),
                );
        }
        Ok(())
    }

    async fn get_sessions(&self, sender_key: &str) -> Result<Option<Vec<Session>>> {
        let device_keys = self.get_own_device().await?.as_device_keys().clone();

        if let Some(pickles) = self.sessions.read().get(sender_key) {
            let mut sessions: Vec<Session> = Vec::new();
            for serialized_pickle in pickles.values() {
                let pickle: PickledSession = serde_json::from_str(serialized_pickle.as_str())
                    .expect("Pickled session deserialization should work");
                let session = Session::from_pickle(device_keys.clone(), pickle)
                    .expect("Expect from pickle to always work");
                sessions.push(session);
            }
            Ok(Some(sessions))
        } else {
            Ok(None)
        }
    }

    async fn get_inbound_group_session(
        &self,
        room_id: &RoomId,
        session_id: &str,
    ) -> Result<Option<InboundGroupSession>> {
        let pickle: Option<PickledInboundGroupSession> = self
            .inbound_group_sessions
            .read()
            .get(room_id)
            .and_then(|m| m.get(session_id))
            .and_then(|ser| {
                serde_json::from_str(ser).expect("Pickle deserialization should work")
            });

        Ok(pickle.map(|p| {
            InboundGroupSession::from_pickle(p).expect("Expect from pickle to always work")
        }))
    }

    async fn get_withheld_info(
        &self,
        room_id: &RoomId,
        session_id: &str,
    ) -> Result<Option<RoomKeyWithheldEvent>> {
        Ok(self
            .direct_withheld_info
            .read()
            .get(room_id)
            .and_then(|e| e.get(session_id).cloned()))
    }

    async fn get_inbound_group_sessions(&self) -> Result<Vec<InboundGroupSession>> {
        let inbounds = self
            .inbound_group_sessions
            .read()
            .values()
            .flat_map(HashMap::values)
            .map(|ser| {
                let pickle: PickledInboundGroupSession =
                    serde_json::from_str(ser).expect("Pickle deserialization should work");
                InboundGroupSession::from_pickle(pickle).expect("Expect from pickle to always work")
            })
            .collect();
        Ok(inbounds)
    }

    async fn inbound_group_session_counts(
        &self,
        backup_version: Option<&str>,
    ) -> Result<RoomKeyCounts> {
        let backed_up = if let Some(backup_version) = backup_version {
            self.get_inbound_group_sessions_and_backed_up_to()
                .await?
                .into_iter()
                // Count the sessions backed up in the required backup
                .filter(|(_, o)| o.as_ref().is_some_and(|o| o.as_str() == backup_version))
                .count()
        } else {
            // We were asked about a nonexistent backup version - this doesn't
            // make much sense, but we can easily answer that nothing is backed
            // up in this nonexistent backup.
            0
        };

        let total = self.inbound_group_sessions.read().values().map(HashMap::len).sum();
        Ok(RoomKeyCounts { total, backed_up })
    }

    async fn get_inbound_group_sessions_for_device_batch(
        &self,
        sender_key: Curve25519PublicKey,
        sender_data_type: SenderDataType,
        after_session_id: Option<String>,
        limit: usize,
    ) -> Result<Vec<InboundGroupSession>> {
        // First, find all InboundGroupSessions, filtering for those that match the
        // device and sender_data type.
        let mut sessions: Vec<_> = self
            .get_inbound_group_sessions()
            .await?
            .into_iter()
            .filter(|session: &InboundGroupSession| {
                session.creator_info.curve25519_key == sender_key
                    && session.sender_data.to_type() == sender_data_type
            })
            .collect();

        // Then, sort the sessions in order of ascending session ID...
        sessions.sort_by_key(|s| s.session_id().to_owned());

        // Figure out where in the array to start returning results from
        let start_index = {
            match after_session_id {
                None => 0,
                Some(id) => {
                    // We're looking for the first session with a session ID strictly after `id`; if
                    // there are none, the end of the array.
                    sessions
                        .iter()
                        .position(|session| session.session_id() > id.as_str())
                        .unwrap_or(sessions.len())
                }
            }
        };

        // Return up to `limit` items from the array, starting from `start_index`
        Ok(sessions.drain(start_index..).take(limit).collect())
    }

    async fn inbound_group_sessions_for_backup(
        &self,
        backup_version: &str,
        limit: usize,
    ) -> Result<Vec<InboundGroupSession>> {
        Ok(self
            .get_inbound_group_sessions_and_backed_up_to()
            .await?
            .into_iter()
            .filter_map(|(session, backed_up_to)| {
                if let Some(ref existing_version) = backed_up_to {
                    if existing_version.as_str() == backup_version {
                        // This session is already backed up in the required backup
                        return None;
                    }
                }
                // It's not backed up, or it's backed up in a different backup
                Some(session)
            })
            .take(limit)
            .collect())
    }

    async fn mark_inbound_group_sessions_as_backed_up(
        &self,
        backup_version: &str,
        room_and_session_ids: &[(&RoomId, &str)],
    ) -> Result<()> {
        for &(room_id, session_id) in room_and_session_ids {
            let session = self.get_inbound_group_session(room_id, session_id).await?;

            if let Some(session) = session {
                session.mark_as_backed_up();

                self.inbound_group_sessions_backed_up_to
                    .write()
                    .entry(room_id.to_owned())
                    .or_default()
                    .insert(session_id.to_owned(), BackupVersion::from(backup_version));

                // Save it back
                let updated_pickle = session.pickle().await;

                self.inbound_group_sessions.write().entry(room_id.to_owned()).or_default().insert(
                    session_id.to_owned(),
                    serde_json::to_string(&updated_pickle)
                        .expect("Pickle serialization should work"),
                );
            }
        }

        Ok(())
    }

    async fn reset_backup_state(&self) -> Result<()> {
        // Nothing to do here: we remember which backup version each session
        // was backed up to in `mark_inbound_group_sessions_as_backed_up`, and
        // the required version is passed in to
        // `inbound_group_sessions_for_backup`, so we can simply compare
        // against the version we stored.

        Ok(())
    }

    async fn load_backup_keys(&self) -> Result<BackupKeys> {
        Ok(self.backup_keys.read().await.to_owned())
    }

    async fn load_dehydrated_device_pickle_key(&self) -> Result<Option<DehydratedDeviceKey>> {
        Ok(self.dehydrated_device_pickle_key.read().await.to_owned())
    }

    async fn delete_dehydrated_device_pickle_key(&self) -> Result<()> {
        let mut lock = self.dehydrated_device_pickle_key.write().await;
        *lock = None;
        Ok(())
    }

    async fn get_outbound_group_session(
        &self,
        room_id: &RoomId,
    ) -> Result<Option<OutboundGroupSession>> {
        Ok(self.outbound_group_sessions.read().get(room_id).cloned())
    }

    async fn load_tracked_users(&self) -> Result<Vec<TrackedUser>> {
        Ok(self.tracked_users.read().values().cloned().collect())
    }

    async fn save_tracked_users(&self, tracked_users: &[(&UserId, bool)]) -> Result<()> {
        self.tracked_users.write().extend(tracked_users.iter().map(|(user_id, dirty)| {
            let user_id: OwnedUserId = user_id.to_owned().into();
            (user_id.clone(), TrackedUser { user_id, dirty: *dirty })
        }));
        Ok(())
    }

    async fn get_device(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
    ) -> Result<Option<DeviceData>> {
        Ok(self.devices.get(user_id, device_id))
    }

    async fn get_user_devices(
        &self,
        user_id: &UserId,
    ) -> Result<HashMap<OwnedDeviceId, DeviceData>> {
        Ok(self.devices.user_devices(user_id))
    }

    async fn get_own_device(&self) -> Result<DeviceData> {
        let account =
            self.get_static_account().expect("Expect account to exist when getting own device");

        Ok(self
            .devices
            .get(&account.user_id, &account.device_id)
            .expect("Invalid state: Should always have our own device"))
    }

    async fn get_user_identity(&self, user_id: &UserId) -> Result<Option<UserIdentityData>> {
        let serialized = self.identities.read().get(user_id).cloned();
        match serialized {
            None => Ok(None),
            Some(serialized) => {
                let id: UserIdentityData = serde_json::from_str(serialized.as_str())
                    .expect("Only valid serialized identities are saved");
                Ok(Some(id))
            }
        }
    }

    async fn is_message_known(&self, message_hash: &crate::olm::OlmMessageHash) -> Result<bool> {
        Ok(self
            .olm_hashes
            .write()
            .entry(message_hash.sender_key.to_owned())
            .or_default()
            .contains(&message_hash.hash))
    }

    async fn get_outgoing_secret_requests(
        &self,
        request_id: &TransactionId,
    ) -> Result<Option<GossipRequest>> {
        Ok(self.outgoing_key_requests.read().get(request_id).cloned())
    }

    async fn get_secret_request_by_info(
        &self,
        key_info: &SecretInfo,
    ) -> Result<Option<GossipRequest>> {
        let key_info_string = encode_key_info(key_info);

        Ok(self
            .key_requests_by_info
            .read()
            .get(&key_info_string)
            .and_then(|i| self.outgoing_key_requests.read().get(i).cloned()))
    }

    async fn get_unsent_secret_requests(&self) -> Result<Vec<GossipRequest>> {
        Ok(self
            .outgoing_key_requests
            .read()
            .values()
            .filter(|req| !req.sent_out)
            .cloned()
            .collect())
    }

    async fn delete_outgoing_secret_requests(&self, request_id: &TransactionId) -> Result<()> {
        let req = self.outgoing_key_requests.write().remove(request_id);
        if let Some(i) = req {
            let key_info_string = encode_key_info(&i.info);
            self.key_requests_by_info.write().remove(&key_info_string);
        }

        Ok(())
    }

    async fn get_secrets_from_inbox(
        &self,
        secret_name: &SecretName,
    ) -> Result<Vec<GossippedSecret>> {
        Ok(self.secret_inbox.write().entry(secret_name.to_string()).or_default().to_owned())
    }

    async fn delete_secrets_from_inbox(&self, secret_name: &SecretName) -> Result<()> {
        self.secret_inbox.write().remove(secret_name.as_str());

        Ok(())
    }

    async fn get_room_settings(&self, room_id: &RoomId) -> Result<Option<RoomSettings>> {
        Ok(self.room_settings.read().get(room_id).cloned())
    }

    async fn get_custom_value(&self, key: &str) -> Result<Option<Vec<u8>>> {
        Ok(self.custom_values.read().get(key).cloned())
    }

    async fn set_custom_value(&self, key: &str, value: Vec<u8>) -> Result<()> {
        self.custom_values.write().insert(key.to_owned(), value);
        Ok(())
    }

    async fn remove_custom_value(&self, key: &str) -> Result<()> {
        self.custom_values.write().remove(key);
        Ok(())
    }

    async fn try_take_leased_lock(
        &self,
        lease_duration_ms: u32,
        key: &str,
        holder: &str,
    ) -> Result<bool> {
        Ok(try_take_leased_lock(&mut self.leases.write(), lease_duration_ms, key, holder))
    }
}

#[cfg(test)]
mod tests {
    use std::collections::HashMap;

    use matrix_sdk_test::async_test;
    use ruma::{room_id, user_id, RoomId};
    use vodozemac::{Curve25519PublicKey, Ed25519PublicKey};

    use super::SessionId;
    use crate::{
        identities::device::testing::get_device,
        olm::{
            tests::get_account_and_session_test_helper, Account, InboundGroupSession,
            OlmMessageHash, PrivateCrossSigningIdentity, SenderData,
        },
        store::{memorystore::MemoryStore, Changes, CryptoStore, DeviceChanges, PendingChanges},
        DeviceData,
    };

    #[async_test]
    async fn test_session_store() {
        let (account, session) = get_account_and_session_test_helper();
        let own_device = DeviceData::from_account(&account);
        let store = MemoryStore::new();

        assert!(store.load_account().await.unwrap().is_none());

        store
            .save_changes(Changes {
                devices: DeviceChanges { new: vec![own_device], ..Default::default() },
                ..Default::default()
            })
            .await
            .unwrap();
        store.save_pending_changes(PendingChanges { account: Some(account) }).await.unwrap();

        store
            .save_changes(Changes { sessions: vec![session.clone()], ..Default::default() })
            .await
            .unwrap();

        let sessions = store.get_sessions(&session.sender_key.to_base64()).await.unwrap().unwrap();

        let loaded_session = &sessions[0];

        assert_eq!(&session, loaded_session);
    }

    #[async_test]
    async fn test_inbound_group_session_store() {
        let (account, _) = get_account_and_session_test_helper();
        let room_id = room_id!("!test:localhost");
        let curve_key = "Nn0L2hkcCMFKqynTjyGsJbth7QrVmX3lbrksMkrGOAw";

        let (outbound, _) = account.create_group_session_pair_with_defaults(room_id).await;
        let inbound = InboundGroupSession::new(
            Curve25519PublicKey::from_base64(curve_key).unwrap(),
            Ed25519PublicKey::from_base64("ee3Ek+J2LkkPmjGPGLhMxiKnhiX//xcqaVL4RP6EypE").unwrap(),
            room_id,
            &outbound.session_key().await,
            SenderData::unknown(),
            outbound.settings().algorithm.to_owned(),
            None,
        )
        .unwrap();

        let store = MemoryStore::new();
        store.save_inbound_group_sessions(vec![inbound.clone()], None).await.unwrap();

        let loaded_session =
            store.get_inbound_group_session(room_id, outbound.session_id()).await.unwrap().unwrap();
        assert_eq!(inbound, loaded_session);
    }

    #[async_test]
    async fn test_backing_up_marks_sessions_as_backed_up() {
        // Given there are 2 sessions
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(2, room_id).await;

        // When I mark them as backed up
        mark_backed_up(&store, room_id, "bkp1", &sessions).await;

        // Then their backed_up_to field is set
        let but = backed_up_tos(&store).await;
        assert_eq!(but[sessions[0].session_id()], "bkp1");
        assert_eq!(but[sessions[1].session_id()], "bkp1");
    }

    #[async_test]
    async fn test_backing_up_a_second_set_of_sessions_updates_their_backup_order() {
        // Given there are 3 sessions
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(3, room_id).await;

        // When I mark 0 and 1 as backed up in bkp1
        mark_backed_up(&store, room_id, "bkp1", &sessions[..2]).await;

        // And 1 and 2 as backed up in bkp2
        mark_backed_up(&store, room_id, "bkp2", &sessions[1..]).await;

        // Then 0 is backed up in bkp1, and 1 and 2 are backed up in bkp2
        let but = backed_up_tos(&store).await;
        assert_eq!(but[sessions[0].session_id()], "bkp1");
        assert_eq!(but[sessions[1].session_id()], "bkp2");
        assert_eq!(but[sessions[2].session_id()], "bkp2");
    }

    #[async_test]
    async fn test_backing_up_again_to_the_same_version_has_no_effect() {
        // Given there are 3 sessions
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(3, room_id).await;

        // When I mark the first two as backed up in the first backup
        mark_backed_up(&store, room_id, "bkp1", &sessions[..2]).await;

        // And the last 2 as backed up in the same backup version
        mark_backed_up(&store, room_id, "bkp1", &sessions[1..]).await;

        // Then they all get the same backed_up_to value
        let but = backed_up_tos(&store).await;
        assert_eq!(but[sessions[0].session_id()], "bkp1");
        assert_eq!(but[sessions[1].session_id()], "bkp1");
        assert_eq!(but[sessions[2].session_id()], "bkp1");
    }

    #[async_test]
    async fn test_backing_up_to_an_old_backup_version_can_increase_backed_up_to() {
        // Given we have backed up some sessions to 2 backup versions, an older and a
        // newer
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "older_bkp", &sessions[..2]).await;
        mark_backed_up(&store, room_id, "newer_bkp", &sessions[1..2]).await;

        // When I ask to back up the un-backed-up ones to the older backup
        mark_backed_up(&store, room_id, "older_bkp", &sessions[2..]).await;

        // Then each session lists the backup it was most recently included in
        let but = backed_up_tos(&store).await;
        assert_eq!(but[sessions[0].session_id()], "older_bkp");
        assert_eq!(but[sessions[1].session_id()], "newer_bkp");
        assert_eq!(but[sessions[2].session_id()], "older_bkp");
        assert_eq!(but[sessions[3].session_id()], "older_bkp");
    }

    #[async_test]
    async fn test_backing_up_to_an_old_backup_version_overwrites_a_newer_one() {
        // Given we have backed up to 2 backup versions, an older and a newer
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "older_bkp", &sessions).await;
        // Sanity: they are backed up in older_bkp
        assert_eq!(backed_up_tos(&store).await[sessions[0].session_id()], "older_bkp");
        mark_backed_up(&store, room_id, "newer_bkp", &sessions).await;
        // Sanity: they are backed up in newer_bkp
        assert_eq!(backed_up_tos(&store).await[sessions[0].session_id()], "newer_bkp");

        // When I ask to back up some to the older version
        mark_backed_up(&store, room_id, "older_bkp", &sessions[..2]).await;

        // Then the older backup overwrites: we don't consider the order here at all
        let but = backed_up_tos(&store).await;
        assert_eq!(but[sessions[0].session_id()], "older_bkp");
        assert_eq!(but[sessions[1].session_id()], "older_bkp");
        assert_eq!(but[sessions[2].session_id()], "newer_bkp");
        assert_eq!(but[sessions[3].session_id()], "newer_bkp");
    }

    #[async_test]
    async fn test_not_backed_up_sessions_are_eligible_for_backup() {
        // Given there are 4 sessions, 2 of which are already backed up
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "bkp1", &sessions[..2]).await;

        // When I ask which to back up
        let mut to_backup = store
            .inbound_group_sessions_for_backup("bkp1", 10)
            .await
            .expect("Failed to ask for sessions to backup");
        to_backup.sort_by_key(|s| s.session_id().to_owned());

        // Then I am told the last 2 only
        assert_eq!(to_backup, &[sessions[2].clone(), sessions[3].clone()]);
    }

    #[async_test]
    async fn test_all_sessions_are_eligible_for_backup_if_version_is_unknown() {
        // Given there are 4 sessions, 2 of which are already backed up in bkp1
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "bkp1", &sessions[..2]).await;

        // When I ask which to back up in an unknown version
        let mut to_backup = store
            .inbound_group_sessions_for_backup("unknown_bkp", 10)
            .await
            .expect("Failed to ask for sessions to backup");
        to_backup.sort_by_key(|s| s.session_id().to_owned());

        // Then I am told to back up all of them
        assert_eq!(
            to_backup,
            &[sessions[0].clone(), sessions[1].clone(), sessions[2].clone(), sessions[3].clone()]
        );
    }

    #[async_test]
    async fn test_sessions_backed_up_to_a_later_version_are_eligible_for_backup() {
        // Given there are 4 sessions, some backed up to three different versions
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "bkp0", &sessions[..1]).await;
        mark_backed_up(&store, room_id, "bkp1", &sessions[1..2]).await;
        mark_backed_up(&store, room_id, "bkp2", &sessions[2..3]).await;

        // When I ask which to back up in the middle version
        let mut to_backup = store
            .inbound_group_sessions_for_backup("bkp1", 10)
            .await
            .expect("Failed to ask for sessions to backup");
        to_backup.sort_by_key(|s| s.session_id().to_owned());

        // Then I am told to back up everything not in the version I asked about
        assert_eq!(
            to_backup,
            &[
                sessions[0].clone(), // Backed up in bkp0
                // sessions[1] is backed up in bkp1 already, which we asked about
                sessions[2].clone(), // Backed up in bkp2
                sessions[3].clone(), // Not backed up
            ]
        );
    }

    #[async_test]
    async fn test_outbound_group_session_store() {
        // Given an outbound session
        let (account, _) = get_account_and_session_test_helper();
        let room_id = room_id!("!test:localhost");
        let (outbound, _) = account.create_group_session_pair_with_defaults(room_id).await;

        // When we save it to the store
        let store = MemoryStore::new();
        store.save_outbound_group_sessions(vec![outbound.clone()]);

        // Then we can get it out again
        let loaded_session = store.get_outbound_group_session(room_id).await.unwrap().unwrap();
        assert_eq!(
            serde_json::to_string(&outbound.pickle().await).unwrap(),
            serde_json::to_string(&loaded_session.pickle().await).unwrap()
        );
    }

    #[async_test]
    async fn test_tracked_users_are_stored_once_per_user_id() {
        // Given a store containing 2 tracked users, both dirty
        let user1 = user_id!("@user1:s");
        let user2 = user_id!("@user2:s");
        let user3 = user_id!("@user3:s");
        let store = MemoryStore::new();
        store.save_tracked_users(&[(user1, true), (user2, true)]).await.unwrap();

        // When we mark one as clean and add another
        store.save_tracked_users(&[(user2, false), (user3, false)]).await.unwrap();

        // Then we can get them out again and their dirty flags are correct
        let loaded_tracked_users =
            store.load_tracked_users().await.expect("failed to load tracked users");

        let tracked_contains = |user_id, dirty| {
            loaded_tracked_users.iter().any(|u| u.user_id == user_id && u.dirty == dirty)
        };

        assert!(tracked_contains(user1, true));
        assert!(tracked_contains(user2, false));
        assert!(tracked_contains(user3, false));
        assert_eq!(loaded_tracked_users.len(), 3);
    }

    #[async_test]
    async fn test_private_identity_store() {
        // Given a private identity
        let private_identity = PrivateCrossSigningIdentity::empty(user_id!("@u:s"));

        // When we save it to the store
        let store = MemoryStore::new();
        store.save_private_identity(Some(private_identity.clone()));

        // Then we can get it out again
        let loaded_identity =
            store.load_identity().await.expect("failed to load private identity").unwrap();

        assert_eq!(loaded_identity.user_id(), user_id!("@u:s"));
    }

    #[async_test]
    async fn test_device_store() {
        let device = get_device();
        let store = MemoryStore::new();

        store.save_devices(vec![device.clone()]);

        let loaded_device =
            store.get_device(device.user_id(), device.device_id()).await.unwrap().unwrap();

        assert_eq!(device, loaded_device);

        let user_devices = store.get_user_devices(device.user_id()).await.unwrap();

        assert_eq!(&**user_devices.keys().next().unwrap(), device.device_id());
        assert_eq!(user_devices.values().next().unwrap(), &device);

        let loaded_device = user_devices.get(device.device_id()).unwrap();

        assert_eq!(&device, loaded_device);

        store.delete_devices(vec![device.clone()]);
        assert!(store.get_device(device.user_id(), device.device_id()).await.unwrap().is_none());
    }

    #[async_test]
    async fn test_message_hash() {
        let store = MemoryStore::new();

        let hash =
            OlmMessageHash { sender_key: "test_sender".to_owned(), hash: "test_hash".to_owned() };

        let mut changes = Changes::default();
        changes.message_hashes.push(hash.clone());

        assert!(!store.is_message_known(&hash).await.unwrap());
        store.save_changes(changes).await.unwrap();
        assert!(store.is_message_known(&hash).await.unwrap());
    }

    #[async_test]
    async fn test_key_counts_of_empty_store_are_zero() {
        // Given an empty store
        let store = MemoryStore::new();

        // When we count keys
        let key_counts = store.inbound_group_session_counts(Some("")).await.unwrap();

        // Then the answer is zero
        assert_eq!(key_counts.total, 0);
        assert_eq!(key_counts.backed_up, 0);
    }

    #[async_test]
    async fn test_counting_sessions_reports_the_number_of_sessions() {
        // Given a store with sessions
        let room_id = room_id!("!test:localhost");
        let (store, _) = store_with_sessions(4, room_id).await;

        // When we count keys
        let key_counts = store.inbound_group_session_counts(Some("bkp")).await.unwrap();

        // Then the answer equals the number of sessions we created
        assert_eq!(key_counts.total, 4);
        // And none are backed up
        assert_eq!(key_counts.backed_up, 0);
    }

    #[async_test]
    async fn test_counting_backed_up_sessions_reports_the_number_backed_up_in_this_backup() {
        // Given a store with sessions, some backed up
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(5, room_id).await;
        mark_backed_up(&store, room_id, "bkp", &sessions[..2]).await;

        // When we count keys
        let key_counts = store.inbound_group_session_counts(Some("bkp")).await.unwrap();

        // Then the answer equals the number of sessions we created
        assert_eq!(key_counts.total, 5);
        // And the backed_up count matches how many were backed up
        assert_eq!(key_counts.backed_up, 2);
    }

    #[async_test]
    async fn test_counting_backed_up_sessions_for_null_backup_reports_zero() {
        // Given a store with sessions, some backed up
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "bkp", &sessions[..2]).await;

        // When we count keys, providing None as the backup version
        let key_counts = store.inbound_group_session_counts(None).await.unwrap();

        // Then we ignore everything and just say zero
        assert_eq!(key_counts.backed_up, 0);
    }

    #[async_test]
    async fn test_counting_backed_up_sessions_only_reports_sessions_in_the_version_specified() {
        // Given a store with sessions, backed up in several versions
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "bkp1", &sessions[..2]).await;
        mark_backed_up(&store, room_id, "bkp2", &sessions[3..]).await;

        // When we count keys for bkp2
        let key_counts = store.inbound_group_session_counts(Some("bkp2")).await.unwrap();

        // Then the backed_up count reflects how many were backed up in bkp2 only
        assert_eq!(key_counts.backed_up, 1);
    }

    /// Mark the supplied sessions as backed up in the supplied backup version
    async fn mark_backed_up(
        store: &MemoryStore,
        room_id: &RoomId,
        backup_version: &str,
        sessions: &[InboundGroupSession],
    ) {
        let rooms_and_ids: Vec<_> = sessions.iter().map(|s| (room_id, s.session_id())).collect();

        store
            .mark_inbound_group_sessions_as_backed_up(backup_version, &rooms_and_ids)
            .await
            .expect("Failed to mark sessions as backed up");
    }

    /// Create a MemoryStore containing the supplied number of sessions.
    ///
    /// Sessions are returned in alphabetical order of session id.
    async fn store_with_sessions(
        num_sessions: usize,
        room_id: &RoomId,
    ) -> (MemoryStore, Vec<InboundGroupSession>) {
        let (account, _) = get_account_and_session_test_helper();

        let mut sessions = Vec::with_capacity(num_sessions);
        for _ in 0..num_sessions {
            sessions.push(new_session(&account, room_id).await);
        }
        sessions.sort_by_key(|s| s.session_id().to_owned());

        let store = MemoryStore::new();
        store.save_inbound_group_sessions(sessions.clone(), None).await.unwrap();

        (store, sessions)
    }

    /// Create a new InboundGroupSession
    async fn new_session(account: &Account, room_id: &RoomId) -> InboundGroupSession {
        let curve_key = "Nn0L2hkcCMFKqynTjyGsJbth7QrVmX3lbrksMkrGOAw";
        let (outbound, _) = account.create_group_session_pair_with_defaults(room_id).await;

        InboundGroupSession::new(
            Curve25519PublicKey::from_base64(curve_key).unwrap(),
            Ed25519PublicKey::from_base64("ee3Ek+J2LkkPmjGPGLhMxiKnhiX//xcqaVL4RP6EypE").unwrap(),
            room_id,
            &outbound.session_key().await,
            SenderData::unknown(),
            outbound.settings().algorithm.to_owned(),
            None,
        )
        .unwrap()
    }

    /// Find the session_id and backed_up_to value for each of the sessions in
    /// the store.
    async fn backed_up_tos(store: &MemoryStore) -> HashMap<SessionId, String> {
        store
            .get_inbound_group_sessions_and_backed_up_to()
            .await
            .expect("Unable to get inbound group sessions and their backup versions")
            .iter()
            .map(|(s, o)| {
                (
                    s.session_id().to_owned(),
                    o.as_ref().map(|v| v.as_str().to_owned()).unwrap_or("".to_owned()),
                )
            })
            .collect()
    }
}

#[cfg(test)]
mod integration_tests {
    use std::{
        collections::HashMap,
        sync::{Arc, Mutex, OnceLock},
    };

    use async_trait::async_trait;
    use ruma::{
        events::secret::request::SecretName, DeviceId, OwnedDeviceId, RoomId, TransactionId, UserId,
    };
    use vodozemac::Curve25519PublicKey;

    use super::MemoryStore;
    use crate::{
        cryptostore_integration_tests, cryptostore_integration_tests_time,
        olm::{
            InboundGroupSession, OlmMessageHash, OutboundGroupSession, PrivateCrossSigningIdentity,
            SenderDataType, StaticAccountData,
        },
        store::{
            BackupKeys, Changes, CryptoStore, DehydratedDeviceKey, PendingChanges, RoomKeyCounts,
            RoomSettings,
        },
        types::events::room_key_withheld::RoomKeyWithheldEvent,
        Account, DeviceData, GossipRequest, GossippedSecret, SecretInfo, Session, TrackedUser,
        UserIdentityData,
    };

    /// Holds on to a MemoryStore during a test; a clone of the same store
    /// stays in STORES, so its data survives even after this handle is
    /// dropped.
    #[derive(Clone, Debug)]
    struct PersistentMemoryStore(Arc<MemoryStore>);

    impl PersistentMemoryStore {
        fn new() -> Self {
            Self(Arc::new(MemoryStore::new()))
        }

        fn get_static_account(&self) -> Option<StaticAccountData> {
            self.0.get_static_account()
        }
    }

    /// Return a clone of the store for the test with the supplied name. Note:
    /// dropping this store won't destroy its data, since
    /// [PersistentMemoryStore] is a reference-counted smart pointer
    /// to an underlying [MemoryStore].
    async fn get_store(
        name: &str,
        _passphrase: Option<&str>,
        clear_data: bool,
    ) -> PersistentMemoryStore {
        // Holds on to one [PersistentMemoryStore] per test, so even if the test drops
        // the store, we keep its data alive. This simulates the behaviour of
        // the other stores, which keep their data in a real DB, allowing us to
        // test MemoryStore using the same code.
        static STORES: OnceLock<Mutex<HashMap<String, PersistentMemoryStore>>> = OnceLock::new();
        let stores = STORES.get_or_init(|| Mutex::new(HashMap::new()));

        let mut stores = stores.lock().unwrap();

        if clear_data {
            // Create a new PersistentMemoryStore
            let new_store = PersistentMemoryStore::new();
            stores.insert(name.to_owned(), new_store.clone());
            new_store
        } else {
            stores.entry(name.to_owned()).or_insert_with(PersistentMemoryStore::new).clone()
        }
    }

    /// Forwards all methods to the underlying [MemoryStore].
    #[async_trait]
    impl CryptoStore for PersistentMemoryStore {
        type Error = <MemoryStore as CryptoStore>::Error;

        async fn load_account(&self) -> Result<Option<Account>, Self::Error> {
            self.0.load_account().await
        }

        async fn load_identity(&self) -> Result<Option<PrivateCrossSigningIdentity>, Self::Error> {
            self.0.load_identity().await
        }

        async fn save_changes(&self, changes: Changes) -> Result<(), Self::Error> {
            self.0.save_changes(changes).await
        }

        async fn save_pending_changes(&self, changes: PendingChanges) -> Result<(), Self::Error> {
            self.0.save_pending_changes(changes).await
        }

        async fn save_inbound_group_sessions(
            &self,
            sessions: Vec<InboundGroupSession>,
            backed_up_to_version: Option<&str>,
        ) -> Result<(), Self::Error> {
            self.0.save_inbound_group_sessions(sessions, backed_up_to_version).await
        }

        async fn get_sessions(
            &self,
            sender_key: &str,
        ) -> Result<Option<Vec<Session>>, Self::Error> {
            self.0.get_sessions(sender_key).await
        }

        async fn get_inbound_group_session(
            &self,
            room_id: &RoomId,
            session_id: &str,
        ) -> Result<Option<InboundGroupSession>, Self::Error> {
            self.0.get_inbound_group_session(room_id, session_id).await
        }

        async fn get_withheld_info(
            &self,
            room_id: &RoomId,
            session_id: &str,
        ) -> Result<Option<RoomKeyWithheldEvent>, Self::Error> {
            self.0.get_withheld_info(room_id, session_id).await
        }

        async fn get_inbound_group_sessions(
            &self,
        ) -> Result<Vec<InboundGroupSession>, Self::Error> {
            self.0.get_inbound_group_sessions().await
        }

        async fn inbound_group_session_counts(
            &self,
            backup_version: Option<&str>,
        ) -> Result<RoomKeyCounts, Self::Error> {
            self.0.inbound_group_session_counts(backup_version).await
        }

        async fn get_inbound_group_sessions_for_device_batch(
            &self,
            sender_key: Curve25519PublicKey,
            sender_data_type: SenderDataType,
            after_session_id: Option<String>,
            limit: usize,
        ) -> Result<Vec<InboundGroupSession>, Self::Error> {
            self.0
                .get_inbound_group_sessions_for_device_batch(
                    sender_key,
                    sender_data_type,
                    after_session_id,
                    limit,
                )
                .await
        }

        async fn inbound_group_sessions_for_backup(
            &self,
            backup_version: &str,
            limit: usize,
        ) -> Result<Vec<InboundGroupSession>, Self::Error> {
            self.0.inbound_group_sessions_for_backup(backup_version, limit).await
        }

        async fn mark_inbound_group_sessions_as_backed_up(
            &self,
            backup_version: &str,
            room_and_session_ids: &[(&RoomId, &str)],
        ) -> Result<(), Self::Error> {
            self.0
                .mark_inbound_group_sessions_as_backed_up(backup_version, room_and_session_ids)
                .await
        }

        async fn reset_backup_state(&self) -> Result<(), Self::Error> {
            self.0.reset_backup_state().await
        }

        async fn load_backup_keys(&self) -> Result<BackupKeys, Self::Error> {
            self.0.load_backup_keys().await
        }

        async fn load_dehydrated_device_pickle_key(
            &self,
        ) -> Result<Option<DehydratedDeviceKey>, Self::Error> {
            self.0.load_dehydrated_device_pickle_key().await
        }

        async fn delete_dehydrated_device_pickle_key(&self) -> Result<(), Self::Error> {
            self.0.delete_dehydrated_device_pickle_key().await
        }

        async fn get_outbound_group_session(
            &self,
            room_id: &RoomId,
        ) -> Result<Option<OutboundGroupSession>, Self::Error> {
            self.0.get_outbound_group_session(room_id).await
        }

        async fn load_tracked_users(&self) -> Result<Vec<TrackedUser>, Self::Error> {
            self.0.load_tracked_users().await
        }

        async fn save_tracked_users(&self, users: &[(&UserId, bool)]) -> Result<(), Self::Error> {
            self.0.save_tracked_users(users).await
        }

        async fn get_device(
            &self,
            user_id: &UserId,
            device_id: &DeviceId,
        ) -> Result<Option<DeviceData>, Self::Error> {
            self.0.get_device(user_id, device_id).await
        }

        async fn get_user_devices(
            &self,
            user_id: &UserId,
        ) -> Result<HashMap<OwnedDeviceId, DeviceData>, Self::Error> {
            self.0.get_user_devices(user_id).await
        }

        async fn get_own_device(&self) -> Result<DeviceData, Self::Error> {
            self.0.get_own_device().await
        }

        async fn get_user_identity(
            &self,
            user_id: &UserId,
        ) -> Result<Option<UserIdentityData>, Self::Error> {
            self.0.get_user_identity(user_id).await
        }

        async fn is_message_known(
            &self,
            message_hash: &OlmMessageHash,
        ) -> Result<bool, Self::Error> {
            self.0.is_message_known(message_hash).await
        }

        async fn get_outgoing_secret_requests(
            &self,
            request_id: &TransactionId,
        ) -> Result<Option<GossipRequest>, Self::Error> {
            self.0.get_outgoing_secret_requests(request_id).await
        }

        async fn get_secret_request_by_info(
            &self,
            secret_info: &SecretInfo,
        ) -> Result<Option<GossipRequest>, Self::Error> {
            self.0.get_secret_request_by_info(secret_info).await
        }

        async fn get_unsent_secret_requests(&self) -> Result<Vec<GossipRequest>, Self::Error> {
            self.0.get_unsent_secret_requests().await
        }

        async fn delete_outgoing_secret_requests(
            &self,
            request_id: &TransactionId,
        ) -> Result<(), Self::Error> {
            self.0.delete_outgoing_secret_requests(request_id).await
        }

        async fn get_secrets_from_inbox(
            &self,
            secret_name: &SecretName,
        ) -> Result<Vec<GossippedSecret>, Self::Error> {
            self.0.get_secrets_from_inbox(secret_name).await
        }

        async fn delete_secrets_from_inbox(
            &self,
            secret_name: &SecretName,
        ) -> Result<(), Self::Error> {
            self.0.delete_secrets_from_inbox(secret_name).await
        }

        async fn get_room_settings(
            &self,
            room_id: &RoomId,
        ) -> Result<Option<RoomSettings>, Self::Error> {
            self.0.get_room_settings(room_id).await
        }

        async fn get_custom_value(&self, key: &str) -> Result<Option<Vec<u8>>, Self::Error> {
            self.0.get_custom_value(key).await
        }

        async fn set_custom_value(&self, key: &str, value: Vec<u8>) -> Result<(), Self::Error> {
            self.0.set_custom_value(key, value).await
        }

        async fn remove_custom_value(&self, key: &str) -> Result<(), Self::Error> {
            self.0.remove_custom_value(key).await
        }

        async fn try_take_leased_lock(
            &self,
            lease_duration_ms: u32,
            key: &str,
            holder: &str,
        ) -> Result<bool, Self::Error> {
            self.0.try_take_leased_lock(lease_duration_ms, key, holder).await
        }

        async fn next_batch_token(&self) -> Result<Option<String>, Self::Error> {
            self.0.next_batch_token().await
        }
    }

    cryptostore_integration_tests!();
    cryptostore_integration_tests_time!();
}