matrix_sdk_crypto/session_manager/sessions.rs

// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{
    collections::{BTreeMap, BTreeSet},
    sync::Arc,
    time::Duration,
};

use matrix_sdk_common::{failures_cache::FailuresCache, locks::RwLock as StdRwLock};
use ruma::{
    api::client::keys::claim_keys::v3::{
        Request as KeysClaimRequest, Response as KeysClaimResponse,
    },
    assign,
    events::dummy::ToDeviceDummyEventContent,
    DeviceId, OneTimeKeyAlgorithm, OwnedDeviceId, OwnedOneTimeKeyId, OwnedServerName,
    OwnedTransactionId, OwnedUserId, SecondsSinceUnixEpoch, ServerName, TransactionId, UserId,
};
use tracing::{debug, error, info, instrument, warn};
use vodozemac::Curve25519PublicKey;

use crate::{
    error::OlmResult,
    gossiping::GossipMachine,
    store::{Changes, Result as StoreResult, Store},
    types::{
        events::EventType,
        requests::{OutgoingRequest, ToDeviceRequest},
        EventEncryptionAlgorithm,
    },
    DeviceData,
};

#[derive(Debug, Clone)]
pub(crate) struct SessionManager {
    store: Store,

    /// If there is an active /keys/claim request, its details.
    ///
    /// This is used when processing the response, so that we can spot missing
    /// users/devices.
    ///
    /// According to the doc on [`crate::OlmMachine::get_missing_sessions`],
    /// there should only be one such request active at a time, so we only need
    /// to keep a record of the most recent.
    current_key_claim_request: Arc<StdRwLock<Option<(OwnedTransactionId, KeysClaimRequest)>>>,

    /// A map of user/device pairs that we need to automatically claim keys
    /// for. Submodules can insert user/device pairs into this map and the
    /// pairs will be added to the list of users when
    /// [`get_missing_sessions`](#method.get_missing_sessions) is called.
    users_for_key_claim: Arc<StdRwLock<BTreeMap<OwnedUserId, BTreeSet<OwnedDeviceId>>>>,
    wedged_devices: Arc<StdRwLock<BTreeMap<OwnedUserId, BTreeSet<OwnedDeviceId>>>>,
    key_request_machine: GossipMachine,
    outgoing_to_device_requests: Arc<StdRwLock<BTreeMap<OwnedTransactionId, OutgoingRequest>>>,

    /// Servers that have previously appeared in the `failures` section of a
    /// `/keys/claim` response.
    ///
    /// See also [`crate::identities::IdentityManager::failures`].
    failures: FailuresCache<OwnedServerName>,

    failed_devices: Arc<StdRwLock<BTreeMap<OwnedUserId, FailuresCache<OwnedDeviceId>>>>,
}

impl SessionManager {
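    /// The timeout we set on `/keys/claim` requests: the time the homeserver
    /// should wait when downloading one-time keys from remote servers.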
    const KEY_CLAIM_TIMEOUT: Duration = Duration::from_secs(10);
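    /// How old the most recent Olm session with a device needs to be before
    /// we will create a new one to replace it when the device appears to be
    /// wedged.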
    const UNWEDGING_INTERVAL: Duration = Duration::from_secs(60 * 60);

    pub fn new(
        users_for_key_claim: Arc<StdRwLock<BTreeMap<OwnedUserId, BTreeSet<OwnedDeviceId>>>>,
        key_request_machine: GossipMachine,
        store: Store,
    ) -> Self {
        Self {
            store,
            current_key_claim_request: Default::default(),
            key_request_machine,
            users_for_key_claim,
            wedged_devices: Default::default(),
            outgoing_to_device_requests: Default::default(),
            failures: Default::default(),
            failed_devices: Default::default(),
        }
    }

    /// Mark the outgoing request as sent.
    pub fn mark_outgoing_request_as_sent(&self, id: &TransactionId) {
        self.outgoing_to_device_requests.write().remove(id);
    }

    pub async fn mark_device_as_wedged(
        &self,
        sender: &UserId,
        curve_key: Curve25519PublicKey,
    ) -> OlmResult<()> {
        if let Some(device) = self.store.get_device_from_curve_key(sender, curve_key).await? {
            if let Some(session) = device.get_most_recent_session().await? {
                info!(sender_key = ?curve_key, "Marking session to be unwedged");

                let creation_time = Duration::from_secs(session.creation_time.get().into());
                let now = Duration::from_secs(SecondsSinceUnixEpoch::now().get().into());

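                // Only mark the device for unwedging if the most recent
                // session is older than `UNWEDGING_INTERVAL`; if the creation
                // time appears to lie in the future, err on the side of
                // unwedging.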
                let should_unwedge = now
                    .checked_sub(creation_time)
                    .map(|elapsed| elapsed > Self::UNWEDGING_INTERVAL)
                    .unwrap_or(true);

                if should_unwedge {
                    self.users_for_key_claim
                        .write()
                        .entry(device.user_id().to_owned())
                        .or_default()
                        .insert(device.device_id().into());
                    self.wedged_devices
                        .write()
                        .entry(device.user_id().to_owned())
                        .or_default()
                        .insert(device.device_id().into());
                }
            }
        }

        Ok(())
    }

    #[allow(dead_code)]
    pub fn is_device_wedged(&self, device: &DeviceData) -> bool {
        self.wedged_devices
            .read()
            .get(device.user_id())
            .is_some_and(|d| d.contains(device.device_id()))
    }

    /// Check if the session was created to unwedge a Device.
    ///
    /// If the device was wedged this will queue up a dummy to-device message.
    async fn check_if_unwedged(&self, user_id: &UserId, device_id: &DeviceId) -> OlmResult<()> {
        if self.wedged_devices.write().get_mut(user_id).is_some_and(|d| d.remove(device_id)) {
            if let Some(device) = self.store.get_device(user_id, device_id).await? {
                let (_, content) =
                    device.encrypt("m.dummy", ToDeviceDummyEventContent::new()).await?;

                let request = ToDeviceRequest::new(
                    device.user_id(),
                    device.device_id().to_owned(),
                    content.event_type(),
                    content.cast(),
                );

                let request = OutgoingRequest {
                    request_id: request.txn_id.clone(),
                    request: Arc::new(request.into()),
                };

                self.outgoing_to_device_requests
                    .write()
                    .insert(request.request_id.clone(), request);
            }
        }

        Ok(())
    }

    /// Get a key claiming request for the user/device pairs that we are
    /// missing Olm sessions for.
    ///
    /// Returns `None` if no key claiming request needs to be sent out.
    ///
    /// Sessions need to be established between devices so group sessions for a
    /// room can be shared with them.
    ///
    /// This should be called every time a group session needs to be shared as
    /// well as between sync calls. After a sync some devices may request room
    /// keys without us having a valid Olm session with them, making it
    /// impossible to serve the room key request, so it's necessary to check
    /// for missing sessions between syncs as well.
    ///
    /// **Note**: Care should be taken that only one such request at a time is
    /// in flight, e.g. using a lock.
    ///
    /// The response to a successful key claiming request needs to be passed to
    /// the `OlmMachine` with [`receive_keys_claim_response`].
    ///
    /// # Arguments
    ///
    /// `users` - The list of users whose devices we should check for missing
    /// Olm sessions. This can be an empty iterator when calling this method
    /// between sync requests.
    ///
    /// [`receive_keys_claim_response`]: #method.receive_keys_claim_response
    pub async fn get_missing_sessions(
        &self,
        users: impl Iterator<Item = &UserId>,
    ) -> StoreResult<Option<(OwnedTransactionId, KeysClaimRequest)>> {
        let mut missing_session_devices_by_user: BTreeMap<_, BTreeMap<_, _>> = BTreeMap::new();
        let mut timed_out_devices_by_user: BTreeMap<_, BTreeSet<_>> = BTreeMap::new();

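        // Skip users on homeservers that recently failed a `/keys/claim`
        // request; those servers are in the failures cache and will be
        // retried once the cache entry expires.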
        let unfailed_users = users.filter(|u| !self.failures.contains(u.server_name()));

        // Get the current list of devices for each user.
        let devices_by_user = Box::pin(
            self.key_request_machine
                .identity_manager()
                .get_user_devices_for_encryption(unfailed_users),
        )
        .await?;

        #[derive(Debug, Default)]
        struct UserFailedDeviceInfo {
            non_olm_devices: BTreeMap<OwnedDeviceId, Vec<EventEncryptionAlgorithm>>,
            bad_key_devices: BTreeSet<OwnedDeviceId>,
        }

        let mut failed_devices_by_user: BTreeMap<_, UserFailedDeviceInfo> = BTreeMap::new();

        for (user_id, user_devices) in devices_by_user {
            for (device_id, device) in user_devices {
                if !device.supports_olm() {
                    failed_devices_by_user
                        .entry(user_id.clone())
                        .or_default()
                        .non_olm_devices
                        .insert(device_id, Vec::from(device.algorithms()));
                } else if let Some(sender_key) = device.curve25519_key() {
                    let sessions = self.store.get_sessions(&sender_key.to_base64()).await?;

                    let is_missing = if let Some(sessions) = sessions {
                        sessions.lock().await.is_empty()
                    } else {
                        true
                    };

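                    // A device is "timed out" if a previous `/keys/claim`
                    // request failed to return a one-time key for it; in that
                    // case we back off instead of claiming again right away.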
                    let is_timed_out = self.is_user_timed_out(&user_id, &device_id);

                    if is_missing && is_timed_out {
                        timed_out_devices_by_user
                            .entry(user_id.to_owned())
                            .or_default()
                            .insert(device_id);
                    } else if is_missing && !is_timed_out {
                        missing_session_devices_by_user
                            .entry(user_id.to_owned())
                            .or_default()
                            .insert(device_id, OneTimeKeyAlgorithm::SignedCurve25519);
                    }
                } else {
                    failed_devices_by_user
                        .entry(user_id.clone())
                        .or_default()
                        .bad_key_devices
                        .insert(device_id);
                }
            }
        }

        // Add the user/device pairs for which we need to automatically create
        // a new Olm session (e.g. wedged devices).
        for (user, device_ids) in self.users_for_key_claim.read().iter() {
            missing_session_devices_by_user.entry(user.to_owned()).or_default().extend(
                device_ids
                    .iter()
                    .map(|device_id| (device_id.clone(), OneTimeKeyAlgorithm::SignedCurve25519)),
            );
        }

        if tracing::level_enabled!(tracing::Level::DEBUG) {
            // Reformat the map to skip the encryption algorithm, which isn't very useful.
            let missing_session_devices_by_user = missing_session_devices_by_user
                .iter()
                .map(|(user_id, devices)| (user_id, devices.keys().collect::<BTreeSet<_>>()))
                .collect::<BTreeMap<_, _>>();
            debug!(
                ?missing_session_devices_by_user,
                ?timed_out_devices_by_user,
                "Collected user/device pairs that are missing an Olm session"
            );
        }

        if !failed_devices_by_user.is_empty() {
            warn!(
                ?failed_devices_by_user,
                "Can't establish an Olm session with some devices due to missing Olm support or bad keys",
            );
        }

        let result = if missing_session_devices_by_user.is_empty() {
            None
        } else {
            Some((
                TransactionId::new(),
                assign!(KeysClaimRequest::new(missing_session_devices_by_user), {
                    timeout: Some(Self::KEY_CLAIM_TIMEOUT),
                }),
            ))
        };

        // Stash the details of the request so that we can refer to them when
        // handling the response.
        *(self.current_key_claim_request.write()) = result.clone();
        Ok(result)
    }

    fn is_user_timed_out(&self, user_id: &UserId, device_id: &DeviceId) -> bool {
        self.failed_devices.read().get(user_id).is_some_and(|d| d.contains(device_id))
    }

    /// This method will try to figure out for which devices a one-time key was
    /// requested but is not present in the response.
    ///
    /// As per [spec], if a user/device pair does not have any one-time keys on
    /// the homeserver, the server will just omit the user/device pair from
    /// the response:
    ///
    /// > If the homeserver could be reached, but the user or device was
    /// > unknown, no failure is recorded. Instead, the corresponding user
    /// > or device is missing from the one_time_keys result.
    ///
    /// The user/device pairs which are missing from the response are going to
    /// be put in the failures cache so we don't retry claiming a one-time key
    /// right away the next time the user tries to send a message.
    ///
    /// [spec]: https://spec.matrix.org/unstable/client-server-api/#post_matrixclientv3keysclaim
    fn handle_otk_exhaustion_failure(
        &self,
        request_id: &TransactionId,
        failed_servers: &BTreeSet<OwnedServerName>,
        one_time_keys: &BTreeMap<
            &OwnedUserId,
            BTreeMap<&OwnedDeviceId, BTreeSet<&OwnedOneTimeKeyId>>,
        >,
    ) {
        // First check that the response is for the request we were expecting.
        let request = {
            let mut guard = self.current_key_claim_request.write();
            let expected_request_id = guard.as_ref().map(|e| e.0.as_ref());

            if Some(request_id) == expected_request_id {
                // We have a confirmed match. Clear the expectation, but hang onto the details
                // of the request.
                guard.take().map(|(_, request)| request)
            } else {
                warn!(
                    ?request_id,
                    ?expected_request_id,
                    "Received a `/keys/claim` response for the wrong request"
                );
                None
            }
        };

        // If we were able to pair this response with a request, look for devices that
        // were present in the request but did not elicit a successful response.
        if let Some(request) = request {
            let devices_in_response: BTreeSet<_> = one_time_keys
                .iter()
                .flat_map(|(user_id, device_key_map)| {
                    device_key_map
                        .keys()
                        .map(|device_id| (*user_id, *device_id))
                        .collect::<BTreeSet<_>>()
                })
                .collect();

            let devices_in_request: BTreeSet<(_, _)> = request
                .one_time_keys
                .iter()
                .flat_map(|(user_id, device_key_map)| {
                    device_key_map
                        .keys()
                        .map(|device_id| (user_id, device_id))
                        .collect::<BTreeSet<_>>()
                })
                .collect();

            let missing_devices: BTreeSet<_> = devices_in_request
                .difference(&devices_in_response)
                .filter(|(user_id, _)| {
                    // Skip over users whose homeservers were in the "failed servers" list: we don't
                    // want to mark individual devices as broken *as well as* the server.
                    !failed_servers.contains(user_id.server_name())
                })
                .collect();

            if !missing_devices.is_empty() {
                let mut missing_devices_by_user: BTreeMap<_, BTreeSet<_>> = BTreeMap::new();

                for &(user_id, device_id) in missing_devices {
                    missing_devices_by_user.entry(user_id).or_default().insert(device_id.clone());
                }

                warn!(
                    ?missing_devices_by_user,
                    "Tried to create new Olm sessions, but the signed one-time key was missing for some devices",
                );

                let mut failed_devices_lock = self.failed_devices.write();

                for (user_id, device_set) in missing_devices_by_user {
                    failed_devices_lock.entry(user_id.clone()).or_default().extend(device_set);
                }
            }
        };
    }

    /// Receive a successful key claim response and create new Olm sessions with
    /// the claimed keys.
    ///
    /// # Arguments
    ///
    /// * `request_id` - The unique ID of the request that was sent out. This is
    ///   needed to couple the response with the original request.
    ///
    /// * `response` - The response containing the claimed one-time keys.
    #[instrument(skip(self, response))]
    pub async fn receive_keys_claim_response(
        &self,
        request_id: &TransactionId,
        response: &KeysClaimResponse,
    ) -> OlmResult<()> {
        // Collect the (user_id, device_id, device_key_id) triples for logging purposes.
        let one_time_keys: BTreeMap<_, BTreeMap<_, BTreeSet<_>>> = response
            .one_time_keys
            .iter()
            .map(|(user_id, device_map)| {
                (
                    user_id,
                    device_map
                        .iter()
                        .map(|(device_id, key_map)| {
                            (device_id, key_map.keys().collect::<BTreeSet<_>>())
                        })
                        .collect::<BTreeMap<_, _>>(),
                )
            })
            .collect();

        debug!(?request_id, ?one_time_keys, failures = ?response.failures, "Received a `/keys/claim` response");

        // Collect all the servers in the `failures` field of the response.
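        // Our own server is deliberately excluded, so that it never ends up
        // in the failures cache.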
        let failed_servers: BTreeSet<_> = response
            .failures
            .keys()
            .filter_map(|s| ServerName::parse(s).ok())
            .filter(|s| s != self.store.static_account().user_id.server_name())
            .collect();
        let successful_servers = response.one_time_keys.keys().map(|u| u.server_name());

        // Add the user/device pairs that don't have any one-time keys to the failures
        // cache.
        self.handle_otk_exhaustion_failure(request_id, &failed_servers, &one_time_keys);
        // Add the failed servers to the failures cache.
        self.failures.extend(failed_servers);
        // Remove the servers we successfully contacted from the failures cache.
        self.failures.remove(successful_servers);

        // Finally, create some 1-to-1 sessions.
        self.create_sessions(response).await
    }

    /// Create new Olm sessions for the requested devices.
    ///
    /// # Arguments
    ///
    ///  * `response` - The `/keys/claim` response, containing a signed
    ///    one-time key for each device we should create a session with.
    pub(crate) async fn create_sessions(&self, response: &KeysClaimResponse) -> OlmResult<()> {
        struct SessionInfo {
            session_id: String,
            algorithm: EventEncryptionAlgorithm,
            fallback_key_used: bool,
        }

        #[cfg(not(tarpaulin_include))]
        impl std::fmt::Debug for SessionInfo {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                write!(
                    f,
                    "session_id: {}, algorithm: {}, fallback_key_used: {}",
                    self.session_id, self.algorithm, self.fallback_key_used
                )
            }
        }

        let mut changes = Changes::default();
        let mut new_sessions: BTreeMap<&UserId, BTreeMap<&DeviceId, SessionInfo>> = BTreeMap::new();
        let mut store_transaction = self.store.transaction().await;

        for (user_id, user_devices) in &response.one_time_keys {
            for (device_id, key_map) in user_devices {
                let device = match self.store.get_device_data(user_id, device_id).await {
                    Ok(Some(d)) => d,
                    Ok(None) => {
                        warn!(
                            ?user_id,
                            ?device_id,
                            "Tried to create an Olm session but the device is unknown",
                        );
                        continue;
                    }
                    Err(e) => {
                        warn!(
                            ?user_id, ?device_id, error = ?e,
                            "Tried to create an Olm session, but we can't \
                            fetch the device from the store",
                        );
                        continue;
                    }
                };

                let account = store_transaction.account().await?;
                let device_keys = self.store.get_own_device().await?.as_device_keys().clone();
                let session = match account.create_outbound_session(&device, key_map, device_keys) {
                    Ok(s) => s,
                    Err(e) => {
                        warn!(
                            ?user_id, ?device_id, error = ?e,
                            "Error creating Olm session"
                        );

                        self.failed_devices
                            .write()
                            .entry(user_id.to_owned())
                            .or_default()
                            .insert(device_id.to_owned());

                        continue;
                    }
                };

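                // Now that we have a session with this device, retry any room
                // key requests from it that we previously couldn't serve.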
                self.key_request_machine.retry_keyshare(user_id, device_id);

                if let Err(e) = self.check_if_unwedged(user_id, device_id).await {
                    error!(?user_id, ?device_id, "Error while treating an unwedged device: {e:?}");
                }

                let session_info = SessionInfo {
                    session_id: session.session_id().to_owned(),
                    algorithm: session.algorithm().await,
                    fallback_key_used: session.created_using_fallback_key,
                };

                changes.sessions.push(session);
                new_sessions.entry(user_id).or_default().insert(device_id, session_info);
            }
        }

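        // Commit the account changes from the transaction and persist the
        // newly created sessions.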
        store_transaction.commit().await?;
        self.store.save_changes(changes).await?;
        info!(sessions = ?new_sessions, "Established new Olm sessions");

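        // These devices handed us a valid one-time key after all, so remove
        // them from the per-user failures cache.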
        for (user, device_map) in new_sessions {
            if let Some(user_cache) = self.failed_devices.read().get(user) {
                user_cache.remove(device_map.into_keys());
            }
        }

        let store_cache = self.store.cache().await?;
        match self.key_request_machine.collect_incoming_key_requests(&store_cache).await {
            Ok(sessions) => {
                let changes = Changes { sessions, ..Default::default() };
                self.store.save_changes(changes).await?
            }
            // We don't propagate the error here since the next sync will retry
            // this.
            Err(e) => {
                warn!(error = ?e, "Error while trying to collect the incoming secret requests")
            }
        }

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use std::{collections::BTreeMap, iter, ops::Deref, sync::Arc, time::Duration};

    use matrix_sdk_common::locks::RwLock as StdRwLock;
    use matrix_sdk_test::{async_test, ruma_response_from_json};
    use ruma::{
        api::client::keys::claim_keys::v3::Response as KeyClaimResponse, device_id,
        owned_server_name, user_id, DeviceId, OwnedUserId, UserId,
    };
    use serde_json::json;
    use tokio::sync::Mutex;
    use tracing::info;

    use super::SessionManager;
    use crate::{
        gossiping::GossipMachine,
        identities::{DeviceData, IdentityManager},
        olm::{Account, PrivateCrossSigningIdentity},
        session_manager::GroupSessionCache,
        store::{Changes, CryptoStoreWrapper, DeviceChanges, MemoryStore, PendingChanges, Store},
        verification::VerificationMachine,
    };

    fn user_id() -> &'static UserId {
        user_id!("@example:localhost")
    }

    fn device_id() -> &'static DeviceId {
        device_id!("DEVICEID")
    }

    fn bob_account() -> Account {
        Account::with_device_id(user_id!("@bob:localhost"), device_id!("BOBDEVICE"))
    }

    fn keys_claim_with_failure() -> KeyClaimResponse {
        let response = json!({
            "one_time_keys": {},
            "failures": {
                "example.org": {
                    "errcode": "M_RESOURCE_LIMIT_EXCEEDED",
                    "error": "Not yet ready to retry",
                }
            }
        });
        ruma_response_from_json(&response)
    }

    fn keys_claim_without_failure() -> KeyClaimResponse {
        let response = json!({
            "one_time_keys": {
                "@alice:example.org": {},
            },
            "failures": {},
        });
        ruma_response_from_json(&response)
    }

    async fn session_manager_test_helper() -> (SessionManager, IdentityManager) {
        let user_id = user_id();
        let device_id = device_id();

        let account = Account::with_device_id(user_id, device_id);
        let store = Arc::new(CryptoStoreWrapper::new(user_id, device_id, MemoryStore::new()));
        let identity = Arc::new(Mutex::new(PrivateCrossSigningIdentity::empty(user_id)));
        let verification = VerificationMachine::new(
            account.static_data().clone(),
            identity.clone(),
            store.clone(),
        );

        let store = Store::new(account.static_data().clone(), identity, store, verification);
        let device = DeviceData::from_account(&account);
        store.save_pending_changes(PendingChanges { account: Some(account) }).await.unwrap();
        store
            .save_changes(Changes {
                devices: DeviceChanges { new: vec![device], ..Default::default() },
                ..Default::default()
            })
            .await
            .unwrap();

        let session_cache = GroupSessionCache::new(store.clone());
        let identity_manager = IdentityManager::new(store.clone());

        let users_for_key_claim = Arc::new(StdRwLock::new(BTreeMap::new()));
        let key_request = GossipMachine::new(
            store.clone(),
            identity_manager.clone(),
            session_cache,
            users_for_key_claim.clone(),
        );

        (SessionManager::new(users_for_key_claim, key_request, store), identity_manager)
    }

    #[async_test]
    async fn test_session_creation() {
        let (manager, _identity_manager) = session_manager_test_helper().await;
        let mut bob = bob_account();

        let bob_device = DeviceData::from_account(&bob);

        manager.store.save_device_data(&[bob_device]).await.unwrap();

        let (txn_id, request) =
            manager.get_missing_sessions(iter::once(bob.user_id())).await.unwrap().unwrap();

        assert!(request.one_time_keys.contains_key(bob.user_id()));

        bob.generate_one_time_keys(1);
        let one_time = bob.signed_one_time_keys();
        assert!(!one_time.is_empty());
        bob.mark_keys_as_published();

        let mut one_time_keys = BTreeMap::new();
        one_time_keys
            .entry(bob.user_id().to_owned())
            .or_insert_with(BTreeMap::new)
            .insert(bob.device_id().to_owned(), one_time);

        let response = KeyClaimResponse::new(one_time_keys);

        manager.receive_keys_claim_response(&txn_id, &response).await.unwrap();

        assert!(manager.get_missing_sessions(iter::once(bob.user_id())).await.unwrap().is_none());
    }

    #[async_test]
    async fn test_session_creation_waits_for_keys_query() {
        let (manager, identity_manager) = session_manager_test_helper().await;

        // start a `/keys/query` request. At this point, we are only interested in our
        // own devices.
        let (key_query_txn_id, key_query_request) =
            identity_manager.users_for_key_query().await.unwrap().pop_first().unwrap();
        info!("Initial key query: {:?}", key_query_request);

        // now bob turns up, and we start tracking his devices...
        let bob = bob_account();
        let bob_device = DeviceData::from_account(&bob);
        {
            let cache = manager.store.cache().await.unwrap();
            identity_manager
                .key_query_manager
                .synced(&cache)
                .await
                .unwrap()
                .update_tracked_users(iter::once(bob.user_id()))
                .await
                .unwrap();
        }

        // ... and start off an attempt to get the missing sessions. This should block
        // for now.
        let missing_sessions_task = {
            let manager = manager.clone();
            let bob_user_id = bob.user_id().to_owned();

            #[allow(unknown_lints, clippy::redundant_async_block)] // false positive
            tokio::spawn(async move {
                manager.get_missing_sessions(iter::once(bob_user_id.deref())).await
            })
        };

        // the initial `/keys/query` completes, and we start another
        let response_json =
            json!({ "device_keys": { manager.store.static_account().user_id.to_owned(): {}}});
        let response = ruma_response_from_json(&response_json);
        identity_manager.receive_keys_query_response(&key_query_txn_id, &response).await.unwrap();

        let (key_query_txn_id, key_query_request) =
            identity_manager.users_for_key_query().await.unwrap().pop_first().unwrap();
        info!("Second key query: {:?}", key_query_request);

        // that second request completes with info on bob's device
        let response_json = json!({ "device_keys": { bob.user_id(): {
            bob_device.device_id(): bob_device.as_device_keys()
        }}});
        let response = ruma_response_from_json(&response_json);
        identity_manager.receive_keys_query_response(&key_query_txn_id, &response).await.unwrap();

        // the missing_sessions_task should now finally complete, with a claim
        // including bob's device
        let (_, keys_claim_request) = missing_sessions_task.await.unwrap().unwrap().unwrap();
        info!("Key claim request: {:?}", keys_claim_request.one_time_keys);
        let bob_key_claims = keys_claim_request.one_time_keys.get(bob.user_id()).unwrap();
        assert!(bob_key_claims.contains_key(bob_device.device_id()));
    }

    #[async_test]
    async fn test_session_creation_does_not_wait_for_keys_query_on_failed_server() {
        let (manager, identity_manager) = session_manager_test_helper().await;

        // We start tracking Bob's devices.
        let other_user_id = OwnedUserId::try_from("@bob:example.com").unwrap();
        {
            let cache = manager.store.cache().await.unwrap();
            identity_manager
                .key_query_manager
                .synced(&cache)
                .await
                .unwrap()
                .update_tracked_users(iter::once(other_user_id.as_ref()))
                .await
                .unwrap();
        }

        // Do a `/keys/query` request, in which Bob's server is a failure.
        let (key_query_txn_id, _key_query_request) =
            identity_manager.users_for_key_query().await.unwrap().pop_first().unwrap();
        let response = ruma_response_from_json(
            &json!({ "device_keys": {}, "failures": { other_user_id.server_name(): "unreachable" }}),
        );
        identity_manager.receive_keys_query_response(&key_query_txn_id, &response).await.unwrap();

        // Now, an attempt to get the missing sessions should *not* block. We use a
        // timeout so that we can detect the call blocking.
        let result = tokio::time::timeout(
            Duration::from_millis(10),
            manager.get_missing_sessions(iter::once(other_user_id.as_ref())),
        )
        .await
        .expect("get_missing_sessions blocked rather than completing quickly")
        .expect("get_missing_sessions returned an error");

        assert!(result.is_none(), "get_missing_sessions returned Some(...)");
    }

    // This test doesn't run on macOS because we're modifying the session
    // creation time so we can get around the UNWEDGING_INTERVAL.
    #[async_test]
    #[cfg(target_os = "linux")]
    async fn test_session_unwedging() {
        use ruma::{time::SystemTime, SecondsSinceUnixEpoch};

        let (manager, _identity_manager) = session_manager_test_helper().await;
        let mut bob = bob_account();

        let (_, mut session) = manager
            .store
            .with_transaction(|mut tr| async {
                let manager_account = tr.account().await.unwrap();
                let res = bob.create_session_for_test_helper(manager_account).await;
                Ok((tr, res))
            })
            .await
            .unwrap();

        let bob_device = DeviceData::from_account(&bob);
        let time = SystemTime::now() - Duration::from_secs(3601);
        session.creation_time = SecondsSinceUnixEpoch::from_system_time(time).unwrap();

        manager.store.save_device_data(&[bob_device.clone()]).await.unwrap();
        manager.store.save_sessions(&[session]).await.unwrap();

        assert!(manager.get_missing_sessions(iter::once(bob.user_id())).await.unwrap().is_none());

        let curve_key = bob_device.curve25519_key().unwrap();

        assert!(!manager.users_for_key_claim.read().contains_key(bob.user_id()));
        assert!(!manager.is_device_wedged(&bob_device));
        manager.mark_device_as_wedged(bob_device.user_id(), curve_key).await.unwrap();
        assert!(manager.is_device_wedged(&bob_device));
        assert!(manager.users_for_key_claim.read().contains_key(bob.user_id()));

        let (txn_id, request) =
            manager.get_missing_sessions(iter::once(bob.user_id())).await.unwrap().unwrap();

        assert!(request.one_time_keys.contains_key(bob.user_id()));

        bob.generate_one_time_keys(1);
        let one_time = bob.signed_one_time_keys();
        assert!(!one_time.is_empty());
        bob.mark_keys_as_published();

        let mut one_time_keys = BTreeMap::new();
        one_time_keys
            .entry(bob.user_id().to_owned())
            .or_insert_with(BTreeMap::new)
            .insert(bob.device_id().to_owned(), one_time);

        let response = KeyClaimResponse::new(one_time_keys);

        assert!(manager.outgoing_to_device_requests.read().is_empty());

        manager.receive_keys_claim_response(&txn_id, &response).await.unwrap();

        assert!(!manager.is_device_wedged(&bob_device));
        assert!(manager.get_missing_sessions(iter::once(bob.user_id())).await.unwrap().is_none());
        assert!(!manager.outgoing_to_device_requests.read().is_empty())
    }

    #[async_test]
    async fn test_failure_handling() {
        let alice = user_id!("@alice:example.org");
        let alice_account = Account::with_device_id(alice, "DEVICEID".into());
        let alice_device = DeviceData::from_account(&alice_account);

        let (manager, _identity_manager) = session_manager_test_helper().await;

        manager.store.save_device_data(&[alice_device]).await.unwrap();

        let (txn_id, users_for_key_claim) =
            manager.get_missing_sessions(iter::once(alice)).await.unwrap().unwrap();
        assert!(users_for_key_claim.one_time_keys.contains_key(alice));

        manager.receive_keys_claim_response(&txn_id, &keys_claim_with_failure()).await.unwrap();
        assert!(manager.get_missing_sessions(iter::once(alice)).await.unwrap().is_none());

        // expire the failure
        manager.failures.expire(&owned_server_name!("example.org"));

        let (txn_id, users_for_key_claim) =
            manager.get_missing_sessions(iter::once(alice)).await.unwrap().unwrap();
        assert!(users_for_key_claim.one_time_keys.contains_key(alice));

        manager.receive_keys_claim_response(&txn_id, &keys_claim_without_failure()).await.unwrap();
    }

    #[async_test]
    async fn test_failed_devices_handling() {
        // Alice is missing altogether
        test_invalid_claim_response(json!({
            "one_time_keys": {},
            "failures": {},
        }))
        .await;

        // Alice is present but with no devices
        test_invalid_claim_response(json!({
            "one_time_keys": {
                "@alice:example.org": {}
            },
            "failures": {},
        }))
        .await;

        // Alice's device is present but with no keys
        test_invalid_claim_response(json!({
            "one_time_keys": {
                "@alice:example.org": {
                    "DEVICEID": {}
                }
            },
            "failures": {},
        }))
        .await;

        // Alice's device is present with a bad signature
        test_invalid_claim_response(json!({
            "one_time_keys": {
                "@alice:example.org": {
                    "DEVICEID": {
                        "signed_curve25519:AAAAAA": {
                            "fallback": true,
                            "key": "1sra5GVo1ONz478aQybxSEeHTSo2xq0Z+Q3Yzqvp3A4",
                            "signatures": {
                                "@example:morpheus.localhost": {
                                    "ed25519:YAFLBLXAUK": "Zwk90fJhZWOYGNOgtOswZ6RSOGeTjTi/h2dMpyB0CR6EVtvTra0WJtp32ntifrxtwD710y2F3pe5Oyrm7jngCQ"
                                }
                            }
                        }
                    }
                }
            },
            "failures": {},
        })).await;
    }

    /// Helper for `test_failed_devices_handling`.
    ///
    /// Takes an invalid /keys/claim response for Alice's device DEVICEID and
    /// checks that it is handled correctly. (The device should be marked as
    /// 'failed'; and once that failure expires, claiming keys for the device
    /// should be attempted again.)
    async fn test_invalid_claim_response(response_json: serde_json::Value) {
        let response = ruma_response_from_json(&response_json);

        let alice = user_id!("@alice:example.org");
        let mut alice_account = Account::with_device_id(alice, "DEVICEID".into());
        let alice_device = DeviceData::from_account(&alice_account);

        let (manager, _identity_manager) = session_manager_test_helper().await;
        manager.store.save_device_data(&[alice_device]).await.unwrap();

        // Since we don't have a session with Alice yet, the machine will try to claim
        // some keys for Alice.
        let (txn_id, users_for_key_claim) =
            manager.get_missing_sessions(iter::once(alice)).await.unwrap().unwrap();
        assert!(users_for_key_claim.one_time_keys.contains_key(alice));

        // We receive a response with an invalid one-time key; this will mark Alice's
        // device as timed out.
        manager.receive_keys_claim_response(&txn_id, &response).await.unwrap();
        // Since Alice's device is timed out, we won't claim keys for her.
        assert!(manager.get_missing_sessions(iter::once(alice)).await.unwrap().is_none());

        alice_account.generate_one_time_keys(1);
        let one_time = alice_account.signed_one_time_keys();
        assert!(!one_time.is_empty());

        let mut one_time_keys = BTreeMap::new();
        one_time_keys
            .entry(alice.to_owned())
            .or_insert_with(BTreeMap::new)
            .insert(alice_account.device_id().to_owned(), one_time);

        // Now we expire Alice's timeout, and receive a valid one-time key for her.
        manager
            .failed_devices
            .write()
            .get(alice)
            .unwrap()
            .expire(&alice_account.device_id().to_owned());
        let (txn_id, users_for_key_claim) =
            manager.get_missing_sessions(iter::once(alice)).await.unwrap().unwrap();
        assert!(users_for_key_claim.one_time_keys.contains_key(alice));

        let response = KeyClaimResponse::new(one_time_keys);
        manager.receive_keys_claim_response(&txn_id, &response).await.unwrap();

        // Alice isn't timed out anymore.
        assert!(manager
            .failed_devices
            .read()
            .get(alice)
            .unwrap()
            .failure_count(alice_account.device_id())
            .is_none());
    }
}