matrix_sdk_crypto/store/memorystore.rs

// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{
    collections::{BTreeMap, HashMap, HashSet},
    convert::Infallible,
    sync::Arc,
};

use async_trait::async_trait;
use matrix_sdk_common::{
    locks::RwLock as StdRwLock, store_locks::memory_store_helper::try_take_leased_lock,
};
use ruma::{
    events::secret::request::SecretName, time::Instant, DeviceId, OwnedDeviceId, OwnedRoomId,
    OwnedTransactionId, OwnedUserId, RoomId, TransactionId, UserId,
};
use tokio::sync::{Mutex, RwLock};
use tracing::warn;
use vodozemac::Curve25519PublicKey;

use super::{
    caches::DeviceStore, Account, BackupKeys, Changes, CryptoStore, DehydratedDeviceKey,
    InboundGroupSession, PendingChanges, RoomKeyCounts, RoomSettings, Session,
    StoredRoomKeyBundleData,
};
use crate::{
    gossiping::{GossipRequest, GossippedSecret, SecretInfo},
    identities::{DeviceData, UserIdentityData},
    olm::{
        OutboundGroupSession, PickledAccount, PickledInboundGroupSession, PickledSession,
        PrivateCrossSigningIdentity, SenderDataType, StaticAccountData,
    },
    types::events::room_key_withheld::RoomKeyWithheldEvent,
    TrackedUser,
};

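/// Encode a [`SecretInfo`] into a flat string key.
///
/// For a key request this is the concatenation of the room ID, algorithm and
/// session ID; for a secret request it is the secret name. For example
/// (hypothetical values), a key request for session `abc` in
/// `!room:example.org` using `m.megolm.v1.aes-sha2` encodes to
/// `!room:example.orgm.megolm.v1.aes-sha2abc`.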
fn encode_key_info(info: &SecretInfo) -> String {
    match info {
        SecretInfo::KeyRequest(info) => {
            format!("{}{}{}", info.room_id(), info.algorithm(), info.session_id())
        }
        SecretInfo::SecretRequest(i) => i.as_ref().to_owned(),
    }
}

type SessionId = String;

/// The "version" of a backup - newtype wrapper around a String.
#[derive(Clone, Debug, PartialEq)]
struct BackupVersion(String);

impl BackupVersion {
    fn from(s: &str) -> Self {
        Self(s.to_owned())
    }

    fn as_str(&self) -> &str {
        &self.0
    }
}

/// An in-memory-only store that will forget all the E2EE keys once it's
/// dropped.
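///
/// A minimal usage sketch (not from the original docs; the import paths are
/// assumptions based on the crate's public re-exports): create a store and
/// drive it through the [`CryptoStore`] trait. All data lives in process
/// memory and is lost on drop.
///
/// ```ignore
/// use matrix_sdk_crypto::store::{CryptoStore, MemoryStore};
///
/// # async fn example() {
/// let store = MemoryStore::new();
/// // A freshly created store has no account in it.
/// assert!(store.load_account().await.unwrap().is_none());
/// # }
/// ```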
#[derive(Default, Debug)]
pub struct MemoryStore {
    static_account: Arc<StdRwLock<Option<StaticAccountData>>>,

    account: StdRwLock<Option<String>>,
    // Map of sender_key to map of session_id to serialized pickle
    sessions: StdRwLock<BTreeMap<String, BTreeMap<String, String>>>,
    inbound_group_sessions: StdRwLock<BTreeMap<OwnedRoomId, HashMap<String, String>>>,

    /// Map of room id -> session id -> backup version.
    /// The latest backup in which this session is stored. Equivalent to
    /// `backed_up_to` in [`IndexedDbCryptoStore`]
    inbound_group_sessions_backed_up_to:
        StdRwLock<HashMap<OwnedRoomId, HashMap<SessionId, BackupVersion>>>,

    outbound_group_sessions: StdRwLock<BTreeMap<OwnedRoomId, OutboundGroupSession>>,
    private_identity: StdRwLock<Option<PrivateCrossSigningIdentity>>,
    tracked_users: StdRwLock<HashMap<OwnedUserId, TrackedUser>>,
    olm_hashes: StdRwLock<HashMap<String, HashSet<String>>>,
    devices: DeviceStore,
    identities: StdRwLock<HashMap<OwnedUserId, String>>,
    outgoing_key_requests: StdRwLock<HashMap<OwnedTransactionId, GossipRequest>>,
    key_requests_by_info: StdRwLock<HashMap<String, OwnedTransactionId>>,
    direct_withheld_info: StdRwLock<HashMap<OwnedRoomId, HashMap<String, RoomKeyWithheldEvent>>>,
    custom_values: StdRwLock<HashMap<String, Vec<u8>>>,
    leases: StdRwLock<HashMap<String, (String, Instant)>>,
    secret_inbox: StdRwLock<HashMap<String, Vec<GossippedSecret>>>,
    backup_keys: RwLock<BackupKeys>,
    dehydrated_device_pickle_key: RwLock<Option<DehydratedDeviceKey>>,
    next_batch_token: RwLock<Option<String>>,
    room_settings: StdRwLock<HashMap<OwnedRoomId, RoomSettings>>,
    room_key_bundles:
        StdRwLock<HashMap<OwnedRoomId, HashMap<OwnedUserId, StoredRoomKeyBundleData>>>,

    save_changes_lock: Arc<Mutex<()>>,
}

impl MemoryStore {
    /// Create a new empty `MemoryStore`.
    pub fn new() -> Self {
        Self::default()
    }

    fn get_static_account(&self) -> Option<StaticAccountData> {
        self.static_account.read().clone()
    }

    pub(crate) fn save_devices(&self, devices: Vec<DeviceData>) {
        for device in devices {
            let _ = self.devices.add(device);
        }
    }

    fn delete_devices(&self, devices: Vec<DeviceData>) {
        for device in devices {
            let _ = self.devices.remove(device.user_id(), device.device_id());
        }
    }

    fn save_sessions(&self, sessions: Vec<(String, PickledSession)>) {
        let mut session_store = self.sessions.write();

        for (session_id, pickle) in sessions {
            let entry = session_store.entry(pickle.sender_key.to_base64()).or_default();

            // Insert the session, replacing any existing one with the same ID.
            entry.insert(
                session_id,
                serde_json::to_string(&pickle).expect("Failed to serialize olm session"),
            );
        }
    }

    fn save_outbound_group_sessions(&self, sessions: Vec<OutboundGroupSession>) {
        self.outbound_group_sessions
            .write()
            .extend(sessions.into_iter().map(|s| (s.room_id().to_owned(), s)));
    }

    fn save_private_identity(&self, private_identity: Option<PrivateCrossSigningIdentity>) {
        *self.private_identity.write() = private_identity;
    }

    /// Return all the [`InboundGroupSession`]s we have, paired with the
    /// `backed_up_to` value for each one (`None` if the session has never
    /// been marked as backed up).
    async fn get_inbound_group_sessions_and_backed_up_to(
        &self,
    ) -> Result<Vec<(InboundGroupSession, Option<BackupVersion>)>> {
        let lookup = |s: &InboundGroupSession| {
            self.inbound_group_sessions_backed_up_to
                .read()
                .get(&s.room_id)?
                .get(s.session_id())
                .cloned()
        };

        Ok(self
            .get_inbound_group_sessions()
            .await?
            .into_iter()
            .map(|s| {
                let v = lookup(&s);
                (s, v)
            })
            .collect())
    }
}

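// The in-memory store cannot fail, so its error type is `Infallible`: every
// `Result` returned below is guaranteed to be `Ok` (though some lookups may
// panic via `expect` on corrupted data).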
type Result<T> = std::result::Result<T, Infallible>;

#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
impl CryptoStore for MemoryStore {
    type Error = Infallible;

    async fn load_account(&self) -> Result<Option<Account>> {
        let pickled_account: Option<PickledAccount> = self.account.read().as_ref().map(|acc| {
            serde_json::from_str(acc)
                .expect("Deserialization failed: invalid pickled account JSON format")
        });

        if let Some(pickle) = pickled_account {
            let account =
                Account::from_pickle(pickle).expect("From pickle failed: invalid pickle format");

            *self.static_account.write() = Some(account.static_data().clone());

            Ok(Some(account))
        } else {
            Ok(None)
        }
    }

    async fn load_identity(&self) -> Result<Option<PrivateCrossSigningIdentity>> {
        Ok(self.private_identity.read().clone())
    }

    async fn next_batch_token(&self) -> Result<Option<String>> {
        Ok(self.next_batch_token.read().await.clone())
    }

    async fn save_pending_changes(&self, changes: PendingChanges) -> Result<()> {
        let _guard = self.save_changes_lock.lock().await;

        let pickled_account = if let Some(account) = changes.account {
            *self.static_account.write() = Some(account.static_data().clone());
            Some(account.pickle())
        } else {
            None
        };

        *self.account.write() = pickled_account.map(|pickle| {
            serde_json::to_string(&pickle)
                .expect("Serialization failed: invalid pickled account JSON format")
        });

        Ok(())
    }

    async fn save_changes(&self, changes: Changes) -> Result<()> {
        let _guard = self.save_changes_lock.lock().await;

        let mut pickled_sessions: Vec<(String, PickledSession)> = Vec::new();
        for session in changes.sessions {
            let session_id = session.session_id().to_owned();
            let pickle = session.pickle().await;
            pickled_sessions.push((session_id, pickle));
        }
        self.save_sessions(pickled_sessions);

        self.save_inbound_group_sessions(changes.inbound_group_sessions, None).await?;
        self.save_outbound_group_sessions(changes.outbound_group_sessions);
        self.save_private_identity(changes.private_identity);

        self.save_devices(changes.devices.new);
        self.save_devices(changes.devices.changed);
        self.delete_devices(changes.devices.deleted);

        {
            let mut identities = self.identities.write();
            for identity in changes.identities.new.into_iter().chain(changes.identities.changed) {
                identities.insert(
                    identity.user_id().to_owned(),
                    serde_json::to_string(&identity)
                        .expect("UserIdentityData should always serialize to json"),
                );
            }
        }

        {
            let mut olm_hashes = self.olm_hashes.write();
            for hash in changes.message_hashes {
                olm_hashes.entry(hash.sender_key.to_owned()).or_default().insert(hash.hash.clone());
            }
        }

        {
            let mut outgoing_key_requests = self.outgoing_key_requests.write();
            let mut key_requests_by_info = self.key_requests_by_info.write();

            for key_request in changes.key_requests {
                let id = key_request.request_id.clone();
                let info_string = encode_key_info(&key_request.info);

                outgoing_key_requests.insert(id.clone(), key_request);
                key_requests_by_info.insert(info_string, id);
            }
        }

        if let Some(key) = changes.backup_decryption_key {
            self.backup_keys.write().await.decryption_key = Some(key);
        }

        if let Some(version) = changes.backup_version {
            self.backup_keys.write().await.backup_version = Some(version);
        }

        if let Some(pickle_key) = changes.dehydrated_device_pickle_key {
            let mut lock = self.dehydrated_device_pickle_key.write().await;
            *lock = Some(pickle_key);
        }

        {
            let mut secret_inbox = self.secret_inbox.write();
            for secret in changes.secrets {
                secret_inbox.entry(secret.secret_name.to_string()).or_default().push(secret);
            }
        }

        {
            let mut direct_withheld_info = self.direct_withheld_info.write();
            for (room_id, data) in changes.withheld_session_info {
                for (session_id, event) in data {
                    direct_withheld_info
                        .entry(room_id.to_owned())
                        .or_default()
                        .insert(session_id, event);
                }
            }
        }

        if let Some(next_batch_token) = changes.next_batch_token {
            *self.next_batch_token.write().await = Some(next_batch_token);
        }

        if !changes.room_settings.is_empty() {
            let mut settings = self.room_settings.write();
            settings.extend(changes.room_settings);
        }

        if !changes.received_room_key_bundles.is_empty() {
            let mut room_key_bundles = self.room_key_bundles.write();
            for bundle in changes.received_room_key_bundles {
                room_key_bundles
                    .entry(bundle.bundle_data.room_id.clone())
                    .or_default()
                    .insert(bundle.sender_user.clone(), bundle);
            }
        }

        Ok(())
    }

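    // A sketch of the expected calling contract (inferred from the sanity
    // check inside the method, not stated elsewhere in this file): pass
    // `Some(version)` only when every session already has its `backed_up`
    // flag set, and `None` when saving sessions that are not backed up, e.g.
    //
    //   store.save_inbound_group_sessions(backed_up_sessions, Some("bkp1")).await?;
    //   store.save_inbound_group_sessions(fresh_sessions, None).await?;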
    async fn save_inbound_group_sessions(
        &self,
        sessions: Vec<InboundGroupSession>,
        backed_up_to_version: Option<&str>,
    ) -> Result<()> {
        for session in sessions {
            let room_id = session.room_id();
            let session_id = session.session_id();

            // Sanity-check that the data in the sessions corresponds to
            // `backed_up_to_version`.
            let backed_up = session.backed_up();
            if backed_up != backed_up_to_version.is_some() {
                warn!(
                    backed_up,
                    backed_up_to_version,
                    "Session backed-up flag does not correspond to backup version setting",
                );
            }

            if let Some(backup_version) = backed_up_to_version {
                self.inbound_group_sessions_backed_up_to
                    .write()
                    .entry(room_id.to_owned())
                    .or_default()
                    .insert(session_id.to_owned(), BackupVersion::from(backup_version));
            }

            let pickle = session.pickle().await;
            self.inbound_group_sessions
                .write()
                .entry(session.room_id().to_owned())
                .or_default()
                .insert(
                    session.session_id().to_owned(),
                    serde_json::to_string(&pickle)
                        .expect("Pickled session data should serialize to json"),
                );
        }
        Ok(())
    }

    async fn get_sessions(&self, sender_key: &str) -> Result<Option<Vec<Session>>> {
        let device_keys = self.get_own_device().await?.as_device_keys().clone();

        if let Some(pickles) = self.sessions.read().get(sender_key) {
            let mut sessions: Vec<Session> = Vec::new();
            for serialized_pickle in pickles.values() {
                let pickle: PickledSession = serde_json::from_str(serialized_pickle.as_str())
                    .expect("Pickled session deserialization should work");
                let session = Session::from_pickle(device_keys.clone(), pickle)
                    .expect("Expect from pickle to always work");
                sessions.push(session);
            }
            Ok(Some(sessions))
        } else {
            Ok(None)
        }
    }

    async fn get_inbound_group_session(
        &self,
        room_id: &RoomId,
        session_id: &str,
    ) -> Result<Option<InboundGroupSession>> {
        let pickle: Option<PickledInboundGroupSession> = self
            .inbound_group_sessions
            .read()
            .get(room_id)
            .and_then(|m| m.get(session_id))
            .and_then(|ser| {
                serde_json::from_str(ser).expect("Pickled session deserialization should work")
            });

        Ok(pickle.map(|p| {
            InboundGroupSession::from_pickle(p).expect("Expect from pickle to always work")
        }))
    }

    async fn get_withheld_info(
        &self,
        room_id: &RoomId,
        session_id: &str,
    ) -> Result<Option<RoomKeyWithheldEvent>> {
        Ok(self
            .direct_withheld_info
            .read()
            .get(room_id)
            .and_then(|e| e.get(session_id).cloned()))
    }

    async fn get_inbound_group_sessions(&self) -> Result<Vec<InboundGroupSession>> {
        let inbounds = self
            .inbound_group_sessions
            .read()
            .values()
            .flat_map(HashMap::values)
            .map(|ser| {
                let pickle: PickledInboundGroupSession =
                    serde_json::from_str(ser).expect("Pickle deserialization should work");
                InboundGroupSession::from_pickle(pickle).expect("Expect from pickle to always work")
            })
            .collect();
        Ok(inbounds)
    }

    async fn inbound_group_session_counts(
        &self,
        backup_version: Option<&str>,
    ) -> Result<RoomKeyCounts> {
        let backed_up = if let Some(backup_version) = backup_version {
            self.get_inbound_group_sessions_and_backed_up_to()
                .await?
                .into_iter()
                // Count the sessions backed up in the required backup
                .filter(|(_, o)| o.as_ref().is_some_and(|o| o.as_str() == backup_version))
                .count()
        } else {
            // No backup version was provided, so we can only answer that
            // nothing is backed up.
            0
        };

        let total = self.inbound_group_sessions.read().values().map(HashMap::len).sum();
        Ok(RoomKeyCounts { total, backed_up })
    }

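    // A hedged sketch of how a caller might page through all sessions for a
    // device with the method below (the variable names are illustrative, not
    // from this crate): pass the last session ID of the previous batch as
    // `after_session_id` and stop when a batch comes back empty.
    //
    //   let mut after = None;
    //   loop {
    //       let batch = store
    //           .get_inbound_group_sessions_for_device_batch(
    //               sender_key, sender_data_type, after.clone(), 50)
    //           .await?;
    //       let Some(last) = batch.last() else { break };
    //       after = Some(last.session_id().to_owned());
    //       // ... process `batch` ...
    //   }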
    async fn get_inbound_group_sessions_for_device_batch(
        &self,
        sender_key: Curve25519PublicKey,
        sender_data_type: SenderDataType,
        after_session_id: Option<String>,
        limit: usize,
    ) -> Result<Vec<InboundGroupSession>> {
        // First, find all InboundGroupSessions, filtering for those that match the
        // device and sender_data type.
        let mut sessions: Vec<_> = self
            .get_inbound_group_sessions()
            .await?
            .into_iter()
            .filter(|session: &InboundGroupSession| {
                session.creator_info.curve25519_key == sender_key
                    && session.sender_data.to_type() == sender_data_type
            })
            .collect();

        // Then, sort the sessions in order of ascending session ID...
        sessions.sort_by_key(|s| s.session_id().to_owned());

        // Figure out where in the array to start returning results from
        let start_index = {
            match after_session_id {
                None => 0,
                Some(id) => {
                    // We're looking for the first session with a session ID strictly after `id`; if
                    // there are none, the end of the array.
                    sessions
                        .iter()
                        .position(|session| session.session_id() > id.as_str())
                        .unwrap_or(sessions.len())
                }
            }
        };

        // Return up to `limit` items from the array, starting from `start_index`
        Ok(sessions.drain(start_index..).take(limit).collect())
    }

    async fn inbound_group_sessions_for_backup(
        &self,
        backup_version: &str,
        limit: usize,
    ) -> Result<Vec<InboundGroupSession>> {
        Ok(self
            .get_inbound_group_sessions_and_backed_up_to()
            .await?
            .into_iter()
            .filter_map(|(session, backed_up_to)| {
                if let Some(ref existing_version) = backed_up_to {
                    if existing_version.as_str() == backup_version {
                        // This session is already backed up in the required backup
                        return None;
                    }
                }
                // It's not backed up, or it's backed up in a different backup
                Some(session)
            })
            .take(limit)
            .collect())
    }

    async fn mark_inbound_group_sessions_as_backed_up(
        &self,
        backup_version: &str,
        room_and_session_ids: &[(&RoomId, &str)],
    ) -> Result<()> {
        for &(room_id, session_id) in room_and_session_ids {
            let session = self.get_inbound_group_session(room_id, session_id).await?;

            if let Some(session) = session {
                session.mark_as_backed_up();

                self.inbound_group_sessions_backed_up_to
                    .write()
                    .entry(room_id.to_owned())
                    .or_default()
                    .insert(session_id.to_owned(), BackupVersion::from(backup_version));

                // Save it back
                let updated_pickle = session.pickle().await;

                self.inbound_group_sessions.write().entry(room_id.to_owned()).or_default().insert(
                    session_id.to_owned(),
                    serde_json::to_string(&updated_pickle)
                        .expect("Pickle serialization should work"),
                );
            }
        }

        Ok(())
    }

    async fn reset_backup_state(&self) -> Result<()> {
        // Nothing to do here: `mark_inbound_group_sessions_as_backed_up`
        // records which backup version each session was backed up to, and
        // `inbound_group_sessions_for_backup` receives the required version,
        // so we can simply compare against the stored version.

        Ok(())
    }

    async fn load_backup_keys(&self) -> Result<BackupKeys> {
        Ok(self.backup_keys.read().await.to_owned())
    }

    async fn load_dehydrated_device_pickle_key(&self) -> Result<Option<DehydratedDeviceKey>> {
        Ok(self.dehydrated_device_pickle_key.read().await.to_owned())
    }

    async fn delete_dehydrated_device_pickle_key(&self) -> Result<()> {
        let mut lock = self.dehydrated_device_pickle_key.write().await;
        *lock = None;
        Ok(())
    }

    async fn get_outbound_group_session(
        &self,
        room_id: &RoomId,
    ) -> Result<Option<OutboundGroupSession>> {
        Ok(self.outbound_group_sessions.read().get(room_id).cloned())
    }

    async fn load_tracked_users(&self) -> Result<Vec<TrackedUser>> {
        Ok(self.tracked_users.read().values().cloned().collect())
    }

    async fn save_tracked_users(&self, tracked_users: &[(&UserId, bool)]) -> Result<()> {
        self.tracked_users.write().extend(tracked_users.iter().map(|(user_id, dirty)| {
            let user_id: OwnedUserId = user_id.to_owned().into();
            (user_id.clone(), TrackedUser { user_id, dirty: *dirty })
        }));
        Ok(())
    }

    async fn get_device(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
    ) -> Result<Option<DeviceData>> {
        Ok(self.devices.get(user_id, device_id))
    }

    async fn get_user_devices(
        &self,
        user_id: &UserId,
    ) -> Result<HashMap<OwnedDeviceId, DeviceData>> {
        Ok(self.devices.user_devices(user_id))
    }

    async fn get_own_device(&self) -> Result<DeviceData> {
        let account =
            self.get_static_account().expect("Expect account to exist when getting own device");

        Ok(self
            .devices
            .get(&account.user_id, &account.device_id)
            .expect("Invalid state: Should always have our own device"))
    }

    async fn get_user_identity(&self, user_id: &UserId) -> Result<Option<UserIdentityData>> {
        let serialized = self.identities.read().get(user_id).cloned();
        match serialized {
            None => Ok(None),
            Some(serialized) => {
                let id: UserIdentityData = serde_json::from_str(serialized.as_str())
                    .expect("Only valid serialized identities are saved");
                Ok(Some(id))
            }
        }
    }

    async fn is_message_known(&self, message_hash: &crate::olm::OlmMessageHash) -> Result<bool> {
        Ok(self
            .olm_hashes
            .write()
            .entry(message_hash.sender_key.to_owned())
            .or_default()
            .contains(&message_hash.hash))
    }

    async fn get_outgoing_secret_requests(
        &self,
        request_id: &TransactionId,
    ) -> Result<Option<GossipRequest>> {
        Ok(self.outgoing_key_requests.read().get(request_id).cloned())
    }

    async fn get_secret_request_by_info(
        &self,
        key_info: &SecretInfo,
    ) -> Result<Option<GossipRequest>> {
        let key_info_string = encode_key_info(key_info);

        Ok(self
            .key_requests_by_info
            .read()
            .get(&key_info_string)
            .and_then(|i| self.outgoing_key_requests.read().get(i).cloned()))
    }

    async fn get_unsent_secret_requests(&self) -> Result<Vec<GossipRequest>> {
        Ok(self
            .outgoing_key_requests
            .read()
            .values()
            .filter(|req| !req.sent_out)
            .cloned()
            .collect())
    }

    async fn delete_outgoing_secret_requests(&self, request_id: &TransactionId) -> Result<()> {
        let req = self.outgoing_key_requests.write().remove(request_id);
        if let Some(i) = req {
            let key_info_string = encode_key_info(&i.info);
            self.key_requests_by_info.write().remove(&key_info_string);
        }

        Ok(())
    }

    async fn get_secrets_from_inbox(
        &self,
        secret_name: &SecretName,
    ) -> Result<Vec<GossippedSecret>> {
        Ok(self.secret_inbox.write().entry(secret_name.to_string()).or_default().to_owned())
    }

    async fn delete_secrets_from_inbox(&self, secret_name: &SecretName) -> Result<()> {
        self.secret_inbox.write().remove(secret_name.as_str());

        Ok(())
    }

    async fn get_room_settings(&self, room_id: &RoomId) -> Result<Option<RoomSettings>> {
        Ok(self.room_settings.read().get(room_id).cloned())
    }

    async fn get_received_room_key_bundle_data(
        &self,
        room_id: &RoomId,
        user_id: &UserId,
    ) -> Result<Option<StoredRoomKeyBundleData>> {
        let guard = self.room_key_bundles.read();

        let result = guard.get(room_id).and_then(|bundles| bundles.get(user_id).cloned());

        Ok(result)
    }

    async fn get_custom_value(&self, key: &str) -> Result<Option<Vec<u8>>> {
        Ok(self.custom_values.read().get(key).cloned())
    }

    async fn set_custom_value(&self, key: &str, value: Vec<u8>) -> Result<()> {
        self.custom_values.write().insert(key.to_owned(), value);
        Ok(())
    }

    async fn remove_custom_value(&self, key: &str) -> Result<()> {
        self.custom_values.write().remove(key);
        Ok(())
    }

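    // `try_take_leased_lock` comes from `matrix_sdk_common`; as we understand
    // its semantics, it grants the lease when the key is free or the previous
    // lease has expired, renews it when `holder` already owns it, and the
    // returned boolean reports whether the caller now holds the lease. A
    // hedged usage sketch (illustrative values):
    //
    //   let got_it = store.try_take_leased_lock(500, "cross-process", "proc-a").await?;
    //   // A different holder should now see `false` until the 500 ms lease expires.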
    async fn try_take_leased_lock(
        &self,
        lease_duration_ms: u32,
        key: &str,
        holder: &str,
    ) -> Result<bool> {
        Ok(try_take_leased_lock(&mut self.leases.write(), lease_duration_ms, key, holder))
    }
}

#[cfg(test)]
mod tests {
    use std::collections::HashMap;

    use matrix_sdk_test::async_test;
    use ruma::{room_id, user_id, RoomId};
    use vodozemac::{Curve25519PublicKey, Ed25519PublicKey};

    use super::SessionId;
    use crate::{
        identities::device::testing::get_device,
        olm::{
            tests::get_account_and_session_test_helper, Account, InboundGroupSession,
            OlmMessageHash, PrivateCrossSigningIdentity, SenderData,
        },
        store::{memorystore::MemoryStore, Changes, CryptoStore, DeviceChanges, PendingChanges},
        DeviceData,
    };

    #[async_test]
    async fn test_session_store() {
        let (account, session) = get_account_and_session_test_helper();
        let own_device = DeviceData::from_account(&account);
        let store = MemoryStore::new();

        assert!(store.load_account().await.unwrap().is_none());

        store
            .save_changes(Changes {
                devices: DeviceChanges { new: vec![own_device], ..Default::default() },
                ..Default::default()
            })
            .await
            .unwrap();
        store.save_pending_changes(PendingChanges { account: Some(account) }).await.unwrap();

        store
            .save_changes(Changes { sessions: vec![session.clone()], ..Default::default() })
            .await
            .unwrap();

        let sessions = store.get_sessions(&session.sender_key.to_base64()).await.unwrap().unwrap();

        let loaded_session = &sessions[0];

        assert_eq!(&session, loaded_session);
    }

    #[async_test]
    async fn test_inbound_group_session_store() {
        let (account, _) = get_account_and_session_test_helper();
        let room_id = room_id!("!test:localhost");
        let curve_key = "Nn0L2hkcCMFKqynTjyGsJbth7QrVmX3lbrksMkrGOAw";

        let (outbound, _) = account.create_group_session_pair_with_defaults(room_id).await;
        let inbound = InboundGroupSession::new(
            Curve25519PublicKey::from_base64(curve_key).unwrap(),
            Ed25519PublicKey::from_base64("ee3Ek+J2LkkPmjGPGLhMxiKnhiX//xcqaVL4RP6EypE").unwrap(),
            room_id,
            &outbound.session_key().await,
            SenderData::unknown(),
            outbound.settings().algorithm.to_owned(),
            None,
            false,
        )
        .unwrap();

        let store = MemoryStore::new();
        store.save_inbound_group_sessions(vec![inbound.clone()], None).await.unwrap();

        let loaded_session =
            store.get_inbound_group_session(room_id, outbound.session_id()).await.unwrap().unwrap();
        assert_eq!(inbound, loaded_session);
    }

    #[async_test]
    async fn test_backing_up_marks_sessions_as_backed_up() {
        // Given there are 2 sessions
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(2, room_id).await;

        // When I mark them as backed up
        mark_backed_up(&store, room_id, "bkp1", &sessions).await;

        // Then their backed_up_to field is set
        let but = backed_up_tos(&store).await;
        assert_eq!(but[sessions[0].session_id()], "bkp1");
        assert_eq!(but[sessions[1].session_id()], "bkp1");
    }

    #[async_test]
    async fn test_backing_up_a_second_set_of_sessions_updates_their_backup_order() {
        // Given there are 3 sessions
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(3, room_id).await;

        // When I mark 0 and 1 as backed up in bkp1
        mark_backed_up(&store, room_id, "bkp1", &sessions[..2]).await;

        // And 1 and 2 as backed up in bkp2
        mark_backed_up(&store, room_id, "bkp2", &sessions[1..]).await;

        // Then 0 is backed up in bkp1, and 1 and 2 are backed up in bkp2
        let but = backed_up_tos(&store).await;
        assert_eq!(but[sessions[0].session_id()], "bkp1");
        assert_eq!(but[sessions[1].session_id()], "bkp2");
        assert_eq!(but[sessions[2].session_id()], "bkp2");
    }

    #[async_test]
    async fn test_backing_up_again_to_the_same_version_has_no_effect() {
        // Given there are 3 sessions
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(3, room_id).await;

        // When I mark the first two as backed up in the first backup
        mark_backed_up(&store, room_id, "bkp1", &sessions[..2]).await;

        // And the last 2 as backed up in the same backup version
        mark_backed_up(&store, room_id, "bkp1", &sessions[1..]).await;

        // Then they all get the same backed_up_to value
        let but = backed_up_tos(&store).await;
        assert_eq!(but[sessions[0].session_id()], "bkp1");
        assert_eq!(but[sessions[1].session_id()], "bkp1");
        assert_eq!(but[sessions[2].session_id()], "bkp1");
    }

    #[async_test]
    async fn test_backing_up_to_an_old_backup_version_can_increase_backed_up_to() {
        // Given we have backed up some sessions to 2 backup versions, an older and a
        // newer
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "older_bkp", &sessions[..2]).await;
        mark_backed_up(&store, room_id, "newer_bkp", &sessions[1..2]).await;

        // When I ask to back up the un-backed-up ones to the older backup
        mark_backed_up(&store, room_id, "older_bkp", &sessions[2..]).await;

        // Then each session lists the backup it was most recently included in
        let but = backed_up_tos(&store).await;
        assert_eq!(but[sessions[0].session_id()], "older_bkp");
        assert_eq!(but[sessions[1].session_id()], "newer_bkp");
        assert_eq!(but[sessions[2].session_id()], "older_bkp");
        assert_eq!(but[sessions[3].session_id()], "older_bkp");
    }

    #[async_test]
    async fn test_backing_up_to_an_old_backup_version_overwrites_a_newer_one() {
        // Given we have backed up to 2 backup versions, an older and a newer
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "older_bkp", &sessions).await;
        // Sanity: they are backed up in older_bkp
        assert_eq!(backed_up_tos(&store).await[sessions[0].session_id()], "older_bkp");
        mark_backed_up(&store, room_id, "newer_bkp", &sessions).await;
        // Sanity: they are now backed up in newer_bkp
        assert_eq!(backed_up_tos(&store).await[sessions[0].session_id()], "newer_bkp");

        // When I ask to back up some to the older version
        mark_backed_up(&store, room_id, "older_bkp", &sessions[..2]).await;

        // Then the older backup overwrites: we don't consider the order here at all
        let but = backed_up_tos(&store).await;
        assert_eq!(but[sessions[0].session_id()], "older_bkp");
        assert_eq!(but[sessions[1].session_id()], "older_bkp");
        assert_eq!(but[sessions[2].session_id()], "newer_bkp");
        assert_eq!(but[sessions[3].session_id()], "newer_bkp");
    }

    #[async_test]
    async fn test_not_backed_up_sessions_are_eligible_for_backup() {
        // Given there are 4 sessions, 2 of which are already backed up
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "bkp1", &sessions[..2]).await;

        // When I ask which to back up
        let mut to_backup = store
            .inbound_group_sessions_for_backup("bkp1", 10)
            .await
            .expect("Failed to ask for sessions to backup");
        to_backup.sort_by_key(|s| s.session_id().to_owned());

        // Then I am told the last 2 only
        assert_eq!(to_backup, &[sessions[2].clone(), sessions[3].clone()]);
    }

    #[async_test]
    async fn test_all_sessions_are_eligible_for_backup_if_version_is_unknown() {
        // Given there are 4 sessions, 2 of which are already backed up in bkp1
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "bkp1", &sessions[..2]).await;

        // When I ask which to back up in an unknown version
        let mut to_backup = store
            .inbound_group_sessions_for_backup("unknown_bkp", 10)
            .await
            .expect("Failed to ask for sessions to backup");
        to_backup.sort_by_key(|s| s.session_id().to_owned());

        // Then I am told to back up all of them
        assert_eq!(
            to_backup,
            &[sessions[0].clone(), sessions[1].clone(), sessions[2].clone(), sessions[3].clone()]
        );
    }

    #[async_test]
    async fn test_sessions_backed_up_to_a_later_version_are_eligible_for_backup() {
        // Given there are 4 sessions, some backed up to three different versions
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "bkp0", &sessions[..1]).await;
        mark_backed_up(&store, room_id, "bkp1", &sessions[1..2]).await;
        mark_backed_up(&store, room_id, "bkp2", &sessions[2..3]).await;

        // When I ask which to back up in the middle version
        let mut to_backup = store
            .inbound_group_sessions_for_backup("bkp1", 10)
            .await
            .expect("Failed to ask for sessions to backup");
        to_backup.sort_by_key(|s| s.session_id().to_owned());

        // Then I am told to back up everything not in the version I asked about
        assert_eq!(
            to_backup,
            &[
                sessions[0].clone(), // Backed up in bkp0
                // sessions[1] is backed up in bkp1 already, which we asked about
                sessions[2].clone(), // Backed up in bkp2
                sessions[3].clone(), // Not backed up
            ]
        );
    }

    #[async_test]
    async fn test_outbound_group_session_store() {
        // Given an outbound session
        let (account, _) = get_account_and_session_test_helper();
        let room_id = room_id!("!test:localhost");
        let (outbound, _) = account.create_group_session_pair_with_defaults(room_id).await;

        // When we save it to the store
        let store = MemoryStore::new();
        store.save_outbound_group_sessions(vec![outbound.clone()]);

        // Then we can get it out again
        let loaded_session = store.get_outbound_group_session(room_id).await.unwrap().unwrap();
        assert_eq!(
            serde_json::to_string(&outbound.pickle().await).unwrap(),
            serde_json::to_string(&loaded_session.pickle().await).unwrap()
        );
    }

    #[async_test]
    async fn test_tracked_users_are_stored_once_per_user_id() {
        // Given a store containing 2 tracked users, both dirty
        let user1 = user_id!("@user1:s");
        let user2 = user_id!("@user2:s");
        let user3 = user_id!("@user3:s");
        let store = MemoryStore::new();
        store.save_tracked_users(&[(user1, true), (user2, true)]).await.unwrap();

        // When we mark one as clean and add another
        store.save_tracked_users(&[(user2, false), (user3, false)]).await.unwrap();

        // Then we can get them out again and their dirty flags are correct
        let loaded_tracked_users =
            store.load_tracked_users().await.expect("failed to load tracked users");

        let tracked_contains = |user_id, dirty| {
            loaded_tracked_users.iter().any(|u| u.user_id == user_id && u.dirty == dirty)
        };

        assert!(tracked_contains(user1, true));
        assert!(tracked_contains(user2, false));
        assert!(tracked_contains(user3, false));
        assert_eq!(loaded_tracked_users.len(), 3);
    }

    #[async_test]
    async fn test_private_identity_store() {
        // Given a private identity
        let private_identity = PrivateCrossSigningIdentity::empty(user_id!("@u:s"));

        // When we save it to the store
        let store = MemoryStore::new();
        store.save_private_identity(Some(private_identity.clone()));

        // Then we can get it out again
        let loaded_identity =
            store.load_identity().await.expect("failed to load private identity").unwrap();

        assert_eq!(loaded_identity.user_id(), user_id!("@u:s"));
    }

    #[async_test]
    async fn test_device_store() {
        let device = get_device();
        let store = MemoryStore::new();

        store.save_devices(vec![device.clone()]);

        let loaded_device =
            store.get_device(device.user_id(), device.device_id()).await.unwrap().unwrap();

        assert_eq!(device, loaded_device);

        let user_devices = store.get_user_devices(device.user_id()).await.unwrap();

        assert_eq!(&**user_devices.keys().next().unwrap(), device.device_id());
        assert_eq!(user_devices.values().next().unwrap(), &device);

        let loaded_device = user_devices.get(device.device_id()).unwrap();

        assert_eq!(&device, loaded_device);

        store.delete_devices(vec![device.clone()]);
        assert!(store.get_device(device.user_id(), device.device_id()).await.unwrap().is_none());
    }

    #[async_test]
    async fn test_message_hash() {
        let store = MemoryStore::new();

        let hash =
            OlmMessageHash { sender_key: "test_sender".to_owned(), hash: "test_hash".to_owned() };

        let mut changes = Changes::default();
        changes.message_hashes.push(hash.clone());

        assert!(!store.is_message_known(&hash).await.unwrap());
        store.save_changes(changes).await.unwrap();
        assert!(store.is_message_known(&hash).await.unwrap());
    }

    #[async_test]
    async fn test_key_counts_of_empty_store_are_zero() {
        // Given an empty store
        let store = MemoryStore::new();

        // When we count keys
        let key_counts = store.inbound_group_session_counts(Some("")).await.unwrap();

        // Then the answer is zero
        assert_eq!(key_counts.total, 0);
        assert_eq!(key_counts.backed_up, 0);
    }

    #[async_test]
    async fn test_counting_sessions_reports_the_number_of_sessions() {
        // Given a store with sessions
        let room_id = room_id!("!test:localhost");
        let (store, _) = store_with_sessions(4, room_id).await;

        // When we count keys
        let key_counts = store.inbound_group_session_counts(Some("bkp")).await.unwrap();

        // Then the answer equals the number of sessions we created
        assert_eq!(key_counts.total, 4);
        // And none are backed up
        assert_eq!(key_counts.backed_up, 0);
    }

    #[async_test]
    async fn test_counting_backed_up_sessions_reports_the_number_backed_up_in_this_backup() {
        // Given a store with sessions, some backed up
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(5, room_id).await;
        mark_backed_up(&store, room_id, "bkp", &sessions[..2]).await;

        // When we count keys
        let key_counts = store.inbound_group_session_counts(Some("bkp")).await.unwrap();

        // Then the answer equals the number of sessions we created
        assert_eq!(key_counts.total, 5);
        // And the backed_up count matches how many were backed up
        assert_eq!(key_counts.backed_up, 2);
    }

    #[async_test]
    async fn test_counting_backed_up_sessions_for_null_backup_reports_zero() {
        // Given a store with sessions, some backed up
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "bkp", &sessions[..2]).await;

        // When we count keys, providing None as the backup version
        let key_counts = store.inbound_group_session_counts(None).await.unwrap();

        // Then we ignore everything and just say zero
        assert_eq!(key_counts.backed_up, 0);
    }

    #[async_test]
    async fn test_counting_backed_up_sessions_only_reports_sessions_in_the_version_specified() {
        // Given a store with sessions, backed up in several versions
        let room_id = room_id!("!test:localhost");
        let (store, sessions) = store_with_sessions(4, room_id).await;
        mark_backed_up(&store, room_id, "bkp1", &sessions[..2]).await;
        mark_backed_up(&store, room_id, "bkp2", &sessions[3..]).await;

        // When we count keys for bkp2
        let key_counts = store.inbound_group_session_counts(Some("bkp2")).await.unwrap();

        // Then the backed_up count reflects how many were backed up in bkp2 only
        assert_eq!(key_counts.backed_up, 1);
    }

    /// Mark the supplied sessions as backed up in the supplied backup version
    async fn mark_backed_up(
        store: &MemoryStore,
        room_id: &RoomId,
        backup_version: &str,
        sessions: &[InboundGroupSession],
    ) {
        let rooms_and_ids: Vec<_> = sessions.iter().map(|s| (room_id, s.session_id())).collect();

        store
            .mark_inbound_group_sessions_as_backed_up(backup_version, &rooms_and_ids)
            .await
            .expect("Failed to mark sessions as backed up");
    }

    /// Create a MemoryStore containing the supplied number of sessions.
    ///
    /// Sessions are returned in alphabetical order of session id.
    async fn store_with_sessions(
        num_sessions: usize,
        room_id: &RoomId,
    ) -> (MemoryStore, Vec<InboundGroupSession>) {
        let (account, _) = get_account_and_session_test_helper();

        let mut sessions = Vec::with_capacity(num_sessions);
        for _ in 0..num_sessions {
            sessions.push(new_session(&account, room_id).await);
        }
        sessions.sort_by_key(|s| s.session_id().to_owned());

        let store = MemoryStore::new();
        store.save_inbound_group_sessions(sessions.clone(), None).await.unwrap();

        (store, sessions)
    }

    /// Create a new InboundGroupSession
    async fn new_session(account: &Account, room_id: &RoomId) -> InboundGroupSession {
        let curve_key = "Nn0L2hkcCMFKqynTjyGsJbth7QrVmX3lbrksMkrGOAw";
        let (outbound, _) = account.create_group_session_pair_with_defaults(room_id).await;

        InboundGroupSession::new(
            Curve25519PublicKey::from_base64(curve_key).unwrap(),
            Ed25519PublicKey::from_base64("ee3Ek+J2LkkPmjGPGLhMxiKnhiX//xcqaVL4RP6EypE").unwrap(),
            room_id,
            &outbound.session_key().await,
            SenderData::unknown(),
            outbound.settings().algorithm.to_owned(),
            None,
            false,
        )
        .unwrap()
    }

    /// Find the session_id and backed_up_to value for each of the sessions in
    /// the store.
    async fn backed_up_tos(store: &MemoryStore) -> HashMap<SessionId, String> {
        store
            .get_inbound_group_sessions_and_backed_up_to()
            .await
            .expect("Unable to get inbound group sessions and backup versions")
            .iter()
            .map(|(s, o)| {
                (
                    s.session_id().to_owned(),
                    o.as_ref().map(|v| v.as_str().to_owned()).unwrap_or_default(),
                )
            })
            .collect()
    }
}

#[cfg(test)]
mod integration_tests {
    use std::{
        collections::HashMap,
        sync::{Arc, Mutex, OnceLock},
    };

    use async_trait::async_trait;
    use ruma::{
        events::secret::request::SecretName, DeviceId, OwnedDeviceId, RoomId, TransactionId, UserId,
    };
    use vodozemac::Curve25519PublicKey;

    use super::MemoryStore;
    use crate::{
        cryptostore_integration_tests, cryptostore_integration_tests_time,
        olm::{
            InboundGroupSession, OlmMessageHash, OutboundGroupSession, PrivateCrossSigningIdentity,
            SenderDataType, StaticAccountData,
        },
        store::{
            BackupKeys, Changes, CryptoStore, DehydratedDeviceKey, PendingChanges, RoomKeyCounts,
            RoomSettings, StoredRoomKeyBundleData,
        },
        types::events::room_key_withheld::RoomKeyWithheldEvent,
        Account, DeviceData, GossipRequest, GossippedSecret, SecretInfo, Session, TrackedUser,
        UserIdentityData,
    };

    /// Holds on to a [`MemoryStore`] during a test. Since the store is kept
    /// behind an [`Arc`], the clone held in `STORES` preserves its data even
    /// after the test's copy is dropped.
    #[derive(Clone, Debug)]
    struct PersistentMemoryStore(Arc<MemoryStore>);

    impl PersistentMemoryStore {
        fn new() -> Self {
            Self(Arc::new(MemoryStore::new()))
        }

        fn get_static_account(&self) -> Option<StaticAccountData> {
            self.0.get_static_account()
        }
    }

    /// Return a clone of the store for the test with the supplied name. Note:
    /// dropping this store won't destroy its data, since
    /// [PersistentMemoryStore] is a reference-counted smart pointer
    /// to an underlying [MemoryStore].
    async fn get_store(
        name: &str,
        _passphrase: Option<&str>,
        clear_data: bool,
    ) -> PersistentMemoryStore {
        // Holds on to one [PersistentMemoryStore] per test, so even if the test drops
        // the store, we keep its data alive. This simulates the behaviour of
        // the other stores, which keep their data in a real DB, allowing us to
        // test MemoryStore using the same code.
        static STORES: OnceLock<Mutex<HashMap<String, PersistentMemoryStore>>> = OnceLock::new();
        let stores = STORES.get_or_init(|| Mutex::new(HashMap::new()));

        let mut stores = stores.lock().unwrap();

        if clear_data {
            // Create a new PersistentMemoryStore
            let new_store = PersistentMemoryStore::new();
            stores.insert(name.to_owned(), new_store.clone());
            new_store
        } else {
            stores.entry(name.to_owned()).or_insert_with(PersistentMemoryStore::new).clone()
        }
    }

    /// Forwards all methods to the underlying [MemoryStore].
    #[async_trait]
    impl CryptoStore for PersistentMemoryStore {
        type Error = <MemoryStore as CryptoStore>::Error;

        async fn load_account(&self) -> Result<Option<Account>, Self::Error> {
            self.0.load_account().await
        }

        async fn load_identity(&self) -> Result<Option<PrivateCrossSigningIdentity>, Self::Error> {
            self.0.load_identity().await
        }

        async fn save_changes(&self, changes: Changes) -> Result<(), Self::Error> {
            self.0.save_changes(changes).await
        }

        async fn save_pending_changes(&self, changes: PendingChanges) -> Result<(), Self::Error> {
            self.0.save_pending_changes(changes).await
        }

        async fn save_inbound_group_sessions(
            &self,
            sessions: Vec<InboundGroupSession>,
            backed_up_to_version: Option<&str>,
        ) -> Result<(), Self::Error> {
            self.0.save_inbound_group_sessions(sessions, backed_up_to_version).await
        }

        async fn get_sessions(
            &self,
            sender_key: &str,
        ) -> Result<Option<Vec<Session>>, Self::Error> {
            self.0.get_sessions(sender_key).await
        }

        async fn get_inbound_group_session(
            &self,
            room_id: &RoomId,
            session_id: &str,
        ) -> Result<Option<InboundGroupSession>, Self::Error> {
            self.0.get_inbound_group_session(room_id, session_id).await
        }

        async fn get_withheld_info(
            &self,
            room_id: &RoomId,
            session_id: &str,
        ) -> Result<Option<RoomKeyWithheldEvent>, Self::Error> {
            self.0.get_withheld_info(room_id, session_id).await
        }

        async fn get_inbound_group_sessions(
            &self,
        ) -> Result<Vec<InboundGroupSession>, Self::Error> {
            self.0.get_inbound_group_sessions().await
        }

        async fn inbound_group_session_counts(
            &self,
            backup_version: Option<&str>,
        ) -> Result<RoomKeyCounts, Self::Error> {
            self.0.inbound_group_session_counts(backup_version).await
        }

        async fn get_inbound_group_sessions_for_device_batch(
            &self,
            sender_key: Curve25519PublicKey,
            sender_data_type: SenderDataType,
            after_session_id: Option<String>,
            limit: usize,
        ) -> Result<Vec<InboundGroupSession>, Self::Error> {
            self.0
                .get_inbound_group_sessions_for_device_batch(
                    sender_key,
                    sender_data_type,
                    after_session_id,
                    limit,
                )
                .await
        }

        async fn inbound_group_sessions_for_backup(
            &self,
            backup_version: &str,
            limit: usize,
        ) -> Result<Vec<InboundGroupSession>, Self::Error> {
            self.0.inbound_group_sessions_for_backup(backup_version, limit).await
        }

        async fn mark_inbound_group_sessions_as_backed_up(
            &self,
            backup_version: &str,
            room_and_session_ids: &[(&RoomId, &str)],
        ) -> Result<(), Self::Error> {
            self.0
                .mark_inbound_group_sessions_as_backed_up(backup_version, room_and_session_ids)
                .await
        }

        async fn reset_backup_state(&self) -> Result<(), Self::Error> {
            self.0.reset_backup_state().await
        }

        async fn load_backup_keys(&self) -> Result<BackupKeys, Self::Error> {
            self.0.load_backup_keys().await
        }

        async fn load_dehydrated_device_pickle_key(
            &self,
        ) -> Result<Option<DehydratedDeviceKey>, Self::Error> {
            self.0.load_dehydrated_device_pickle_key().await
        }

        async fn delete_dehydrated_device_pickle_key(&self) -> Result<(), Self::Error> {
            self.0.delete_dehydrated_device_pickle_key().await
        }

        async fn get_outbound_group_session(
            &self,
            room_id: &RoomId,
        ) -> Result<Option<OutboundGroupSession>, Self::Error> {
            self.0.get_outbound_group_session(room_id).await
        }

        async fn load_tracked_users(&self) -> Result<Vec<TrackedUser>, Self::Error> {
            self.0.load_tracked_users().await
        }

        async fn save_tracked_users(&self, users: &[(&UserId, bool)]) -> Result<(), Self::Error> {
            self.0.save_tracked_users(users).await
        }

        async fn get_device(
            &self,
            user_id: &UserId,
            device_id: &DeviceId,
        ) -> Result<Option<DeviceData>, Self::Error> {
            self.0.get_device(user_id, device_id).await
        }

        async fn get_user_devices(
            &self,
            user_id: &UserId,
        ) -> Result<HashMap<OwnedDeviceId, DeviceData>, Self::Error> {
            self.0.get_user_devices(user_id).await
        }

        async fn get_own_device(&self) -> Result<DeviceData, Self::Error> {
            self.0.get_own_device().await
        }

        async fn get_user_identity(
            &self,
            user_id: &UserId,
        ) -> Result<Option<UserIdentityData>, Self::Error> {
            self.0.get_user_identity(user_id).await
        }

        async fn is_message_known(
            &self,
            message_hash: &OlmMessageHash,
        ) -> Result<bool, Self::Error> {
            self.0.is_message_known(message_hash).await
        }

        async fn get_outgoing_secret_requests(
            &self,
            request_id: &TransactionId,
        ) -> Result<Option<GossipRequest>, Self::Error> {
            self.0.get_outgoing_secret_requests(request_id).await
        }

        async fn get_secret_request_by_info(
            &self,
            secret_info: &SecretInfo,
        ) -> Result<Option<GossipRequest>, Self::Error> {
            self.0.get_secret_request_by_info(secret_info).await
        }

        async fn get_unsent_secret_requests(&self) -> Result<Vec<GossipRequest>, Self::Error> {
            self.0.get_unsent_secret_requests().await
        }

        async fn delete_outgoing_secret_requests(
            &self,
            request_id: &TransactionId,
        ) -> Result<(), Self::Error> {
            self.0.delete_outgoing_secret_requests(request_id).await
        }

        async fn get_secrets_from_inbox(
            &self,
            secret_name: &SecretName,
        ) -> Result<Vec<GossippedSecret>, Self::Error> {
            self.0.get_secrets_from_inbox(secret_name).await
        }

        async fn delete_secrets_from_inbox(
            &self,
            secret_name: &SecretName,
        ) -> Result<(), Self::Error> {
            self.0.delete_secrets_from_inbox(secret_name).await
        }

        async fn get_room_settings(
            &self,
            room_id: &RoomId,
        ) -> Result<Option<RoomSettings>, Self::Error> {
            self.0.get_room_settings(room_id).await
        }

        async fn get_received_room_key_bundle_data(
            &self,
            room_id: &RoomId,
            user_id: &UserId,
        ) -> crate::store::Result<Option<StoredRoomKeyBundleData>, Self::Error> {
            self.0.get_received_room_key_bundle_data(room_id, user_id).await
        }

        async fn get_custom_value(&self, key: &str) -> Result<Option<Vec<u8>>, Self::Error> {
            self.0.get_custom_value(key).await
        }

        async fn set_custom_value(&self, key: &str, value: Vec<u8>) -> Result<(), Self::Error> {
            self.0.set_custom_value(key, value).await
        }

        async fn remove_custom_value(&self, key: &str) -> Result<(), Self::Error> {
            self.0.remove_custom_value(key).await
        }

        async fn try_take_leased_lock(
            &self,
            lease_duration_ms: u32,
            key: &str,
            holder: &str,
        ) -> Result<bool, Self::Error> {
            self.0.try_take_leased_lock(lease_duration_ms, key, holder).await
        }

        async fn next_batch_token(&self) -> Result<Option<String>, Self::Error> {
            self.0.next_batch_token().await
        }
    }

    cryptostore_integration_tests!();
    cryptostore_integration_tests_time!();
}