//! A cookbook of examples on how to use heed. Here is the list of the different topics you can learn about:
//!
//! - [Decode Values on Demand](#decode-values-on-demand)
//! - [Listing and Opening the Named Databases](#listing-and-opening-the-named-databases)
//! - [Create Custom and Prefix Codecs](#create-custom-and-prefix-codecs)
//! - [Change the Environment Size Dynamically](#change-the-environment-size-dynamically)
//! - [Advanced Multithreaded Access of Entries](#advanced-multithreaded-access-of-entries)
//!
//! # Decode Values on Demand
//!
//! Sometimes, you need to iterate over the content of a database and
//! conditionally decode the value depending on the key. You can use the
//! [`Database::lazily_decode_data`] method to indicate this to heed.
//!
//! ```
//! use std::collections::HashMap;
//! use std::error::Error;
//! use std::fs;
//! use std::path::Path;
//!
//! use heed::types::*;
//! use heed::{Database, EnvOpenOptions};
//!
//! pub type StringMap = HashMap<String, String>;
//!
//! fn main() -> Result<(), Box<dyn Error + Send + Sync>> {
//!     let path = Path::new("target").join("heed.mdb");
//!
//!     fs::create_dir_all(&path)?;
//!
//!     let env = unsafe {
//!         EnvOpenOptions::new()
//!             .map_size(1024 * 1024 * 100) // 100 MiB
//!             .open(&path)?
//!     };
//!
//!     let mut wtxn = env.write_txn()?;
//!     let db: Database<Str, SerdeJson<StringMap>> = env.create_database(&mut wtxn, None)?;
//!
//!     fill_with_data(&mut wtxn, db)?;
//!
//!     // We make sure that iterating over this database will
//!     // not deserialize the values. We only want to decode
//!     // the value corresponding to the key at index 43.
//!     for (i, result) in db.lazily_decode_data().iter(&wtxn)?.enumerate() {
//!         let (_key, lazy_value) = result?;
//!         if i == 43 {
//!             // This is where the magic happens. We receive a Lazy type
//!             // that wraps a slice of bytes. We decode it on purpose.
//!             let value = lazy_value.decode()?;
//!             assert_eq!(value.get("secret"), Some(&String::from("434343")));
//!             break;
//!         }
//!     }
//!
//!     Ok(())
//! }
//!
//! fn fill_with_data(
//!     wtxn: &mut heed::RwTxn,
//!     db: Database<Str, SerdeJson<StringMap>>,
//! ) -> heed::Result<()> {
//!     // This represents a very big value that we only want to decode when necessary.
//!     let mut big_string_map = HashMap::new();
//!     big_string_map.insert("key1".into(), "I am a very long string".into());
//!     big_string_map.insert("key2".into(), "I am a also very long string".into());
//!
//!     for i in 0..100 {
//!         let key = format!("{i:5}");
//!         big_string_map.insert("secret".into(), format!("{i}{i}{i}"));
//!         db.put(wtxn, &key, &big_string_map)?;
//!     }
//!     Ok(())
//! }
//! ```
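//!
//! Lazy decoding also composes with point lookups. Below is a minimal sketch, under the
//! assumption that [`Database::lazily_decode_data`] combines with `get` the same way it
//! does with `iter`: the big value is only deserialized once we know the entry exists.
//! The database layout mirrors the example above; the path and key names are arbitrary.
//!
//! ```
//! use std::collections::HashMap;
//! use std::error::Error;
//! use std::fs;
//! use std::path::Path;
//!
//! use heed::types::*;
//! use heed::{Database, EnvOpenOptions};
//!
//! pub type StringMap = HashMap<String, String>;
//!
//! fn main() -> Result<(), Box<dyn Error + Send + Sync>> {
//!     let path = Path::new("target").join("heed-lazy-get.mdb");
//!
//!     fs::create_dir_all(&path)?;
//!
//!     let env = unsafe {
//!         EnvOpenOptions::new()
//!             .map_size(1024 * 1024 * 100) // 100 MiB
//!             .open(&path)?
//!     };
//!
//!     let mut wtxn = env.write_txn()?;
//!     let db: Database<Str, SerdeJson<StringMap>> = env.create_database(&mut wtxn, None)?;
//!
//!     // This represents a big value that we prefer not to deserialize eagerly.
//!     let mut big_string_map = HashMap::new();
//!     big_string_map.insert("key1".into(), "I am a very long string".into());
//!     db.put(&mut wtxn, "some-entry", &big_string_map)?;
//!
//!     // The entry is fetched but its value is not deserialized yet.
//!     match db.lazily_decode_data().get(&wtxn, "some-entry")? {
//!         Some(lazy_value) => {
//!             // We decode only now that we know the entry exists.
//!             let value = lazy_value.decode()?;
//!             assert_eq!(value.get("key1").map(String::as_str), Some("I am a very long string"));
//!         }
//!         None => println!("the entry is missing"),
//!     }
//!
//!     Ok(())
//! }
//! ```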
//!
//! # Listing and Opening the Named Databases
//!
//! Sometimes it is useful to list the databases available in an environment.
//! LMDB automatically stores their names in the unnamed database, a database that doesn't
//! need to be created and in which you can also write.
//!
//! Once you create new databases, after setting the [`EnvOpenOptions::max_dbs`]
//! parameter, the names of those databases are automatically stored in the unnamed one.
//!
//! ```
//! use std::error::Error;
//! use std::fs;
//! use std::path::Path;
//!
//! use heed::types::*;
//! use heed::{Database, EnvOpenOptions};
//!
//! fn main() -> Result<(), Box<dyn Error>> {
//!     let env_path = Path::new("target").join("heed.mdb");
//!
//!     fs::create_dir_all(&env_path)?;
//!
//!     let env = unsafe {
//!         EnvOpenOptions::new()
//!             .map_size(10 * 1024 * 1024) // 10 MiB
//!             .max_dbs(3) // Number of opened databases
//!             .open(env_path)?
//!     };
//!
//!     let rtxn = env.read_txn()?;
//!     // The database names are mixed with the user entries, therefore we prefer
//!     // to ignore the values and try to open the databases one by one using the keys.
//!     let unnamed: Database<Str, DecodeIgnore> =
//!         env.open_database(&rtxn, None)?.expect("the unnamed database always exists");
//!
//!     // The unnamed (or main) database contains the other
//!     // database names associated with empty values.
//!     for result in unnamed.iter(&rtxn)? {
//!         let (name, ()) = result?;
//!
//!         if let Ok(Some(_db)) = env.open_database::<Str, Bytes>(&rtxn, Some(name)) {
//!             // We succeeded in opening a new database that
//!             // associates strings with raw bytes.
//!         }
//!     }
//!
//!     // When opening databases in a read-only transaction,
//!     // you must commit your read transaction to make your
//!     // freshly opened databases globally available.
//!     rtxn.commit()?;
//!
//!     // If you abort (or drop) your read-only transaction instead,
//!     // the database handles will be invalid outside
//!     // the transaction scope.
//!
//!     Ok(())
//! }
//! ```
//!
//! # Create Custom and Prefix Codecs
//!
//! With heed you can store any kind of data and serialize it the way you want.
//! To do so, you'll need to create a codec by implementing the [`BytesEncode`] and [`BytesDecode`] traits.
//!
//! Now imagine that your data is lexicographically well ordered: you can then leverage
//! prefix codecs. Those are classic codecs that are only used to encode key prefixes.
//!
//! In this example we will store logs associated with a timestamp. By encoding the timestamp
//! in big endian, we can create a prefix codec that restricts iteration to a subset of the data.
//! When possible, it is recommended to create dedicated codecs to encode prefixes rather than
//! passing raw slices of bytes; a raw-bytes version is sketched after the example below for comparison.
//!
//! ```
//! use std::borrow::Cow;
//! use std::error::Error;
//! use std::fs;
//! use std::path::Path;
//!
//! use heed::types::*;
//! use heed::{BoxedError, BytesDecode, BytesEncode, Database, EnvOpenOptions};
//!
//! #[derive(Debug, PartialEq, Eq)]
//! pub enum Level {
//!     Debug,
//!     Warn,
//!     Error,
//! }
//!
//! #[derive(Debug, PartialEq, Eq)]
//! pub struct LogKey {
//!     timestamp: u32,
//!     level: Level,
//! }
//!
//! pub struct LogKeyCodec;
//!
//! impl<'a> BytesEncode<'a> for LogKeyCodec {
//!     type EItem = LogKey;
//!
//!     /// Encodes the u32 timestamp in big endian followed by the log level with a single byte.
//!     fn bytes_encode(log: &Self::EItem) -> Result<Cow<[u8]>, BoxedError> {
//!         let (timestamp_bytes, level_byte) = match log {
//!             LogKey { timestamp, level: Level::Debug } => (timestamp.to_be_bytes(), 0),
//!             LogKey { timestamp, level: Level::Warn } => (timestamp.to_be_bytes(), 1),
//!             LogKey { timestamp, level: Level::Error } => (timestamp.to_be_bytes(), 2),
//!         };
//!
//!         let mut output = Vec::new();
//!         output.extend_from_slice(&timestamp_bytes);
//!         output.push(level_byte);
//!         Ok(Cow::Owned(output))
//!     }
//! }
//!
//! impl<'a> BytesDecode<'a> for LogKeyCodec {
//!     type DItem = LogKey;
//!
//!     fn bytes_decode(bytes: &'a [u8]) -> Result<Self::DItem, BoxedError> {
//!         use std::mem::size_of;
//!
//!         let timestamp = match bytes.get(..size_of::<u32>()) {
//!             Some(bytes) => bytes.try_into().map(u32::from_be_bytes).unwrap(),
//!             None => return Err("invalid log key: cannot extract timestamp".into()),
//!         };
//!
//!         let level = match bytes.get(size_of::<u32>()) {
//!             Some(&0) => Level::Debug,
//!             Some(&1) => Level::Warn,
//!             Some(&2) => Level::Error,
//!             Some(_) => return Err("invalid log key: invalid log level".into()),
//!             None => return Err("invalid log key: cannot extract log level".into()),
//!         };
//!
//!         Ok(LogKey { timestamp, level })
//!     }
//! }
//!
//! /// Encodes the high part of a timestamp. As it is located
//! /// at the start of the key, it can be used to only return
//! /// the logs that appeared during a rather long period.
//! pub struct LogAtHalfTimestampCodec;
//!
//! impl<'a> BytesEncode<'a> for LogAtHalfTimestampCodec {
//!     type EItem = u32;
//!
//!     /// This method encodes only the prefix of the keys, in this particular case the two high bytes of the timestamp.
//!     fn bytes_encode(half_timestamp: &Self::EItem) -> Result<Cow<[u8]>, BoxedError> {
//!         Ok(Cow::Owned(half_timestamp.to_be_bytes()[..2].to_vec()))
//!     }
//! }
//!
//! impl<'a> BytesDecode<'a> for LogAtHalfTimestampCodec {
//!     type DItem = LogKey;
//!
//!     fn bytes_decode(bytes: &'a [u8]) -> Result<Self::DItem, BoxedError> {
//!         LogKeyCodec::bytes_decode(bytes)
//!     }
//! }
//!
//! fn main() -> Result<(), Box<dyn Error>> {
//!     let path = Path::new("target").join("heed.mdb");
//!
//!     fs::create_dir_all(&path)?;
//!
//!     let env = unsafe {
//!         EnvOpenOptions::new()
//!             .map_size(10 * 1024 * 1024) // 10 MiB
//!             .max_dbs(3000)
//!             .open(path)?
//!     };
//!
//!     let mut wtxn = env.write_txn()?;
//!     let db: Database<LogKeyCodec, Str> = env.create_database(&mut wtxn, None)?;
//!
//!     db.put(
//!         &mut wtxn,
//!         &LogKey { timestamp: 1608326232, level: Level::Debug },
//!         "this is a very old log",
//!     )?;
//!     db.put(
//!         &mut wtxn,
//!         &LogKey { timestamp: 1708326232, level: Level::Debug },
//!         "fibonacci was executed in 21ms",
//!     )?;
//!     db.put(&mut wtxn, &LogKey { timestamp: 1708326242, level: Level::Error }, "fibonacci crashed")?;
//!     db.put(
//!         &mut wtxn,
//!         &LogKey { timestamp: 1708326272, level: Level::Warn },
//!         "fibonacci is running since 12s",
//!     )?;
//!
//!     // We change the way we read our database by changing the key codec.
//!     // In this example we prefix-search only the logs within a period of time
//!     // (identified by the two high bytes of the u32 timestamp).
//!     let iter = db.remap_key_type::<LogAtHalfTimestampCodec>().prefix_iter(&wtxn, &1708326232)?;
//!
//!     // As we filtered the logs for a specific
//!     // period of time, we must not see the very old log.
//!     for result in iter {
//!         let (LogKey { timestamp: _, level: _ }, content) = result?;
//!         assert_ne!(content, "this is a very old log");
//!     }
//!
//!     Ok(())
//! }
//! ```
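//!
//! For comparison, here is what the raw-bytes approach mentioned above, the one it is
//! recommended to avoid when a dedicated prefix codec is possible, can look like. This is a
//! minimal, self-contained sketch for illustration: it uses a `Database<Bytes, Str>` with
//! hand-built keys (a big-endian u32 timestamp followed by a level byte) instead of the
//! `LogKeyCodec` above, and the path is arbitrary. It works, but nothing checks the length
//! or endianness of the prefix slice for you.
//!
//! ```
//! use std::error::Error;
//! use std::fs;
//! use std::path::Path;
//!
//! use heed::types::*;
//! use heed::{Database, EnvOpenOptions};
//!
//! fn main() -> Result<(), Box<dyn Error>> {
//!     let path = Path::new("target").join("heed-raw-prefix.mdb");
//!
//!     fs::create_dir_all(&path)?;
//!
//!     let env = unsafe {
//!         EnvOpenOptions::new()
//!             .map_size(10 * 1024 * 1024) // 10 MiB
//!             .open(&path)?
//!     };
//!
//!     let mut wtxn = env.write_txn()?;
//!     let db: Database<Bytes, Str> = env.create_database(&mut wtxn, None)?;
//!
//!     // Keys are built by hand: a big-endian u32 timestamp followed by a level byte.
//!     let mut key = 1608326232u32.to_be_bytes().to_vec();
//!     key.push(0); // debug level
//!     db.put(&mut wtxn, &key, "this is a very old log")?;
//!
//!     let mut key = 1708326242u32.to_be_bytes().to_vec();
//!     key.push(2); // error level
//!     db.put(&mut wtxn, &key, "fibonacci crashed")?;
//!
//!     // The prefix is a raw slice of the two high bytes of the timestamp.
//!     // A dedicated prefix codec would encode this for us and keep it in
//!     // sync with the key layout.
//!     let high_bytes = 1708326242u32.to_be_bytes();
//!     for result in db.prefix_iter(&wtxn, &high_bytes[..2])? {
//!         let (_key, content) = result?;
//!         assert_ne!(content, "this is a very old log");
//!     }
//!
//!     Ok(())
//! }
//! ```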
//!
//! # Change the Environment Size Dynamically
//!
//! You must specify the maximum size of an LMDB environment when you open it.
//! Environments do not dynamically increase their size, for performance reasons and also to
//! give you more control over it.
//!
//! Here is a simple example of how to dynamically increase the size
//! of an environment when you detect that it is running out of space.
//!
//! ```
//! use std::error::Error;
//! use std::fs;
//! use std::path::Path;
//!
//! use heed::types::*;
//! use heed::{Database, EnvOpenOptions};
//!
//! fn main() -> Result<(), Box<dyn Error>> {
//!     let path = Path::new("target").join("small-space.mdb");
//!
//!     fs::create_dir_all(&path)?;
//!
//!     let env = unsafe {
//!         EnvOpenOptions::new()
//!             .map_size(16384) // one page
//!             .open(&path)?
//!     };
//!
//!     let mut wtxn = env.write_txn()?;
//!     let db: Database<Str, Str> = env.create_database(&mut wtxn, None)?;
//!
//!     // Ho! Crap! We don't have enough space in this environment...
//!     assert!(matches!(
//!         fill_with_data(&mut wtxn, db),
//!         Err(heed::Error::Mdb(heed::MdbError::MapFull))
//!     ));
//!
//!     drop(wtxn);
//!
//!     // We need to increase the map size and we can only do that
//!     // when no transactions are running, so closing the env is easiest.
//!     env.prepare_for_closing().wait();
//!
//!     let env = unsafe {
//!         EnvOpenOptions::new()
//!             .map_size(10 * 16384) // 10 pages
//!             .open(&path)?
//!     };
//!
//!     let mut wtxn = env.write_txn()?;
//!     let db: Database<Str, Str> = env.create_database(&mut wtxn, None)?;
//!
//!     // We now have enough space in the env to store all of our entries.
//!     assert!(matches!(fill_with_data(&mut wtxn, db), Ok(())));
//!
//!     Ok(())
//! }
//!
//! fn fill_with_data(wtxn: &mut heed::RwTxn, db: Database<Str, Str>) -> heed::Result<()> {
//!     for i in 0..1000 {
//!         let key = i.to_string();
//!         db.put(wtxn, &key, "I am a very long string")?;
//!     }
//!     Ok(())
//! }
//! ```
//!
//! # Advanced Multithreaded Access of Entries
//!
//! LMDB disallows sharing cursors among threads. It is only possible to send
//! them between threads when the heed `read-txn-no-tls` feature is enabled.
//!
//! This limits some use cases that require parallel access to the content of the databases
//! to process data faster. This is the case for arroy, a multithreaded, fast approximate
//! nearest neighbors search library. I wrote [an article explaining how
//! to read entries in parallel][arroy article].
//!
//! It is forbidden to write in an environment while reading in it. However, it is possible
//! to keep pointers to the values of the entries returned by LMDB. Those pointers are valid
//! until the end of the transaction.
//!
//! Here is a small example of how to safely declare a data structure to be used in parallel
//! across threads. The unsafe part declares that the data structure can be shared between
//! threads despite the write transaction being neither `Send` nor `Sync`.
//!
//! [arroy article]: https://blog.kerollmops.com/multithreading-and-memory-mapping-refining-ann-performance-with-arroy
//!
//! ```
//! use std::collections::HashMap;
//! use std::error::Error;
//! use std::fs;
//! use std::path::Path;
//!
//! use heed::types::*;
//! use heed::{Database, EnvOpenOptions, RoTxn};
//!
//! fn main() -> Result<(), Box<dyn Error + Send + Sync>> {
//!     let path = Path::new("target").join("heed.mdb");
//!
//!     fs::create_dir_all(&path)?;
//!
//!     let env = unsafe {
//!         EnvOpenOptions::new()
//!             .map_size(1024 * 1024 * 100) // 100 MiB
//!             .open(&path)?
//!     };
//!
//!     let mut wtxn = env.write_txn()?;
//!     let db: Database<Str, Str> = env.create_database(&mut wtxn, None)?;
//!
//!     fill_with_data(&mut wtxn, db)?;
//!
//!     let immutable_map = ImmutableMap::from_db(&wtxn, db)?;
//!
//!     // We can share the immutable map over multiple threads because it is Sync.
//!     // It is safe because we keep the write transaction lifetime in this type.
//!     std::thread::scope(|s| {
//!         s.spawn(|| {
//!             let value = immutable_map.get("10");
//!             assert_eq!(value, Some("I am a very long string"));
//!         });
//!         s.spawn(|| {
//!             let value = immutable_map.get("20");
//!             assert_eq!(value, Some("I am a very long string"));
//!         });
//!     });
//!
//!     // You can see that we still have access to it on the main thread.
//!     // We didn't send it to the other threads.
//!     let value = immutable_map.get("50");
//!     assert_eq!(value, Some("I am a very long string"));
//!
//!     Ok(())
//! }
//!
//! fn fill_with_data(wtxn: &mut heed::RwTxn, db: Database<Str, Str>) -> heed::Result<()> {
//!     for i in 0..100 {
//!         let key = i.to_string();
//!         db.put(wtxn, &key, "I am a very long string")?;
//!     }
//!     Ok(())
//! }
//!
//! struct ImmutableMap<'a> {
//!     map: HashMap<&'a str, &'a str>,
//! }
//!
//! impl<'t> ImmutableMap<'t> {
//!     fn from_db(rtxn: &'t RoTxn, db: Database<Str, Str>) -> heed::Result<Self> {
//!         let mut map = HashMap::new();
//!         for result in db.iter(rtxn)? {
//!             let (k, v) = result?;
//!             map.insert(k, v);
//!         }
//!         Ok(ImmutableMap { map })
//!     }
//!
//!     fn get(&self, key: &str) -> Option<&'t str> {
//!         self.map.get(key).copied()
//!     }
//! }
//!
//! unsafe impl Sync for ImmutableMap<'_> {}
//! ```
//!

// To let cargo generate doc links
#![allow(unused_imports)]

use crate::{BytesDecode, BytesEncode, Database, EnvOpenOptions};