contrib/faiss/faiss/impl/index_write.cpp
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) Meta Platforms, Inc. and affiliates. |
3 | | * |
4 | | * This source code is licensed under the MIT license found in the |
5 | | * LICENSE file in the root directory of this source tree. |
6 | | */ |
7 | | |
8 | | #include <faiss/index_io.h> |
9 | | |
10 | | #include <faiss/impl/io.h> |
11 | | #include <faiss/impl/io_macros.h> |
12 | | |
13 | | #include <cstdio> |
14 | | #include <cstdlib> |
15 | | |
16 | | #include <faiss/invlists/InvertedListsIOHook.h> |
17 | | |
18 | | #include <faiss/impl/FaissAssert.h> |
19 | | #include <faiss/impl/io_macros.h> |
20 | | #include <faiss/utils/hamming.h> |
21 | | |
22 | | #include <faiss/Index2Layer.h> |
23 | | #include <faiss/IndexAdditiveQuantizer.h> |
24 | | #include <faiss/IndexAdditiveQuantizerFastScan.h> |
25 | | #include <faiss/IndexFlat.h> |
26 | | #include <faiss/IndexHNSW.h> |
27 | | #include <faiss/IndexIVF.h> |
28 | | #include <faiss/IndexIVFAdditiveQuantizer.h> |
29 | | #include <faiss/IndexIVFAdditiveQuantizerFastScan.h> |
30 | | #include <faiss/IndexIVFFlat.h> |
31 | | #include <faiss/IndexIVFIndependentQuantizer.h> |
32 | | #include <faiss/IndexIVFPQ.h> |
33 | | #include <faiss/IndexIVFPQFastScan.h> |
34 | | #include <faiss/IndexIVFPQR.h> |
35 | | #include <faiss/IndexIVFRaBitQ.h> |
36 | | #include <faiss/IndexIVFSpectralHash.h> |
37 | | #include <faiss/IndexLSH.h> |
38 | | #include <faiss/IndexLattice.h> |
39 | | #include <faiss/IndexNNDescent.h> |
40 | | #include <faiss/IndexNSG.h> |
41 | | #include <faiss/IndexPQ.h> |
42 | | #include <faiss/IndexPQFastScan.h> |
43 | | #include <faiss/IndexPreTransform.h> |
44 | | #include <faiss/IndexRaBitQ.h> |
45 | | #include <faiss/IndexRefine.h> |
46 | | #include <faiss/IndexRowwiseMinMax.h> |
47 | | #include <faiss/IndexScalarQuantizer.h> |
48 | | #include <faiss/MetaIndexes.h> |
49 | | #include <faiss/VectorTransform.h> |
50 | | |
51 | | #include <faiss/IndexBinaryFlat.h> |
52 | | #include <faiss/IndexBinaryFromFloat.h> |
53 | | #include <faiss/IndexBinaryHNSW.h> |
54 | | #include <faiss/IndexBinaryHash.h> |
55 | | #include <faiss/IndexBinaryIVF.h> |
56 | | |
57 | | /************************************************************* |
58 | | * The I/O format is the content of the class. For objects that are |
59 | | * inherited, like Index, a 4-character-code (fourcc) indicates which |
60 | | * child class this is an instance of. |
61 | | * |
62 | | * In this case, the fields of the parent class are written first, |
63 | | * then the ones for the child classes. Note that this requires |
64 | | * classes to be serialized to have a constructor without parameters, |
65 | | * so that the fields can be filled in later. The default constructor |
66 | | * should set reasonable defaults for all fields. |
67 | | * |
68 | | * The fourccs are assigned arbitrarily. When the class changed (added |
69 | | * or deprecated fields), the fourcc can be replaced. New code should |
70 | | * be able to read the old fourcc and fill in new classes. |
71 | | * |
72 | | * TODO: in this file, the read functions that encouter errors may |
73 | | * leak memory. |
74 | | **************************************************************/ |
75 | | |
76 | | namespace faiss { |
77 | | |
78 | | /************************************************************* |
79 | | * Write |
80 | | **************************************************************/ |
81 | 60 | static void write_index_header(const Index* idx, IOWriter* f) { |
82 | 60 | WRITE1(idx->d); |
83 | 60 | WRITE1(idx->ntotal); |
84 | 60 | idx_t dummy = 1 << 20; |
85 | 60 | WRITE1(dummy); |
86 | 60 | WRITE1(dummy); |
87 | 60 | WRITE1(idx->is_trained); |
88 | 60 | WRITE1(idx->metric_type); |
89 | 60 | if (idx->metric_type > 1) { |
90 | 0 | WRITE1(idx->metric_arg); |
91 | 0 | } |
92 | 60 | } |
93 | | |
94 | 0 | void write_VectorTransform(const VectorTransform* vt, IOWriter* f) { |
95 | 0 | if (const LinearTransform* lt = dynamic_cast<const LinearTransform*>(vt)) { |
96 | 0 | if (dynamic_cast<const RandomRotationMatrix*>(lt)) { |
97 | 0 | uint32_t h = fourcc("rrot"); |
98 | 0 | WRITE1(h); |
99 | 0 | } else if (const PCAMatrix* pca = dynamic_cast<const PCAMatrix*>(lt)) { |
100 | 0 | uint32_t h = fourcc("Pcam"); |
101 | 0 | WRITE1(h); |
102 | 0 | WRITE1(pca->eigen_power); |
103 | 0 | WRITE1(pca->epsilon); |
104 | 0 | WRITE1(pca->random_rotation); |
105 | 0 | WRITE1(pca->balanced_bins); |
106 | 0 | WRITEVECTOR(pca->mean); |
107 | 0 | WRITEVECTOR(pca->eigenvalues); |
108 | 0 | WRITEVECTOR(pca->PCAMat); |
109 | 0 | } else if (const ITQMatrix* itqm = dynamic_cast<const ITQMatrix*>(lt)) { |
110 | 0 | uint32_t h = fourcc("Viqm"); |
111 | 0 | WRITE1(h); |
112 | 0 | WRITE1(itqm->max_iter); |
113 | 0 | WRITE1(itqm->seed); |
114 | 0 | } else { |
115 | | // generic LinearTransform (includes OPQ) |
116 | 0 | uint32_t h = fourcc("LTra"); |
117 | 0 | WRITE1(h); |
118 | 0 | } |
119 | 0 | WRITE1(lt->have_bias); |
120 | 0 | WRITEVECTOR(lt->A); |
121 | 0 | WRITEVECTOR(lt->b); |
122 | 0 | } else if ( |
123 | 0 | const RemapDimensionsTransform* rdt = |
124 | 0 | dynamic_cast<const RemapDimensionsTransform*>(vt)) { |
125 | 0 | uint32_t h = fourcc("RmDT"); |
126 | 0 | WRITE1(h); |
127 | 0 | WRITEVECTOR(rdt->map); |
128 | 0 | } else if ( |
129 | 0 | const NormalizationTransform* nt = |
130 | 0 | dynamic_cast<const NormalizationTransform*>(vt)) { |
131 | 0 | uint32_t h = fourcc("VNrm"); |
132 | 0 | WRITE1(h); |
133 | 0 | WRITE1(nt->norm); |
134 | 0 | } else if ( |
135 | 0 | const CenteringTransform* ct = |
136 | 0 | dynamic_cast<const CenteringTransform*>(vt)) { |
137 | 0 | uint32_t h = fourcc("VCnt"); |
138 | 0 | WRITE1(h); |
139 | 0 | WRITEVECTOR(ct->mean); |
140 | 0 | } else if ( |
141 | 0 | const ITQTransform* itqt = dynamic_cast<const ITQTransform*>(vt)) { |
142 | 0 | uint32_t h = fourcc("Viqt"); |
143 | 0 | WRITE1(h); |
144 | 0 | WRITEVECTOR(itqt->mean); |
145 | 0 | WRITE1(itqt->do_pca); |
146 | 0 | write_VectorTransform(&itqt->itq, f); |
147 | 0 | write_VectorTransform(&itqt->pca_then_itq, f); |
148 | 0 | } else { |
149 | 0 | FAISS_THROW_MSG("cannot serialize this"); |
150 | 0 | } |
151 | | // common fields |
152 | 0 | WRITE1(vt->d_in); |
153 | 0 | WRITE1(vt->d_out); |
154 | 0 | WRITE1(vt->is_trained); |
155 | 0 | } |
156 | | |
157 | 2 | void write_ProductQuantizer(const ProductQuantizer* pq, IOWriter* f) { |
158 | 2 | WRITE1(pq->d); |
159 | 2 | WRITE1(pq->M); |
160 | 2 | WRITE1(pq->nbits); |
161 | 4 | WRITEVECTOR(pq->centroids); |
162 | 4 | } |
163 | | |
164 | 0 | static void write_AdditiveQuantizer(const AdditiveQuantizer* aq, IOWriter* f) { |
165 | 0 | WRITE1(aq->d); |
166 | 0 | WRITE1(aq->M); |
167 | 0 | WRITEVECTOR(aq->nbits); |
168 | 0 | WRITE1(aq->is_trained); |
169 | 0 | WRITEVECTOR(aq->codebooks); |
170 | 0 | WRITE1(aq->search_type); |
171 | 0 | WRITE1(aq->norm_min); |
172 | 0 | WRITE1(aq->norm_max); |
173 | 0 | if (aq->search_type == AdditiveQuantizer::ST_norm_cqint8 || |
174 | 0 | aq->search_type == AdditiveQuantizer::ST_norm_cqint4 || |
175 | 0 | aq->search_type == AdditiveQuantizer::ST_norm_lsq2x4 || |
176 | 0 | aq->search_type == AdditiveQuantizer::ST_norm_rq2x4) { |
177 | 0 | WRITEXBVECTOR(aq->qnorm.codes); |
178 | 0 | } |
179 | | |
180 | 0 | if (aq->search_type == AdditiveQuantizer::ST_norm_lsq2x4 || |
181 | 0 | aq->search_type == AdditiveQuantizer::ST_norm_rq2x4) { |
182 | 0 | WRITEVECTOR(aq->norm_tabs); |
183 | 0 | } |
184 | 0 | } |
185 | | |
186 | 0 | static void write_ResidualQuantizer(const ResidualQuantizer* rq, IOWriter* f) { |
187 | 0 | write_AdditiveQuantizer(rq, f); |
188 | 0 | WRITE1(rq->train_type); |
189 | 0 | WRITE1(rq->max_beam_size); |
190 | 0 | } |
191 | | |
192 | | static void write_LocalSearchQuantizer( |
193 | | const LocalSearchQuantizer* lsq, |
194 | 0 | IOWriter* f) { |
195 | 0 | write_AdditiveQuantizer(lsq, f); |
196 | 0 | WRITE1(lsq->K); |
197 | 0 | WRITE1(lsq->train_iters); |
198 | 0 | WRITE1(lsq->encode_ils_iters); |
199 | 0 | WRITE1(lsq->train_ils_iters); |
200 | 0 | WRITE1(lsq->icm_iters); |
201 | 0 | WRITE1(lsq->p); |
202 | 0 | WRITE1(lsq->lambd); |
203 | 0 | WRITE1(lsq->chunk_size); |
204 | 0 | WRITE1(lsq->random_seed); |
205 | 0 | WRITE1(lsq->nperts); |
206 | 0 | WRITE1(lsq->update_codebooks_with_double); |
207 | 0 | } |
208 | | |
209 | | static void write_ProductAdditiveQuantizer( |
210 | | const ProductAdditiveQuantizer* paq, |
211 | 0 | IOWriter* f) { |
212 | 0 | write_AdditiveQuantizer(paq, f); |
213 | 0 | WRITE1(paq->nsplits); |
214 | 0 | } |
215 | | |
216 | | static void write_ProductResidualQuantizer( |
217 | | const ProductResidualQuantizer* prq, |
218 | 0 | IOWriter* f) { |
219 | 0 | write_ProductAdditiveQuantizer(prq, f); |
220 | 0 | for (const auto aq : prq->quantizers) { |
221 | 0 | auto rq = dynamic_cast<const ResidualQuantizer*>(aq); |
222 | 0 | write_ResidualQuantizer(rq, f); |
223 | 0 | } |
224 | 0 | } |
225 | | |
226 | | static void write_ProductLocalSearchQuantizer( |
227 | | const ProductLocalSearchQuantizer* plsq, |
228 | 0 | IOWriter* f) { |
229 | 0 | write_ProductAdditiveQuantizer(plsq, f); |
230 | 0 | for (const auto aq : plsq->quantizers) { |
231 | 0 | auto lsq = dynamic_cast<const LocalSearchQuantizer*>(aq); |
232 | 0 | write_LocalSearchQuantizer(lsq, f); |
233 | 0 | } |
234 | 0 | } |
235 | | |
236 | 4 | static void write_ScalarQuantizer(const ScalarQuantizer* ivsc, IOWriter* f) { |
237 | 4 | WRITE1(ivsc->qtype); |
238 | 4 | WRITE1(ivsc->rangestat); |
239 | 4 | WRITE1(ivsc->rangestat_arg); |
240 | 4 | WRITE1(ivsc->d); |
241 | 4 | WRITE1(ivsc->code_size); |
242 | 8 | WRITEVECTOR(ivsc->trained); |
243 | 8 | } |
244 | | |
245 | 5 | void write_InvertedLists(const InvertedLists* ils, IOWriter* f) { |
246 | 5 | if (ils == nullptr) { |
247 | 0 | uint32_t h = fourcc("il00"); |
248 | 0 | WRITE1(h); |
249 | 5 | } else if ( |
250 | 5 | const auto& ails = dynamic_cast<const ArrayInvertedLists*>(ils)) { |
251 | 5 | uint32_t h = fourcc("ilar"); |
252 | 5 | WRITE1(h); |
253 | 5 | WRITE1(ails->nlist); |
254 | 5 | WRITE1(ails->code_size); |
255 | | // here we store either as a full or a sparse data buffer |
256 | 5 | size_t n_non0 = 0; |
257 | 29 | for (size_t i = 0; i < ails->nlist; i++) { |
258 | 24 | if (ails->ids[i].size() > 0) |
259 | 12 | n_non0++; |
260 | 24 | } |
261 | 5 | if (n_non0 > ails->nlist / 2) { |
262 | 3 | uint32_t list_type = fourcc("full"); |
263 | 3 | WRITE1(list_type); |
264 | 3 | std::vector<size_t> sizes; |
265 | 15 | for (size_t i = 0; i < ails->nlist; i++) { |
266 | 12 | sizes.push_back(ails->ids[i].size()); |
267 | 12 | } |
268 | 6 | WRITEVECTOR(sizes); |
269 | 6 | } else { |
270 | 2 | int list_type = fourcc("sprs"); // sparse |
271 | 2 | WRITE1(list_type); |
272 | 2 | std::vector<size_t> sizes; |
273 | 14 | for (size_t i = 0; i < ails->nlist; i++) { |
274 | 12 | size_t n = ails->ids[i].size(); |
275 | 12 | if (n > 0) { |
276 | 0 | sizes.push_back(i); |
277 | 0 | sizes.push_back(n); |
278 | 0 | } |
279 | 12 | } |
280 | 4 | WRITEVECTOR(sizes); |
281 | 4 | } |
282 | | // make a single contiguous data buffer (useful for mmapping) |
283 | 29 | for (size_t i = 0; i < ails->nlist; i++) { |
284 | 24 | size_t n = ails->ids[i].size(); |
285 | 24 | if (n > 0) { |
286 | 12 | WRITEANDCHECK(ails->codes[i].data(), n * ails->code_size); |
287 | 12 | WRITEANDCHECK(ails->ids[i].data(), n); |
288 | 12 | } |
289 | 24 | } |
290 | | |
291 | 5 | } else { |
292 | 0 | InvertedListsIOHook::lookup_classname(typeid(*ils).name()) |
293 | 0 | ->write(ils, f); |
294 | 0 | } |
295 | 5 | } |
296 | | |
297 | 0 | void write_ProductQuantizer(const ProductQuantizer* pq, const char* fname) { |
298 | 0 | FileIOWriter writer(fname); |
299 | 0 | write_ProductQuantizer(pq, &writer); |
300 | 0 | } |
301 | | |
302 | 25 | static void write_HNSW(const HNSW* hnsw, IOWriter* f) { |
303 | 50 | WRITEVECTOR(hnsw->assign_probas); |
304 | 50 | WRITEVECTOR(hnsw->cum_nneighbor_per_level); |
305 | 50 | WRITEVECTOR(hnsw->levels); |
306 | 50 | WRITEVECTOR(hnsw->offsets); |
307 | 50 | WRITEVECTOR(hnsw->neighbors); |
308 | | |
309 | 50 | WRITE1(hnsw->entry_point); |
310 | 25 | WRITE1(hnsw->max_level); |
311 | 25 | WRITE1(hnsw->efConstruction); |
312 | 25 | WRITE1(hnsw->efSearch); |
313 | | |
314 | | // // deprecated field |
315 | | // WRITE1(hnsw->upper_beam); |
316 | 25 | constexpr int tmp_upper_beam = 1; |
317 | 25 | WRITE1(tmp_upper_beam); |
318 | 25 | } |
319 | | |
320 | 0 | static void write_NSG(const NSG* nsg, IOWriter* f) { |
321 | 0 | WRITE1(nsg->ntotal); |
322 | 0 | WRITE1(nsg->R); |
323 | 0 | WRITE1(nsg->L); |
324 | 0 | WRITE1(nsg->C); |
325 | 0 | WRITE1(nsg->search_L); |
326 | 0 | WRITE1(nsg->enterpoint); |
327 | 0 | WRITE1(nsg->is_built); |
328 | |
|
329 | 0 | if (!nsg->is_built) { |
330 | 0 | return; |
331 | 0 | } |
332 | | |
333 | 0 | constexpr int EMPTY_ID = -1; |
334 | 0 | auto& graph = nsg->final_graph; |
335 | 0 | int K = graph->K; |
336 | 0 | int N = graph->N; |
337 | 0 | FAISS_THROW_IF_NOT(N == nsg->ntotal); |
338 | 0 | FAISS_THROW_IF_NOT(K == nsg->R); |
339 | 0 | FAISS_THROW_IF_NOT(true == graph->own_fields); |
340 | | |
341 | 0 | for (int i = 0; i < N; i++) { |
342 | 0 | for (int j = 0; j < K; j++) { |
343 | 0 | int id = graph->at(i, j); |
344 | 0 | if (id != EMPTY_ID) { |
345 | 0 | WRITE1(id); |
346 | 0 | } else { |
347 | 0 | break; |
348 | 0 | } |
349 | 0 | } |
350 | 0 | WRITE1(EMPTY_ID); |
351 | 0 | } |
352 | 0 | } |
353 | | |
354 | 0 | static void write_NNDescent(const NNDescent* nnd, IOWriter* f) { |
355 | 0 | WRITE1(nnd->ntotal); |
356 | 0 | WRITE1(nnd->d); |
357 | 0 | WRITE1(nnd->K); |
358 | 0 | WRITE1(nnd->S); |
359 | 0 | WRITE1(nnd->R); |
360 | 0 | WRITE1(nnd->L); |
361 | 0 | WRITE1(nnd->iter); |
362 | 0 | WRITE1(nnd->search_L); |
363 | 0 | WRITE1(nnd->random_seed); |
364 | 0 | WRITE1(nnd->has_built); |
365 | |
|
366 | 0 | WRITEVECTOR(nnd->final_graph); |
367 | 0 | } |
368 | | |
369 | 0 | static void write_RaBitQuantizer(const RaBitQuantizer* rabitq, IOWriter* f) { |
370 | | // don't care about rabitq->centroid |
371 | 0 | WRITE1(rabitq->d); |
372 | 0 | WRITE1(rabitq->code_size); |
373 | 0 | WRITE1(rabitq->metric_type); |
374 | 0 | } |
375 | | |
376 | 5 | static void write_direct_map(const DirectMap* dm, IOWriter* f) { |
377 | 5 | char maintain_direct_map = |
378 | 5 | (char)dm->type; // for backwards compatibility with bool |
379 | 5 | WRITE1(maintain_direct_map); |
380 | 10 | WRITEVECTOR(dm->array); |
381 | 10 | if (dm->type == DirectMap::Hashtable) { |
382 | 0 | std::vector<std::pair<idx_t, idx_t>> v; |
383 | 0 | const std::unordered_map<idx_t, idx_t>& map = dm->hashtable; |
384 | 0 | v.resize(map.size()); |
385 | 0 | std::copy(map.begin(), map.end(), v.begin()); |
386 | 0 | WRITEVECTOR(v); |
387 | 0 | } |
388 | 10 | } |
389 | | |
390 | 5 | static void write_ivf_header(const IndexIVF* ivf, IOWriter* f) { |
391 | 5 | write_index_header(ivf, f); |
392 | 5 | WRITE1(ivf->nlist); |
393 | 5 | WRITE1(ivf->nprobe); |
394 | | // subclasses write by_residual (some of them support only one setting of |
395 | | // by_residual). |
396 | 5 | write_index(ivf->quantizer, f); |
397 | 5 | write_direct_map(&ivf->direct_map, f); |
398 | 5 | } |
399 | | |
400 | 60 | void write_index(const Index* idx, IOWriter* f, int io_flags) { |
401 | 60 | if (idx == nullptr) { |
402 | | // eg. for a storage component of HNSW that is set to nullptr |
403 | 0 | uint32_t h = fourcc("null"); |
404 | 0 | WRITE1(h); |
405 | 60 | } else if (const IndexFlat* idxf = dynamic_cast<const IndexFlat*>(idx)) { |
406 | 27 | uint32_t h = |
407 | 27 | fourcc(idxf->metric_type == METRIC_INNER_PRODUCT ? "IxFI" |
408 | 27 | : idxf->metric_type == METRIC_L2 ? "IxF2" |
409 | 27 | : "IxFl"); |
410 | 27 | WRITE1(h); |
411 | 27 | write_index_header(idx, f); |
412 | 81 | WRITEXBVECTOR(idxf->codes); |
413 | 81 | } else if (const IndexLSH* idxl = dynamic_cast<const IndexLSH*>(idx)) { |
414 | 0 | uint32_t h = fourcc("IxHe"); |
415 | 0 | WRITE1(h); |
416 | 0 | write_index_header(idx, f); |
417 | 0 | WRITE1(idxl->nbits); |
418 | 0 | WRITE1(idxl->rotate_data); |
419 | 0 | WRITE1(idxl->train_thresholds); |
420 | 0 | WRITEVECTOR(idxl->thresholds); |
421 | 0 | int code_size_i = idxl->code_size; |
422 | 0 | WRITE1(code_size_i); |
423 | 0 | write_VectorTransform(&idxl->rrot, f); |
424 | 0 | WRITEVECTOR(idxl->codes); |
425 | 33 | } else if (const IndexPQ* idxp = dynamic_cast<const IndexPQ*>(idx)) { |
426 | 1 | uint32_t h = fourcc("IxPq"); |
427 | 1 | WRITE1(h); |
428 | 1 | write_index_header(idx, f); |
429 | 1 | write_ProductQuantizer(&idxp->pq, f); |
430 | 2 | WRITEVECTOR(idxp->codes); |
431 | | // search params -- maybe not useful to store? |
432 | 2 | WRITE1(idxp->search_type); |
433 | 1 | WRITE1(idxp->encode_signs); |
434 | 1 | WRITE1(idxp->polysemous_ht); |
435 | 32 | } else if ( |
436 | 32 | const IndexResidualQuantizer* idxr = |
437 | 32 | dynamic_cast<const IndexResidualQuantizer*>(idx)) { |
438 | 0 | uint32_t h = fourcc("IxRq"); |
439 | 0 | WRITE1(h); |
440 | 0 | write_index_header(idx, f); |
441 | 0 | write_ResidualQuantizer(&idxr->rq, f); |
442 | 0 | WRITE1(idxr->code_size); |
443 | 0 | WRITEVECTOR(idxr->codes); |
444 | 32 | } else if ( |
445 | 32 | auto* idxr_2 = |
446 | 32 | dynamic_cast<const IndexLocalSearchQuantizer*>(idx)) { |
447 | 0 | uint32_t h = fourcc("IxLS"); |
448 | 0 | WRITE1(h); |
449 | 0 | write_index_header(idx, f); |
450 | 0 | write_LocalSearchQuantizer(&idxr_2->lsq, f); |
451 | 0 | WRITE1(idxr_2->code_size); |
452 | 0 | WRITEVECTOR(idxr_2->codes); |
453 | 32 | } else if ( |
454 | 32 | const IndexProductResidualQuantizer* idxpr = |
455 | 32 | dynamic_cast<const IndexProductResidualQuantizer*>(idx)) { |
456 | 0 | uint32_t h = fourcc("IxPR"); |
457 | 0 | WRITE1(h); |
458 | 0 | write_index_header(idx, f); |
459 | 0 | write_ProductResidualQuantizer(&idxpr->prq, f); |
460 | 0 | WRITE1(idxpr->code_size); |
461 | 0 | WRITEVECTOR(idxpr->codes); |
462 | 32 | } else if ( |
463 | 32 | const IndexProductLocalSearchQuantizer* idxpl = |
464 | 32 | dynamic_cast<const IndexProductLocalSearchQuantizer*>( |
465 | 32 | idx)) { |
466 | 0 | uint32_t h = fourcc("IxPL"); |
467 | 0 | WRITE1(h); |
468 | 0 | write_index_header(idx, f); |
469 | 0 | write_ProductLocalSearchQuantizer(&idxpl->plsq, f); |
470 | 0 | WRITE1(idxpl->code_size); |
471 | 0 | WRITEVECTOR(idxpl->codes); |
472 | 32 | } else if ( |
473 | 32 | auto* idxaqfs = |
474 | 32 | dynamic_cast<const IndexAdditiveQuantizerFastScan*>(idx)) { |
475 | 0 | auto idxlsqfs = |
476 | 0 | dynamic_cast<const IndexLocalSearchQuantizerFastScan*>(idx); |
477 | 0 | auto idxrqfs = dynamic_cast<const IndexResidualQuantizerFastScan*>(idx); |
478 | 0 | auto idxplsqfs = |
479 | 0 | dynamic_cast<const IndexProductLocalSearchQuantizerFastScan*>( |
480 | 0 | idx); |
481 | 0 | auto idxprqfs = |
482 | 0 | dynamic_cast<const IndexProductResidualQuantizerFastScan*>(idx); |
483 | 0 | FAISS_THROW_IF_NOT(idxlsqfs || idxrqfs || idxplsqfs || idxprqfs); |
484 | | |
485 | 0 | if (idxlsqfs) { |
486 | 0 | uint32_t h = fourcc("ILfs"); |
487 | 0 | WRITE1(h); |
488 | 0 | } else if (idxrqfs) { |
489 | 0 | uint32_t h = fourcc("IRfs"); |
490 | 0 | WRITE1(h); |
491 | 0 | } else if (idxplsqfs) { |
492 | 0 | uint32_t h = fourcc("IPLf"); |
493 | 0 | WRITE1(h); |
494 | 0 | } else if (idxprqfs) { |
495 | 0 | uint32_t h = fourcc("IPRf"); |
496 | 0 | WRITE1(h); |
497 | 0 | } |
498 | | |
499 | 0 | write_index_header(idxaqfs, f); |
500 | |
|
501 | 0 | if (idxlsqfs) { |
502 | 0 | write_LocalSearchQuantizer(&idxlsqfs->lsq, f); |
503 | 0 | } else if (idxrqfs) { |
504 | 0 | write_ResidualQuantizer(&idxrqfs->rq, f); |
505 | 0 | } else if (idxplsqfs) { |
506 | 0 | write_ProductLocalSearchQuantizer(&idxplsqfs->plsq, f); |
507 | 0 | } else if (idxprqfs) { |
508 | 0 | write_ProductResidualQuantizer(&idxprqfs->prq, f); |
509 | 0 | } |
510 | 0 | WRITE1(idxaqfs->implem); |
511 | 0 | WRITE1(idxaqfs->bbs); |
512 | 0 | WRITE1(idxaqfs->qbs); |
513 | |
|
514 | 0 | WRITE1(idxaqfs->M); |
515 | 0 | WRITE1(idxaqfs->nbits); |
516 | 0 | WRITE1(idxaqfs->ksub); |
517 | 0 | WRITE1(idxaqfs->code_size); |
518 | 0 | WRITE1(idxaqfs->ntotal2); |
519 | 0 | WRITE1(idxaqfs->M2); |
520 | |
|
521 | 0 | WRITE1(idxaqfs->rescale_norm); |
522 | 0 | WRITE1(idxaqfs->norm_scale); |
523 | 0 | WRITE1(idxaqfs->max_train_points); |
524 | |
|
525 | 0 | WRITEVECTOR(idxaqfs->codes); |
526 | 32 | } else if ( |
527 | 32 | auto* ivaqfs = |
528 | 32 | dynamic_cast<const IndexIVFAdditiveQuantizerFastScan*>( |
529 | 32 | idx)) { |
530 | 0 | auto ivlsqfs = |
531 | 0 | dynamic_cast<const IndexIVFLocalSearchQuantizerFastScan*>(idx); |
532 | 0 | auto ivrqfs = |
533 | 0 | dynamic_cast<const IndexIVFResidualQuantizerFastScan*>(idx); |
534 | 0 | auto ivplsqfs = dynamic_cast< |
535 | 0 | const IndexIVFProductLocalSearchQuantizerFastScan*>(idx); |
536 | 0 | auto ivprqfs = |
537 | 0 | dynamic_cast<const IndexIVFProductResidualQuantizerFastScan*>( |
538 | 0 | idx); |
539 | 0 | FAISS_THROW_IF_NOT(ivlsqfs || ivrqfs || ivplsqfs || ivprqfs); |
540 | | |
541 | 0 | if (ivlsqfs) { |
542 | 0 | uint32_t h = fourcc("IVLf"); |
543 | 0 | WRITE1(h); |
544 | 0 | } else if (ivrqfs) { |
545 | 0 | uint32_t h = fourcc("IVRf"); |
546 | 0 | WRITE1(h); |
547 | 0 | } else if (ivplsqfs) { |
548 | 0 | uint32_t h = fourcc("NPLf"); // N means IV ... |
549 | 0 | WRITE1(h); |
550 | 0 | } else { |
551 | 0 | uint32_t h = fourcc("NPRf"); |
552 | 0 | WRITE1(h); |
553 | 0 | } |
554 | | |
555 | 0 | write_ivf_header(ivaqfs, f); |
556 | |
|
557 | 0 | if (ivlsqfs) { |
558 | 0 | write_LocalSearchQuantizer(&ivlsqfs->lsq, f); |
559 | 0 | } else if (ivrqfs) { |
560 | 0 | write_ResidualQuantizer(&ivrqfs->rq, f); |
561 | 0 | } else if (ivplsqfs) { |
562 | 0 | write_ProductLocalSearchQuantizer(&ivplsqfs->plsq, f); |
563 | 0 | } else { |
564 | 0 | write_ProductResidualQuantizer(&ivprqfs->prq, f); |
565 | 0 | } |
566 | |
|
567 | 0 | WRITE1(ivaqfs->by_residual); |
568 | 0 | WRITE1(ivaqfs->implem); |
569 | 0 | WRITE1(ivaqfs->bbs); |
570 | 0 | WRITE1(ivaqfs->qbs); |
571 | |
|
572 | 0 | WRITE1(ivaqfs->M); |
573 | 0 | WRITE1(ivaqfs->nbits); |
574 | 0 | WRITE1(ivaqfs->ksub); |
575 | 0 | WRITE1(ivaqfs->code_size); |
576 | 0 | WRITE1(ivaqfs->qbs2); |
577 | 0 | WRITE1(ivaqfs->M2); |
578 | |
|
579 | 0 | WRITE1(ivaqfs->rescale_norm); |
580 | 0 | WRITE1(ivaqfs->norm_scale); |
581 | 0 | WRITE1(ivaqfs->max_train_points); |
582 | |
|
583 | 0 | write_InvertedLists(ivaqfs->invlists, f); |
584 | 32 | } else if ( |
585 | 32 | const ResidualCoarseQuantizer* idxr_2 = |
586 | 32 | dynamic_cast<const ResidualCoarseQuantizer*>(idx)) { |
587 | 0 | uint32_t h = fourcc("ImRQ"); |
588 | 0 | WRITE1(h); |
589 | 0 | write_index_header(idx, f); |
590 | 0 | write_ResidualQuantizer(&idxr_2->rq, f); |
591 | 0 | WRITE1(idxr_2->beam_factor); |
592 | 32 | } else if ( |
593 | 32 | const Index2Layer* idxp_2 = dynamic_cast<const Index2Layer*>(idx)) { |
594 | 0 | uint32_t h = fourcc("Ix2L"); |
595 | 0 | WRITE1(h); |
596 | 0 | write_index_header(idx, f); |
597 | 0 | write_index(idxp_2->q1.quantizer, f); |
598 | 0 | WRITE1(idxp_2->q1.nlist); |
599 | 0 | WRITE1(idxp_2->q1.quantizer_trains_alone); |
600 | 0 | write_ProductQuantizer(&idxp_2->pq, f); |
601 | 0 | WRITE1(idxp_2->code_size_1); |
602 | 0 | WRITE1(idxp_2->code_size_2); |
603 | 0 | WRITE1(idxp_2->code_size); |
604 | 0 | WRITEVECTOR(idxp_2->codes); |
605 | 32 | } else if ( |
606 | 32 | const IndexScalarQuantizer* idxs = |
607 | 32 | dynamic_cast<const IndexScalarQuantizer*>(idx)) { |
608 | 2 | uint32_t h = fourcc("IxSQ"); |
609 | 2 | WRITE1(h); |
610 | 2 | write_index_header(idx, f); |
611 | 2 | write_ScalarQuantizer(&idxs->sq, f); |
612 | 4 | WRITEVECTOR(idxs->codes); |
613 | 30 | } else if ( |
614 | 30 | const IndexLattice* idxl_2 = |
615 | 30 | dynamic_cast<const IndexLattice*>(idx)) { |
616 | 0 | uint32_t h = fourcc("IxLa"); |
617 | 0 | WRITE1(h); |
618 | 0 | WRITE1(idxl_2->d); |
619 | 0 | WRITE1(idxl_2->nsq); |
620 | 0 | WRITE1(idxl_2->scale_nbit); |
621 | 0 | WRITE1(idxl_2->zn_sphere_codec.r2); |
622 | 0 | write_index_header(idx, f); |
623 | 0 | WRITEVECTOR(idxl_2->trained); |
624 | 30 | } else if ( |
625 | 30 | const IndexIVFFlatDedup* ivfl = |
626 | 30 | dynamic_cast<const IndexIVFFlatDedup*>(idx)) { |
627 | 0 | uint32_t h = fourcc("IwFd"); |
628 | 0 | WRITE1(h); |
629 | 0 | write_ivf_header(ivfl, f); |
630 | 0 | { |
631 | 0 | std::vector<idx_t> tab(2 * ivfl->instances.size()); |
632 | 0 | long i = 0; |
633 | 0 | for (auto it = ivfl->instances.begin(); it != ivfl->instances.end(); |
634 | 0 | ++it) { |
635 | 0 | tab[i++] = it->first; |
636 | 0 | tab[i++] = it->second; |
637 | 0 | } |
638 | 0 | WRITEVECTOR(tab); |
639 | 0 | } |
640 | 0 | write_InvertedLists(ivfl->invlists, f); |
641 | 30 | } else if ( |
642 | 30 | const IndexIVFFlat* ivfl_2 = |
643 | 30 | dynamic_cast<const IndexIVFFlat*>(idx)) { |
644 | 2 | uint32_t h = fourcc("IwFl"); |
645 | 2 | WRITE1(h); |
646 | 2 | write_ivf_header(ivfl_2, f); |
647 | 2 | write_InvertedLists(ivfl_2->invlists, f); |
648 | 28 | } else if ( |
649 | 28 | const IndexIVFScalarQuantizer* ivsc = |
650 | 28 | dynamic_cast<const IndexIVFScalarQuantizer*>(idx)) { |
651 | 2 | uint32_t h = fourcc("IwSq"); |
652 | 2 | WRITE1(h); |
653 | 2 | write_ivf_header(ivsc, f); |
654 | 2 | write_ScalarQuantizer(&ivsc->sq, f); |
655 | 2 | WRITE1(ivsc->code_size); |
656 | 2 | WRITE1(ivsc->by_residual); |
657 | 2 | write_InvertedLists(ivsc->invlists, f); |
658 | 26 | } else if (auto iva = dynamic_cast<const IndexIVFAdditiveQuantizer*>(idx)) { |
659 | 0 | bool is_LSQ = dynamic_cast<const IndexIVFLocalSearchQuantizer*>(iva); |
660 | 0 | bool is_RQ = dynamic_cast<const IndexIVFResidualQuantizer*>(iva); |
661 | 0 | bool is_PLSQ = |
662 | 0 | dynamic_cast<const IndexIVFProductLocalSearchQuantizer*>(iva); |
663 | 0 | uint32_t h; |
664 | 0 | if (is_LSQ) { |
665 | 0 | h = fourcc("IwLS"); |
666 | 0 | } else if (is_RQ) { |
667 | 0 | h = fourcc("IwRQ"); |
668 | 0 | } else if (is_PLSQ) { |
669 | 0 | h = fourcc("IwPL"); |
670 | 0 | } else { |
671 | 0 | h = fourcc("IwPR"); |
672 | 0 | } |
673 | |
|
674 | 0 | WRITE1(h); |
675 | 0 | write_ivf_header(iva, f); |
676 | 0 | WRITE1(iva->code_size); |
677 | 0 | if (is_LSQ) { |
678 | 0 | write_LocalSearchQuantizer((LocalSearchQuantizer*)iva->aq, f); |
679 | 0 | } else if (is_RQ) { |
680 | 0 | write_ResidualQuantizer((ResidualQuantizer*)iva->aq, f); |
681 | 0 | } else if (is_PLSQ) { |
682 | 0 | write_ProductLocalSearchQuantizer( |
683 | 0 | (ProductLocalSearchQuantizer*)iva->aq, f); |
684 | 0 | } else { |
685 | 0 | write_ProductResidualQuantizer( |
686 | 0 | (ProductResidualQuantizer*)iva->aq, f); |
687 | 0 | } |
688 | 0 | WRITE1(iva->by_residual); |
689 | 0 | WRITE1(iva->use_precomputed_table); |
690 | 0 | write_InvertedLists(iva->invlists, f); |
691 | 26 | } else if ( |
692 | 26 | const IndexIVFSpectralHash* ivsp = |
693 | 26 | dynamic_cast<const IndexIVFSpectralHash*>(idx)) { |
694 | 0 | uint32_t h = fourcc("IwSh"); |
695 | 0 | WRITE1(h); |
696 | 0 | write_ivf_header(ivsp, f); |
697 | 0 | write_VectorTransform(ivsp->vt, f); |
698 | 0 | WRITE1(ivsp->nbit); |
699 | 0 | WRITE1(ivsp->period); |
700 | 0 | WRITE1(ivsp->threshold_type); |
701 | 0 | WRITEVECTOR(ivsp->trained); |
702 | 0 | write_InvertedLists(ivsp->invlists, f); |
703 | 26 | } else if (const IndexIVFPQ* ivpq = dynamic_cast<const IndexIVFPQ*>(idx)) { |
704 | 1 | const IndexIVFPQR* ivfpqr = dynamic_cast<const IndexIVFPQR*>(idx); |
705 | | |
706 | 1 | uint32_t h = fourcc(ivfpqr ? "IwQR" : "IwPQ"); |
707 | 1 | WRITE1(h); |
708 | 1 | write_ivf_header(ivpq, f); |
709 | 1 | WRITE1(ivpq->by_residual); |
710 | 1 | WRITE1(ivpq->code_size); |
711 | 1 | write_ProductQuantizer(&ivpq->pq, f); |
712 | 1 | write_InvertedLists(ivpq->invlists, f); |
713 | 1 | if (ivfpqr) { |
714 | 0 | write_ProductQuantizer(&ivfpqr->refine_pq, f); |
715 | 0 | WRITEVECTOR(ivfpqr->refine_codes); |
716 | 0 | WRITE1(ivfpqr->k_factor); |
717 | 0 | } |
718 | 25 | } else if ( |
719 | 25 | auto* indep = |
720 | 25 | dynamic_cast<const IndexIVFIndependentQuantizer*>(idx)) { |
721 | 0 | uint32_t h = fourcc("IwIQ"); |
722 | 0 | WRITE1(h); |
723 | 0 | write_index_header(indep, f); |
724 | 0 | write_index(indep->quantizer, f); |
725 | 0 | bool has_vt = indep->vt != nullptr; |
726 | 0 | WRITE1(has_vt); |
727 | 0 | if (has_vt) { |
728 | 0 | write_VectorTransform(indep->vt, f); |
729 | 0 | } |
730 | 0 | write_index(indep->index_ivf, f); |
731 | 0 | if (auto index_ivfpq = dynamic_cast<IndexIVFPQ*>(indep->index_ivf)) { |
732 | 0 | WRITE1(index_ivfpq->use_precomputed_table); |
733 | 0 | } |
734 | 25 | } else if ( |
735 | 25 | const IndexPreTransform* ixpt = |
736 | 25 | dynamic_cast<const IndexPreTransform*>(idx)) { |
737 | 0 | uint32_t h = fourcc("IxPT"); |
738 | 0 | WRITE1(h); |
739 | 0 | write_index_header(ixpt, f); |
740 | 0 | int nt = ixpt->chain.size(); |
741 | 0 | WRITE1(nt); |
742 | 0 | for (int i = 0; i < nt; i++) |
743 | 0 | write_VectorTransform(ixpt->chain[i], f); |
744 | 0 | write_index(ixpt->index, f); |
745 | 25 | } else if ( |
746 | 25 | const MultiIndexQuantizer* imiq = |
747 | 25 | dynamic_cast<const MultiIndexQuantizer*>(idx)) { |
748 | 0 | uint32_t h = fourcc("Imiq"); |
749 | 0 | WRITE1(h); |
750 | 0 | write_index_header(imiq, f); |
751 | 0 | write_ProductQuantizer(&imiq->pq, f); |
752 | 25 | } else if ( |
753 | 25 | const IndexRefine* idxrf = dynamic_cast<const IndexRefine*>(idx)) { |
754 | 0 | uint32_t h = fourcc("IxRF"); |
755 | 0 | WRITE1(h); |
756 | 0 | write_index_header(idxrf, f); |
757 | 0 | write_index(idxrf->base_index, f); |
758 | 0 | write_index(idxrf->refine_index, f); |
759 | 0 | WRITE1(idxrf->k_factor); |
760 | 25 | } else if ( |
761 | 25 | const IndexIDMap* idxmap = dynamic_cast<const IndexIDMap*>(idx)) { |
762 | 0 | uint32_t h = dynamic_cast<const IndexIDMap2*>(idx) ? fourcc("IxM2") |
763 | 0 | : fourcc("IxMp"); |
764 | | // no need to store additional info for IndexIDMap2 |
765 | 0 | WRITE1(h); |
766 | 0 | write_index_header(idxmap, f); |
767 | 0 | write_index(idxmap->index, f); |
768 | 0 | WRITEVECTOR(idxmap->id_map); |
769 | 25 | } else if (const IndexHNSW* idxhnsw = dynamic_cast<const IndexHNSW*>(idx)) { |
770 | 25 | uint32_t h = dynamic_cast<const IndexHNSWFlat*>(idx) ? fourcc("IHNf") |
771 | 25 | : dynamic_cast<const IndexHNSWPQ*>(idx) ? fourcc("IHNp") |
772 | 3 | : dynamic_cast<const IndexHNSWSQ*>(idx) ? fourcc("IHNs") |
773 | 2 | : dynamic_cast<const IndexHNSW2Level*>(idx) ? fourcc("IHN2") |
774 | 0 | : dynamic_cast<const IndexHNSWCagra*>(idx) ? fourcc("IHNc") |
775 | 0 | : 0; |
776 | 25 | FAISS_THROW_IF_NOT(h != 0); |
777 | 25 | WRITE1(h); |
778 | 25 | write_index_header(idxhnsw, f); |
779 | 25 | if (h == fourcc("IHNc")) { |
780 | 0 | WRITE1(idxhnsw->keep_max_size_level0); |
781 | 0 | auto idx_hnsw_cagra = dynamic_cast<const IndexHNSWCagra*>(idxhnsw); |
782 | 0 | WRITE1(idx_hnsw_cagra->base_level_only); |
783 | 0 | WRITE1(idx_hnsw_cagra->num_base_level_search_entrypoints); |
784 | 0 | } |
785 | 25 | write_HNSW(&idxhnsw->hnsw, f); |
786 | 25 | if (io_flags & IO_FLAG_SKIP_STORAGE) { |
787 | 0 | uint32_t n4 = fourcc("null"); |
788 | 0 | WRITE1(n4); |
789 | 25 | } else { |
790 | 25 | write_index(idxhnsw->storage, f); |
791 | 25 | } |
792 | 25 | } else if (const IndexNSG* idxnsg = dynamic_cast<const IndexNSG*>(idx)) { |
793 | 0 | uint32_t h = dynamic_cast<const IndexNSGFlat*>(idx) ? fourcc("INSf") |
794 | 0 | : dynamic_cast<const IndexNSGPQ*>(idx) ? fourcc("INSp") |
795 | 0 | : dynamic_cast<const IndexNSGSQ*>(idx) ? fourcc("INSs") |
796 | 0 | : 0; |
797 | 0 | FAISS_THROW_IF_NOT(h != 0); |
798 | 0 | WRITE1(h); |
799 | 0 | write_index_header(idxnsg, f); |
800 | 0 | WRITE1(idxnsg->GK); |
801 | 0 | WRITE1(idxnsg->build_type); |
802 | 0 | WRITE1(idxnsg->nndescent_S); |
803 | 0 | WRITE1(idxnsg->nndescent_R); |
804 | 0 | WRITE1(idxnsg->nndescent_L); |
805 | 0 | WRITE1(idxnsg->nndescent_iter); |
806 | 0 | write_NSG(&idxnsg->nsg, f); |
807 | 0 | write_index(idxnsg->storage, f); |
808 | 0 | } else if ( |
809 | 0 | const IndexNNDescent* idxnnd = |
810 | 0 | dynamic_cast<const IndexNNDescent*>(idx)) { |
811 | 0 | auto idxnndflat = dynamic_cast<const IndexNNDescentFlat*>(idx); |
812 | 0 | FAISS_THROW_IF_NOT(idxnndflat != nullptr); |
813 | 0 | uint32_t h = fourcc("INNf"); |
814 | 0 | FAISS_THROW_IF_NOT(h != 0); |
815 | 0 | WRITE1(h); |
816 | 0 | write_index_header(idxnnd, f); |
817 | 0 | write_NNDescent(&idxnnd->nndescent, f); |
818 | 0 | write_index(idxnnd->storage, f); |
819 | 0 | } else if ( |
820 | 0 | const IndexPQFastScan* idxpqfs = |
821 | 0 | dynamic_cast<const IndexPQFastScan*>(idx)) { |
822 | 0 | uint32_t h = fourcc("IPfs"); |
823 | 0 | WRITE1(h); |
824 | 0 | write_index_header(idxpqfs, f); |
825 | 0 | write_ProductQuantizer(&idxpqfs->pq, f); |
826 | 0 | WRITE1(idxpqfs->implem); |
827 | 0 | WRITE1(idxpqfs->bbs); |
828 | 0 | WRITE1(idxpqfs->qbs); |
829 | 0 | WRITE1(idxpqfs->ntotal2); |
830 | 0 | WRITE1(idxpqfs->M2); |
831 | 0 | WRITEVECTOR(idxpqfs->codes); |
832 | 0 | } else if ( |
833 | 0 | const IndexIVFPQFastScan* ivpq_2 = |
834 | 0 | dynamic_cast<const IndexIVFPQFastScan*>(idx)) { |
835 | 0 | uint32_t h = fourcc("IwPf"); |
836 | 0 | WRITE1(h); |
837 | 0 | write_ivf_header(ivpq_2, f); |
838 | 0 | WRITE1(ivpq_2->by_residual); |
839 | 0 | WRITE1(ivpq_2->code_size); |
840 | 0 | WRITE1(ivpq_2->bbs); |
841 | 0 | WRITE1(ivpq_2->M2); |
842 | 0 | WRITE1(ivpq_2->implem); |
843 | 0 | WRITE1(ivpq_2->qbs2); |
844 | 0 | write_ProductQuantizer(&ivpq_2->pq, f); |
845 | 0 | write_InvertedLists(ivpq_2->invlists, f); |
846 | 0 | } else if ( |
847 | 0 | const IndexRowwiseMinMax* imm = |
848 | 0 | dynamic_cast<const IndexRowwiseMinMax*>(idx)) { |
849 | | // IndexRowwiseMinmaxFloat |
850 | 0 | uint32_t h = fourcc("IRMf"); |
851 | 0 | WRITE1(h); |
852 | 0 | write_index_header(imm, f); |
853 | 0 | write_index(imm->index, f); |
854 | 0 | } else if ( |
855 | 0 | const IndexRowwiseMinMaxFP16* imm_2 = |
856 | 0 | dynamic_cast<const IndexRowwiseMinMaxFP16*>(idx)) { |
857 | | // IndexRowwiseMinmaxHalf |
858 | 0 | uint32_t h = fourcc("IRMh"); |
859 | 0 | WRITE1(h); |
860 | 0 | write_index_header(imm_2, f); |
861 | 0 | write_index(imm_2->index, f); |
862 | 0 | } else if ( |
863 | 0 | const IndexRaBitQ* idxq = dynamic_cast<const IndexRaBitQ*>(idx)) { |
864 | 0 | uint32_t h = fourcc("Ixrq"); |
865 | 0 | WRITE1(h); |
866 | 0 | write_index_header(idx, f); |
867 | 0 | write_RaBitQuantizer(&idxq->rabitq, f); |
868 | 0 | WRITEVECTOR(idxq->codes); |
869 | 0 | WRITEVECTOR(idxq->center); |
870 | 0 | WRITE1(idxq->qb); |
871 | 0 | } else if ( |
872 | 0 | const IndexIVFRaBitQ* ivrq = |
873 | 0 | dynamic_cast<const IndexIVFRaBitQ*>(idx)) { |
874 | 0 | uint32_t h = fourcc("Iwrq"); |
875 | 0 | WRITE1(h); |
876 | 0 | write_ivf_header(ivrq, f); |
877 | 0 | write_RaBitQuantizer(&ivrq->rabitq, f); |
878 | 0 | WRITE1(ivrq->code_size); |
879 | 0 | WRITE1(ivrq->by_residual); |
880 | 0 | WRITE1(ivrq->qb); |
881 | 0 | write_InvertedLists(ivrq->invlists, f); |
882 | 0 | } else { |
883 | 0 | FAISS_THROW_MSG("don't know how to serialize this type of index"); |
884 | 0 | } |
885 | 60 | } |
886 | | |
887 | 0 | void write_index(const Index* idx, FILE* f, int io_flags) { |
888 | 0 | FileIOWriter writer(f); |
889 | 0 | write_index(idx, &writer, io_flags); |
890 | 0 | } |
891 | | |
892 | 0 | void write_index(const Index* idx, const char* fname, int io_flags) { |
893 | 0 | FileIOWriter writer(fname); |
894 | 0 | write_index(idx, &writer, io_flags); |
895 | 0 | } |
896 | | |
897 | 0 | void write_VectorTransform(const VectorTransform* vt, const char* fname) { |
898 | 0 | FileIOWriter writer(fname); |
899 | 0 | write_VectorTransform(vt, &writer); |
900 | 0 | } |
901 | | |
902 | | /************************************************************* |
903 | | * Write binary indexes |
904 | | **************************************************************/ |
905 | | |
// Serialize the fields common to all binary indexes, in the fixed order
// the matching reader expects.
static void write_index_binary_header(const IndexBinary* idx, IOWriter* f) {
    WRITE1(idx->d);
    WRITE1(idx->code_size);
    WRITE1(idx->ntotal);
    WRITE1(idx->is_trained);
    WRITE1(idx->metric_type);
}
913 | | |
// Serialize the header of a binary IVF index: the common binary-index
// fields, then the IVF-specific state (list count, probe count, the
// coarse quantizer and the direct map).
static void write_binary_ivf_header(const IndexBinaryIVF* ivf, IOWriter* f) {
    write_index_binary_header(ivf, f);
    WRITE1(ivf->nlist);
    WRITE1(ivf->nprobe);
    write_index_binary(ivf->quantizer, f);
    write_direct_map(&ivf->direct_map, f);
}
921 | | |
922 | | static void write_binary_hash_invlists( |
923 | | const IndexBinaryHash::InvertedListMap& invlists, |
924 | | int b, |
925 | 0 | IOWriter* f) { |
926 | 0 | size_t sz = invlists.size(); |
927 | 0 | WRITE1(sz); |
928 | 0 | size_t maxil = 0; |
929 | 0 | for (auto it = invlists.begin(); it != invlists.end(); ++it) { |
930 | 0 | if (it->second.ids.size() > maxil) { |
931 | 0 | maxil = it->second.ids.size(); |
932 | 0 | } |
933 | 0 | } |
934 | 0 | int il_nbit = 0; |
935 | 0 | while (maxil >= ((uint64_t)1 << il_nbit)) { |
936 | 0 | il_nbit++; |
937 | 0 | } |
938 | 0 | WRITE1(il_nbit); |
939 | | |
940 | | // first write sizes then data, may be useful if we want to |
941 | | // memmap it at some point |
942 | | |
943 | | // buffer for bitstrings |
944 | 0 | std::vector<uint8_t> buf(((b + il_nbit) * sz + 7) / 8); |
945 | 0 | BitstringWriter wr(buf.data(), buf.size()); |
946 | 0 | for (auto it = invlists.begin(); it != invlists.end(); ++it) { |
947 | 0 | wr.write(it->first, b); |
948 | 0 | wr.write(it->second.ids.size(), il_nbit); |
949 | 0 | } |
950 | 0 | WRITEVECTOR(buf); |
951 | |
|
952 | 0 | for (auto it = invlists.begin(); it != invlists.end(); ++it) { |
953 | 0 | WRITEVECTOR(it->second.ids); |
954 | 0 | WRITEVECTOR(it->second.vecs); |
955 | 0 | } |
956 | 0 | } |
957 | | |
958 | | static void write_binary_multi_hash_map( |
959 | | const IndexBinaryMultiHash::Map& map, |
960 | | int b, |
961 | | size_t ntotal, |
962 | 0 | IOWriter* f) { |
963 | 0 | int id_bits = 0; |
964 | 0 | while ((ntotal > ((idx_t)1 << id_bits))) { |
965 | 0 | id_bits++; |
966 | 0 | } |
967 | 0 | WRITE1(id_bits); |
968 | 0 | size_t sz = map.size(); |
969 | 0 | WRITE1(sz); |
970 | 0 | size_t nbit = (b + id_bits) * sz + ntotal * id_bits; |
971 | 0 | std::vector<uint8_t> buf((nbit + 7) / 8); |
972 | 0 | BitstringWriter wr(buf.data(), buf.size()); |
973 | 0 | for (auto it = map.begin(); it != map.end(); ++it) { |
974 | 0 | wr.write(it->first, b); |
975 | 0 | wr.write(it->second.size(), id_bits); |
976 | 0 | for (auto id : it->second) { |
977 | 0 | wr.write(id, id_bits); |
978 | 0 | } |
979 | 0 | } |
980 | 0 | WRITEVECTOR(buf); |
981 | 0 | } |
982 | | |
// Serialize a binary index. Each supported concrete type writes a 4-byte
// fourcc tag first so the reader can route to the matching deserializer;
// the branch order defines dispatch precedence for related types.
void write_index_binary(const IndexBinary* idx, IOWriter* f) {
    if (const IndexBinaryFlat* idxf =
                dynamic_cast<const IndexBinaryFlat*>(idx)) {
        uint32_t h = fourcc("IBxF");
        WRITE1(h);
        write_index_binary_header(idx, f);
        // raw database codes
        WRITEVECTOR(idxf->xb);
    } else if (
            const IndexBinaryIVF* ivf =
                    dynamic_cast<const IndexBinaryIVF*>(idx)) {
        uint32_t h = fourcc("IBwF");
        WRITE1(h);
        write_binary_ivf_header(ivf, f);
        write_InvertedLists(ivf->invlists, f);
    } else if (
            const IndexBinaryFromFloat* idxff =
                    dynamic_cast<const IndexBinaryFromFloat*>(idx)) {
        uint32_t h = fourcc("IBFf");
        WRITE1(h);
        write_index_binary_header(idxff, f);
        // the wrapped float index is serialized with the float writer
        write_index(idxff->index, f);
    } else if (
            const IndexBinaryHNSW* idxhnsw =
                    dynamic_cast<const IndexBinaryHNSW*>(idx)) {
        uint32_t h = fourcc("IBHf");
        WRITE1(h);
        write_index_binary_header(idxhnsw, f);
        // graph structure first, then the storage index (recursive)
        write_HNSW(&idxhnsw->hnsw, f);
        write_index_binary(idxhnsw->storage, f);
    } else if (
            const IndexBinaryIDMap* idxmap =
                    dynamic_cast<const IndexBinaryIDMap*>(idx)) {
        // IndexBinaryIDMap2 also lands here; it is distinguished only by
        // its fourcc, the serialized payload is identical
        uint32_t h = dynamic_cast<const IndexBinaryIDMap2*>(idx)
                ? fourcc("IBM2")
                : fourcc("IBMp");
        // no need to store additional info for IndexIDMap2
        WRITE1(h);
        write_index_binary_header(idxmap, f);
        write_index_binary(idxmap->index, f);
        WRITEVECTOR(idxmap->id_map);
    } else if (
            const IndexBinaryHash* idxh =
                    dynamic_cast<const IndexBinaryHash*>(idx)) {
        uint32_t h = fourcc("IBHh");
        WRITE1(h);
        write_index_binary_header(idxh, f);
        WRITE1(idxh->b);
        WRITE1(idxh->nflip);
        write_binary_hash_invlists(idxh->invlists, idxh->b, f);
    } else if (
            const IndexBinaryMultiHash* idxmh =
                    dynamic_cast<const IndexBinaryMultiHash*>(idx)) {
        uint32_t h = fourcc("IBHm");
        WRITE1(h);
        write_index_binary_header(idxmh, f);
        write_index_binary(idxmh->storage, f);
        WRITE1(idxmh->b);
        WRITE1(idxmh->nhash);
        WRITE1(idxmh->nflip);
        // one packed map per hash function
        for (int i = 0; i < idxmh->nhash; i++) {
            write_binary_multi_hash_map(
                    idxmh->maps[i], idxmh->b, idxmh->ntotal, f);
        }
    } else {
        FAISS_THROW_MSG("don't know how to serialize this type of index");
    }
}
1050 | | |
1051 | 0 | void write_index_binary(const IndexBinary* idx, FILE* f) { |
1052 | 0 | FileIOWriter writer(f); |
1053 | 0 | write_index_binary(idx, &writer); |
1054 | 0 | } |
1055 | | |
1056 | 0 | void write_index_binary(const IndexBinary* idx, const char* fname) { |
1057 | 0 | FileIOWriter writer(fname); |
1058 | 0 | write_index_binary(idx, &writer); |
1059 | 0 | } |
1060 | | |
1061 | | } // namespace faiss |