forked from ggerganov/llama.cpp
-
Notifications
You must be signed in to change notification settings - Fork 2
/
update-llama.cpp
36 lines (29 loc) · 1.23 KB
/
update-llama.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
#include "ggml.h"
#include "llama.h"
#include <cstdio>
#include <map>
#include <string>
// usage:
// ./update models/llama/ggml-model-input.bin models/llama/ggml-model-output.bin
// The intent of this executable is to open old model formats and save them in the most recent format.
// Updating a model in ggml/ggmf format to ggjt will result in much faster load times, as mmap can be used to map the model into the address space directly.
// This will maintain ftype (f16, f32, q4_[n], etc)
// Also, if modified locally (along with llama.cpp), you can use this to add or remove hparams, score, etc from a model. For instance, I used this to open a model in ggjt (no score) and saved it again as ggjt (with score).
//
// Entry point: re-save a model file in the most recent llama.cpp format.
//   argv[1] — path to the input model (older ggml/ggmf/ggjt format)
//   argv[2] — path where the converted model is written
// Returns 1 on bad usage, 0 otherwise.
int main(int argc, char ** argv) {
    ggml_time_init();

    // Require both the input and output paths.
    if (argc < 3) {
        fprintf(stderr, "usage: %s model-input.bin model-output.bin\n", argv[0]);
        return 1;
    }

    // Spin up (and immediately tear down) a ggml context purely for its
    // side effect: building the f16 lookup tables the conversion relies on.
    {
        struct ggml_init_params params = { 0, NULL, false };
        struct ggml_context * ctx = ggml_init(params);
        ggml_free(ctx);
    }

    const std::string input_path  = argv[1];
    const std::string output_path = argv[2];

    // Open the old-format model and write it back out in the newest format.
    llama_model_update(input_path.c_str(), output_path.c_str());

    return 0;
}