lucas-ventura committed
Commit 0f36c12 · verified · Parent: a2f436f

Update app.py

Files changed (1):
  1. app.py (+18 -20)
app.py CHANGED
@@ -34,27 +34,25 @@ def load_base_model():
     global base_model, tokenizer

     if base_model is None:
-        try:
-            print(f"Loading base model: {LLAMA_CKPT_PATH}")
-            base_model = load_model_llamarecipes(
-                model_name=LLAMA_CKPT_PATH,
-                device_map="auto",
-                quantization=None,
-                use_fast_kernels=True,
-            )
-            tokenizer = AutoTokenizer.from_pretrained(LLAMA_CKPT_PATH)
-        except Exception as e:
-            # Try to get the local path using the download function
-            model_path = download_base_model("lucas-ventura/chapter-llama", local_dir=".")
-            model_path += "/" + LLAMA_CKPT_PATH
-            print(f"Model path: {model_path}")
-            base_model = load_model_llamarecipes(
-                model_name=model_path,
-                device_map="auto",
-                quantization=None,
-                use_fast_kernels=True,
-            )
-            tokenizer = AutoTokenizer.from_pretrained(model_path)
+        print(f"Loading base model: {LLAMA_CKPT_PATH}")
+        # base_model = load_model_llamarecipes(
+        #     model_name=LLAMA_CKPT_PATH,
+        #     device_map="auto",
+        #     quantization=None,
+        #     use_fast_kernels=True,
+        # )
+        # tokenizer = AutoTokenizer.from_pretrained(LLAMA_CKPT_PATH)
+        # Try to get the local path using the download function
+        model_path = download_base_model("lucas-ventura/chapter-llama", local_dir=".")
+        model_path += f"lucas-ventura/chapter-llama/{LLAMA_CKPT_PATH}"
+        print(f"Model path: {model_path}")
+        base_model = load_model_llamarecipes(
+            model_name=model_path,
+            device_map="auto",
+            quantization=None,
+            use_fast_kernels=True,
+        )
+        tokenizer = AutoTokenizer.from_pretrained(model_path)

     base_model.eval()
     tokenizer.pad_token = tokenizer.eos_token
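
To make the new path handling easier to trace, here is a minimal, self-contained sketch of the string construction the added lines perform. download_base_model and LLAMA_CKPT_PATH below are hypothetical stand-ins; the real ones are defined in app.py and its helpers and may behave differently. The only assumption is that the helper returns the local_dir it was given.

# Hypothetical stand-ins; the real download_base_model and LLAMA_CKPT_PATH
# live in app.py and its helpers and may behave differently.
LLAMA_CKPT_PATH = "checkpoints/example-checkpoint"  # placeholder value

def download_base_model(repo_id: str, local_dir: str = ".") -> str:
    """Stand-in: assume the helper fetches repo_id and returns local_dir."""
    return local_dir

model_path = download_base_model("lucas-ventura/chapter-llama", local_dir=".")
model_path += f"lucas-ventura/chapter-llama/{LLAMA_CKPT_PATH}"
print(f"Model path: {model_path}")
# Under these assumptions this prints:
#   Model path: .lucas-ventura/chapter-llama/checkpoints/example-checkpoint
# The repo id is appended directly to whatever the helper returns, with no
# "/" separator added, so the final path depends on that return value.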