
Commit 66fe2ee

initial support of IPEX_LLM_PERFORMANCE_MODE (#11754)
* add perf mode
* update
* fix style
1 parent 4b9c57c commit 66fe2ee
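
The mode is toggled purely through an environment variable that is read at generation time; a minimal sketch of opting in from Python (the surrounding model and tokenizer setup is assumed and not part of this commit):

    import os

    # Only the exact string "1" enables the new performance mode; any other
    # value, or leaving the variable unset, keeps the previous behavior.
    os.environ["IPEX_LLM_PERFORMANCE_MODE"] = "1"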

File tree

2 files changed: +9 -5 lines changed


python/llm/dev/benchmark/all-in-one/run.py

Lines changed: 5 additions & 5 deletions
@@ -53,8 +53,8 @@ def run_model_in_thread(model, in_out, tokenizer, result, warm_up, num_beams, in
         for i in range(num_trials + warm_up):
             st = time.perf_counter()
             if lookahead:
-                output_ids = model.generate(input_ids, lookahead=3, do_sample=False, max_matching_ngram_size=2, max_new_tokens=out_len,
-                                            min_new_tokens=out_len, num_beams=num_beams)
+                output_ids = model.generate(input_ids, lookahead=2, do_sample=False, max_matching_ngram_size=2, max_new_tokens=out_len,
+                                            min_new_tokens=out_len, num_beams=num_beams)
             else:
                 output_ids = model.generate(input_ids, do_sample=False, max_new_tokens=out_len,
                                             min_new_tokens=out_len, num_beams=num_beams)
@@ -67,8 +67,8 @@ def run_model_in_thread(model, in_out, tokenizer, result, warm_up, num_beams, in
             torch.xpu.empty_cache()
             actual_out_len = output_ids.shape[1] - actual_in_len
             if i >= warm_up:
-                if lookahead:
-                    result[in_out].append([model.first_token_time, (end - st - model.first_token_time)/model.n_token_generated, 0,
+                if lookahead or os.environ.get("IPEX_LLM_PERFORMANCE_MODE", None) == "1":
+                    result[in_out].append([model.first_token_time, (end - st - model.first_token_time)/(model.n_token_generated - 1), 0,
                                            actual_in_len, actual_out_len, load_time, 0])
                 else:
                     result[in_out].append([model.first_cost, model.rest_cost_mean, model.encoder_time,
@@ -510,7 +510,7 @@ def run_transformer_int4_gpu(repo_id,
     load_time = end - st
     print(">> loading of model costs {}s and {}GB".format(load_time, torch.xpu.memory.memory_reserved()/(1024**3)))
 
-    if not lookahead:
+    if not lookahead and os.environ.get("IPEX_LLM_PERFORMANCE_MODE", None) != "1":
         model = BenchmarkWrapper(model)
 
     result = {}
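
To spell out the timing arithmetic behind the new result row: in performance mode, as with lookahead, the first token is timed separately, so the average latency of the remaining tokens now divides by n_token_generated - 1 rather than n_token_generated. A small illustrative sketch with made-up numbers, reusing the attribute names from the diff:

    # Placeholder values, not measurements; the names (first_token_time,
    # n_token_generated) mirror the model attributes used in run.py above.
    st = 0.0                    # start of generation (perf_counter reading)
    end = 1.50                  # end of generation (perf_counter reading)
    first_token_time = 0.25     # latency of the first generated token
    n_token_generated = 32      # total tokens produced

    # The first token is excluded from the average, hence the "- 1" divisor.
    rest_token_latency = (end - st - first_token_time) / (n_token_generated - 1)
    print(rest_token_latency)   # ~0.0403 seconds per subsequent token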

python/llm/src/ipex_llm/transformers/lookup.py

Lines changed: 4 additions & 0 deletions
@@ -21,6 +21,7 @@
 #
 
 from typing import Callable, List, Optional, Tuple
+import os
 import torch
 import time
 import copy
@@ -54,6 +55,9 @@ def generate(
     **kwargs,
 ):
     lookahead = kwargs.pop("lookahead", None)
+    perf_mode = os.environ.get("IPEX_LLM_PERFORMANCE_MODE", None)
+    if perf_mode == "1" and lookahead is None:
+        lookahead = 2  # default to 2 now
     if lookahead:
         from ipex_llm.transformers.convert import get_enable_ipex
         _enable_ipex = get_enable_ipex()
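
The net effect of the added lines is that the environment variable only supplies a default: a caller who passes lookahead explicitly is unaffected. An illustrative helper (not part of the commit) that mirrors the added logic:

    import os

    def resolve_lookahead(**kwargs):
        # Mirrors the logic added to generate() above; purely illustrative.
        lookahead = kwargs.pop("lookahead", None)
        perf_mode = os.environ.get("IPEX_LLM_PERFORMANCE_MODE", None)
        if perf_mode == "1" and lookahead is None:
            lookahead = 2  # performance mode defaults to a lookahead of 2
        return lookahead

    os.environ["IPEX_LLM_PERFORMANCE_MODE"] = "1"
    print(resolve_lookahead())              # -> 2 (defaulted by the env var)
    print(resolve_lookahead(lookahead=3))   # -> 3 (explicit kwarg still wins)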
