From 66fe2ee46465306e241296b2d3440f6ba31b7305 Mon Sep 17 00:00:00 2001
From: Ruonan Wang
Date: Fri, 9 Aug 2024 14:04:09 +0300
Subject: [PATCH] initial support of `IPEX_LLM_PERFORMANCE_MODE` (#11754)

* add perf mode

* update

* fix style
---
 python/llm/dev/benchmark/all-in-one/run.py     | 10 +++++-----
 python/llm/src/ipex_llm/transformers/lookup.py |  4 ++++
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/python/llm/dev/benchmark/all-in-one/run.py b/python/llm/dev/benchmark/all-in-one/run.py
index bb5acf261bf..9d9f16cf271 100644
--- a/python/llm/dev/benchmark/all-in-one/run.py
+++ b/python/llm/dev/benchmark/all-in-one/run.py
@@ -53,8 +53,8 @@ def run_model_in_thread(model, in_out, tokenizer, result, warm_up, num_beams, in
     for i in range(num_trials + warm_up):
         st = time.perf_counter()
         if lookahead:
-            output_ids = model.generate(input_ids, lookahead=3, do_sample=False, max_matching_ngram_size=2, max_new_tokens=out_len,
-                                        min_new_tokens=out_len, num_beams=num_beams)
+            output_ids = model.generate(input_ids, lookahead=2, do_sample=False, max_matching_ngram_size=2, max_new_tokens=out_len,
+                                        min_new_tokens=out_len, num_beams=num_beams)
         else:
             output_ids = model.generate(input_ids, do_sample=False, max_new_tokens=out_len,
                                         min_new_tokens=out_len, num_beams=num_beams)
@@ -67,8 +67,8 @@ def run_model_in_thread(model, in_out, tokenizer, result, warm_up, num_beams, in
             torch.xpu.empty_cache()
         actual_out_len = output_ids.shape[1] - actual_in_len
         if i >= warm_up:
-            if lookahead:
-                result[in_out].append([model.first_token_time, (end - st - model.first_token_time)/model.n_token_generated, 0,
+            if lookahead or os.environ.get("IPEX_LLM_PERFORMANCE_MODE", None) == "1":
+                result[in_out].append([model.first_token_time, (end - st - model.first_token_time)/(model.n_token_generated - 1), 0,
                                        actual_in_len, actual_out_len, load_time, 0])
             else:
                 result[in_out].append([model.first_cost, model.rest_cost_mean, model.encoder_time,
@@ -510,7 +510,7 @@ def run_transformer_int4_gpu(repo_id,
     load_time = end - st
     print(">> loading of model costs {}s and {}GB".format(load_time, torch.xpu.memory.memory_reserved()/(1024**3)))
 
-    if not lookahead:
+    if not lookahead and os.environ.get("IPEX_LLM_PERFORMANCE_MODE", None) != "1":
         model = BenchmarkWrapper(model)
 
     result = {}
diff --git a/python/llm/src/ipex_llm/transformers/lookup.py b/python/llm/src/ipex_llm/transformers/lookup.py
index 36815902445..e5725ff76d6 100644
--- a/python/llm/src/ipex_llm/transformers/lookup.py
+++ b/python/llm/src/ipex_llm/transformers/lookup.py
@@ -21,6 +21,7 @@
 #
 
 from typing import Callable, List, Optional, Tuple
+import os
 import torch
 import time
 import copy
@@ -54,6 +55,9 @@ def generate(
     **kwargs,
 ):
     lookahead = kwargs.pop("lookahead", None)
+    perf_mode = os.environ.get("IPEX_LLM_PERFORMANCE_MODE", None)
+    if perf_mode == "1" and lookahead is None:
+        lookahead = 2 # default to 2 now
     if lookahead:
         from ipex_llm.transformers.convert import get_enable_ipex
         _enable_ipex = get_enable_ipex()
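
Usage note (not part of the patch): a minimal sketch of how the new flag is expected to be used, assuming an ipex-llm INT4 model loaded through the standard `ipex_llm.transformers` API. The model id, prompt, and generation length below are placeholders, not taken from this change. With `IPEX_LLM_PERFORMANCE_MODE=1` set before generation and no explicit `lookahead` kwarg, the patched `generate()` falls back to lookahead decoding with `lookahead=2`.

import os
os.environ["IPEX_LLM_PERFORMANCE_MODE"] = "1"  # enable performance mode before generate() runs

import torch
from transformers import AutoTokenizer
from ipex_llm.transformers import AutoModelForCausalLM

model_path = "meta-llama/Llama-2-7b-chat-hf"  # placeholder model id
model = AutoModelForCausalLM.from_pretrained(model_path, load_in_4bit=True,
                                             trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

input_ids = tokenizer("What is AI?", return_tensors="pt").input_ids
with torch.inference_mode():
    # no lookahead kwarg passed: with the env var set, generate() defaults to lookahead=2
    output_ids = model.generate(input_ids, do_sample=False, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))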