diff --git a/README.md b/README.md
index 75ec69f..6049440 100644
--- a/README.md
+++ b/README.md
@@ -198,7 +198,8 @@
 model_name = "deepseek-ai/DeepSeek-V2"
 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
 # `max_memory` should be set based on your devices
 max_memory = {i: "75GB" for i in range(8)}
-model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, device_map="auto", torch_dtype=torch.bfloat16, max_memory=max_memory)
+# `device_map` cannot be set to `auto`
+model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, device_map="sequential", torch_dtype=torch.bfloat16, max_memory=max_memory, attn_implementation="eager")
 model.generation_config = GenerationConfig.from_pretrained(model_name)
 model.generation_config.pad_token_id = model.generation_config.eos_token_id
@@ -219,7 +220,8 @@
 model_name = "deepseek-ai/DeepSeek-V2-Chat"
 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
 # `max_memory` should be set based on your devices
 max_memory = {i: "75GB" for i in range(8)}
-model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, device_map="auto", torch_dtype=torch.bfloat16, max_memory=max_memory)
+# `device_map` cannot be set to `auto`
+model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, device_map="sequential", torch_dtype=torch.bfloat16, max_memory=max_memory, attn_implementation="eager")
 model.generation_config = GenerationConfig.from_pretrained(model_name)
 model.generation_config.pad_token_id = model.generation_config.eos_token_id
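
For reference, a minimal end-to-end sketch of the base-model snippet after this change, consolidated so it can be run as-is. It assumes the imports used elsewhere in the README (`torch` and `transformers`) and an 8-GPU node matching the `max_memory` map; the prompt text is purely illustrative and not part of the patch.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig

model_name = "deepseek-ai/DeepSeek-V2"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

# `max_memory` should be set based on your devices (here: 8 GPUs, 75GB each)
max_memory = {i: "75GB" for i in range(8)}

# `device_map` cannot be set to `auto`; "sequential" fills each GPU in order
# up to its `max_memory` cap. Attention is forced to the eager implementation.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    device_map="sequential",
    torch_dtype=torch.bfloat16,
    max_memory=max_memory,
    attn_implementation="eager",
)
model.generation_config = GenerationConfig.from_pretrained(model_name)
model.generation_config.pad_token_id = model.generation_config.eos_token_id

# Illustrative completion check (hypothetical prompt, not from the patch).
text = "An attention function can be described as mapping a query and a set of key-value pairs to an output."
inputs = tokenizer(text, return_tensors="pt")
outputs = model.generate(**inputs.to(model.device), max_new_tokens=100)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

The chat-model hunk is identical in shape: swap `model_name` for `"deepseek-ai/DeepSeek-V2-Chat"` and build the prompt with the tokenizer's chat template instead of raw text.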