Increase default max_new_tokens in generate method from 100 to 500 to allow longer generated outputs
This commit is contained in:
parent 8978cf74e3
commit 79227d6def
@@ -30,7 +30,7 @@ class TextGenerator:
         print("Model loaded successfully!")
 
-    def generate(self, prompt: str, max_new_tokens: int = 100, num_return_sequences: int = 1,
+    def generate(self, prompt: str, max_new_tokens: int = 500, num_return_sequences: int = 1,
                  temperature: float = 1.0, do_sample: bool = True) -> Dict:
         """
         Generate text from a prompt
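
Below is a minimal, hypothetical usage sketch of what the new default means for callers. The module path and constructor arguments are assumptions; only the generate() signature above comes from this diff.

# Hypothetical usage of TextGenerator.generate after this change.
# The import path and constructor arguments are assumptions; only the
# generate() signature appears in the diff above.
from text_generator import TextGenerator  # assumed module name

generator = TextGenerator()

# With the new default, a call without max_new_tokens may produce
# up to 500 new tokens instead of 100.
long_result = generator.generate("Once upon a time")

# Callers that relied on the old limit can still request it explicitly.
short_result = generator.generate("Once upon a time", max_new_tokens=100)

print(long_result, short_result)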