Update app.py
app.py (CHANGED)
@@ -353,7 +353,7 @@ class Translators:
         prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
         tokenized_input = pipe.tokenizer(self.input_text, return_tensors="pt")
         num_input_tokens = len(tokenized_input["input_ids"][0])
-        max_new_tokens = round(num_input_tokens + 0.
+        max_new_tokens = round(num_input_tokens + 0.5 * num_input_tokens)
         outputs = pipe(prompt, max_new_tokens=max_new_tokens, do_sample=False)
         translated_text = outputs[0]["generated_text"]
         print(f"Input chars: {len(self.input_text)}", f"Input tokens: {num_input_tokens}", f"max_new_tokens: {max_new_tokens}",
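The new line budgets generation at 1.5x the input length in tokens (num_input_tokens + 0.5 * num_input_tokens), a common heuristic for translation, where output length usually tracks input length. Below is a minimal standalone sketch of the same heuristic, assuming a placeholder model and input text that are not taken from this Space; it also passes add_generation_prompt=True so the model replies to the message rather than continuing it (the Space's own code passes False).

# Minimal sketch of the 1.5x token-budget heuristic from the diff above.
# Model name and input text are placeholder assumptions, not values from app.py.
from transformers import pipeline

pipe = pipeline("text-generation", model="Qwen/Qwen2.5-0.5B-Instruct")  # placeholder model

input_text = "Bonjour, comment allez-vous aujourd'hui ?"
messages = [{"role": "user", "content": f"Translate to English: {input_text}"}]

# add_generation_prompt=True appends the assistant turn so the model answers;
# the diff above uses False.
prompt = pipe.tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

# Budget the output at 1.5x the input token count: translations tend to stay
# close to the source length, so this leaves headroom without letting
# generation run unbounded.
tokenized_input = pipe.tokenizer(input_text, return_tensors="pt")
num_input_tokens = len(tokenized_input["input_ids"][0])
max_new_tokens = round(num_input_tokens + 0.5 * num_input_tokens)

outputs = pipe(prompt, max_new_tokens=max_new_tokens, do_sample=False)
print(outputs[0]["generated_text"])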