import asyncio
from datetime import datetime

import torch

from services.model_visitor import ModelVisitor
from utils.logger import Logger

logger = Logger.get_logger(__name__)


class IbmTextGenerator(ModelVisitor):
    async def visit(self, model_generator, input_text, max_length_per_chunk=50):
        return await self._generate_text(model_generator, input_text, max_length_per_chunk)

    async def _generate_text_chunk(self, model_generator, input_ids, max_length_per_chunk):
        # generate() blocks, so run it in a worker thread to keep the event loop
        # responsive; generate() also disables gradient tracking internally.
        with torch.no_grad():
            outputs = await asyncio.to_thread(
                model_generator.model.generate, input_ids,
                max_new_tokens=max_length_per_chunk)
        continuation = model_generator.tokenizer.decode(
            outputs[0], skip_special_tokens=False)
        logger.info('Chunk generated: {}'.format(continuation))
        return continuation

    async def _generate_text(self, model_generator, input_text, max_length_per_chunk):
        """
        Generates text chunk by chunk until a stop condition is met.

        Args:
            model_generator: Object exposing the model, tokenizer and device.
            input_text (str): The input string containing text blocks.
            max_length_per_chunk (int, optional): Maximum number of new tokens
                per chunk. Defaults to 50.
        """
        try:
            start_time = datetime.now()
            logger.info('Started at: {}'.format(
                start_time.strftime(model_generator._format_data_time)))
            input_ids = model_generator.tokenizer.encode(
                input_text, return_tensors='pt').to(model_generator.device)
            output_text = input_text
            while True:
                continuation = await self._generate_text_chunk(
                    model_generator, input_ids, max_length_per_chunk)
                # Keep only the text produced after the current prompt.
                new_text = continuation[len(model_generator.tokenizer.decode(
                    input_ids[0], skip_special_tokens=False)):]
                output_text += new_text
                # Re-encode the accumulated text so the next chunk continues from it.
                input_ids = model_generator.tokenizer.encode(
                    output_text, return_tensors='pt').to(model_generator.device)
                # Stop once the model emits an end-of-text token or closes a code fence.
                if "<|endoftext|>" in new_text or new_text.count('```') > 1:
                    break
            end_time = datetime.now()
            logger.info('Output generated at: {}'.format(
                end_time.strftime(model_generator._format_data_time)))
            logger.info('Time taken: {}'.format(end_time - start_time))
            return output_text
        except asyncio.CancelledError:
            logger.error(
                'Cancelling model generation due to a network disconnection.')
            return ""