Description

I define a logger class like this:

import logging
import json
import os, sys
from datetime import datetime


class LLMLogger:
    def __init__(self, name="LLMApp", log_level=logging.INFO, json_format=False):
        self.logger = logging.getLogger(name)
        self.logger.setLevel(log_level)
        self.json_format = json_format

        log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        formatter = logging.Formatter(log_format)

        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        self.logger.addHandler(console_handler)

    def _format_message(self, message, **kwargs):
        if self.json_format:
            log_entry = {
                "timestamp": datetime.utcnow().isoformat(),
                "message": message,
                "context": kwargs
            }
            return json.dumps(log_entry)
        return f"{message} | Context: {kwargs}"

    def debug(self, message, **kwargs):
        self.logger.debug(self._format_message(message, **kwargs))

    def info(self, message, **kwargs):
        self.logger.info(self._format_message(message, **kwargs))

    def warning(self, message, **kwargs):
        self.logger.warning(self._format_message(message, **kwargs))

    def error(self, message, **kwargs):
        self.logger.error(self._format_message(message, **kwargs))

    def critical(self, message, **kwargs):
        self.logger.critical(self._format_message(message, **kwargs))


if __name__ == "__main__":
    logger = LLMLogger(name="LLMProductionApp", log_level=logging.DEBUG, json_format=True)
    logger.info("LLM service launch", service="LLMModel", version="v1.0")
    logger.warning("high workload", queue_size=50, requests_in_queue=20)
    logger.error("generate failed", request_id="12345", error="Timeout")
    logger.debug("debug message", computation_time=0.123)
Expected Behavior

Running the script locally, each message is printed exactly once:
python3 local/logger.py
2024-12-24 07:52:19,956 - LLMProductionApp - INFO - {"timestamp": "2024-12-24T07:52:19.956471", "message": "LLM service launch", "context": {"service": "LLMModel", "version": "v1.0"}}
2024-12-24 07:52:19,956 - LLMProductionApp - WARNING - {"timestamp": "2024-12-24T07:52:19.956633", "message": "high workload", "context": {"queue_size": 50, "requests_in_queue": 20}}
2024-12-24 07:52:19,956 - LLMProductionApp - ERROR - {"timestamp": "2024-12-24T07:52:19.956727", "message": "generate failed", "context": {"request_id": "12345", "error": "Timeout"}}
2024-12-24 07:52:19,956 - LLMProductionApp - DEBUG - {"timestamp": "2024-12-24T07:52:19.956808", "message": "debug message", "context": {"computation_time": 0.123}}
Error Message

When the same logger is used inside the serving worker, each message is emitted twice: once on stderr with the custom format, and once on stdout with the worker's default INFO:: prefix:
WARN PyProcess W-190-model-stderr: 2024-12-24 07:53:52,585 - LLMProductionApp - INFO - {"timestamp": "2024-12-24T07:53:52.585513", "message": "============================load model successfully====================", "context": {}}
INFO PyProcess W-190-model-stdout: INFO::{"timestamp": "2024-12-24T07:53:52.585513", "message": "============================load model successfully====================", "context": {}}
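My reading of the two lines above (not something stated in the issue): the stderr copy carries the "%(asctime)s - %(name)s - ..." format from the handler added in LLMLogger.__init__, while the stdout copy carries an INFO:: prefix, which suggests a second handler already attached to the root logger by the hosting process. Because named loggers propagate records to the root logger by default, such a setup prints every record twice. A hypothetical standalone script that reproduces the same effect (the root-logger format here is only an assumption):

import logging
import sys

# Pretend to be the hosting process: configure the root logger with a
# stdout handler before any user code runs (format is an assumption).
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="INFO::%(message)s")

# User code then builds its own logger and attaches another handler.
logger = logging.getLogger("LLMProductionApp")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()  # defaults to stderr
handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
logger.addHandler(handler)

# The record is handled by the logger's own handler (stderr) and then
# propagates to the root logger's handler (stdout), so it prints twice.
logger.info("load model successfully")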
How to Reproduce?

Use the same logger inside the model handler's load_model function:

import logging

from transformers import AutoModelForCausalLM, AutoTokenizer

# LLMLogger is the class shown in the Description above.
custom_logger = LLMLogger(name="LLMProductionApp", log_level=logging.DEBUG, json_format=True)


def load_model(properties):
    custom_logger.info(f"Model configuration: {properties}")
    if "model_id" in properties:
        model_location = properties["model_id"]
    custom_logger.info(f"Loading model in {model_location}")
    tokenizer = AutoTokenizer.from_pretrained(model_location, trust_remote_code=True)
    custom_logger.info("============================load tokenizer successfully====================")
    model = AutoModelForCausalLM.from_pretrained(
        model_location,
        device_map="auto",
        trust_remote_code=True,
        torch_dtype="auto"
    )
    custom_logger.info("============================load model successfully====================")
    return model, tokenizer
Why is the logging output duplicated?
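A common workaround for this kind of duplication (a sketch of standard logging usage, not a confirmed fix from the maintainers; the helper name is hypothetical) is to stop the custom logger from propagating records to the root logger and to avoid attaching a second handler if the setup code runs more than once inside the worker:

import logging


def get_app_logger(name="LLMProductionApp", log_level=logging.DEBUG):
    # Hypothetical helper, roughly equivalent to what LLMLogger.__init__ sets up.
    logger = logging.getLogger(name)
    logger.setLevel(log_level)

    # Keep records out of the root logger, whose handlers (added by the
    # hosting process) would otherwise print each message a second time.
    logger.propagate = False

    # Attach the console handler only once, even if this runs repeatedly
    # for the same logger name (e.g. on module re-import).
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
        logger.addHandler(handler)
    return logger

With propagation disabled, only the copy produced by the custom handler should remain; whether that copy should go to stderr or stdout inside the worker is a separate choice.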