Rotary Logger  1.0.2
The middleware rotary logger
Loading...
Searching...
No Matches
rotary_logger_cls.py
Go to the documentation of this file.
"""
# +==== BEGIN rotary_logger =================+
# LOGO:
# ..........####...####..........
# ......###.....#.#########......
# ....##........#.###########....
# ...#..........#.############...
# ...#..........#.#####.######...
# ..#.....##....#.###..#...####..
# .#.....#.##...#.##..##########.
# #.....##########....##...######
# #.....#...##..#.##..####.######
# .#...##....##.#.##..###..#####.
# ..#.##......#.#.####...######..
# ..#...........#.#############..
# ..#...........#.#############..
# ...##.........#.############...
# ......#.......#.#########......
# .......#......#.########.......
# .........#####...#####.........
# /STOP
# PROJECT: rotary_logger
# FILE: rotary_logger.py
# CREATION DATE: 29-10-2025
# LAST Modified: 10:56:50 27-03-2026
# DESCRIPTION:
# A module that provides a universal python light on iops way of logging to files your program execution.
# /STOP
# COPYRIGHT: (c) Asperguide
# PURPOSE: This is the main file of the module, it contains the core code for the module.
# // AR
# +==== END rotary_logger =================+
#
"""
35
import os
import sys
import atexit
from warnings import warn
from pathlib import Path
from typing import Any, Optional, List, Callable
from threading import RLock

try:
    from . import constants as CONST
    from .tee_stream import TeeStream
    from .file_instance import FileInstance
    from .rogger import Rogger, RI
except ImportError:
    import constants as CONST
    from tee_stream import TeeStream
    from file_instance import FileInstance
    from rogger import Rogger, RI
55
class RotaryLogger:
    """High-level coordinator that installs `TeeStream` wrappers.

    Responsibilities:
    - Validate and create the target log folder.
    - Configure a `FileInstance` with encoding, prefix and rotation policy.
    - Replace `sys.stdout` and `sys.stderr` with `TeeStream` instances.
    """

    def __init__(
        self,
        log_to_file: bool = CONST.LOG_TO_FILE_ENV,
        override: bool = False,
        raw_log_folder: str = CONST.RAW_LOG_FOLDER_ENV,
        default_log_folder: Path = CONST.DEFAULT_LOG_FOLDER,
        default_max_filesize: int = CONST.DEFAULT_LOG_MAX_FILE_SIZE,
        merge_streams: bool = True,
        *,
        encoding: str = CONST.DEFAULT_ENCODING,
        merge_stdin: bool = False,
        capture_stdin: bool = False,
        capture_stdout: bool = True,
        capture_stderr: bool = True,
        prefix_in_stream: bool = True,
        prefix_out_stream: bool = True,
        prefix_err_stream: bool = True,
        log_function_calls_stdin: bool = False,
        log_function_calls_stdout: bool = False,
        log_function_calls_stderr: bool = False,
        program_log: bool = False,
        program_debug_log: bool = False,
        suppress_program_warning_logs: bool = False,
        suppress_program_error_logs: bool = False,
    ) -> None:
        """Initialise a new RotaryLogger.

        Does not start logging; call start_logging() to install TeeStream
        wrappers and begin mirroring output.

        Arguments:
            log_to_file (bool): Whether file logging is enabled. Default: CONST.LOG_TO_FILE_ENV
            override (bool): Whether existing log files may be overwritten. Default: False
            raw_log_folder (str): Raw path string for the log folder. Default: CONST.RAW_LOG_FOLDER_ENV
            default_log_folder (Path): Fallback log folder path. Default: CONST.DEFAULT_LOG_FOLDER
            default_max_filesize (int): Maximum log file size in MB before rotation. Default: CONST.DEFAULT_LOG_MAX_FILE_SIZE
            merge_streams (bool): Whether stdout and stderr share a single log file. Default: True

        Keyword Arguments:
            encoding (str): File encoding for log files. Default: CONST.DEFAULT_ENCODING
            merge_stdin (bool): Whether stdin is merged into the shared log file. Default: False
            capture_stdin (bool): Whether stdin is wrapped with a TeeStream. Default: False
            capture_stdout (bool): Whether stdout is wrapped with a TeeStream. Default: True
            capture_stderr (bool): Whether stderr is wrapped with a TeeStream. Default: True
            prefix_in_stream (bool): Whether stdin entries are prefixed. Default: True
            prefix_out_stream (bool): Whether stdout entries are prefixed. Default: True
            prefix_err_stream (bool): Whether stderr entries are prefixed. Default: True
            log_function_calls_stdin (bool): Whether TeeStream function calls on stdin are logged. Default: False
            log_function_calls_stdout (bool): Whether TeeStream function calls on stdout are logged. Default: False
            log_function_calls_stderr (bool): Whether TeeStream function calls on stderr are logged. Default: False
            program_log (bool): Whether to let the module (rotary_logger) output status logs about what it is doing. Default: False
            program_debug_log (bool): Whether to let the module (rotary_logger) output debug logs. Default: False
            suppress_program_warning_logs (bool): Whether to prevent the module (rotary_logger) from outputting warnings (ex: initialising an already initialised stream). Default: False
            suppress_program_error_logs (bool): Whether to prevent the module (rotary_logger) from outputting errors (ex: a broken pipe). Default: False
        """
        # Re-entrant lock guarding stream (re)assignment and shared state.
        # NOTE(review): reconstructed — every other method references
        # `self._file_lock`, so it must be created here; confirm placement.
        self._file_lock = RLock()
        self.log_to_file: bool = log_to_file
        self.raw_log_folder: Path = Path(raw_log_folder)
        self.default_log_folder: Path = default_log_folder
        self.default_max_filesize: int = default_max_filesize
        # Prefix tracker
        self.prefix: CONST.Prefix = CONST.Prefix()
        self.prefix.std_in = prefix_in_stream
        self.prefix.std_out = prefix_out_stream
        self.prefix.std_err = prefix_err_stream
        # The general file config
        self.file_data: FileInstance = FileInstance(None)
        self.file_data.set_encoding(encoding)
        self.file_data.set_merged(merge_streams)
        self.file_data.set_prefix(self.prefix)
        self.file_data.set_override(override)
        self.file_data.set_merge_stdin(merge_stdin)
        # Toggles to specify whether to capture a stream or not; used by
        # start_logging to determine which streams to wrap.
        self.capture_stdin: bool = capture_stdin
        self.capture_stdout: bool = capture_stdout
        self.capture_stderr: bool = capture_stderr
        # Log the std function calls configuration
        self.log_function_calls_stdin = log_function_calls_stdin
        self.log_function_calls_stdout = log_function_calls_stdout
        self.log_function_calls_stderr = log_function_calls_stderr
        # File stream instances are created in start_logging and assigned to
        # these attributes for later reference and cleanup.
        self._file_stream_instances: CONST.FileStreamInstances = CONST.FileStreamInstances()
        # Stream instance tracking
        self.stdout_stream: Optional[TeeStream] = None
        self.stderr_stream: Optional[TeeStream] = None
        self.stdin_stream: Optional[TeeStream] = None
        # Logging status
        self.paused: bool = False
        # Track whether we've registered atexit handlers to avoid duplicates
        self._atexit_registered: bool = False
        self._registered_flushers: List[Callable] = []
        # Logging section
        self.program_log = program_log
        self.program_debug_log = program_debug_log
        self.suppress_program_warning_logs = suppress_program_warning_logs
        self.suppress_program_error_logs = suppress_program_error_logs
        self.rogger: Rogger = RI
        # NOTE(review): argument list reconstructed to mirror the four
        # verbosity flags stored just above — confirm against Rogger.re_toggle.
        self.rogger.re_toggle(
            self.program_log,
            self.program_debug_log,
            self.suppress_program_warning_logs,
            self.suppress_program_error_logs,
        )
        # Internal module startup log
        self.rogger.log_info("RotaryLogger initialized", stream=sys.stdout)
171
172 def __del__(self) -> None:
173 """Best-effort cleanup on object deletion.
174
175 Calls stop_logging() to restore original streams. Errors are not
176 raised since __del__ may run during interpreter shutdown.
177 """
178 self.stop_logging()
179
180 def __call__(self, *args: Any, **kwds: Any) -> None:
181 """Allow the instance to be called as a function to start logging.
182
183 Calling the instance is equivalent to calling start_logging() and
184 is provided for compact initialisation patterns.
185
186 Arguments:
187 *args (Any): Ignored positional arguments.
188 **kwds (Any): Ignored keyword arguments.
189 """
191 self.start_logging()
192
193 def _get_user_max_file_size(self) -> int:
194 """Return the maximum log file size from the environment or the current default.
195
196 Reads the `LOG_MAX_SIZE` environment variable and coerces it to an
197 integer. Falls back to the value stored in `file_data` if the variable
198 is absent or non-numeric.
199
200 Returns:
201 The resolved maximum log file size in bytes (as stored by `FileInstance`).
202 """
203 default_max_log_size: int = self.file_data.get_max_size()
204 try:
205 val = int(
206 os.environ.get(
207 "LOG_MAX_SIZE",
208 str(default_max_log_size)
209 )
210 )
211 self.rogger.log_debug(
212 f"Resolved user max file size: {val}",
213 stream=sys.stdout
214 )
215 return val
216 except ValueError:
217 self.rogger.log_warning(
218 f"Invalid LOG_MAX_SIZE env value, falling back to default: {default_max_log_size}",
219 stream=sys.stderr
220 )
221 return default_max_log_size
222
223 def _verify_user_log_path(self, raw_log_folder: Path = CONST.DEFAULT_LOG_FOLDER) -> Path:
224 """Validate, resolve and ensure writability of the requested log folder.
225
226 Resolves relative paths against the package directory, appends the
227 standard base-folder name when missing, and performs a write-test.
228 Falls back to the default log folder on any validation failure.
229
230 Keyword Arguments:
231 raw_log_folder (Path): Candidate log folder path. Default: CONST.DEFAULT_LOG_FOLDER
232
233 Raises:
234 RuntimeError: If both the requested path and the default fallback are not writable.
235
236 Returns:
237 The validated, writable, resolved log folder path.
238 """
239 # Snapshot inputs and minimal state under lock, then perform
240 # filesystem operations outside the lock to avoid blocking other
241 # threads. Any path validation/mkdir/write attempts happen below
242 # without holding `self._file_lock`.
243 try:
244 raw = Path(raw_log_folder)
245 if raw.is_absolute():
246 candidate = raw.resolve(strict=False)
247 else:
248 candidate = (
249 Path(__file__).parent /
250 raw
251 ).resolve(strict=False)
252
253 # If the user didn't explicitly end with our base folder name, append it.
254 if candidate.name != CONST.LOG_FOLDER_BASE_NAME:
255 candidate = candidate / CONST.LOG_FOLDER_BASE_NAME
256
257 # Basic validation: protect against overly long paths.
258 if len(str(candidate)) > 255:
259 raise ValueError(f"{CONST.MODULE_NAME} Path too long")
260
261 # Ensure we can create and write into the folder. Do I/O here
262 # outside of any locks to avoid blocking other threads.
263 try:
264 candidate.mkdir(parents=True, exist_ok=True)
265 testfile = candidate / ".rotary_write_test"
266 with open(testfile, "w", encoding="utf-8") as fh:
267 fh.write("x")
268 testfile.unlink()
269 self.rogger.log_info(
270 f"Verified writable log folder: {candidate}",
271 stream=sys.stdout
272 )
273 except OSError as e:
274 self.rogger.log_error(
275 f"Log folder not writable: {candidate} -> {e}",
276 stream=sys.stderr
277 )
278 raise ValueError(
279 f"{CONST.MODULE_NAME} Path not writable: {e}") from e
280
281 return candidate
282 except ValueError as e:
283 self.rogger.log_warning(
284 f"Invalid LOG_FOLDER_NAME ({raw_log_folder!r}): {e}. Falling back to default.",
285 stream=sys.stderr
286 )
287 warn(
288 f"{CONST.MODULE_NAME} [WARN] Invalid LOG_FOLDER_NAME ({raw_log_folder!r}): {e}. Falling back to default."
289 )
290 try:
291 CONST.DEFAULT_LOG_FOLDER.mkdir(parents=True, exist_ok=True)
292 self.rogger.log_info(
293 f"Falling back to default log folder: {CONST.DEFAULT_LOG_FOLDER}",
294 stream=sys.stdout
295 )
296 except OSError as err:
297 raise RuntimeError(
298 f"{CONST.MODULE_NAME} The provided and default folder paths are not writable"
299 ) from err
300 return CONST.DEFAULT_LOG_FOLDER
301
302 def _resolve_log_folder(self, log_folder: Optional[Path]) -> Path:
303 """Resolve and verify the final log folder to use.
304
305 Centralises the logic of falling back to the configured default and
306 delegates validation to _verify_user_log_path().
307
308 Arguments:
309 log_folder (Optional[Path]): Requested log folder, or None to use the default.
310
311 Returns:
312 The validated, writable, resolved log folder path.
313 """
315 if log_folder is None:
316 log_folder = self.default_log_folder
317 return self._verify_user_log_path(log_folder)
318
319 def _handle_stream_assignments(self, log_folder: Path) -> None:
320 """Create `FileInstance` objects and store them in `self._file_stream_instances`.
321
322 Reads the current configuration snapshot from `self.file_data` (outside the
323 lock) and constructs either a single shared `FileInstance` (when `merged` is True)
324 or three separate per-stream instances for stdin, stdout, and stderr.
325
326 When merged, stdout and stderr share the same descriptor. stdin is also merged
327 into that file when `merge_stdin` is True; otherwise its own unmerged instance
328 is created. Merged/unmerged state is recorded in
329 `self._file_stream_instances.merged_streams`.
330
331 Arguments:
332 log_folder (Path): The validated, writable root folder for log files.
333 """
335 _override = self.file_data.get_override()
336 _encoding = self.file_data.get_encoding()
337 _prefix = self.file_data.get_prefix()
338 _max_size_mb = self.file_data.get_max_size()
339 _flush_size_kb = self.file_data.get_flush_size()
340 _merged_flag = self.file_data.get_merged()
341 _merge_stdin_flag = self.file_data.get_merge_stdin()
342
343 self.rogger.log_debug(
344 f"Handling stream assignments (merged={_merged_flag}, merge_stdin={_merge_stdin_flag})",
345 stream=sys.stdout
346 )
347 if _merged_flag:
348 mixed_inst: FileInstance = FileInstance(
349 file_path=log_folder,
350 override=_override,
351 merged=True,
352 encoding=_encoding,
353 prefix=_prefix,
354 max_size_mb=_max_size_mb,
355 flush_size_kb=_flush_size_kb,
356 folder_prefix=None,
357 merge_stdin=_merge_stdin_flag,
358 log_to_file=self.log_to_filelog_to_file,
359 )
360
361 self._file_stream_instances.stdout = mixed_inst
362 self._file_stream_instances.stderr = mixed_inst
363 self._file_stream_instances.merged_streams[CONST.StdMode.STDOUT] = True
364 self._file_stream_instances.merged_streams[CONST.StdMode.STDERR] = True
365 if _merge_stdin_flag:
366 self._file_stream_instances.stdin = mixed_inst
367 self._file_stream_instances.merged_streams[CONST.StdMode.STDIN] = True
368 else:
370 file_path=log_folder,
371 override=_override,
372 merged=False,
373 encoding=_encoding,
374 prefix=_prefix,
375 max_size_mb=_max_size_mb,
376 flush_size_kb=_flush_size_kb,
377 folder_prefix=CONST.StdMode.STDIN,
378 merge_stdin=False,
379 log_to_file=self.log_to_filelog_to_file,
380 )
381 self.rogger.log_info(
382 f"Created merged FileInstance for stdout/stderr at {log_folder}",
383 stream=sys.stdout
384 )
385 else:
387 file_path=log_folder,
388 override=_override,
389 merged=False,
390 encoding=_encoding,
391 prefix=_prefix,
392 max_size_mb=_max_size_mb,
393 flush_size_kb=_flush_size_kb,
394 folder_prefix=CONST.StdMode.STDIN,
395 merge_stdin=False,
396 log_to_file=self.log_to_filelog_to_file,
397 )
399 file_path=log_folder,
400 override=_override,
401 merged=False,
402 encoding=_encoding,
403 prefix=_prefix,
404 max_size_mb=_max_size_mb,
405 flush_size_kb=_flush_size_kb,
406 folder_prefix=CONST.StdMode.STDOUT,
407 merge_stdin=_merge_stdin_flag,
408 log_to_file=self.log_to_filelog_to_file,
409 )
411 file_path=log_folder,
412 override=_override,
413 merged=False,
414 encoding=_encoding,
415 prefix=_prefix,
416 max_size_mb=_max_size_mb,
417 flush_size_kb=_flush_size_kb,
418 folder_prefix=CONST.StdMode.STDERR,
419 merge_stdin=_merge_stdin_flag,
420 log_to_file=self.log_to_filelog_to_file,
421 )
422
423 self._file_stream_instances.merged_streams[CONST.StdMode.STDOUT] = False
424 self._file_stream_instances.merged_streams[CONST.StdMode.STDERR] = False
425 self._file_stream_instances.merged_streams[CONST.StdMode.STDIN] = False
426 self.rogger.log_info(
427 f"Created separate FileInstance objects for each stream at {log_folder}",
428 stream=sys.stdout
429 )
430
432 self,
433 *,
434 log_folder: Optional[Path] = None,
435 max_filesize: Optional[int] = None,
436 merged: Optional[bool] = None,
437 log_to_file: bool = True,
438 merge_stdin: Optional[bool] = None,
439 skip_redirect_check_stdin: bool = False,
440 skip_redirect_check_stdout: bool = False,
441 skip_redirect_check_stderr: bool = False,
442 ) -> None:
443 """Start capturing stdout and stderr and configure file output.
444
445 Installs TeeStream wrappers for sys.stdout and sys.stderr so output
446 continues to appear on the terminal while being mirrored to rotating
447 files on disk. Configuration snapshots are taken under the internal
448 lock; filesystem operations (mkdir, write-test) are performed outside
449 it to keep critical sections short. The sys.* assignments are made
450 while holding the lock to keep the replacement atomic.
451
452 Keyword Arguments:
453 log_folder (Optional[Path]): Base folder to write logs; falls back to configured defaults. Default: None
454 max_filesize (Optional[int]): Override for the rotation size in MB. Default: None
455 merged (Optional[bool]): Whether to merge stdout and stderr into one file. Default: None
456 log_to_file (bool): Whether file writes are enabled. Default: True
457 merge_stdin (Optional[bool]): Whether to merge stdin into the shared log file. Default: None
458 skip_redirect_check_stdin (bool, optional): Skip the existing redirection check for stdin, allowing multiple logger instances to redirect the same stream (legacy behavior). Default: False
459 skip_redirect_check_stdout (bool, optional): Skip the existing redirection check for stdout, allowing multiple logger instances to redirect the same stream (legacy behavior). Default: False,
460 skip_redirect_check_stderr (bool, optional): Skip the existing redirection check for stderr, allowing multiple logger instances to redirect the same stream (legacy behavior). Default: False,
461 """
462
463 # Entry log
464 self.rogger.log_info(
465 f"start_logging called (log_folder={log_folder}, max_filesize={max_filesize}, merged={merged}, log_to_file={log_to_file})",
466 stream=sys.stdout
467 )
468
469 # Prepare FileInstance configurations based on the provided arguments and current settings.
471 # Defaults (snapshot)
472 if log_folder is None:
474 _raw_folder = self.default_log_folder
475 else:
476 _raw_folder = self.raw_log_folderraw_log_folder
477 else:
478 _raw_folder = log_folder
479
480 if max_filesize is not None:
481 self.file_data.set_max_size(max_filesize)
482 # Apply user-provided max size
483 self.file_data.set_max_size(self._get_user_max_file_size())
484
485 # snapshot file_data-derived configuration to avoid nested locks
486 if merged is not None:
487 self.file_data.set_merged(merged)
488 if merge_stdin is not None:
489 self.file_data.set_merge_stdin(merge_stdin)
490 # Honor the requested log_to_file flag for newly-created FileInstance
491 # objects so we don't create/ open descriptors when file logging is
492 # explicitly disabled by the caller.
493 self.log_to_filelog_to_file = bool(log_to_file)
494 self.rogger.log_debug(
495 f"Self Log to file = {self.log_to_file}, Log to file = {log_to_file}"
496 )
497
498 # Determine final log folder using the built-in verification (outside lock).
499 # If file logging is requested, perform full verification and create
500 # the folder; otherwise compute a candidate path for internal use
501 # without touching the filesystem (avoids creating dirs when
502 # log_to_file is False).
503 if log_to_file is True:
504 _log_folder: Path = self._verify_user_log_path(_raw_folder)
505 _log_folder.mkdir(parents=True, exist_ok=True)
506 else:
507 # Do not perform verification that may create or write files;
508 # instead create a non-strict Path that mirrors the final
509 # expected layout (append base folder name when missing).
510 candidate = Path(_raw_folder)
511 if candidate.name != CONST.LOG_FOLDER_BASE_NAME:
512 candidate = candidate / CONST.LOG_FOLDER_BASE_NAME
513 _log_folder = candidate.resolve(strict=False)
514
515 # Create the file descriptor instances based on the current configuration (outside lock)
516 self._handle_stream_assignments(_log_folder)
517 # Construct TeeStream instances outside the lock to avoid holding
518 # `RotaryLogger._file_lock` while the TeeStream initializer may
519 # acquire `FileInstance` locks. Then assign the globals under the
520 # lock to keep the replacement atomic.
521 _stdin_stream: Optional[TeeStream] = None
522 _stdout_stream: Optional[TeeStream] = None
523 _stderr_stream: Optional[TeeStream] = None
524 if self._file_stream_instances.stdin:
525 if skip_redirect_check_stdin:
526 self.rogger.log_warning(
527 "Skipping redirect check for stdin because 'skip_redirect_check_stdin' is True",
528 stream=CONST.RAW_STDERR
529 )
530 if not skip_redirect_check_stdin and isinstance(sys.stdin, TeeStream):
531 self.rogger.log_warning(
532 "Stdin stream is already being redirected, skipping redirection",
533 stream=CONST.RAW_STDERR
534 )
535 _stdin_stream = sys.stdin
536 else:
537 self.rogger.log_info(
538 "Stdin is not yet being redirected, redirecting",
539 stream=CONST.RAW_STDOUT
540 )
541 self.rogger.log_debug(
542 f"(stdin) Log to file: {log_to_file}"
543 )
544 _stdin_stream = TeeStream(
545 self._file_stream_instances.stdin,
546 CONST.MIM_STDIN,
547 mode=CONST.StdMode.STDIN,
548 log_to_file=log_to_file,
549 log_function_calls=self.log_function_calls_stdin
550 )
551 self.rogger.log_debug(
552 "Constructed TeeStream for stdin",
553 stream=CONST.RAW_STDOUT
554 )
555 if self._file_stream_instances.stdout:
556 if skip_redirect_check_stdout:
557 self.rogger.log_warning(
558 "Skipping redirect check for stdout because 'skip_redirect_check_stdin' is True",
559 stream=CONST.RAW_STDERR
560 )
561 if not skip_redirect_check_stdout and isinstance(sys.stdout, TeeStream):
562 self.rogger.log_warning(
563 "Stdout stream is already being redirected, skipping redirection",
564 stream=CONST.RAW_STDERR
565 )
566 _stdout_stream = sys.stdout
567 else:
568 self.rogger.log_info(
569 "Stdout is not yet being redirected, redirecting",
570 stream=CONST.RAW_STDOUT
571 )
572 self.rogger.log_debug(
573 f"(stdout) Log to file: {log_to_file}",
574 stream=CONST.RAW_STDOUT
575 )
576 _stdout_stream = TeeStream(
577 self._file_stream_instances.stdout,
578 CONST.MIM_STDOUT,
579 mode=CONST.StdMode.STDOUT,
580 log_to_file=log_to_file,
581 log_function_calls=self.log_function_calls_stdout
582 )
583 self.rogger.log_debug(
584 "Constructed TeeStream for stdout",
585 stream=sys.stdout
586 )
587 if self._file_stream_instances.stderr:
588 if skip_redirect_check_stderr:
589 self.rogger.log_warning(
590 "Skipping redirect check for stderr because 'skip_redirect_check_stderr' is True",
591 stream=CONST.RAW_STDERR
592 )
593 if not skip_redirect_check_stderr and isinstance(sys.stderr, TeeStream):
594 self.rogger.log_warning(
595 "Stderr stream is already being redirected, skipping redirection",
596 stream=CONST.RAW_STDERR
597 )
598 _stderr_stream = sys.stderr
599 else:
600 self.rogger.log_info(
601 "Stderr is not yet being redirected, redirecting",
602 stream=CONST.RAW_STDOUT
603 )
604 self.rogger.log_debug(
605 f"(stderr) Log to file: {log_to_file}"
606 )
607 _stderr_stream = TeeStream(
608 self._file_stream_instances.stderr,
609 CONST.MIM_STDERR,
610 mode=CONST.StdMode.STDERR,
611 log_to_file=log_to_file,
612 log_function_calls=self.log_function_calls_stderr
613 )
614 self.rogger.log_debug(
615 "Constructed TeeStream for stderr",
616 stream=CONST.RAW_STDOUT
617 )
618
620 self.rogger.log_info(
621 "redirecting streams",
622 stream=CONST.RAW_STDOUT
623 )
624 self.rogger.log_debug(
625 f"Will assign streams: stdin={bool(_stdin_stream)}, stdout={bool(_stdout_stream)}, stderr={bool(_stderr_stream)}",
626 stream=CONST.RAW_STDOUT
627 )
628 if _stdin_stream:
629 sys.stdin = _stdin_stream
630 self.stdin_stream = _stdin_stream
631 if _stdout_stream:
632 sys.stdout = _stdout_stream
633 self.stdout_stream = _stdout_stream
634 if _stderr_stream:
635 sys.stderr = _stderr_stream
636 self.stderr_stream = _stderr_stream
637
638 # Ensure final flush at exit, but only register once
639 if not self._atexit_registered:
640 self._registered_flushers = []
641 if self.stdin_stream:
642 self._registered_flushers.append(self.stdin_stream.flush)
643 if self.stdout_stream:
644 self._registered_flushers.append(self.stdout_stream.flush)
645 if self.stderr_stream:
646 self._registered_flushers.append(self.stderr_stream.flush)
647 try:
648 for f in self._registered_flushers:
649 atexit.register(f)
650 self._atexit_registered = True
651 except (TypeError, AttributeError):
652 # Registration may fail if the objects are not callable
653 # or lack attributes; handle only the expected errors.
654 # Clear the list to avoid false expectations.
655 self._registered_flushers = []
656 else:
657 self.rogger.log_info(
658 "Registered atexit flush handlers",
659 stream=CONST.RAW_STDOUT
660 )
661
662 def _resume_logging_locked(self, to_flush: List[TeeStream]) -> None:
663 """Restore TeeStream wrappers on sys.stdin/stdout/stderr.
664
665 Must be called while `self._file_lock` is already held. Sets
666 `self.paused` to False and reassigns `sys.stdout`, `sys.stderr`,
667 and `sys.stdin` to their respective TeeStream instances. Each
668 stream that is reinstalled is appended to `to_flush` so the
669 caller can flush them after releasing the lock.
670
671 Arguments:
672 to_flush (List[TeeStream]): Accumulator list; streams to flush after the lock is released.
673 """
674 self.paused = False
675 self.rogger.log_info(
676 "Resuming logging (reinstalling TeeStream wrappers)",
677 stream=CONST.RAW_STDOUT
678 )
679 out = self.stdout_stream
680 err = self.stderr_stream
681 inn = self.stdin_stream
682 # currently pause -> resume logging
683 if out is not None:
684 sys.stdout = out
685 to_flush.append(out)
686 if err is not None:
687 sys.stderr = err
688 to_flush.append(err)
689 if inn is not None:
690 sys.stdin = inn
691 to_flush.append(inn)
692 self.rogger.log_debug(
693 "Reinstalled stdin TeeStream",
694 stream=CONST.RAW_STDOUT
695 )
696
697 def _pause_logging_locked(self, to_flush: List[TeeStream]) -> None:
698 """Replace TeeStream wrappers with the original standard streams.
699
700 Must be called while `self._file_lock` is already held. Sets
701 `self.paused` to True and reassigns `sys.stdout`, `sys.stderr`,
702 and `sys.stdin` back to their original (pre-TeeStream) counterparts.
703 Each stream that is uninstalled is appended to `to_flush` so the
704 caller can flush buffered data after releasing the lock.
705
706 Arguments:
707 to_flush (List[TeeStream]): Accumulator list; streams to flush after the lock is released.
708 """
709 self.paused = True
710 self.rogger.log_info(
711 "Pausing logging (restoring original streams)",
712 stream=CONST.RAW_STDOUT
713 )
714 out = self.stdout_stream
715 err = self.stderr_stream
716 inn = self.stdin_stream
717 # currently running -> pause logging
718 if out is not None:
719 sys.stdout = out.original_stream
720 to_flush.append(out)
721 self.rogger.log_debug(
722 "Uninstalled stdout TeeStream",
723 stream=CONST.RAW_STDOUT
724 )
725 if err is not None:
726 sys.stderr = err.original_stream
727 to_flush.append(err)
728 self.rogger.log_debug(
729 "Uninstalled stderr TeeStream",
730 stream=CONST.RAW_STDOUT
731 )
732 if inn is not None:
733 sys.stdin = inn.original_stream
734 to_flush.append(inn)
735 self.rogger.log_debug(
736 "Uninstalled stdin TeeStream",
737 stream=CONST.RAW_STDOUT
738 )
739
740 def _flush_streams(self, to_flush: List[TeeStream]) -> None:
741 """Flush a list of TeeStream instances, suppressing expected I/O errors.
742
743 Iterates over `to_flush` and calls `flush()` on each stream. `OSError`
744 and `ValueError` (e.g. broken pipe, closed file descriptor) are caught
745 and silently ignored; all other exceptions propagate.
746
747 Arguments:
748 to_flush (List[TeeStream]): Streams to flush.
749 """
750 # Perform flushes outside the lock (may do I/O)
751 for s in to_flush:
752 try:
753 self.rogger.log_debug(
754 f"Flushing stream: {getattr(s, 'mode', 'unknown')}",
755 stream=CONST.RAW_STDOUT
756 )
757 s.flush()
758 self.rogger.log_debug(
759 f"Flushed stream: {getattr(s, 'mode', 'unknown')}",
760 stream=CONST.RAW_STDOUT
761 )
762 except (OSError, ValueError) as e:
763 self.rogger.log_warning(
764 f"Ignored flush error for stream: {getattr(s, 'mode', 'unknown')} -> {e}",
765 stream=CONST.RAW_STDERR
766 )
767
768 def pause_logging(self, *, toggle: bool = True) -> bool:
769 """Toggle the logger pause state.
770
771 When the logger is paused the TeeStream objects are uninstalled and
772 the original streams restored. When called again the TeeStream objects
773 are reinstalled. sys.* assignments are performed while holding the
774 internal lock; flushing is done afterwards to keep critical sections
775 short.
776
777 Keyword Arguments:
778 toggle (bool): When True and the logger is currently running, pause it; when True and already paused, resume it. When False, always pause. Default: True
779
780 Returns:
781 The new paused state (True when now paused, False when now resumed).
782 """
783 # Snapshot streams and current state under the lock, and perform
784 # the sys.* assignment while still holding the lock to avoid races.
785 to_flush = []
787 if toggle is True and self.paused is True:
788 self.rogger.log_debug(
789 "pause_logging: toggle requested and currently paused -> resume",
790 stream=CONST.RAW_STDOUT
791 )
792 self._resume_logging_locked(to_flush)
793 else:
794 self.rogger.log_debug(
795 "pause_logging: pausing or toggling into pause",
796 stream=CONST.RAW_STDOUT
797 )
798 self._pause_logging_locked(to_flush)
799 _paused = self.paused
800 self._flush_streams(to_flush)
801 return _paused
802
803 def resume_logging(self, *, toggle: bool = False) -> bool:
804 """Explicitly resume logging (idempotent).
805
806 Equivalent to calling pause_logging() while paused, but provided as
807 a convenience. sys.* assignments are made under the internal lock;
808 flushing is done afterwards.
809
810 Keyword Arguments:
811 toggle (bool): When True and the logger is not paused, pause it instead of resuming. When False, always resume. Default: False
812
813 Returns:
814 The paused state after the call (False when logging was resumed, True when toggled into pause).
815 """
816 to_flush = []
818 if toggle is True and self.paused is False:
819 self.rogger.log_debug(
820 "resume_logging: toggle requested and currently running -> pause",
821 stream=CONST.RAW_STDOUT
822 )
823 self._pause_logging_locked(to_flush)
824 else:
825 self.rogger.log_debug(
826 "resume_logging: resuming logging",
827 stream=CONST.RAW_STDOUT
828 )
829 self._resume_logging_locked(to_flush)
830 _paused = self.paused
831 self._flush_streams(to_flush)
832 return _paused
833
834 def is_redirected(self, stream: CONST.StdMode) -> bool:
835 """Return whether the given standard stream is currently redirected.
836
837 Lightweight query; safe to call concurrently.
838
839 Arguments:
840 stream (CONST.StdMode): One of CONST.StdMode.STDOUT, STDERR, or STDIN.
841
842 Returns:
843 True if the corresponding stream has a TeeStream installed, False otherwise.
844 """
845 _stderr_stream: Optional[TeeStream] = None
846 _stdout_stream: Optional[TeeStream] = None
847 _stdin_stream: Optional[TeeStream] = None
849 _stderr_stream = self.stderr_stream
850 _stdout_stream = self.stdout_stream
851 _stdin_stream = self.stdin_stream
852 if stream == CONST.StdMode.STDERR:
853 return _stderr_stream is not None
854 if stream == CONST.StdMode.STDOUT:
855 return _stdout_stream is not None
856 if stream == CONST.StdMode.STDIN:
857 return _stdin_stream is not None
858 return False
859
860 def is_logging(self) -> bool:
861 """Return True if logging is currently active (not paused).
862
863 Checks whether any TeeStream is installed and the logger is not
864 marked as paused. Safe to call concurrently.
865
866 Returns:
867 True if at least one TeeStream is installed and the logger is not paused.
868 """
870 has_stream = (
871 self.stdout_stream is not None
872 ) or (
873 self.stderr_stream is not None
874 ) or (
875 self.stdin_stream is not None
876 )
877
878 return has_stream and (not bool(self.paused))
879
880 def stop_logging(self) -> None:
881 """Stop logging and restore the original standard streams.
882
883 Restores sys.stdout, sys.stderr, and sys.stdin to their original
884 values, attempts to unregister any atexit flush handlers registered
885 by start_logging(), and flushes remaining buffers. Stream replacement
886 and atexit unregistration are done under the internal lock; flushing
887 is performed afterwards.
888 """
889 to_flush = []
891 if self.stdout_stream is not None:
892 sys.stdout = self.stdout_stream.original_stream
893 to_flush.append(self.stdout_stream)
894 self.stdout_stream = None
895 if self.stderr_stream is not None:
896 sys.stderr = self.stderr_stream.original_stream
897 to_flush.append(self.stderr_stream)
898 self.stderr_stream = None
899 if self.stdin_stream is not None:
900 sys.stdin = self.stdin_stream.original_stream
901 to_flush.append(self.stdin_stream)
902 self.stdin_stream = None
903 self.paused = False
904
905 if getattr(self, "_atexit_registered", False):
906 self.rogger.log_debug(
907 "Unregistering atexit flush handlers",
908 stream=sys.stdout
909 )
910 for f in getattr(self, "_registered_flushers", []):
911 try:
912 atexit.unregister(f)
913 except ValueError:
914 pass
915 except AttributeError:
916 pass
917 self._registered_flushers = []
918 self._atexit_registered = False
919 self.rogger.log_info(
920 "Unregistered atexit flush handlers",
921 stream=CONST.RAW_STDOUT
922 )
923
924 for s in to_flush:
925 try:
926 self.rogger.log_debug(
927 f"stop_logging: flushing stream {getattr(s, 'mode', 'unknown')}",
928 stream=CONST.RAW_STDOUT
929 )
930 s.flush()
931 except (OSError, ValueError):
932 self.rogger.log_warning(
933 f"stop_logging: ignored flush error for {getattr(s, 'mode', 'unknown')}",
934 stream=CONST.RAW_STDERR
935 )
936
937
938if __name__ == "__main__":
None _resume_logging_locked(self, List[TeeStream] to_flush)
None _handle_stream_assignments(self, Path log_folder)
None __init__(self, bool log_to_file=CONST.LOG_TO_FILE_ENV, bool override=False, str raw_log_folder=CONST.RAW_LOG_FOLDER_ENV, Path default_log_folder=CONST.DEFAULT_LOG_FOLDER, int default_max_filesize=CONST.DEFAULT_LOG_MAX_FILE_SIZE, bool merge_streams=True, *, str encoding=CONST.DEFAULT_ENCODING, bool merge_stdin=False, bool capture_stdin=False, bool capture_stdout=True, bool capture_stderr=True, bool prefix_in_stream=True, bool prefix_out_stream=True, bool prefix_err_stream=True, bool log_function_calls_stdin=False, bool log_function_calls_stdout=False, bool log_function_calls_stderr=False, bool program_log=False, bool program_debug_log=False, bool suppress_program_warning_logs=False, bool suppress_program_error_logs=False)
Path _resolve_log_folder(self, Optional[Path] log_folder)
None _flush_streams(self, List[TeeStream] to_flush)
bool resume_logging(self, *, bool toggle=False)
bool pause_logging(self, *, bool toggle=True)
Path _verify_user_log_path(self, Path raw_log_folder=CONST.DEFAULT_LOG_FOLDER)
None __call__(self, *Any args, **Any kwds)
None _pause_logging_locked(self, List[TeeStream] to_flush)
bool is_redirected(self, CONST.StdMode stream)
None start_logging(self, *, Optional[Path] log_folder=None, Optional[int] max_filesize=None, Optional[bool] merged=None, bool log_to_file=True, Optional[bool] merge_stdin=None, bool skip_redirect_check_stdin=False, bool skip_redirect_check_stdout=False, bool skip_redirect_check_stderr=False)