diff --git a/helpers/hasyncio.py b/helpers/hasyncio.py index 14e7a6550..94f74fe72 100644 --- a/helpers/hasyncio.py +++ b/helpers/hasyncio.py @@ -58,7 +58,7 @@ class _EventLoop(async_solipsism.EventLoop): # the replayed time approach and can remove `ReplayedTime` object. def __init__(self) -> None: super().__init__() - self._initial_dt = datetime.datetime.utcnow() + self._initial_dt = datetime.datetime.now(datetime.timezone.utc) def get_current_time(self) -> datetime.datetime: # `loop.time()` returns the number of seconds as `float` from when the event @@ -100,7 +100,7 @@ def solipsism_context() -> Iterator: async def gather_coroutines_with_wall_clock( - event_loop: asyncio.AbstractEventLoop, *coroutines: List[Coroutine] + event_loop: asyncio.AbstractEventLoop, *coroutines: Callable[..., Coroutine] ) -> List[Any]: """ Inject a wall clock associated to `event_loop` in all the coroutines and @@ -111,9 +111,9 @@ async def gather_coroutines_with_wall_clock( ) # Construct the coroutines here by passing the `get_wall_clock_time()` # function. - coroutines = [coro(get_wall_clock_time) for coro in coroutines] + coroutine_instances = [coro(get_wall_clock_time) for coro in coroutines] # - result: List[Any] = await asyncio.gather(*coroutines) + result: List[Any] = await asyncio.gather(*coroutine_instances) return result diff --git a/helpers/hcache.py b/helpers/hcache.py index 6f57e5792..1f5fc73e0 100644 --- a/helpers/hcache.py +++ b/helpers/hcache.py @@ -182,7 +182,10 @@ def _get_cache_size(path: str, description: str) -> str: else: if os.path.exists(path): size_in_bytes = hsystem.du(path) - size_as_str = hintros.format_size(size_in_bytes) + if isinstance(size_in_bytes, str): + size_as_str = size_in_bytes + else: + size_as_str = hintros.format_size(size_in_bytes) else: size_as_str = "nan" # TODO(gp): Compute number of files. 
diff --git a/helpers/hcache_simple.py b/helpers/hcache_simple.py index c45645886..0fa1166e1 100644 --- a/helpers/hcache_simple.py +++ b/helpers/hcache_simple.py @@ -5,7 +5,10 @@ import os import pickle import re -from typing import Any, Callable, Dict, List, Union, cast +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Union, cast + +if TYPE_CHECKING: + import pandas as pd import helpers.hdbg as hdbg import helpers.hprint as hprint @@ -406,7 +409,7 @@ def get_cache(func_name: str) -> _CacheType: # ############################################################################# -def cache_stats_to_str(func_name: str = "") -> "pd.DataFrame": # noqa: F821 +def cache_stats_to_str(func_name: str = "") -> "pd.DataFrame": """ Print the cache stats for a function or for all functions. diff --git a/helpers/hchatgpt.py b/helpers/hchatgpt.py index e2a5549d9..ce0492030 100644 --- a/helpers/hchatgpt.py +++ b/helpers/hchatgpt.py @@ -9,14 +9,17 @@ import os import sys import time -from typing import Dict, List, Optional +from typing import TYPE_CHECKING, Dict, List, Optional import helpers.hdbg as hdbg import helpers.henv as henv import helpers.hio as hio -henv.install_module_if_not_present("openai") -import openai # noqa: E402 +if TYPE_CHECKING: + import openai # type: ignore +else: + henv.install_module_if_not_present("openai") + import openai # noqa: E402 _LOG = logging.getLogger(__name__) @@ -41,7 +44,7 @@ def create_assistant( model: str = "gpt-3.5-turbo-1106", use_retrieval: bool = True, use_code_interpreter: bool = True, - use_function: Dict = None, + use_function: Optional[Dict] = None, ) -> str: """ Create an OpenAI Assistant for your OpenAI Organization. 
All configs can @@ -175,6 +178,7 @@ def _path_to_dict(path: str) -> Dict: tree = {d: _path_to_dict(os.path.join(root, d)) for d in dirs} tree.update({f: {"name": f} for f in files}) return tree + return {} # TODO(Henry): We use fileIO here to store the directory structure, which may diff --git a/helpers/hcsv.py b/helpers/hcsv.py index b3168e817..6ab295364 100644 --- a/helpers/hcsv.py +++ b/helpers/hcsv.py @@ -7,7 +7,7 @@ import ast import logging import os -from typing import Any, Callable, Dict, List, Optional +from typing import Any, Callable, Dict, List, Optional, cast import pandas as pd @@ -45,7 +45,7 @@ def _read_csv_range( """ hdbg.dassert_lt(0, from_, msg="Row 0 assumed to be header row") hdbg.dassert_lt(from_, to, msg="Empty range requested!") - skiprows = range(1, from_) + skiprows = list(range(1, from_)) nrows = to - from_ df = pd.read_csv(csv_path, skiprows=skiprows, nrows=nrows, **kwargs) if df.shape[0] < to: @@ -236,7 +236,7 @@ def convert_csv_dir_to_pq_dir( if hs3.is_s3_path(csv_dir): # TODO(gp): Pass aws_profile. s3fs = hs3.get_s3fs("am") - filenames = s3fs.ls(csv_dir) + filenames = cast(Any, s3fs).ls(csv_dir) else: # Local filesystem. hdbg.dassert_dir_exists(csv_dir) diff --git a/helpers/hdataframe.py b/helpers/hdataframe.py index ba5aed460..36a450a7d 100644 --- a/helpers/hdataframe.py +++ b/helpers/hdataframe.py @@ -236,9 +236,10 @@ def infer_sampling_points_per_year(df: Union[pd.Series, pd.DataFrame]) -> float: :return: number of time points per year (approximate) """ hdbg.dassert(hasattr(df.index, "freq") and df.index.freq is not None) + assert hasattr(df.index, "freq") and df.index.freq is not None freq = df.index.freq # TODO(*): Make start, end dates parameters that can be passed in. 
- return compute_points_per_year_for_given_freq(freq) + return compute_points_per_year_for_given_freq(str(cast(Any, freq))) @functools.lru_cache() @@ -274,7 +275,7 @@ def compute_count_per_year(df: Union[pd.Series, pd.DataFrame]) -> float: assert hasattr(df.index, "freq") and df.index.freq is not None freq = df.index.freq # Calculate the time span of `df` in years. - points_per_year = compute_points_per_year_for_given_freq(freq) + points_per_year = compute_points_per_year_for_given_freq(str(freq)) span_in_years = df.size / points_per_year # Determine the number of non-NaN/inf/etc. data points per year. count_per_year = df.count() / span_in_years @@ -302,7 +303,7 @@ def remove_duplicates( # Fix maximum value of control column at the bottom. if control_column: df = df.sort_values(by=control_column) - duplicate_columns = duplicate_columns or df.columns + duplicate_columns = duplicate_columns or list(df.columns) df = df.drop_duplicates(subset=duplicate_columns) # Sort by index to return to original view. df = df.sort_index() diff --git a/helpers/hdatetime.py b/helpers/hdatetime.py index 08f7497ba..e23a965c3 100644 --- a/helpers/hdatetime.py +++ b/helpers/hdatetime.py @@ -9,7 +9,7 @@ import datetime import logging import re -from typing import Callable, Iterable, Optional, Tuple, Union +from typing import Any, Callable, Iterable, Optional, Tuple, Union, cast # TODO(gp): Use hdbg.WARNING _WARNING = "\033[33mWARNING\033[0m" @@ -371,11 +371,12 @@ def get_current_time( # We accept only `hasyncio.EventLoop` here. If we are using standard asyncio # EventLoop we rely on wall-clock time instead of `loop.time()`. hdbg.dassert_isinstance(event_loop, asyncio.AbstractEventLoop) + assert isinstance(event_loop, asyncio.AbstractEventLoop) hdbg.dassert(hasattr(event_loop, "get_current_time")) - timestamp = event_loop.get_current_time() + timestamp = cast(Any, event_loop).get_current_time() else: # Use true real-time. 
- timestamp = datetime.datetime.utcnow() + timestamp = datetime.datetime.now(datetime.timezone.utc) # Convert it into the right timestamp = pd.Timestamp(timestamp, tz=get_UTC_tz()) if tz == "UTC": @@ -616,9 +617,9 @@ def to_generalized_datetime( # Handle both scalar and array cases for `pd.isna()`. if hasattr(datetime_dates, "all"): # datetime_dates is a Series or array-like - all_na = pd.isna(datetime_dates).all() + all_na = cast(Any, pd.isna(datetime_dates)).all() datetime_example = ( - datetime_dates.tolist()[format_example_index] + cast(Any, datetime_dates).tolist()[format_example_index] if hasattr(datetime_dates, "tolist") else datetime_dates ) @@ -632,25 +633,25 @@ def to_generalized_datetime( and hasattr(datetime_example, "strftime") and datetime_example.strftime("%Y-%m-%d") == date_example ): - return datetime_dates + return cast(Union[pd.Series, pd.Index], datetime_dates) shift_func = _shift_to_period_end(date_example) if shift_func is not None: if hasattr(datetime_dates, "map"): - datetime_dates = datetime_dates.map(shift_func) + datetime_dates = cast(Any, datetime_dates).map(shift_func) else: # For scalar case, apply the shift function directly datetime_dates = shift_func(datetime_dates) - return datetime_dates + return cast(Union[pd.Series, pd.Index], datetime_dates) # If standard conversion fails, attempt our own conversion. 
date_standard = date_standard or "standard" format_determination_output = _determine_date_format( date_example, date_standard ) if format_determination_output is None: - return datetime_dates + return cast(Union[pd.Series, pd.Index], datetime_dates) format_, date_modification_func = format_determination_output dates = dates.map(date_modification_func) - return pd.to_datetime(dates, format=format_) + return cast(Union[pd.Series, pd.Index], pd.to_datetime(dates, format=format_)) def _handle_incorrect_conversions( @@ -722,7 +723,7 @@ def shift_to_year_end(x: StrictDatetime) -> StrictDatetime: # shift the month aliases by one to get the correct order. # E.g., `calendar.month_name[1:]` is `['January', 'February', ...]` and # `calendar.month_abbr[1:]` is `['Jan', 'Feb', ...]`. - month_aliases = calendar.month_name[1:] + calendar.month_abbr[1:] + month_aliases = list(calendar.month_name[1:]) + list(calendar.month_abbr[1:]) pattern = re.compile("|".join(month_aliases), re.IGNORECASE) match = pattern.search(date) if match is None: diff --git a/helpers/hdict.py b/helpers/hdict.py index 13d388249..2e9e002b3 100644 --- a/helpers/hdict.py +++ b/helpers/hdict.py @@ -6,6 +6,7 @@ import logging from typing import ( + TYPE_CHECKING, Any, Dict, Generator, @@ -23,6 +24,9 @@ import helpers.hdbg as hdbg +if TYPE_CHECKING: + from config_root.config.config_ import Config + _LOG = logging.getLogger(__name__) @@ -71,7 +75,7 @@ def extract_leaf_values(nested: Dict[Any, Any], key: Any) -> Dict[Any, Any]: def typed_get( - dict_: Union[Dict, "Config"], # noqa: F821 + dict_: Union[Dict, "Config"], # noqa: F821 # type: ignore key: Any, default_value: Optional[Any] = _NO_VALUE_SPECIFIED, *, diff --git a/helpers/hdockerized_executables.py b/helpers/hdockerized_executables.py index 5f44ededc..f5c1c4db6 100644 --- a/helpers/hdockerized_executables.py +++ b/helpers/hdockerized_executables.py @@ -247,18 +247,18 @@ def convert_pandoc_cmd_to_arguments(cmd: str) -> Dict[str, Any]: :return: A dictionary 
with the parsed arguments. """ # Use shlex.split to tokenize the string like a shell would. - cmd = shlex.split(cmd) + cmd_list = shlex.split(cmd) # Remove the newline character that come from multiline commands with `\n`. - cmd = [arg for arg in cmd if arg != "\n"] + cmd_list = [arg for arg in cmd_list if arg != "\n"] _LOG.debug(hprint.to_str("cmd")) # The first option is the executable. - hdbg.dassert_eq(cmd[0], "pandoc") + hdbg.dassert_eq(cmd_list[0], "pandoc") # pandoc parser is difficult to emulate with `argparse`, since pandoc allows # the input file to be anywhere in the command line options. In our case we # don't know all the possible command line options so for simplicity we # assume that the first option is always the input file. - in_file_path = cmd[1] - cmd = cmd[2:] + in_file_path = cmd_list[1] + cmd_list = cmd_list[2:] _LOG.debug(hprint.to_str("cmd")) # parser = argparse.ArgumentParser() @@ -267,7 +267,7 @@ def convert_pandoc_cmd_to_arguments(cmd: str) -> Dict[str, Any]: parser.add_argument("--template", default=None) parser.add_argument("--extract-media", default=None) # Parse known arguments and capture the rest. - args, unknown_args = parser.parse_known_args(cmd) + args, unknown_args = parser.parse_known_args(cmd_list) _LOG.debug(hprint.to_str("args unknown_args")) # Filter out the option terminator if present. # Remove the `--` option terminator to treat `--option-after-terminator` as a regular argument, not as an option. @@ -707,31 +707,31 @@ def convert_latex_cmd_to_arguments(cmd: str) -> Dict[str, Any]: :return: A dictionary with the parsed arguments. """ # Use shlex.split to tokenize the string like a shell would. - cmd = shlex.split(cmd) + cmd_list = shlex.split(cmd) # Remove the newline character that come from multiline commands with `\n`. - cmd = [arg for arg in cmd if arg != "\n"] + cmd_list = [arg for arg in cmd_list if arg != "\n"] _LOG.debug(hprint.to_str("cmd")) # The first option is the executable. 
- hdbg.dassert_eq(cmd[0], "pdflatex") + hdbg.dassert_eq(cmd_list[0], "pdflatex") # We assume that the first option is always the input file. - in_file_path = cmd[-1] + in_file_path = cmd_list[-1] hdbg.dassert( not in_file_path.startswith("-"), "Invalid input file '%s'", in_file_path, ) hdbg.dassert_file_exists(in_file_path) - cmd = cmd[1:-1] + cmd_list = cmd_list[1:-1] _LOG.debug(hprint.to_str("cmd")) # parser = argparse.ArgumentParser() parser.add_argument("--output-directory", required=True) # Latex uses options like `-XYZ` which confuse `argparse` so we need to # replace `-XYZ` with `--XYZ`. - cmd = [re.sub(r"^-", r"--", cmd_opts) for cmd_opts in cmd] + cmd_list = [re.sub(r"^-", r"--", cmd_opts) for cmd_opts in cmd_list] _LOG.debug(hprint.to_str("cmd")) # # Parse known arguments and capture the rest. - args, unknown_args = parser.parse_known_args(cmd) + args, unknown_args = parser.parse_known_args(cmd_list) _LOG.debug(hprint.to_str("args unknown_args")) # Return all the arguments in a dictionary with names that match the # function signature of `run_dockerized_pandoc()`. diff --git a/helpers/henv.py b/helpers/henv.py index 9b59557b0..2709248d4 100644 --- a/helpers/henv.py +++ b/helpers/henv.py @@ -453,7 +453,7 @@ def _get_library_version(lib_name: str) -> str: return version -def _get_package_info() -> Tuple[List[str], int]: +def _get_package_info() -> Tuple[str, int]: """ Get package version information. 
@@ -559,8 +559,8 @@ def get_system_signature(git_commit_type: str = "all") -> Tuple[str, int]: hprint.dassert_one_trailing_newline(txt_tmp) txt.append(txt_tmp) # - txt = hprint.to_info("System signature", txt) - return txt, failed_imports + txt_str: str = hprint.to_info("System signature", txt) + return txt_str, failed_imports # ############################################################################# diff --git a/helpers/hgit.py b/helpers/hgit.py index 5a35f5987..225fcd15e 100644 --- a/helpers/hgit.py +++ b/helpers/hgit.py @@ -11,7 +11,7 @@ import random import re import string -from typing import List, Optional, Tuple +from typing import Any, List, Optional, Tuple, cast import helpers.hdbg as hdbg import helpers.hprint as hprint @@ -242,7 +242,8 @@ def find_git_root(path: str = ".") -> str: ) # Update the path to the parent directory for the next iteration. path = parent - return git_root_dir + hdbg.dassert_is_not(git_root_dir, None, "Git root directory should have been found") + return str(git_root_dir) # ############################################################################# @@ -853,7 +854,7 @@ def get_path_from_git_root( super_module, ret, ) - return ret + return str(ret) # TODO(gp): Rewrite this function in a better way. 
@@ -1394,7 +1395,7 @@ def does_branch_exist( exists_tmp = does_branch_exist( branch_name, mode_tmp, dir_name=dir_name ) - exists |= exists_tmp + exists = exists or exists_tmp return exists # hdbg.dassert_in(mode, ("git_local", "git_remote", "github")) diff --git a/helpers/hgoogle_drive_api.py b/helpers/hgoogle_drive_api.py index f42c178c5..fe4ef5598 100644 --- a/helpers/hgoogle_drive_api.py +++ b/helpers/hgoogle_drive_api.py @@ -28,11 +28,11 @@ # ``` -import google.oauth2.service_account as goasea -import googleapiclient.discovery as godisc -import gspread +import google.oauth2.service_account as goasea # type: ignore +import googleapiclient.discovery as godisc # type: ignore +import gspread # type: ignore import pandas as pd -from googleapiclient.discovery import build +from googleapiclient.discovery import build # type: ignore import helpers.hdbg as hdbg @@ -408,7 +408,7 @@ def create_empty_google_file( move_gfile_to_dir(gfile_id, gdrive_folder_id, credentials=credentials) # Share the Google file to the user and send an email. if user: - share_google_file(gfile_id, user) + share_google_file(gfile_id, user, credentials=credentials) _LOG.debug( "The new Google '%s': '%s' is shared with '%s'", gfile_type, @@ -454,7 +454,7 @@ def create_google_drive_folder( # ############################################################################# -def _get_folders_in_gdrive(*, service: godisc.Resource = None) -> list: +def _get_folders_in_gdrive(*, service: godisc.Resource = None, credentials: Optional[goasea.Credentials] = None) -> list: """ Get a list of folders in Google Drive. @@ -462,7 +462,9 @@ def _get_folders_in_gdrive(*, service: godisc.Resource = None) -> list: - Will use GDrive file service as default if None is given. 
""" if service is None: - service = get_gdrive_service() + if credentials is None: + raise ValueError("Either service or credentials must be provided") + service = get_gdrive_service(credentials=credentials) response = ( service.files() .list( @@ -476,14 +478,14 @@ def _get_folders_in_gdrive(*, service: godisc.Resource = None) -> list: return response.get("files") -def get_folder_id_by_name(name: str) -> Optional[list]: +def get_folder_id_by_name(name: str, *, credentials: Optional[goasea.Credentials] = None) -> Optional[list]: """ Get the folder id by the folder name. :param name: str, the name of the folder. :return: list, the list of the folder id and folder name. """ - folders = _get_folders_in_gdrive() + folders = _get_folders_in_gdrive(credentials=credentials) folder_list = [] # for folder in folders: @@ -636,8 +638,10 @@ def read_google_file( return df except gspread.exceptions.SpreadsheetNotFound: _LOG.error("Spreadsheet with URL '%s' not found.", url) + return pd.DataFrame() except Exception as e: _LOG.error("An error occurred: '%s'", str(e)) + return pd.DataFrame() def write_to_google_sheet( diff --git a/helpers/hintrospection.py b/helpers/hintrospection.py index 98f1ba68b..8e0046a1f 100644 --- a/helpers/hintrospection.py +++ b/helpers/hintrospection.py @@ -62,7 +62,7 @@ def get_function_name(count: int = 0) -> str: return func_name -def get_name_from_function(func: callable) -> str: +def get_name_from_function(func: Callable) -> str: """ Return the name of the passed function. 
@@ -245,11 +245,13 @@ def get_size_in_bytes(obj: object, seen: Optional[set] = None) -> int: ): size += sum((get_size_in_bytes(i, seen) for i in obj)) if hasattr(obj, "__slots__"): # can have __slots__ with __dict__ - size += sum( - get_size_in_bytes(getattr(obj, s), seen) - for s in obj.__slots__ - if hasattr(obj, s) - ) + slots = getattr(obj, '__slots__', None) + if slots is not None: + size += sum( + get_size_in_bytes(getattr(obj, s), seen) + for s in slots + if hasattr(obj, s) + ) return size diff --git a/helpers/hio.py b/helpers/hio.py index 3186d15b1..4203cb115 100644 --- a/helpers/hio.py +++ b/helpers/hio.py @@ -829,7 +829,7 @@ def from_json(file_name: str, *, use_types: bool = False) -> Dict: # TODO(gp): -> pandas_helpers.py -def load_df_from_json(path_to_json: str) -> "pd.DataFrame": # noqa: F821 +def load_df_from_json(path_to_json: str) -> "pd.DataFrame": # noqa: F821 # type: ignore """ Load a dataframe from a json file. diff --git a/helpers/hjoblib.py b/helpers/hjoblib.py index e6a38c4c9..9c1cab16b 100644 --- a/helpers/hjoblib.py +++ b/helpers/hjoblib.py @@ -4,6 +4,7 @@ import helpers.hjoblib as hjoblib """ +import collections.abc as cabc import concurrent.futures import logging import math @@ -115,9 +116,8 @@ def split_list_in_tasks( hdbg.dassert_lte(1, n) hdbg.dassert_lte(n, len(list_in), "There are fewer tasks than threads") if keep_order: - hdbg.dassert_is( - num_elems_per_task, - None, + hdbg.dassert( + num_elems_per_task is None, "Can't specify num_elems_per_task with keep_order", ) list_out: List[list] = [[] for _ in range(n)] @@ -242,7 +242,7 @@ def validate_workload(workload: Workload) -> bool: # Parse. workload_func, func_name, tasks = workload # Check each component. 
- hdbg.dassert_isinstance(workload_func, Callable) + hdbg.dassert_callable(workload_func) hdbg.dassert_isinstance(func_name, str) hdbg.dassert_container_type(tasks, List, tuple) hdbg.dassert(all(validate_task(task) for task in tasks)) @@ -374,7 +374,7 @@ def _workload_function(*args: Any, **kwargs: Any) -> str: def _get_workload( # args: argparse.Namespace -) -> Workload: +) -> None: """ Prepare the workload using the parameters from command line. """ @@ -493,7 +493,7 @@ def _parallel_execute_decorator( hdbg.dassert_isinstance(abort_on_error, bool) hdbg.dassert_lte(1, num_attempts) hdbg.dassert_isinstance(log_file, str) - hdbg.dassert_isinstance(workload_func, Callable) + hdbg.dassert_callable(workload_func) hdbg.dassert_isinstance(func_name, str) hdbg.dassert(validate_task(task)) # Redirect the logging output of each task to a different file. diff --git a/helpers/hllm.py b/helpers/hllm.py index 07426199e..99ce65377 100644 --- a/helpers/hllm.py +++ b/helpers/hllm.py @@ -10,7 +10,7 @@ import re from typing import Any, Dict, List, Optional, Tuple -import openai +import openai # type: ignore import pandas as pd import requests import tqdm diff --git a/helpers/hlogging.py b/helpers/hlogging.py index 34532cdeb..9b985b494 100644 --- a/helpers/hlogging.py +++ b/helpers/hlogging.py @@ -9,7 +9,7 @@ import copy import datetime import logging -from typing import Any, Iterable, List, Optional, Tuple, Union +from typing import Any, Generator, Iterable, List, Optional, Tuple, Union # Avoid dependency from other helpers modules since this is used when the code # is bootstrapped. @@ -235,9 +235,8 @@ def converter(self, timestamp: float) -> datetime.datetime: # superclass method. _ = self # timestamp=1622423570.0147252 - dt = datetime.datetime.utcfromtimestamp(timestamp) - # Convert it to an aware datetime object in UTC time. 
- dt = dt.replace(tzinfo=datetime.timezone.utc) + # Use timezone-aware datetime creation instead of deprecated utcfromtimestamp + dt = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc) if self._tzinfo is not None: # Convert it to desired timezone. dt = dt.astimezone(self._tzinfo) @@ -676,7 +675,7 @@ def _convert_time_to_string( return time_as_str def _get_wall_clock_time(self) -> str: - dt = datetime.datetime.utcnow() + dt = datetime.datetime.now(datetime.timezone.utc) return self._convert_time_to_string(dt, self._date_fmt) def _colorize_level(self, level_name: str) -> str: @@ -748,7 +747,7 @@ def set_v2_formatter( # TODO(gp): Not sure it works properly. @contextlib.contextmanager -def set_level(logger: Any, level: int) -> None: +def set_level(logger: Any, level: int) -> Generator[None, None, None]: """ Context manager changing the verbosity level. """ @@ -790,9 +789,9 @@ def test_logger() -> None: # if hasattr(_LOG, "trace"): if hasattr(logging, "TRACE"): - _LOG.trace("TRACE=%s", logging.TRACE) + _LOG.trace("TRACE=%s", logging.TRACE) # type: ignore else: - _LOG.trace("TRACE level not available") + _LOG.trace("TRACE level not available") # type: ignore # _LOG.debug("DEBUG=%s", logging.DEBUG) # diff --git a/helpers/hmarkdown_coloring.py b/helpers/hmarkdown_coloring.py index 8df97d563..f4975281e 100644 --- a/helpers/hmarkdown_coloring.py +++ b/helpers/hmarkdown_coloring.py @@ -192,6 +192,7 @@ def _interpolate_colors(num_bolds: int) -> List[str]: """ Sample `num_bolds` colors evenly spaced from the available colors. 
""" + assert all_md_colors is not None step = len(all_md_colors) // num_bolds colors = list(all_md_colors)[::step][:num_bolds] return colors diff --git a/helpers/hnumba.py b/helpers/hnumba.py index 47fc37975..fcab5238e 100644 --- a/helpers/hnumba.py +++ b/helpers/hnumba.py @@ -8,7 +8,7 @@ from typing import Any, Callable, TypeVar try: - import numba + import numba # type: ignore numba_available = True except ImportError: diff --git a/helpers/hpandas.py b/helpers/hpandas.py index 5be1b281a..d7374bd73 100644 --- a/helpers/hpandas.py +++ b/helpers/hpandas.py @@ -29,11 +29,11 @@ from s3fs import S3File, S3FileSystem except ImportError: # Fallback to dynamic import - S3File = getattr(s3fs, "S3File", None) - S3FileSystem = getattr(s3fs, "S3FileSystem", None) + S3File = getattr(s3fs, "S3File", None) # type: ignore + S3FileSystem = getattr(s3fs, "S3FileSystem", None) # type: ignore except ImportError: # If s3fs is not available, define dummy classes for type hints. - s3fs = None + s3fs = None # type: ignore class S3File: pass @@ -165,7 +165,7 @@ def dassert_unique_index( msg = dup_msg else: msg = dup_msg + msg - hdbg.dassert(index.is_unique, msg=msg, *args) + hdbg.dassert(index.is_unique, msg, *args) # TODO(gp): @all Add unit tests. @@ -205,7 +205,7 @@ def dassert_increasing_index( msg = dup_msg + msg # Dump the data to file for further inspection. # obj.to_csv("index.csv") - hdbg.dassert(index.is_monotonic_increasing, msg=msg, *args) + hdbg.dassert(index.is_monotonic_increasing, msg, *args) # TODO(gp): @all Add more info in case of failures and unit tests. 
@@ -234,7 +234,7 @@ def dassert_monotonic_index( dassert_unique_index(obj, msg, *args) index = _get_index(obj) cond = index.is_monotonic_increasing or index.is_monotonic_decreasing - hdbg.dassert(cond, msg=msg, *args) + hdbg.dassert(cond, msg, *args) # TODO(Paul): @gp -> dassert_datetime_indexed_df @@ -819,7 +819,7 @@ def drop_axis_with_all_nans( _LOG.info( "removed cols with all nans: %s %s", pct_removed, - hprint.list_to_str(removed_cols), + hprint.list_to_str(list(removed_cols)), ) if drop_rows: # Remove rows with all nans, if any. @@ -1286,7 +1286,7 @@ def _df_to_str( display_width, ): if use_tabulate: - import tabulate + import tabulate # type: ignore out.append(tabulate.tabulate(df, headers="keys", tablefmt="psql")) # TODO(Grisha): Add an option to display all rows since if `num_rows` @@ -1547,6 +1547,9 @@ def _report_srs_stats(srs: pd.Series) -> List[Any]: if not hsystem.is_running_in_ipynb(): out.append(df_as_str) txt = "\n".join(out) + else: + # When running in ipynb, return the df_as_str since we already printed above + txt = df_as_str return txt @@ -2160,7 +2163,7 @@ def compare_dfs( # Check `is_ok` and raise an assertion depending on `only_warning`. if not is_ok: hdbg._dfatal( - _, + str(_), "df1=\n%s\n and df2=\n%s\n are not equal.", df_to_str(df1, log_level=log_level), df_to_str(df2, log_level=log_level), @@ -2211,7 +2214,7 @@ def compare_dfs( # Check `is_ok` and raise assertion depending on `only_warning`. if not is_ok: hdbg._dfatal( - _, + str(_), "df1=\n%s\n and df2=\n%s\n have pct_change more than `assert_diff_threshold`.", df_to_str(df1, log_level=log_level), df_to_str(df2, log_level=log_level), @@ -2451,7 +2454,7 @@ def compute_duration_df( intersect indices as is :return: timestamp stats and updated dict of dfs, see `intersect_dfs` param """ - hdbg.dassert_isinstance(tag_to_df, Dict) + hdbg.dassert_isinstance(tag_to_df, dict) # Create df and assign columns. 
data_stats = pd.DataFrame() min_col = "min_index" @@ -2519,7 +2522,7 @@ def to_gsheet( before saving the dataframe into it; if False, the dataframe is appended to the contents of the sheet """ - import gspread_pandas + import gspread_pandas # type: ignore spread = gspread_pandas.Spread( gsheet_name, sheet=gsheet_sheet_name, create_sheet=True @@ -2607,7 +2610,7 @@ def report_outcome( print(self.title) # Convert DataFrame to HTML with colored rows based on 'is_ok' column. - def _color_rows(row: bool) -> str: + def _color_rows(row) -> List[str]: """ Apply red/green color based on boolean value in `row["is_ok"]`. """ @@ -2680,5 +2683,6 @@ def to_perc(vals: Union[List, pd.Series], **perc_kwargs: Dict[str, Any]) -> str: """ if isinstance(vals, list): vals = pd.Series(vals) + perc_kwargs['use_float'] = False ret = hprint.perc(vals.sum(), len(vals), **perc_kwargs) - return ret + return str(ret) diff --git a/helpers/hparquet.py b/helpers/hparquet.py index eece9f813..6a62b2126 100644 --- a/helpers/hparquet.py +++ b/helpers/hparquet.py @@ -249,7 +249,7 @@ def generate_parquet_files( freq: str = "1H", output_type: str = "basic", partition_mode: str = "by_date", - custom_partition_cols: str = None, + custom_partition_cols: Optional[str] = None, reset_index: bool = False, ) -> None: """ @@ -401,6 +401,7 @@ def from_parquet( ) as ts: if n_rows: # Get the latest parquet file in the directory. + hdbg.dassert_isinstance(aws_profile, str, "aws_profile must be a string for S3 operations") last_pq_file = hs3.get_latest_pq_in_s3_dir(file_name, aws_profile) file = s3_filesystem.open(last_pq_file, "rb") # Load the data. 
@@ -563,7 +564,7 @@ def to_parquet( def _yield_parquet_tile( file_name: str, - columns: List[str], + columns: Optional[List[str]], filters: List[Any], asset_id_col: str, ) -> Iterator[pd.DataFrame]: diff --git a/helpers/hprint.py b/helpers/hprint.py index 48f44a130..a9417b5e4 100644 --- a/helpers/hprint.py +++ b/helpers/hprint.py @@ -81,8 +81,8 @@ def pprint_pformat(obj: Any, *, sort_dicts: bool = False) -> str: Pretty-print in color. """ from pygments import highlight - from pygments.formatters import Terminal256Formatter - from pygments.lexers import PythonLexer + from pygments.formatters import Terminal256Formatter # type: ignore + from pygments.lexers import PythonLexer # type: ignore txt = pprint.pformat(obj, sort_dicts=sort_dicts) txt = highlight(txt, PythonLexer(), Terminal256Formatter()) @@ -381,8 +381,8 @@ def to_object_repr(obj: Any) -> str: def thousand_separator(v: float) -> str: - v = "{0:,}".format(v) - return v + formatted_v = "{0:,}".format(v) + return formatted_v # TODO(gp): -> to_percentage @@ -976,6 +976,7 @@ def filter_text(regex: str, txt: str) -> str: def dassert_one_trailing_newline(txt: str) -> None: + hdbg.dassert_isinstance(txt, str) match = re.search(r"\n*$", txt) hdbg.dassert(match) assert match is not None diff --git a/helpers/hpytest.py b/helpers/hpytest.py index b2eb454b9..a357dbd04 100644 --- a/helpers/hpytest.py +++ b/helpers/hpytest.py @@ -10,7 +10,7 @@ import sys from typing import List, Optional -import junitparser +import junitparser # type: ignore import helpers.hdbg as hdbg import helpers.hprint as hprint diff --git a/helpers/hretry.py b/helpers/hretry.py index 2ee2166f9..54447d502 100644 --- a/helpers/hretry.py +++ b/helpers/hretry.py @@ -50,6 +50,7 @@ def retry_wrapper(*args, **kwargs): _LOG.error( "Function %s failed after %d attempts", func, num_attempts ) + assert last_exception is not None raise last_exception return retry_wrapper @@ -87,6 +88,7 @@ async def retry_wrapper(*args, **kwargs): _LOG.error( "Function %s failed 
after %d attempts", func, num_attempts ) + assert last_exception is not None raise last_exception return retry_wrapper diff --git a/helpers/hs3.py b/helpers/hs3.py index 2c1548ede..195830c4f 100644 --- a/helpers/hs3.py +++ b/helpers/hs3.py @@ -30,13 +30,13 @@ from s3fs import S3File, S3FileSystem except ImportError: # Fallback to dynamic import - S3File = getattr(s3fs, "S3File", None) - S3FileSystem = getattr(s3fs, "S3FileSystem", None) + S3File = getattr(s3fs, "S3File", None) # type: ignore + S3FileSystem = getattr(s3fs, "S3FileSystem", None) # type: ignore except ModuleNotFoundError: _module = "s3fs" print(_WARNING + f": Can't find {_module}: continuing") # Define dummy classes for type hints when s3fs is not available - s3fs = None + s3fs = None # type: ignore class S3File: pass @@ -1086,7 +1086,7 @@ def expand_archived_data(src_tgz_file: str, dst_dir: str) -> str: return tgz_dst_dir -def get_s3_bucket_from_stage(stage: str, *, add_suffix: str = None) -> str: +def get_s3_bucket_from_stage(stage: str, *, add_suffix: Optional[str] = None) -> str: """ Retrieve the S3 bucket name based on the provided deployment stage. diff --git a/helpers/hsecrets.py b/helpers/hsecrets.py index 8b1568f35..c08186469 100644 --- a/helpers/hsecrets.py +++ b/helpers/hsecrets.py @@ -228,4 +228,4 @@ def dassert_valid_secret(secret_id: str) -> None: ) hdbg.dassert_in(values[1], ["local", "preprod"]) hdbg.dassert_in(values[2], ["trading", "sandbox"]) - hdbg.dassert_is(values[3].isnumeric(), True) + hdbg.dassert(values[3].isnumeric(), "values[3] should be numeric, got: %s", values[3]) diff --git a/helpers/hserver.py b/helpers/hserver.py index fd46f2d2f..b6f6cc907 100644 --- a/helpers/hserver.py +++ b/helpers/hserver.py @@ -86,7 +86,7 @@ def get_dev_csfy_host_names() -> List[str]: Return the names of the Causify dev servers. 
""" host_names = ("dev1", "dev2", "dev3") - return host_names + return list(host_names) def _get_host_name() -> str: diff --git a/helpers/hsftp.py b/helpers/hsftp.py index 6563f3173..e0c1b61cb 100644 --- a/helpers/hsftp.py +++ b/helpers/hsftp.py @@ -17,7 +17,7 @@ henv.install_module_if_not_present("pysftp") -import pysftp # noqa: E402 +import pysftp # noqa: E402 # type: ignore # Create a logger instance. _LOG = logging.getLogger(__name__) diff --git a/helpers/hsystem.py b/helpers/hsystem.py index db2d99878..05847c5ac 100644 --- a/helpers/hsystem.py +++ b/helpers/hsystem.py @@ -18,7 +18,7 @@ import subprocess import sys import time -from typing import Any, Callable, List, Match, Optional, Tuple, Union, cast +from typing import Any, Callable, Generator, List, Match, Optional, Tuple, Union, cast import helpers.hdbg as hdbg import helpers.hintrospection as hintros @@ -985,7 +985,7 @@ def find_file_with_dir( # https://stackoverflow.com/questions/169070 @contextlib.contextmanager -def cd(dir_name: str) -> None: +def cd(dir_name: str) -> Generator[None, None, None]: """ Context manager managing changing directory. """ diff --git a/helpers/hwall_clock_time.py b/helpers/hwall_clock_time.py index ea8392f6e..177841013 100644 --- a/helpers/hwall_clock_time.py +++ b/helpers/hwall_clock_time.py @@ -21,7 +21,7 @@ # # Function returning the current (true, replayed, simulated) wall-clock time as a # timestamp. -_GetWallClockTime = Callable[[], "pd.Timestamp"] # noqa: F821 +_GetWallClockTime = Callable[[], "pd.Timestamp"] # noqa: F821 # type: ignore _get_wall_clock_time_func: Optional[_GetWallClockTime] = None @@ -43,7 +43,7 @@ def get_wall_clock_time_func() -> Optional[_GetWallClockTime]: # We don't want to import `Pandas` just for a type. 
-def get_wall_clock_time() -> Optional["pd.Timestamp"]: # noqa: F821 +def get_wall_clock_time() -> Optional["pd.Timestamp"]: # noqa: F821 # type: ignore """ Return the wall clock time (according to the set function) or `None` if no function was set. @@ -63,7 +63,7 @@ def get_wall_clock_time() -> Optional["pd.Timestamp"]: # noqa: F821 # TODO(Sameep): Redundant fuction replace by `hdatetime.timestamp_to_str()`. def to_timestamp_str( - timestamp: "pd.Timestamp", # noqa: F821 + timestamp: "pd.Timestamp", # noqa: F821 # type: ignore include_msec: bool = False, ) -> str: if include_msec: @@ -81,7 +81,7 @@ def get_machine_wall_clock_time( as_str: bool = False, include_msec: bool = False, ) -> Union[str, datetime.datetime]: - ret = datetime.datetime.utcnow() + ret = datetime.datetime.now(datetime.timezone.utc) if as_str: ret = to_timestamp_str(ret, include_msec) return ret @@ -92,7 +92,7 @@ def get_machine_wall_clock_time( # ############################################################################# -_CURR_BAR_TIMESTAMP: Optional["pd.Timestamp"] = None # noqa: F821 +_CURR_BAR_TIMESTAMP: Optional["pd.Timestamp"] = None # noqa: F821 # type: ignore def reset_current_bar_timestamp() -> None: @@ -101,7 +101,7 @@ def reset_current_bar_timestamp() -> None: _CURR_BAR_TIMESTAMP = None -def set_current_bar_timestamp(timestamp: "pd.Timestamp") -> None: # noqa: F821 +def set_current_bar_timestamp(timestamp: "pd.Timestamp") -> None: # noqa: F821 # type: ignore _LOG.debug("timestamp=%s", timestamp) global _CURR_BAR_TIMESTAMP if _CURR_BAR_TIMESTAMP is not None: @@ -118,7 +118,7 @@ def get_current_bar_timestamp( *, as_str: bool = False, include_msec: bool = False, -) -> Optional[Union[str, "pd.Timestamp"]]: # noqa: F821 +) -> Optional[Union[str, "pd.Timestamp"]]: # noqa: F821 # type: ignore ret = _CURR_BAR_TIMESTAMP if _CURR_BAR_TIMESTAMP and as_str: ret = to_timestamp_str(ret, include_msec=include_msec) diff --git a/helpers/lib_tasks_aws.py b/helpers/lib_tasks_aws.py index 
27577fafc..63cc06cba 100644 --- a/helpers/lib_tasks_aws.py +++ b/helpers/lib_tasks_aws.py @@ -11,7 +11,7 @@ import logging import os import re -from typing import Any, Dict +from typing import Any, Dict, Optional from invoke import task @@ -300,7 +300,7 @@ def aws_update_ecs_task_definition( @task def aws_create_test_task_definition( ctx, - issue_id: int = None, + issue_id: Optional[int] = None, region: str = hs3.AWS_EUROPE_REGION_1, ) -> None: """ diff --git a/helpers/lib_tasks_docker_release.py b/helpers/lib_tasks_docker_release.py index a5d9824bd..2d15eeacc 100644 --- a/helpers/lib_tasks_docker_release.py +++ b/helpers/lib_tasks_docker_release.py @@ -1294,8 +1294,8 @@ def docker_create_candidate_image(ctx, container_dir_name=".", user_tag=""): # @task def docker_release_test_task_definition( ctx, - task_definition: str = None, - user_tag: str = None, + task_definition: Optional[str] = None, + user_tag: Optional[str] = None, region: str = hs3.AWS_EUROPE_REGION_1, ): # type: ignore """ diff --git a/helpers/lib_tasks_gh.py b/helpers/lib_tasks_gh.py index 44ac2c5e8..a2adba89a 100644 --- a/helpers/lib_tasks_gh.py +++ b/helpers/lib_tasks_gh.py @@ -635,7 +635,7 @@ def _get_best_workflow_run( def gh_get_details_for_all_workflows( repo_list: List[str], -) -> "pd.DataFrame": # noqa: F821 +) -> "pd.DataFrame": # noqa: F821 # type: ignore """ Get status for all the workflows. 
@@ -707,7 +707,7 @@ def gh_get_details_for_all_workflows(


 def gh_get_overall_build_status_for_repo(
-    repo_df: "pd.Dataframe",  # noqa: F821
+    repo_df: "pd.DataFrame",  # noqa: F821  # type: ignore
     *,
     use_colors: bool = True,
 ) -> str:
@@ -879,7 +879,7 @@ def color_format(val: str, status_color_mapping: Dict[str, str]) -> str:


 def render_repo_workflow_status_table(
-    workflow_df: "pd.DataFrame",  # noqa: F821
+    workflow_df: "pd.DataFrame",  # noqa: F821  # type: ignore
     status_color_mapping: Dict[str, str],
     timezone: str = "America/New_York",
 ) -> None:
diff --git a/helpers/notebooks/sage_tutorial.py b/helpers/notebooks/sage_tutorial.py
index 473280cef..7af8b1046 100644
--- a/helpers/notebooks/sage_tutorial.py
+++ b/helpers/notebooks/sage_tutorial.py
@@ -39,8 +39,8 @@
 # https://docs.sympy.org/latest/tutorials/intro-tutorial/intro.html#what-is-symbolic-computation

 # %%
-import sympy
-from sympy import *  # noqa: F403
+import sympy  # type: ignore
+from sympy import *  # noqa: F403  # type: ignore

 # %%
 x, y = sympy.symbols("x,y")
@@ -64,7 +64,7 @@

 # %%
 # Compute truth table.
-from sympy.logic.boolalg import truth_table  # noqa: E402
+from sympy.logic.boolalg import truth_table  # noqa: E402  # type: ignore

 table = truth_table(x >> y, [x, y])
 for t in table:
diff --git a/helpers/repo_config_utils.py b/helpers/repo_config_utils.py
index b5b9d8b2c..058cd4c9f 100644
--- a/helpers/repo_config_utils.py
+++ b/helpers/repo_config_utils.py
@@ -132,7 +132,7 @@ def from_file(cls, file_name: Optional[str] = None) -> "RepoConfig":
                 type(data),
             )
         except Exception as e:
-            raise f"Error reading YAML file {file_name}: {e}"
+            raise ValueError(f"Error reading YAML file {file_name}: {e}")
         return cls(data)

     # TODO(gp): -> __str__?
diff --git a/linters/dockerized_ty.py b/linters/dockerized_ty.py
index bd79f044c..917d00504 100755
--- a/linters/dockerized_ty.py
+++ b/linters/dockerized_ty.py
@@ -13,7 +13,9 @@
 from typing import List

 import helpers.hdbg as hdbg
+import helpers.hio as hio
 import helpers.hdocker as hdocker
+import helpers.hsystem as hsystem
 import helpers.hserver as hserver
 import helpers.hprint as hprint
 import helpers.hdockerized_executables as hdocexec
@@ -21,6 +23,9 @@

 _LOG = logging.getLogger(__name__)

+_STANDARD_TY_ARGS = (
+    "--output-format concise --color never --exclude '**/outcomes/**' --exclude '**/import_check/example/**' | tee ty.log"
+)

 def _parse() -> argparse.ArgumentParser:
     # Create an ArgumentParser instance with the provided docstring.
@@ -28,6 +33,13 @@
         description=__doc__,
         formatter_class=argparse.RawDescriptionHelpFormatter,
     )
+    parser.add_argument(
+        "--no_use_standard_ty_args",
+        dest="use_standard_ty_args",
+        action="store_false",
+        default=True,
+        help=f"Do not use the standard ty arguments ({_STANDARD_TY_ARGS})",
+    )
     # Add Docker-specific arguments (e.g., --dockerized_force_rebuild,
     # --dockerized_use_sudo).
     hparser.add_dockerized_script_arg(parser)
@@ -36,14 +48,14 @@
     return parser


-def run_dockerized_ty(
+def _run_dockerized_ty(
     cmd_opts: List[str],
+    use_standard_ty_args: bool,
     *,
     mode: str = "system",
     force_rebuild: bool = False,
     use_sudo: bool = False,
 ) -> str:
-    """ """
     _LOG.debug(hprint.func_signature_to_str())
     container_image = "tmp.ty"
     dockerfile = r"""
@@ -60,6 +72,13 @@
     caller_mount_path, callee_mount_path, mount = hdocker.get_docker_mount_info(
         is_caller_host, use_sibling_container_for_callee
     )
+    # docker run -it --rm --user $(id -u):$(id -g) \
+    # -e AM_GDRIVE_PATH -e AM_TELEGRAM_TOKEN \
+    # ...
+ # --workdir /app --mount + # type=bind,source=/Users/saggese/src/umd_msml6101,target=/app \ + # --entrypoint "" tmp.ty.arm64.c94f3fcd bash -c "/venv/bin/ty check + # /app/helpers_root/dev_scripts_helpers/documentation/test/test_preprocess_notes.py" cmd_opts_out = hdocker.convert_all_paths_from_caller_to_callee_docker_path( cmd_opts, caller_mount_path, @@ -67,12 +86,16 @@ def run_dockerized_ty( is_caller_host, use_sibling_container_for_callee, ) + if use_standard_ty_args: + cmd_opts_out.extend(_STANDARD_TY_ARGS.split()) cmd_opts_str = " ".join(cmd_opts_out) + cmd_opts_str = f"bash -c '/venv/bin/ty {cmd_opts_str}'" # Build the docker command. docker_cmd = hdocker.get_docker_base_cmd(use_sudo) docker_cmd.extend( [ f"--workdir {callee_mount_path} --mount {mount}", + "--entrypoint ''", container_image, cmd_opts_str, ] @@ -86,38 +109,42 @@ def run_dockerized_ty( def _main(parser: argparse.ArgumentParser) -> None: - args = parser.parse_args() # Parse everything that can be parsed and returns the rest. args, cmd_opts = parser.parse_known_args() if not cmd_opts: cmd_opts = [] + _LOG.info("cmd_opts=%s", cmd_opts) # Start the logger. hdbg.init_logger( verbosity=args.log_level, use_exec_path=True, force_white=False ) - # Run latex. - hdocexec.run_dockerized_ty( - args.input, - cmd_opts, - args.run_latex_again, - args.output, - force_rebuild=args.dockerized_force_rebuild, - use_sudo=args.dockerized_use_sudo, - ) - _LOG.info("Output written to '%s'", args.output) - hdbg.init_logger( - verbosity=args.log_level, use_exec_path=True, force_white=False - ) - # TODO(*): Implement this. - # pandoc_cmd = () - # _LOG.debug("Command: %s", pandoc_cmd) - # hdocker.run_dockerized_pandoc( - # pandoc_cmd, - # container_type="pandoc_only", - # force_rebuild=args.dockerized_force_rebuild, - # use_sudo=args.dockerized_use_sudo, + # Run ty. + # TODO(gp): This approach doesn't work since we need to configure PYTHONPATH. 
+    # _run_dockerized_ty(
+    #     cmd_opts,
+    #     args.use_standard_ty_args,
+    #     force_rebuild=args.dockerized_force_rebuild,
+    #     use_sudo=args.dockerized_use_sudo,
     # )
-    # _LOG.info("Finished converting '%s' to '%s'.", args.docx_file, args.md_file)
+    # Create a script with instructions to install and run ty.
+    if args.use_standard_ty_args:
+        cmd_opts.extend(_STANDARD_TY_ARGS.split())
+    cmd_opts_str = " ".join(cmd_opts)
+    script = f"""
+    #!/bin/bash -xe
+    sudo bash -c "(source /venv/bin/activate; pip install ty)"
+    /venv/bin/ty {cmd_opts_str}
+    """
+    script = hprint.dedent(script)
+    file_name = "tmp.dockerized_ty.sh"
+    hio.create_executable_script(file_name, script)
+    #
+    cmd = f"invoke docker_cmd --cmd='{file_name}'"
+    # ty returns an error code if there are linting errors.
+    abort_on_error = False
+    hsystem.system(cmd, abort_on_error=abort_on_error, suppress_output=False)
+    #
+    _LOG.info("Output written to 'ty.log'")


 if __name__ == "__main__":