Conversation
| def __init__(self, filename): | ||
| self.filename = filename | ||
| paths = glob.glob(os.path.join(SCRIPT_DIR, "images", self.filename + ".*")) | ||
| paths = glob.glob(os.path.join(SCRIPT_DIR, "images", f"{self.filename}.*")) |
There was a problem hiding this comment.
Function Image.__init__ refactored with the following changes:
- Use f-string instead of string concatenation (
use-fstring-for-concatenation)
| def gaussian_cdf_approx(x, radius): | ||
| return 0.5 * (1 + tf.tanh(1.12 * x / (math.sqrt(2.) * radius))) | ||
| def gaussian_cdf_approx(x, radius): | ||
| return 0.5 * (1 + tf.tanh(1.12 * x / (math.sqrt(2.) * radius))) | ||
|
|
||
| def gaussian_cdf(x, radius): | ||
| return 0.5 * (1 + tf.erf(x / (math.sqrt(2.) * radius))) | ||
| def gaussian_cdf(x, radius): | ||
| return 0.5 * (1 + tf.erf(x / (math.sqrt(2.) * radius))) | ||
|
|
||
| dims = inputs.shape[-1] | ||
| with tf.name_scope(name): | ||
| # When there are no input dims, there is nothing to encode. | ||
| # This special case is needed because tf.reshape does strange | ||
| # things when 0-dims are involved. | ||
| if dims == 0: | ||
| return inputs | ||
| results = [] | ||
| boundaries = tf.linspace(0., 1., self.n_bins + 1) | ||
| boundaries = tf.reshape(boundaries, [1 for _ in inputs.shape] + [-1]) | ||
| dims = inputs.shape[-1] | ||
| with tf.name_scope(name): | ||
| # When there are no input dims, there is nothing to encode. | ||
| # This special case is needed because tf.reshape does strange | ||
| # things when 0-dims are involved. | ||
| if dims == 0: | ||
| return inputs | ||
| results = [] | ||
| boundaries = tf.linspace(0., 1., self.n_bins + 1) | ||
| boundaries = tf.reshape(boundaries, [1 for _ in inputs.shape] + [-1]) | ||
|
|
||
| for level in range(self.n_levels): | ||
| with tf.name_scope(f"level{level}"): | ||
| scale = self.n_bins**level | ||
| for level in range(self.n_levels): | ||
| with tf.name_scope(f"level{level}"): | ||
| scale = self.n_bins**level | ||
|
|
||
| # We use the absolute value here just in case the inputs are erroneously negative. | ||
| # Even a negative epsilon would totally wreck the following code. | ||
| if level == 0: | ||
| scaled = tf.abs(inputs) | ||
| else: | ||
| scaled = tf.abs(inputs * scale) % 1 | ||
| # We use the absolute value here just in case the inputs are erroneously negative. | ||
| # Even a negative epsilon would totally wreck the following code. | ||
| scaled = tf.abs(inputs) if level == 0 else tf.abs(inputs * scale) % 1 | ||
| diffs = boundaries - scaled[..., tf.newaxis] | ||
| cdfs = gaussian_cdf_approx(diffs, self.radius) | ||
| result = cdfs[...,1:] - cdfs[...,:-1] | ||
|
|
||
| diffs = boundaries - scaled[..., tf.newaxis] | ||
| cdfs = gaussian_cdf_approx(diffs, self.radius) | ||
| result = cdfs[...,1:] - cdfs[...,:-1] | ||
| # print_op = tf.print("result: ", result) | ||
|
|
||
| # print_op = tf.print("result: ", result) | ||
| # In the outermost level we don't want to carry over... | ||
| # otherwise we introduce ambiguities. | ||
| if level != 0 or wraparound: | ||
| cdfs_right = gaussian_cdf_approx(diffs + 1., self.radius) | ||
| cdfs_left = gaussian_cdf_approx(diffs - 1., self.radius) | ||
| result = result + cdfs_right[...,1:] - cdfs_right[...,:-1] + cdfs_left[...,1:] - cdfs_left[...,:-1] | ||
|
|
||
| # In the outermost level we don't want to carry over... | ||
| # otherwise we introduce ambiguities. | ||
| if level != 0 or wraparound: | ||
| cdfs_right = gaussian_cdf_approx(diffs + 1., self.radius) | ||
| cdfs_left = gaussian_cdf_approx(diffs - 1., self.radius) | ||
| result = result + cdfs_right[...,1:] - cdfs_right[...,:-1] + cdfs_left[...,1:] - cdfs_left[...,:-1] | ||
| # with tf.control_dependencies([print_op]): | ||
| result = result / scale | ||
|
|
||
| # with tf.control_dependencies([print_op]): | ||
| result = result / scale | ||
| results.append(result) | ||
|
|
||
| results.append(result) | ||
|
|
||
| result = tf.concat(results, axis=-1) | ||
| result = tf.reshape(result, [-1, self.n_bins * self.n_levels * dims]) | ||
| return result | ||
| result = tf.concat(results, axis=-1) | ||
| result = tf.reshape(result, [-1, self.n_bins * self.n_levels * dims]) | ||
| return result |
There was a problem hiding this comment.
Function OneBlob.__call__ refactored with the following changes:
- Replace if statement with if expression (
assign-if-exp)
| args = parser.parse_args() | ||
| return args | ||
| return parser.parse_args() |
There was a problem hiding this comment.
Function get_args refactored with the following changes:
- Inline variable that is immediately returned (
inline-immediately-returned-variable)
| if gradients and not all(grad is None for grad in gradients): | ||
| if gradients and any(grad is not None for grad in gradients): |
There was a problem hiding this comment.
Function get_train_op refactored with the following changes:
- Invert any/all to simplify comparisons (
invert-any-all)
| output_tensor = linear_layer(current_tensor, target_fun.n_channels, tf.float16, f"fc_out", False) | ||
| output_tensor = linear_layer( | ||
| current_tensor, target_fun.n_channels, tf.float16, "fc_out", False | ||
| ) |
There was a problem hiding this comment.
Function make_graph refactored with the following changes:
- Replace f-string with no interpolated values with string (
remove-redundant-fstring)
| ninja.variable(toolchain + 'builddir', builddir) | ||
| ninja.variable(f'{toolchain}builddir', builddir) | ||
| else: | ||
| builddir = '' | ||
|
|
||
| ninja.variable(toolchain + 'defines', config.defines[toolchain] or []) | ||
| ninja.variable(toolchain + 'includes', config.includes[toolchain] or []) | ||
| ninja.variable(toolchain + 'cflags', config.cflags[toolchain] or []) | ||
| ninja.variable(toolchain + 'cxxflags', config.cxxflags[toolchain] or []) | ||
| ninja.variable(toolchain + 'ldflags', config.ldflags[toolchain] or []) | ||
| ninja.variable(f'{toolchain}defines', config.defines[toolchain] or []) | ||
| ninja.variable(f'{toolchain}includes', config.includes[toolchain] or []) | ||
| ninja.variable(f'{toolchain}cflags', config.cflags[toolchain] or []) | ||
| ninja.variable(f'{toolchain}cxxflags', config.cxxflags[toolchain] or []) | ||
| ninja.variable(f'{toolchain}ldflags', config.ldflags[toolchain] or []) |
There was a problem hiding this comment.
Function gen refactored with the following changes:
- Use f-string instead of string concatenation [×12] (
use-fstring-for-concatenation)
| f = open('build.ninja', 'w') | ||
| ninja = Writer(f) | ||
| with open('build.ninja', 'w') as f: | ||
| ninja = Writer(f) | ||
|
|
||
| if hasattr(config, "register_toolchain"): | ||
| config.register_toolchain(ninja) | ||
|
|
||
| if hasattr(config, "register_toolchain"): | ||
| config.register_toolchain(ninja) | ||
|
|
||
| gen(ninja, config.toolchain, config) | ||
| f.close() | ||
| gen(ninja, config.toolchain, config) |
There was a problem hiding this comment.
Function main refactored with the following changes:
- Use
`with` when opening file to ensure closure (ensure-file-closed)
| @@ -41,6 +41,7 @@ | |||
| same line, but it is far from perfect (in either direction). | |||
| """ | |||
|
|
|||
There was a problem hiding this comment.
Lines 423-502 refactored with the following changes:
- Replace interpolated string formatting with f-string [×10] (
replace-interpolation-with-fstring) - Unwrap a constant iterable constructor (
unwrap-iterable-construction)
| matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line) | ||
| if matched: | ||
| if matched.group(1): | ||
| suppressed_line = linenum + 1 | ||
| else: | ||
| suppressed_line = linenum | ||
| category = matched.group(2) | ||
| if category in (None, '(*)'): # => "suppress all" | ||
| _error_suppressions.setdefault(None, set()).add(suppressed_line) | ||
| else: | ||
| if category.startswith('(') and category.endswith(')'): | ||
| category = category[1:-1] | ||
| if category in _ERROR_CATEGORIES: | ||
| _error_suppressions.setdefault(category, set()).add(suppressed_line) | ||
| elif category not in _LEGACY_ERROR_CATEGORIES: | ||
| error(filename, linenum, 'readability/nolint', 5, | ||
| 'Unknown NOLINT error category: %s' % category) | ||
| if not (matched := Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)): | ||
| return | ||
| suppressed_line = linenum + 1 if matched.group(1) else linenum | ||
| category = matched.group(2) | ||
| if category in (None, '(*)'): # => "suppress all" | ||
| _error_suppressions.setdefault(None, set()).add(suppressed_line) | ||
| elif category.startswith('(') and category.endswith(')'): | ||
| category = category[1:-1] | ||
| if category in _ERROR_CATEGORIES: | ||
| _error_suppressions.setdefault(category, set()).add(suppressed_line) | ||
| elif category not in _LEGACY_ERROR_CATEGORIES: | ||
| error( | ||
| filename, | ||
| linenum, | ||
| 'readability/nolint', | ||
| 5, | ||
| f'Unknown NOLINT error category: {category}', | ||
| ) |
There was a problem hiding this comment.
Function ParseNolintSuppressions refactored with the following changes:
- Use named expression to simplify assignment and conditional (
use-named-expression) - Add guard clause (
last-if-guard) - Replace if statement with if expression (
assign-if-exp) - Merge else clause's nested if statement into elif (
merge-else-if-into-elif) - Replace interpolated string formatting with f-string (
replace-interpolation-with-fstring)
| if (self._last_header > header_path and | ||
| Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])): | ||
| return False | ||
| return True | ||
| return self._last_header <= header_path or not Match( | ||
| r'^\s*#\s*include\b', clean_lines.elided[linenum - 1]) |
There was a problem hiding this comment.
Function _IncludeState.IsInAlphabeticalOrder refactored with the following changes:
- Lift code into else after jump in control flow (
reintroduce-else) - Replace if statement with if expression (
assign-if-exp) - Simplify boolean if expression (
boolean-if-exp-identity) - Remove unnecessary casts to int, str, float or bool (
remove-unnecessary-cast)
There was a problem hiding this comment.
PR Type: Refactoring
Summary of PR: This PR introduces a series of refactoring changes made by the Sourcery tool, which include the use of f-strings for string formatting, simplification of conditional statements, and other Pythonic improvements for better readability and performance.
General PR suggestions
- Ensure that the refactoring maintains the original intent and functionality of the code, especially where behavior may be altered, such as the regex pattern application in
CleanseRawStrings. - Verify that the changes in indentation and formatting align with the project's style guidelines to maintain consistency throughout the codebase.
- Review the use of f-strings and other modern Python features for compatibility with the project's Python version requirements.
- Consider the readability of the code after refactoring, particularly when negations are introduced in conditional statements, and weigh this against the benefits of conciseness.
- Confirm that the simplifications made by the tool do not introduce logical errors, especially in cases where the semantics of the code may change, such as with the use of
`any` versus `not all`.
Your trial expires on December 18, 2023. Please email tim@sourcery.ai to continue using Sourcery ✨
| # The allowed extensions for file names | ||
| # This is set by --extensions flag. | ||
| _valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh']) | ||
| _valid_extensions = {'cc', 'h', 'cpp', 'cu', 'cuh'} |
There was a problem hiding this comment.
suggestion (llm): While using a set literal is more performant than calling the set constructor, consider if this change is necessary if the original codebase prefers a consistent style of set initialization.
| _valid_extensions = {'cc', 'h', 'cpp', 'cu', 'cuh'} | |
| _valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh']) |
| Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])): | ||
| return False | ||
| return True | ||
| return self._last_header <= header_path or not Match( |
There was a problem hiding this comment.
suggestion (llm): The refactored condition is more concise but slightly less readable due to the negation of the match. Consider if the readability trade-off is worth the conciseness in this context.
| return self._last_header <= header_path or not Match( | |
| return not (self._last_header > header_path and Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])) |
| def NoExtension(self): | ||
| """File has no source file extension.""" | ||
| return '/'.join(self.Split()[0:2]) | ||
| return '/'.join(self.Split()[:2]) |
There was a problem hiding this comment.
suggestion (llm): The change to use slicing with [:2] instead of [0:2] is a nice touch for brevity, but ensure it aligns with the code style guidelines of the project.
| # of the line since this is probably a multiline string. | ||
| collapsed += elided | ||
| break | ||
| elif Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head): |
There was a problem hiding this comment.
issue (llm): The refactoring within the CleanseRawStrings function seems to have introduced a bug. The regex pattern is now applied to head instead of tail, which changes the behavior of the function. This needs to be corrected to maintain the original logic.
| elif Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head): | |
| elif Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', tail): |
| break | ||
|
|
||
| self.output.write(leading_space + text[0:space] + ' $\n') | ||
| self.output.write(leading_space + text[:space] + ' $\n') |
There was a problem hiding this comment.
nitpick (llm): The change to use slicing with [:space] instead of [0:space] is a nice touch for brevity, but ensure it aligns with the code style guidelines of the project.
| # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... | ||
| if error_level > 5: | ||
| error_level = 5 | ||
| error_level = min(error_level, 5) |
There was a problem hiding this comment.
praise (llm): Good use of the min function to simplify the logic for capping the error level.
Branch
`master` refactored by Sourcery. If you're happy with these changes, merge this Pull Request using the Squash and merge strategy.
See our documentation here.
Run Sourcery locally
Reduce the feedback loop during development by using the Sourcery editor plugin:
Review changes via command line
To manually merge these changes, make sure you're on the
`master` branch, then run the commands shown above.

Help us improve this pull request!