Mirror of https://github.com/quantum5/win2xcur.git (synced 2025-04-24 10:11:57 -04:00)
Convert all string formatting to f-strings

Parent: b4fc7812e4
Commit: 8b1b5c6f80
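The pattern applied throughout the diff below is mechanical: each printf-style `'…' % (…)` expression becomes an f-string, with `!r` standing in for `%r` and format specs (such as the zero-padded hex used for the Xcursor version) carrying over unchanged. A minimal illustration of the equivalence, using made-up values rather than anything from the repository:

# Illustrative only: example values, not taken from the diff.
image, delay = 'arrow.png', 10

# Before: printf-style formatting; %r calls repr() on each argument.
old = 'CursorFrame(images=%r, delay=%r)' % (image, delay)

# After: f-string; !r requests repr() of the interpolated expression,
# and format specs like :08x behave the same as in %-formatting.
new = f'CursorFrame(images={image!r}, delay={delay!r})'

assert old == new
assert '%08x' % 255 == f'{255:08x}' == '000000ff'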
@@ -14,7 +14,7 @@ class CursorImage:
         self.nominal = nominal
 
     def __repr__(self) -> str:
-        return 'CursorImage(image=%r, hotspot=%r, nominal=%r)' % (self.image, self.hotspot, self.nominal)
+        return f'CursorImage(image={self.image!r}, hotspot={self.hotspot!r}, nominal={self.nominal!r})'
 
 
 class CursorFrame:
@@ -35,4 +35,4 @@ class CursorFrame:
         return iter(self.images)
 
     def __repr__(self) -> str:
-        return 'CursorFrame(images=%r, delay=%r)' % (self.images, self.delay)
+        return f'CursorFrame(images={self.images!r}, delay={self.delay!r})'
@@ -43,7 +43,7 @@ def main() -> None:
             cursor = open_blob(blob)
         except Exception:
             with print_lock:
-                print('Error occurred while processing %s:' % (name,), file=sys.stderr)
+                print(f'Error occurred while processing {name}:', file=sys.stderr)
                 traceback.print_exc()
         else:
             if args.shadow:
@@ -28,7 +28,7 @@ def main() -> None:
             cursor = open_blob(blob)
         except Exception:
             with print_lock:
-                print('Error occurred while processing %s:' % (name,), file=sys.stderr)
+                print(f'Error occurred while processing {name}:', file=sys.stderr)
                 traceback.print_exc()
         else:
             ext, result = to_smart(cursor.frames)
@@ -53,20 +53,20 @@ class ANIParser(BaseParser):
             found += [name]
             offset += size
             if offset >= len(self.blob):
-                raise ValueError('Expected chunk %r, found %r' % (expected, found))
+                raise ValueError(f'Expected chunk {expected!r}, found {found!r}')
         return name, size, offset
 
     def _parse(self, offset: int) -> List[CursorFrame]:
         _, size, offset = self._read_chunk(offset, expected=[self.HEADER_CHUNK])
 
         if size != self.ANIH_HEADER.size:
-            raise ValueError('Unexpected anih header size %d, expected %d' % (size, self.ANIH_HEADER.size))
+            raise ValueError(f'Unexpected anih header size {size}, expected {self.ANIH_HEADER.size}')
 
         size, frame_count, step_count, width, height, bit_count, planes, display_rate, flags = self.ANIH_HEADER.unpack(
             self.blob[offset:offset + self.ANIH_HEADER.size])
 
         if size != self.ANIH_HEADER.size:
-            raise ValueError('Unexpected size in anih header %d, expected %d' % (size, self.ANIH_HEADER.size))
+            raise ValueError(f'Unexpected size in anih header {size}, expected {self.ANIH_HEADER.size}')
 
         if not flags & self.ICON_FLAG:
             raise NotImplementedError('Raw BMP images not supported.')
@@ -82,8 +82,8 @@ class ANIParser(BaseParser):
             if name == self.LIST_CHUNK:
                 list_end = offset + size
                 if self.blob[offset:offset + 4] != self.FRAME_TYPE:
-                    raise ValueError('Unexpected RIFF list type: %r, expected %r' %
-                                     (self.blob[offset:offset + 4], self.FRAME_TYPE))
+                    raise ValueError(
+                        f'Unexpected RIFF list type: {self.blob[offset:offset + 4]!r}, expected {self.FRAME_TYPE!r}')
                 offset += 4
 
                 for i in range(frame_count):
@@ -94,16 +94,16 @@ class ANIParser(BaseParser):
                     offset += 1
 
                 if offset != list_end:
-                    raise ValueError('Wrong RIFF list size: %r, expected %r' % (offset, list_end))
+                    raise ValueError(f'Wrong RIFF list size: {offset}, expected {list_end}')
             elif name == self.SEQ_CHUNK:
                 order = [i for i, in self.UNSIGNED.iter_unpack(self.blob[offset:offset + size])]
                 if len(order) != step_count:
-                    raise ValueError('Wrong animation sequence size: %r, expected %r' % (len(order), step_count))
+                    raise ValueError(f'Wrong animation sequence size: {len(order)}, expected {step_count}')
                 offset += size
             elif name == self.RATE_CHUNK:
                 delays = [i for i, in self.UNSIGNED.iter_unpack(self.blob[offset:offset + size])]
                 if len(delays) != step_count:
-                    raise ValueError('Wrong animation rate size: %r, expected %r' % (len(delays), step_count))
+                    raise ValueError(f'Wrong animation rate size: {len(delays)}, expected {step_count}')
                 offset += size
 
         if len(order) != step_count:
@@ -32,7 +32,7 @@ class XCursorParser(BaseParser):
         assert magic == self.MAGIC
 
         if version != self.VERSION:
-            raise ValueError('Unsupported Xcursor version %r' % version)
+            raise ValueError(f'Unsupported Xcursor version 0x{version:08x}')
 
         offset = self.FILE_HEADER.size
         chunks: List[Tuple[int, int, int]] = []
@@ -51,32 +51,31 @@ class XCursorParser(BaseParser):
                 self._unpack(self.IMAGE_HEADER, position)
 
             if size != self.IMAGE_HEADER.size:
-                raise ValueError('Unexpected size: %r, expected %r' % (size, self.IMAGE_HEADER.size))
+                raise ValueError(f'Unexpected size: {size}, expected {self.IMAGE_HEADER.size}')
 
             if actual_type != chunk_type:
-                raise ValueError('Unexpected chunk type: %r, expected %r' % (actual_type, chunk_type))
+                raise ValueError(f'Unexpected chunk type: {actual_type}, expected {chunk_type}')
 
             if nominal_size != chunk_subtype:
-                raise ValueError('Unexpected nominal size: %r, expected %r' % (nominal_size, chunk_subtype))
+                raise ValueError(f'Unexpected nominal size: {nominal_size}, expected {chunk_subtype}')
 
             if width > 0x7FFF:
-                raise ValueError('Image width too large: %r' % width)
+                raise ValueError(f'Image width too large: {width}')
 
             if height > 0x7FFF:
-                raise ValueError('Image height too large: %r' % height)
+                raise ValueError(f'Image height too large: {height}')
 
             if x_offset > width:
-                raise ValueError('Hotspot x-coordinate too large: %r' % x_offset)
+                raise ValueError(f'Hotspot x-coordinate too large: {x_offset}')
 
             if y_offset > height:
-                raise ValueError('Hotspot x-coordinate too large: %r' % y_offset)
+                raise ValueError(f'Hotspot x-coordinate too large: {y_offset}')
 
             image_start = position + self.IMAGE_HEADER.size
             image_size = width * height * 4
             blob = self.blob[image_start:image_start + image_size]
             if len(blob) != image_size:
-                raise ValueError('Invalid image at %d: expected %d bytes, got %d bytes' %
-                                 (image_size, image_size, len(blob)))
+                raise ValueError(f'Invalid image at {image_start}: expected {image_size} bytes, got {len(blob)} bytes')
 
             image = Image(width=width, height=height)
             image.import_pixels(channel_map='BGRA', data=blob)
@@ -1,3 +1,5 @@
+from typing import cast
+
 import numpy
 
 
@@ -7,4 +9,4 @@ def premultiply_alpha(source: bytes) -> bytes:
     buffer[0::4] *= alpha
     buffer[1::4] *= alpha
     buffer[2::4] *= alpha
-    return buffer.astype(numpy.uint8).tobytes()
+    return cast(bytes, buffer.astype(numpy.uint8).tobytes())
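The final hunk is a typing tweak rather than an f-string change: numpy's ndarray.tobytes() already returns bytes at run time, so the added cast(bytes, ...) presumably only satisfies a static type checker. A minimal sketch of that behaviour, with illustrative values only:

from typing import cast

import numpy

# cast() has no runtime effect; it only narrows the type seen by checkers such as mypy.
buffer = numpy.frombuffer(b'\x01\x02\x03\x04', dtype=numpy.uint8).astype(numpy.float64)
data = cast(bytes, buffer.astype(numpy.uint8).tobytes())
assert data == b'\x01\x02\x03\x04'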