Added support for multiple tags on a single model

This commit is contained in:
jeffser 2024-05-18 15:52:50 -06:00
parent 8ddce304b2
commit 02acbb2d70
571 changed files with 76910 additions and 127 deletions

0
.flatpak-builder/cache/.lock vendored Normal file
View File

4
.flatpak-builder/cache/config vendored Normal file
View File

@@ -0,0 +1,4 @@
[core]
repo_version=1
mode=bare-user-only
min-free-space-percent=0

View File

@@ -0,0 +1,30 @@
The Python Imaging Library (PIL) is
Copyright © 1997-2011 by Secret Labs AB
Copyright © 1995-2011 by Fredrik Lundh and contributors
Pillow is the friendly PIL fork. It is
Copyright © 2010-2024 by Jeffrey A. Clark and contributors
Like PIL, Pillow is licensed under the open source HPND License:
By obtaining, using, and/or copying this software and/or its associated
documentation, you agree that you have read, understood, and will comply
with the following terms and conditions:
Permission to use, copy, modify and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appears in all copies, and that
both that copyright notice and this permission notice appear in supporting
documentation, and that the name of Secret Labs AB or the author not be
used in advertising or publicity pertaining to distribution of the software
without specific, written prior permission.
SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.

View File

@@ -0,0 +1,626 @@
import logging
from os import PathLike
from typing import BinaryIO, List, Optional, Set, Union
from .cd import (
coherence_ratio,
encoding_languages,
mb_encoding_languages,
merge_coherence_ratios,
)
from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
from .md import mess_ratio
from .models import CharsetMatch, CharsetMatches
from .utils import (
any_specified_encoding,
cut_sequence_chunks,
iana_name,
identify_sig_or_bom,
is_cp_similar,
is_multi_byte_encoding,
should_strip_sig_or_bom,
)
# Will most likely be controversial
# logging.addLevelName(TRACE, "TRACE")
logger = logging.getLogger("charset_normalizer")
explain_handler = logging.StreamHandler()
explain_handler.setFormatter(
logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
)
def from_bytes(
sequences: Union[bytes, bytearray],
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.2,
cp_isolation: Optional[List[str]] = None,
cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
language_threshold: float = 0.1,
enable_fallback: bool = True,
) -> CharsetMatches:
"""
Given a raw bytes sequence, return the best possible charsets usable to render str objects.
If there are no results, it is a strong indicator that the source is binary/not text.
By default, the process will extract 5 blocks of 512 bytes each to assess the mess and coherence of a given sequence,
and will give up on a particular code page after 20% measured mess. Those criteria are customizable at will.
The preemptive behaviour DOES NOT replace the traditional detection workflow; it prioritizes a particular code page
but never takes it for granted. It can improve performance.
You may want to focus your attention on some code pages and/or exclude others; use cp_isolation and cp_exclusion
for that purpose.
This function will strip the SIG in the payload/sequence every time except for UTF-16 and UTF-32.
By default the library does not set up any handler other than the NullHandler. If you set the 'explain'
toggle to True, it will alter the logger configuration to add a StreamHandler suitable for debugging.
A custom logging format and handler can be set manually.
"""
if not isinstance(sequences, (bytearray, bytes)):
raise TypeError(
"Expected object of type bytes or bytearray, got: {0}".format(
type(sequences)
)
)
if explain:
previous_logger_level: int = logger.level
logger.addHandler(explain_handler)
logger.setLevel(TRACE)
length: int = len(sequences)
if length == 0:
logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level or logging.WARNING)
return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])
if cp_isolation is not None:
logger.log(
TRACE,
"cp_isolation is set. use this flag for debugging purpose. "
"limited list of encoding allowed : %s.",
", ".join(cp_isolation),
)
cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
else:
cp_isolation = []
if cp_exclusion is not None:
logger.log(
TRACE,
"cp_exclusion is set. use this flag for debugging purpose. "
"limited list of encoding excluded : %s.",
", ".join(cp_exclusion),
)
cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
else:
cp_exclusion = []
if length <= (chunk_size * steps):
logger.log(
TRACE,
"override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
steps,
chunk_size,
length,
)
steps = 1
chunk_size = length
if steps > 1 and length / steps < chunk_size:
chunk_size = int(length / steps)
is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE
is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE
if is_too_small_sequence:
logger.log(
TRACE,
"Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
length
),
)
elif is_too_large_sequence:
logger.log(
TRACE,
"Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
length
),
)
prioritized_encodings: List[str] = []
specified_encoding: Optional[str] = (
any_specified_encoding(sequences) if preemptive_behaviour else None
)
if specified_encoding is not None:
prioritized_encodings.append(specified_encoding)
logger.log(
TRACE,
"Detected declarative mark in sequence. Priority +1 given for %s.",
specified_encoding,
)
tested: Set[str] = set()
tested_but_hard_failure: List[str] = []
tested_but_soft_failure: List[str] = []
fallback_ascii: Optional[CharsetMatch] = None
fallback_u8: Optional[CharsetMatch] = None
fallback_specified: Optional[CharsetMatch] = None
results: CharsetMatches = CharsetMatches()
sig_encoding, sig_payload = identify_sig_or_bom(sequences)
if sig_encoding is not None:
prioritized_encodings.append(sig_encoding)
logger.log(
TRACE,
"Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
len(sig_payload),
sig_encoding,
)
prioritized_encodings.append("ascii")
if "utf_8" not in prioritized_encodings:
prioritized_encodings.append("utf_8")
for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
if cp_isolation and encoding_iana not in cp_isolation:
continue
if cp_exclusion and encoding_iana in cp_exclusion:
continue
if encoding_iana in tested:
continue
tested.add(encoding_iana)
decoded_payload: Optional[str] = None
bom_or_sig_available: bool = sig_encoding == encoding_iana
strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom(
encoding_iana
)
if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
logger.log(
TRACE,
"Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
encoding_iana,
)
continue
if encoding_iana in {"utf_7"} and not bom_or_sig_available:
logger.log(
TRACE,
"Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.",
encoding_iana,
)
continue
try:
is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana)
except (ModuleNotFoundError, ImportError):
logger.log(
TRACE,
"Encoding %s does not provide an IncrementalDecoder",
encoding_iana,
)
continue
try:
if is_too_large_sequence and is_multi_byte_decoder is False:
str(
sequences[: int(50e4)]
if strip_sig_or_bom is False
else sequences[len(sig_payload) : int(50e4)],
encoding=encoding_iana,
)
else:
decoded_payload = str(
sequences
if strip_sig_or_bom is False
else sequences[len(sig_payload) :],
encoding=encoding_iana,
)
except (UnicodeDecodeError, LookupError) as e:
if not isinstance(e, LookupError):
logger.log(
TRACE,
"Code page %s does not fit given bytes sequence at ALL. %s",
encoding_iana,
str(e),
)
tested_but_hard_failure.append(encoding_iana)
continue
similar_soft_failure_test: bool = False
for encoding_soft_failed in tested_but_soft_failure:
if is_cp_similar(encoding_iana, encoding_soft_failed):
similar_soft_failure_test = True
break
if similar_soft_failure_test:
logger.log(
TRACE,
"%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
encoding_iana,
encoding_soft_failed,
)
continue
r_ = range(
0 if not bom_or_sig_available else len(sig_payload),
length,
int(length / steps),
)
multi_byte_bonus: bool = (
is_multi_byte_decoder
and decoded_payload is not None
and len(decoded_payload) < length
)
if multi_byte_bonus:
logger.log(
TRACE,
"Code page %s is a multi byte encoding table and it appear that at least one character "
"was encoded using n-bytes.",
encoding_iana,
)
max_chunk_gave_up: int = int(len(r_) / 4)
max_chunk_gave_up = max(max_chunk_gave_up, 2)
early_stop_count: int = 0
lazy_str_hard_failure = False
md_chunks: List[str] = []
md_ratios = []
try:
for chunk in cut_sequence_chunks(
sequences,
encoding_iana,
r_,
chunk_size,
bom_or_sig_available,
strip_sig_or_bom,
sig_payload,
is_multi_byte_decoder,
decoded_payload,
):
md_chunks.append(chunk)
md_ratios.append(
mess_ratio(
chunk,
threshold,
explain is True and 1 <= len(cp_isolation) <= 2,
)
)
if md_ratios[-1] >= threshold:
early_stop_count += 1
if (early_stop_count >= max_chunk_gave_up) or (
bom_or_sig_available and strip_sig_or_bom is False
):
break
except (
UnicodeDecodeError
) as e: # Lazy str loading may have missed something there
logger.log(
TRACE,
"LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
encoding_iana,
str(e),
)
early_stop_count = max_chunk_gave_up
lazy_str_hard_failure = True
# We might want to check the sequence again with the whole content
# Only if initial MD tests pass
if (
not lazy_str_hard_failure
and is_too_large_sequence
and not is_multi_byte_decoder
):
try:
sequences[int(50e3) :].decode(encoding_iana, errors="strict")
except UnicodeDecodeError as e:
logger.log(
TRACE,
"LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
encoding_iana,
str(e),
)
tested_but_hard_failure.append(encoding_iana)
continue
mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
tested_but_soft_failure.append(encoding_iana)
logger.log(
TRACE,
"%s was excluded because of initial chaos probing. Gave up %i time(s). "
"Computed mean chaos is %f %%.",
encoding_iana,
early_stop_count,
round(mean_mess_ratio * 100, ndigits=3),
)
# Preparing those fallbacks in case we got nothing.
if (
enable_fallback
and encoding_iana in ["ascii", "utf_8", specified_encoding]
and not lazy_str_hard_failure
):
fallback_entry = CharsetMatch(
sequences, encoding_iana, threshold, False, [], decoded_payload
)
if encoding_iana == specified_encoding:
fallback_specified = fallback_entry
elif encoding_iana == "ascii":
fallback_ascii = fallback_entry
else:
fallback_u8 = fallback_entry
continue
logger.log(
TRACE,
"%s passed initial chaos probing. Mean measured chaos is %f %%",
encoding_iana,
round(mean_mess_ratio * 100, ndigits=3),
)
if not is_multi_byte_decoder:
target_languages: List[str] = encoding_languages(encoding_iana)
else:
target_languages = mb_encoding_languages(encoding_iana)
if target_languages:
logger.log(
TRACE,
"{} should target any language(s) of {}".format(
encoding_iana, str(target_languages)
),
)
cd_ratios = []
# We shall skip the CD when it's about ASCII
# Most of the time it's not relevant to run "language-detection" on it.
if encoding_iana != "ascii":
for chunk in md_chunks:
chunk_languages = coherence_ratio(
chunk,
language_threshold,
",".join(target_languages) if target_languages else None,
)
cd_ratios.append(chunk_languages)
cd_ratios_merged = merge_coherence_ratios(cd_ratios)
if cd_ratios_merged:
logger.log(
TRACE,
"We detected language {} using {}".format(
cd_ratios_merged, encoding_iana
),
)
results.append(
CharsetMatch(
sequences,
encoding_iana,
mean_mess_ratio,
bom_or_sig_available,
cd_ratios_merged,
decoded_payload,
)
)
if (
encoding_iana in [specified_encoding, "ascii", "utf_8"]
and mean_mess_ratio < 0.1
):
logger.debug(
"Encoding detection: %s is most likely the one.", encoding_iana
)
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level)
return CharsetMatches([results[encoding_iana]])
if encoding_iana == sig_encoding:
logger.debug(
"Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
"the beginning of the sequence.",
encoding_iana,
)
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level)
return CharsetMatches([results[encoding_iana]])
if len(results) == 0:
if fallback_u8 or fallback_ascii or fallback_specified:
logger.log(
TRACE,
"Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
)
if fallback_specified:
logger.debug(
"Encoding detection: %s will be used as a fallback match",
fallback_specified.encoding,
)
results.append(fallback_specified)
elif (
(fallback_u8 and fallback_ascii is None)
or (
fallback_u8
and fallback_ascii
and fallback_u8.fingerprint != fallback_ascii.fingerprint
)
or (fallback_u8 is not None)
):
logger.debug("Encoding detection: utf_8 will be used as a fallback match")
results.append(fallback_u8)
elif fallback_ascii:
logger.debug("Encoding detection: ascii will be used as a fallback match")
results.append(fallback_ascii)
if results:
logger.debug(
"Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
results.best().encoding, # type: ignore
len(results) - 1,
)
else:
logger.debug("Encoding detection: Unable to determine any suitable charset.")
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level)
return results
def from_fp(
fp: BinaryIO,
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.20,
cp_isolation: Optional[List[str]] = None,
cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
language_threshold: float = 0.1,
enable_fallback: bool = True,
) -> CharsetMatches:
"""
Same as the from_bytes function, but using a file pointer that is already open.
Will not close the file pointer.
"""
return from_bytes(
fp.read(),
steps,
chunk_size,
threshold,
cp_isolation,
cp_exclusion,
preemptive_behaviour,
explain,
language_threshold,
enable_fallback,
)
def from_path(
path: Union[str, bytes, PathLike], # type: ignore[type-arg]
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.20,
cp_isolation: Optional[List[str]] = None,
cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
language_threshold: float = 0.1,
enable_fallback: bool = True,
) -> CharsetMatches:
"""
Same as the from_bytes function, but with one extra step: opening and reading the given file path in binary mode.
Can raise IOError.
"""
with open(path, "rb") as fp:
return from_fp(
fp,
steps,
chunk_size,
threshold,
cp_isolation,
cp_exclusion,
preemptive_behaviour,
explain,
language_threshold,
enable_fallback,
)
def is_binary(
fp_or_path_or_payload: Union[PathLike, str, BinaryIO, bytes], # type: ignore[type-arg]
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.20,
cp_isolation: Optional[List[str]] = None,
cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
language_threshold: float = 0.1,
enable_fallback: bool = False,
) -> bool:
"""
Detect if the given input (file, bytes, or path) points to a binary file, i.e. not text.
Based on the same main heuristic algorithms and default kwargs, with the sole exception that fallback matches
are disabled, to be stricter with content that is ASCII-compatible but unlikely to be text.
"""
if isinstance(fp_or_path_or_payload, (str, PathLike)):
guesses = from_path(
fp_or_path_or_payload,
steps=steps,
chunk_size=chunk_size,
threshold=threshold,
cp_isolation=cp_isolation,
cp_exclusion=cp_exclusion,
preemptive_behaviour=preemptive_behaviour,
explain=explain,
language_threshold=language_threshold,
enable_fallback=enable_fallback,
)
elif isinstance(
fp_or_path_or_payload,
(
bytes,
bytearray,
),
):
guesses = from_bytes(
fp_or_path_or_payload,
steps=steps,
chunk_size=chunk_size,
threshold=threshold,
cp_isolation=cp_isolation,
cp_exclusion=cp_exclusion,
preemptive_behaviour=preemptive_behaviour,
explain=explain,
language_threshold=language_threshold,
enable_fallback=enable_fallback,
)
else:
guesses = from_fp(
fp_or_path_or_payload,
steps=steps,
chunk_size=chunk_size,
threshold=threshold,
cp_isolation=cp_isolation,
cp_exclusion=cp_exclusion,
preemptive_behaviour=preemptive_behaviour,
explain=explain,
language_threshold=language_threshold,
enable_fallback=enable_fallback,
)
return not guesses
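
A minimal usage sketch for the entry points above, assuming the public charset_normalizer package (the payload is illustrative):

from charset_normalizer import from_bytes, is_binary

payload = "Comment ça va ?".encode("cp1252")
best = from_bytes(payload).best()
if best is not None:
    print(best.encoding, str(best))  # best-guess code page and the decoded text
print(is_binary(b"\x00\x01\x02\x03"))  # heuristically flags non-textual payloads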

View File

@@ -0,0 +1,78 @@
#
# The Python Imaging Library.
# $Id$
#
# Basic McIdas support for PIL
#
# History:
# 1997-05-05 fl Created (8-bit images only)
# 2009-03-08 fl Added 16/32-bit support.
#
# Thanks to Richard Jones and Craig Swank for specs and samples.
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import struct
from . import Image, ImageFile
def _accept(prefix: bytes) -> bool:
return prefix[:8] == b"\x00\x00\x00\x00\x00\x00\x00\x04"
##
# Image plugin for McIdas area images.
class McIdasImageFile(ImageFile.ImageFile):
format = "MCIDAS"
format_description = "McIdas area file"
def _open(self) -> None:
# parse area file directory
assert self.fp is not None
s = self.fp.read(256)
if not _accept(s) or len(s) != 256:
msg = "not an McIdas area file"
raise SyntaxError(msg)
self.area_descriptor_raw = s
self.area_descriptor = w = [0] + list(struct.unpack("!64i", s))
# get mode
if w[11] == 1:
mode = rawmode = "L"
elif w[11] == 2:
# FIXME: add memory map support
mode = "I"
rawmode = "I;16B"
elif w[11] == 4:
# FIXME: add memory map support
mode = "I"
rawmode = "I;32B"
else:
msg = "unsupported McIdas format"
raise SyntaxError(msg)
self._mode = mode
self._size = w[10], w[9]
offset = w[34] + w[15]
stride = w[15] + w[10] * w[11] * w[14]
self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride, 1))]
# --------------------------------------------------------------------
# registry
Image.register_open(McIdasImageFile.format, McIdasImageFile, _accept)
# no default extension
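
Since the plugin registers itself via Image.register_open, a McIdas area file can be opened through the normal PIL entry point; a short sketch (the path is a placeholder):

from PIL import Image

with Image.open("sample.area") as im:  # placeholder path to a McIdas area file
    print(im.format, im.mode, im.size)  # e.g. MCIDAS, "L" for 8-bit data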

View File

@@ -0,0 +1,101 @@
from __future__ import annotations
import http.client as httplib
from email.errors import MultipartInvariantViolationDefect, StartBoundaryNotFoundDefect
from ..exceptions import HeaderParsingError
def is_fp_closed(obj: object) -> bool:
"""
Checks whether a given file-like object is closed.
:param obj:
The file-like object to check.
"""
try:
# Check `isclosed()` first, in case Python3 doesn't set `closed`.
# GH Issue #928
return obj.isclosed() # type: ignore[no-any-return, attr-defined]
except AttributeError:
pass
try:
# Check via the official file-like-object way.
return obj.closed # type: ignore[no-any-return, attr-defined]
except AttributeError:
pass
try:
# Check if the object is a container for another file-like object that
# gets released on exhaustion (e.g. HTTPResponse).
return obj.fp is None # type: ignore[attr-defined]
except AttributeError:
pass
raise ValueError("Unable to determine whether fp is closed.")
def assert_header_parsing(headers: httplib.HTTPMessage) -> None:
"""
Asserts whether all headers have been successfully parsed.
Extracts encountered errors from the result of parsing headers.
Only works on Python 3.
:param http.client.HTTPMessage headers: Headers to verify.
:raises urllib3.exceptions.HeaderParsingError:
If parsing errors are found.
"""
# This will fail silently if we pass in the wrong kind of parameter.
# To make debugging easier add an explicit check.
if not isinstance(headers, httplib.HTTPMessage):
raise TypeError(f"expected httplib.Message, got {type(headers)}.")
unparsed_data = None
# get_payload is actually email.message.Message.get_payload;
# we're only interested in the result if it's not a multipart message
if not headers.is_multipart():
payload = headers.get_payload()
if isinstance(payload, (bytes, str)):
unparsed_data = payload
# httplib is assuming a response body is available
# when parsing headers even when httplib only sends
# header data to parse_headers() This results in
# defects on multipart responses in particular.
# See: https://github.com/urllib3/urllib3/issues/800
# So we ignore the following defects:
# - StartBoundaryNotFoundDefect:
# The claimed start boundary was never found.
# - MultipartInvariantViolationDefect:
# A message claimed to be a multipart but no subparts were found.
defects = [
defect
for defect in headers.defects
if not isinstance(
defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)
)
]
if defects or unparsed_data:
raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
def is_response_to_head(response: httplib.HTTPResponse) -> bool:
"""
Checks whether the request that produced this response was a HEAD request.
:param http.client.HTTPResponse response:
Response to check if the originating request
used 'HEAD' as a method.
"""
# FIXME: Can we do this somehow without accessing private httplib _method?
method_str = response._method # type: str # type: ignore[attr-defined]
return method_str.upper() == "HEAD"
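
A quick sketch of these helpers with stdlib objects, assuming the module path urllib3.util.response used by current urllib3 releases:

import http.client
import io

from urllib3.util.response import assert_header_parsing, is_fp_closed

buf = io.BytesIO(b"data")
print(is_fp_closed(buf))  # False while the buffer is open
buf.close()
print(is_fp_closed(buf))  # True once closed

headers = http.client.parse_headers(io.BytesIO(b"Content-Type: text/plain\r\n\r\n"))
assert_header_parsing(headers)  # returns None; raises HeaderParsingError on defects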

View File

@@ -0,0 +1,135 @@
#
# The Python Imaging Library.
# $Id$
#
# transform wrappers
#
# History:
# 2002-04-08 fl Created
#
# Copyright (c) 2002 by Secret Labs AB
# Copyright (c) 2002 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from typing import Sequence
from . import Image
class Transform(Image.ImageTransformHandler):
"""Base class for other transforms defined in :py:mod:`~PIL.ImageTransform`."""
method: Image.Transform
def __init__(self, data: Sequence[int]) -> None:
self.data = data
def getdata(self) -> tuple[Image.Transform, Sequence[int]]:
return self.method, self.data
def transform(
self,
size: tuple[int, int],
image: Image.Image,
**options: dict[str, str | int | tuple[int, ...] | list[int]],
) -> Image.Image:
"""Perform the transform. Called from :py:meth:`.Image.transform`."""
# can be overridden
method, data = self.getdata()
return image.transform(size, method, data, **options)
class AffineTransform(Transform):
"""
Define an affine image transform.
This function takes a 6-tuple (a, b, c, d, e, f) which contains the first
two rows of an affine transform matrix. For each pixel (x, y) in the
output image, the new value is taken from a position (a x + b y + c,
d x + e y + f) in the input image, rounded to the nearest pixel.
This function can be used to scale, translate, rotate, and shear the
original image.
See :py:meth:`.Image.transform`
:param matrix: A 6-tuple (a, b, c, d, e, f) containing the first two rows
from an affine transform matrix.
"""
method = Image.Transform.AFFINE
class PerspectiveTransform(Transform):
"""
Define a perspective image transform.
This function takes an 8-tuple (a, b, c, d, e, f, g, h). For each pixel
(x, y) in the output image, the new value is taken from a position
((a x + b y + c) / (g x + h y + 1), (d x + e y + f) / (g x + h y + 1)) in
the input image, rounded to the nearest pixel.
This function can be used to scale, translate, rotate, and shear the
original image.
See :py:meth:`.Image.transform`
:param matrix: An 8-tuple (a, b, c, d, e, f, g, h).
"""
method = Image.Transform.PERSPECTIVE
class ExtentTransform(Transform):
"""
Define a transform to extract a subregion from an image.
Maps a rectangle (defined by two corners) from the image to a rectangle of
the given size. The resulting image will contain data sampled from between
the corners, such that (x0, y0) in the input image will end up at (0,0) in
the output image, and (x1, y1) at size.
This method can be used to crop, stretch, shrink, or mirror an arbitrary
rectangle in the current image. It is slightly slower than crop, but about
as fast as a corresponding resize operation.
See :py:meth:`.Image.transform`
:param bbox: A 4-tuple (x0, y0, x1, y1) which specifies two points in the
input image's coordinate system. See :ref:`coordinate-system`.
"""
method = Image.Transform.EXTENT
class QuadTransform(Transform):
"""
Define a quad image transform.
Maps a quadrilateral (a region defined by four corners) from the image to a
rectangle of the given size.
See :py:meth:`.Image.transform`
:param xy: An 8-tuple (x0, y0, x1, y1, x2, y2, x3, y3) which contains the
upper left, lower left, lower right, and upper right corners of the
source quadrilateral.
"""
method = Image.Transform.QUAD
class MeshTransform(Transform):
"""
Define a mesh image transform. A mesh transform consists of one or more
individual quad transforms.
See :py:meth:`.Image.transform`
:param data: A list of (bbox, quad) tuples.
"""
method = Image.Transform.MESH
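
Each subclass is passed directly to Image.transform, which calls back into Transform.transform(); a minimal sketch:

from PIL import Image, ImageTransform

im = Image.new("RGB", (100, 100), "white")
# Stretch the left half of the input across the whole 100x100 output.
left_half = im.transform((100, 100), ImageTransform.ExtentTransform((0, 0, 50, 100)))
# Sample output pixel (x, y) from input (x - 10, y - 20), shifting content right and down.
shifted = im.transform((100, 100), ImageTransform.AffineTransform((1, 0, -10, 0, 1, -20)))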

View File

@@ -0,0 +1,110 @@
let Status = {
SUCCESS_HEADER: -1,
SUCCESS_EOF: -2,
ERROR_TIMEOUT: -3,
ERROR_EXCEPTION: -4,
};
let connections = {};
let nextConnectionID = 1;
const encoder = new TextEncoder();
self.addEventListener("message", async function (event) {
if (event.data.close) {
let connectionID = event.data.close;
delete connections[connectionID];
return;
} else if (event.data.getMore) {
let connectionID = event.data.getMore;
let { curOffset, value, reader, intBuffer, byteBuffer } =
connections[connectionID];
// if we still have some in buffer, then just send it back straight away
if (!value || curOffset >= value.length) {
// read another buffer if required
try {
let readResponse = await reader.read();
if (readResponse.done) {
// read everything - clear connection and return
delete connections[connectionID];
Atomics.store(intBuffer, 0, Status.SUCCESS_EOF);
Atomics.notify(intBuffer, 0);
// finished reading successfully
// return from event handler
return;
}
curOffset = 0;
connections[connectionID].value = readResponse.value;
value = readResponse.value;
} catch (error) {
console.log("Request exception:", error);
let errorBytes = encoder.encode(error.message);
let written = errorBytes.length;
byteBuffer.set(errorBytes);
intBuffer[1] = written;
Atomics.store(intBuffer, 0, Status.ERROR_EXCEPTION);
Atomics.notify(intBuffer, 0);
}
}
// send as much buffer as we can
let curLen = value.length - curOffset;
if (curLen > byteBuffer.length) {
curLen = byteBuffer.length;
}
byteBuffer.set(value.subarray(curOffset, curOffset + curLen), 0);
Atomics.store(intBuffer, 0, curLen); // store current length in bytes
Atomics.notify(intBuffer, 0);
curOffset += curLen;
connections[connectionID].curOffset = curOffset;
return;
} else {
// start fetch
let connectionID = nextConnectionID;
nextConnectionID += 1;
const intBuffer = new Int32Array(event.data.buffer);
const byteBuffer = new Uint8Array(event.data.buffer, 8);
try {
const response = await fetch(event.data.url, event.data.fetchParams);
// return the headers first via textencoder
var headers = [];
for (const pair of response.headers.entries()) {
headers.push([pair[0], pair[1]]);
}
let headerObj = {
headers: headers,
status: response.status,
connectionID,
};
const headerText = JSON.stringify(headerObj);
let headerBytes = encoder.encode(headerText);
let written = headerBytes.length;
byteBuffer.set(headerBytes);
intBuffer[1] = written;
// make a connection
connections[connectionID] = {
reader: response.body.getReader(),
intBuffer: intBuffer,
byteBuffer: byteBuffer,
value: undefined,
curOffset: 0,
};
// set header ready
Atomics.store(intBuffer, 0, Status.SUCCESS_HEADER);
Atomics.notify(intBuffer, 0);
// all fetching after this goes through a new postMessage call with getMore
// this allows for parallel requests
} catch (error) {
console.log("Request exception:", error);
let errorBytes = encoder.encode(error.message);
let written = errorBytes.length;
byteBuffer.set(errorBytes);
intBuffer[1] = written;
Atomics.store(intBuffer, 0, Status.ERROR_EXCEPTION);
Atomics.notify(intBuffer, 0);
}
}
});
self.postMessage({ inited: true });

View File

@@ -0,0 +1,43 @@
requests-2.31.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
requests-2.31.0.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
requests-2.31.0.dist-info/METADATA,sha256=eCPokOnbb0FROLrfl0R5EpDvdufsb9CaN4noJH__54I,4634
requests-2.31.0.dist-info/RECORD,,
requests-2.31.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
requests-2.31.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
requests-2.31.0.dist-info/top_level.txt,sha256=fMSVmHfb5rbGOo6xv-O_tUX6j-WyixssE-SnwcDRxNQ,9
requests/__init__.py,sha256=LvmKhjIz8mHaKXthC2Mv5ykZ1d92voyf3oJpd-VuAig,4963
requests/__pycache__/__init__.cpython-311.pyc,,
requests/__pycache__/__version__.cpython-311.pyc,,
requests/__pycache__/_internal_utils.cpython-311.pyc,,
requests/__pycache__/adapters.cpython-311.pyc,,
requests/__pycache__/api.cpython-311.pyc,,
requests/__pycache__/auth.cpython-311.pyc,,
requests/__pycache__/certs.cpython-311.pyc,,
requests/__pycache__/compat.cpython-311.pyc,,
requests/__pycache__/cookies.cpython-311.pyc,,
requests/__pycache__/exceptions.cpython-311.pyc,,
requests/__pycache__/help.cpython-311.pyc,,
requests/__pycache__/hooks.cpython-311.pyc,,
requests/__pycache__/models.cpython-311.pyc,,
requests/__pycache__/packages.cpython-311.pyc,,
requests/__pycache__/sessions.cpython-311.pyc,,
requests/__pycache__/status_codes.cpython-311.pyc,,
requests/__pycache__/structures.cpython-311.pyc,,
requests/__pycache__/utils.cpython-311.pyc,,
requests/__version__.py,sha256=ssI3Ezt7PaxgkOW45GhtwPUclo_SO_ygtIm4A74IOfw,435
requests/_internal_utils.py,sha256=nMQymr4hs32TqVo5AbCrmcJEhvPUh7xXlluyqwslLiQ,1495
requests/adapters.py,sha256=v_FmjU5KZ76k-YttShZYB5RprIzhhL8Y3zgW9p4eBQ8,19553
requests/api.py,sha256=q61xcXq4tmiImrvcSVLTbFyCiD2F-L_-hWKGbz4y8vg,6449
requests/auth.py,sha256=h-HLlVx9j8rKV5hfSAycP2ApOSglTz77R0tz7qCbbEE,10187
requests/certs.py,sha256=Z9Sb410Anv6jUFTyss0jFFhU6xst8ctELqfy8Ev23gw,429
requests/compat.py,sha256=yxntVOSEHGMrn7FNr_32EEam1ZNAdPRdSE13_yaHzTk,1451
requests/cookies.py,sha256=kD3kNEcCj-mxbtf5fJsSaT86eGoEYpD3X0CSgpzl7BM,18560
requests/exceptions.py,sha256=DhveFBclVjTRxhRduVpO-GbMYMID2gmjdLfNEqNpI_U,3811
requests/help.py,sha256=gPX5d_H7Xd88aDABejhqGgl9B1VFRTt5BmiYvL3PzIQ,3875
requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733
requests/models.py,sha256=-DlKi0or8gFAM6VzutobXvvBW_2wrJuOF5NfndTIddA,35223
requests/packages.py,sha256=DXgv-FJIczZITmv0vEBAhWj4W-5CGCIN_ksvgR17Dvs,957
requests/sessions.py,sha256=-LvTzrPtetSTrR3buxu4XhdgMrJFLB1q5D7P--L2Xhw,30373
requests/status_codes.py,sha256=FvHmT5uH-_uimtRz5hH9VCbt7VV-Nei2J9upbej6j8g,4235
requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912
requests/utils.py,sha256=6sx2X3cIVA8BgWOg8odxFy-_lbWDFETU8HI4fU4Rmqw,33448
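
Each RECORD row holds a path, a "sha256=" entry encoding the urlsafe-base64 SHA-256 digest with "=" padding stripped, and the file size. A small sketch reproducing the encoding; note it yields exactly the digest recorded above for the empty REQUESTED file:

import base64
import hashlib

def record_digest(data: bytes) -> str:
    # PEP 376/427-style hash entry: urlsafe base64 of the raw digest, padding removed
    raw = hashlib.sha256(data).digest()
    return "sha256=" + base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")

print(record_digest(b""))  # sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU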

View File

@@ -0,0 +1,124 @@
#
# The Python Imaging Library.
# $Id$
#
# WAL file handling
#
# History:
# 2003-04-23 fl created
#
# Copyright (c) 2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
"""
This reader is based on the specification available from:
https://www.flipcode.com/archives/Quake_2_BSP_File_Format.shtml
and has been tested with a few sample files found using google.
.. note::
This format cannot be automatically recognized, so the reader
is not registered for use with :py:func:`PIL.Image.open()`.
To open a WAL file, use the :py:func:`PIL.WalImageFile.open()` function instead.
"""
from __future__ import annotations
from . import Image, ImageFile
from ._binary import i32le as i32
class WalImageFile(ImageFile.ImageFile):
format = "WAL"
format_description = "Quake2 Texture"
def _open(self):
self._mode = "P"
# read header fields
header = self.fp.read(32 + 24 + 32 + 12)
self._size = i32(header, 32), i32(header, 36)
Image._decompression_bomb_check(self.size)
# load pixel data
offset = i32(header, 40)
self.fp.seek(offset)
# strings are null-terminated
self.info["name"] = header[:32].split(b"\0", 1)[0]
next_name = header[56 : 56 + 32].split(b"\0", 1)[0]
if next_name:
self.info["next_name"] = next_name
def load(self):
if not self.im:
self.im = Image.core.new(self.mode, self.size)
self.frombytes(self.fp.read(self.size[0] * self.size[1]))
self.putpalette(quake2palette)
return Image.Image.load(self)
def open(filename):
"""
Load texture from a Quake2 WAL texture file.
By default, a Quake2 standard palette is attached to the texture.
To override the palette, use the :py:func:`PIL.Image.Image.putpalette()` method.
:param filename: WAL file name, or an opened file handle.
:returns: An image instance.
"""
return WalImageFile(filename)
quake2palette = (
# default palette taken from piffo 0.93 by Hans Häggström
b"\x01\x01\x01\x0b\x0b\x0b\x12\x12\x12\x17\x17\x17\x1b\x1b\x1b\x1e"
b"\x1e\x1e\x22\x22\x22\x26\x26\x26\x29\x29\x29\x2c\x2c\x2c\x2f\x2f"
b"\x2f\x32\x32\x32\x35\x35\x35\x37\x37\x37\x3a\x3a\x3a\x3c\x3c\x3c"
b"\x24\x1e\x13\x22\x1c\x12\x20\x1b\x12\x1f\x1a\x10\x1d\x19\x10\x1b"
b"\x17\x0f\x1a\x16\x0f\x18\x14\x0d\x17\x13\x0d\x16\x12\x0d\x14\x10"
b"\x0b\x13\x0f\x0b\x10\x0d\x0a\x0f\x0b\x0a\x0d\x0b\x07\x0b\x0a\x07"
b"\x23\x23\x26\x22\x22\x25\x22\x20\x23\x21\x1f\x22\x20\x1e\x20\x1f"
b"\x1d\x1e\x1d\x1b\x1c\x1b\x1a\x1a\x1a\x19\x19\x18\x17\x17\x17\x16"
b"\x16\x14\x14\x14\x13\x13\x13\x10\x10\x10\x0f\x0f\x0f\x0d\x0d\x0d"
b"\x2d\x28\x20\x29\x24\x1c\x27\x22\x1a\x25\x1f\x17\x38\x2e\x1e\x31"
b"\x29\x1a\x2c\x25\x17\x26\x20\x14\x3c\x30\x14\x37\x2c\x13\x33\x28"
b"\x12\x2d\x24\x10\x28\x1f\x0f\x22\x1a\x0b\x1b\x14\x0a\x13\x0f\x07"
b"\x31\x1a\x16\x30\x17\x13\x2e\x16\x10\x2c\x14\x0d\x2a\x12\x0b\x27"
b"\x0f\x0a\x25\x0f\x07\x21\x0d\x01\x1e\x0b\x01\x1c\x0b\x01\x1a\x0b"
b"\x01\x18\x0a\x01\x16\x0a\x01\x13\x0a\x01\x10\x07\x01\x0d\x07\x01"
b"\x29\x23\x1e\x27\x21\x1c\x26\x20\x1b\x25\x1f\x1a\x23\x1d\x19\x21"
b"\x1c\x18\x20\x1b\x17\x1e\x19\x16\x1c\x18\x14\x1b\x17\x13\x19\x14"
b"\x10\x17\x13\x0f\x14\x10\x0d\x12\x0f\x0b\x0f\x0b\x0a\x0b\x0a\x07"
b"\x26\x1a\x0f\x23\x19\x0f\x20\x17\x0f\x1c\x16\x0f\x19\x13\x0d\x14"
b"\x10\x0b\x10\x0d\x0a\x0b\x0a\x07\x33\x22\x1f\x35\x29\x26\x37\x2f"
b"\x2d\x39\x35\x34\x37\x39\x3a\x33\x37\x39\x30\x34\x36\x2b\x31\x34"
b"\x27\x2e\x31\x22\x2b\x2f\x1d\x28\x2c\x17\x25\x2a\x0f\x20\x26\x0d"
b"\x1e\x25\x0b\x1c\x22\x0a\x1b\x20\x07\x19\x1e\x07\x17\x1b\x07\x14"
b"\x18\x01\x12\x16\x01\x0f\x12\x01\x0b\x0d\x01\x07\x0a\x01\x01\x01"
b"\x2c\x21\x21\x2a\x1f\x1f\x29\x1d\x1d\x27\x1c\x1c\x26\x1a\x1a\x24"
b"\x18\x18\x22\x17\x17\x21\x16\x16\x1e\x13\x13\x1b\x12\x12\x18\x10"
b"\x10\x16\x0d\x0d\x12\x0b\x0b\x0d\x0a\x0a\x0a\x07\x07\x01\x01\x01"
b"\x2e\x30\x29\x2d\x2e\x27\x2b\x2c\x26\x2a\x2a\x24\x28\x29\x23\x27"
b"\x27\x21\x26\x26\x1f\x24\x24\x1d\x22\x22\x1c\x1f\x1f\x1a\x1c\x1c"
b"\x18\x19\x19\x16\x17\x17\x13\x13\x13\x10\x0f\x0f\x0d\x0b\x0b\x0a"
b"\x30\x1e\x1b\x2d\x1c\x19\x2c\x1a\x17\x2a\x19\x14\x28\x17\x13\x26"
b"\x16\x10\x24\x13\x0f\x21\x12\x0d\x1f\x10\x0b\x1c\x0f\x0a\x19\x0d"
b"\x0a\x16\x0b\x07\x12\x0a\x07\x0f\x07\x01\x0a\x01\x01\x01\x01\x01"
b"\x28\x29\x38\x26\x27\x36\x25\x26\x34\x24\x24\x31\x22\x22\x2f\x20"
b"\x21\x2d\x1e\x1f\x2a\x1d\x1d\x27\x1b\x1b\x25\x19\x19\x21\x17\x17"
b"\x1e\x14\x14\x1b\x13\x12\x17\x10\x0f\x13\x0d\x0b\x0f\x0a\x07\x07"
b"\x2f\x32\x29\x2d\x30\x26\x2b\x2e\x24\x29\x2c\x21\x27\x2a\x1e\x25"
b"\x28\x1c\x23\x26\x1a\x21\x25\x18\x1e\x22\x14\x1b\x1f\x10\x19\x1c"
b"\x0d\x17\x1a\x0a\x13\x17\x07\x10\x13\x01\x0d\x0f\x01\x0a\x0b\x01"
b"\x01\x3f\x01\x13\x3c\x0b\x1b\x39\x10\x20\x35\x14\x23\x31\x17\x23"
b"\x2d\x18\x23\x29\x18\x3f\x3f\x3f\x3f\x3f\x39\x3f\x3f\x31\x3f\x3f"
b"\x2a\x3f\x3f\x20\x3f\x3f\x14\x3f\x3c\x12\x3f\x39\x0f\x3f\x35\x0b"
b"\x3f\x32\x07\x3f\x2d\x01\x3d\x2a\x01\x3b\x26\x01\x39\x21\x01\x37"
b"\x1d\x01\x34\x1a\x01\x32\x16\x01\x2f\x12\x01\x2d\x0f\x01\x2a\x0b"
b"\x01\x27\x07\x01\x23\x01\x01\x1d\x01\x01\x17\x01\x01\x10\x01\x01"
b"\x3d\x01\x01\x19\x19\x3f\x3f\x01\x01\x01\x01\x3f\x16\x16\x13\x10"
b"\x10\x0f\x0d\x0d\x0b\x3c\x2e\x2a\x36\x27\x20\x30\x21\x18\x29\x1b"
b"\x10\x3c\x39\x37\x37\x32\x2f\x31\x2c\x28\x2b\x26\x21\x30\x22\x20"
)
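
Because the format has no signature, the module-level open() is the entry point instead of PIL.Image.open(); a short sketch (the .wal path is a placeholder):

from PIL import WalImageFile

im = WalImageFile.open("textures/arrow0.wal")  # placeholder path
im.save("arrow0.png")  # a "P" image with the default Quake2 palette attached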

View File

@@ -0,0 +1,112 @@
#
# The Python Imaging Library.
# $Id$
#
# Binary input/output support routines.
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1995-2003 by Fredrik Lundh
# Copyright (c) 2012 by Brian Crowell
#
# See the README file for information on usage and redistribution.
#
"""Binary input/output support routines."""
from __future__ import annotations
from struct import pack, unpack_from
def i8(c: bytes) -> int:
return c[0]
def o8(i: int) -> bytes:
return bytes((i & 255,))
# Input, le = little endian, be = big endian
def i16le(c: bytes, o: int = 0) -> int:
"""
Converts a 2-byte (16-bit) string to an unsigned integer.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
"""
return unpack_from("<H", c, o)[0]
def si16le(c: bytes, o: int = 0) -> int:
"""
Converts a 2-byte (16-bit) string to a signed integer.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
"""
return unpack_from("<h", c, o)[0]
def si16be(c: bytes, o: int = 0) -> int:
"""
Converts a 2-byte (16-bit) string to a signed integer, big endian.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
"""
return unpack_from(">h", c, o)[0]
def i32le(c: bytes, o: int = 0) -> int:
"""
Converts a 4-byte (32-bit) string to an unsigned integer.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
"""
return unpack_from("<I", c, o)[0]
def si32le(c: bytes, o: int = 0) -> int:
"""
Converts a 4-byte (32-bit) string to a signed integer.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
"""
return unpack_from("<i", c, o)[0]
def si32be(c: bytes, o: int = 0) -> int:
"""
Converts a 4-byte (32-bit) string to a signed integer, big endian.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
"""
return unpack_from(">i", c, o)[0]
def i16be(c: bytes, o: int = 0) -> int:
return unpack_from(">H", c, o)[0]
def i32be(c: bytes, o: int = 0) -> int:
return unpack_from(">I", c, o)[0]
# Output, le = little endian, be = big endian
def o16le(i: int) -> bytes:
return pack("<H", i)
def o32le(i: int) -> bytes:
return pack("<I", i)
def o16be(i: int) -> bytes:
return pack(">H", i)
def o32be(i: int) -> bytes:
return pack(">I", i)

View File

@@ -0,0 +1,2 @@
[console_scripts]
normalizer = charset_normalizer.cli:cli_detect

View File

@@ -0,0 +1,61 @@
# connectionhandler.py
import json
import requests
from time import sleep
def simple_get(connection_url: str) -> dict:
try:
response = requests.get(connection_url)
if response.status_code == 200:
return {"status": "ok", "text": response.text, "status_code": response.status_code}
else:
return {"status": "error", "status_code": response.status_code}
except Exception as e:
return {"status": "error", "status_code": 0}
def simple_delete(connection_url: str, data) -> dict:
try:
response = requests.delete(connection_url, json=data)
if response.status_code == 200:
return {"status": "ok", "status_code": response.status_code}
else:
return {"status": "error", "text": "Failed to delete", "status_code": response.status_code}
except Exception as e:
return {"status": "error", "status_code": 0}
def stream_post(connection_url: str, data, callback: callable) -> dict:
try:
headers = {
"Content-Type": "application/json"
}
response = requests.post(connection_url, headers=headers, data=data, stream=True)
if response.status_code == 200:
for line in response.iter_lines():
if line:
callback(json.loads(line.decode("utf-8")))
return {"status": "ok", "status_code": response.status_code}
else:
return {"status": "error", "status_code": response.status_code}
except Exception as e:
return {"status": "error", "status_code": 0}
def stream_post_fake(connection_url: str, data, callback: callable) -> dict:
data = {
"status": "pulling manifest"
}
callback(data)
for i in range(2):
for a in range(11):
sleep(.1)
data = {
"status": f"downloading digestname {i}",
"digest": f"digestname {i}",
"total": 500,
"completed": a * 50
}
callback(data)
for msg in ["verifying sha256 digest", "writting manifest", "removing any unused layers", "success"]:
sleep(.1)
data = {"status": msg}
callback(data)
return {"status": "ok", "status_code": 200}

View File

@@ -0,0 +1,5 @@
from __future__ import annotations
from .features import pilinfo
pilinfo(supported_formats=False)

View File

@@ -0,0 +1,6 @@
from .__main__ import cli_detect, query_yes_no
__all__ = (
"cli_detect",
"query_yes_no",
)

View File

@@ -0,0 +1,272 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
width="128"
height="128"
id="svg11300"
version="1.0"
style="display:inline;enable-background:new"
viewBox="0 0 128 128"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:dc="http://purl.org/dc/elements/1.1/">
<title
id="title4162">Adwaita Icon Template</title>
<defs
id="defs3">
<linearGradient
id="linearGradient35">
<stop
style="stop-color:#3d3846;stop-opacity:1;"
offset="0"
id="stop35" />
<stop
style="stop-color:#241f31;stop-opacity:1;"
offset="1"
id="stop36" />
</linearGradient>
<linearGradient
id="linearGradient33">
<stop
style="stop-color:#99c1ff;stop-opacity:1;"
offset="0"
id="stop33" />
<stop
style="stop-color:#62a0ea;stop-opacity:1;"
offset="1"
id="stop34" />
</linearGradient>
<linearGradient
id="linearGradient31">
<stop
style="stop-color:#deddda;stop-opacity:1;"
offset="0"
id="stop31" />
<stop
style="stop-color:#f6f5f4;stop-opacity:1;"
offset="1"
id="stop32" />
</linearGradient>
<linearGradient
id="linearGradient24">
<stop
style="stop-color:#f6f5f4;stop-opacity:1;"
offset="0"
id="stop24" />
<stop
style="stop-color:#deddda;stop-opacity:1;"
offset="1"
id="stop23" />
</linearGradient>
<linearGradient
id="linearGradient12">
<stop
style="stop-color:#000000;stop-opacity:0;"
offset="0"
id="stop12" />
<stop
style="stop-color:#000000;stop-opacity:0;"
offset="1"
id="stop13" />
</linearGradient>
<linearGradient
xlink:href="#linearGradient12"
id="linearGradient13"
x1="40.888428"
y1="205.03607"
x2="40.90649"
y2="212.09515"
gradientUnits="userSpaceOnUse" />
<linearGradient
xlink:href="#linearGradient24"
id="linearGradient23"
x1="40.888428"
y1="205.03607"
x2="40.90649"
y2="212.09515"
gradientUnits="userSpaceOnUse" />
<linearGradient
xlink:href="#linearGradient24"
id="linearGradient29"
gradientUnits="userSpaceOnUse"
x1="40.888428"
y1="205.03607"
x2="40.90649"
y2="212.09515"
gradientTransform="matrix(-1,0,0,1,127.14843,2.8384866e-4)" />
<linearGradient
xlink:href="#linearGradient12"
id="linearGradient30"
gradientUnits="userSpaceOnUse"
x1="40.888428"
y1="205.03607"
x2="40.90649"
y2="212.09515"
gradientTransform="matrix(-1,0,0,1,127.14843,2.8384866e-4)" />
<linearGradient
xlink:href="#linearGradient31"
id="linearGradient32"
x1="63.552597"
y1="214.19464"
x2="63.552597"
y2="241.24492"
gradientUnits="userSpaceOnUse" />
<linearGradient
xlink:href="#linearGradient33"
id="linearGradient34"
x1="64.683159"
y1="267.04626"
x2="64.895935"
y2="278.69958"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.99245101,0,0,1.1818075,0.48386604,-51.63542)" />
<linearGradient
xlink:href="#linearGradient35"
id="linearGradient36"
x1="45.111782"
y1="235.32567"
x2="45.111782"
y2="229.17581"
gradientUnits="userSpaceOnUse" />
<linearGradient
xlink:href="#linearGradient35"
id="linearGradient37"
gradientUnits="userSpaceOnUse"
x1="45.111782"
y1="235.32567"
x2="45.111782"
y2="229.17581"
gradientTransform="translate(36.957243,0.15686125)" />
</defs>
<metadata
id="metadata4">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:creator>
<cc:Agent>
<dc:title>GNOME Design Team</dc:title>
</cc:Agent>
</dc:creator>
<dc:source />
<cc:license
rdf:resource="http://creativecommons.org/licenses/by-sa/4.0/" />
<dc:title>Adwaita Icon Template</dc:title>
<dc:subject>
<rdf:Bag />
</dc:subject>
<dc:date />
<dc:rights>
<cc:Agent>
<dc:title />
</cc:Agent>
</dc:rights>
<dc:publisher>
<cc:Agent>
<dc:title />
</cc:Agent>
</dc:publisher>
<dc:identifier />
<dc:relation />
<dc:language />
<dc:coverage />
<dc:description />
<dc:contributor>
<cc:Agent>
<dc:title />
</cc:Agent>
</dc:contributor>
</cc:Work>
<cc:License
rdf:about="http://creativecommons.org/licenses/by-sa/4.0/">
<cc:permits
rdf:resource="http://creativecommons.org/ns#Reproduction" />
<cc:permits
rdf:resource="http://creativecommons.org/ns#Distribution" />
<cc:requires
rdf:resource="http://creativecommons.org/ns#Notice" />
<cc:requires
rdf:resource="http://creativecommons.org/ns#Attribution" />
<cc:permits
rdf:resource="http://creativecommons.org/ns#DerivativeWorks" />
<cc:requires
rdf:resource="http://creativecommons.org/ns#ShareAlike" />
</cc:License>
</rdf:RDF>
</metadata>
<g
id="layer1"
style="display:inline"
transform="translate(0,-172)">
<g
id="layer4"
transform="matrix(1.2477821,0,0,1.2477821,-15.858054,-61.000241)">
<rect
style="display:inline;fill:url(#linearGradient32);fill-opacity:1;stroke:#000000;stroke-width:2.91036;stroke-dasharray:none;stroke-opacity:0;enable-background:new"
id="rect1"
width="81.276649"
height="56.210243"
x="23.361675"
y="213.42638"
ry="13.680508" />
<path
style="display:inline;fill:url(#linearGradient23);fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient13);stroke-width:3;stroke-linecap:round;stroke-dasharray:none;stroke-opacity:0;enable-background:new"
d="m 32.848631,215.48926 c 0,0 0.309082,-16.43408 7.036621,-19.01123 6.727539,-2.57715 10.409179,18.9375 10.409179,18.9375"
id="path1" />
<path
style="display:inline;fill:url(#linearGradient29);fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient30);stroke-width:3;stroke-linecap:round;stroke-dasharray:none;stroke-opacity:0;enable-background:new"
d="m 94.2998,215.48954 c 0,0 -0.309082,-16.43408 -7.036621,-19.01123 C 80.53564,193.90116 76.854,215.41581 76.854,215.41581"
id="path1-7" />
<circle
style="display:inline;fill:url(#linearGradient36);fill-opacity:1;stroke:none;stroke-width:3;stroke-linecap:round;stroke-dasharray:none;enable-background:new"
id="path3"
cx="45.129391"
cy="235.05762"
r="5.8994136" />
<circle
style="display:inline;fill:url(#linearGradient37);fill-opacity:1;stroke:none;stroke-width:3;stroke-linecap:round;stroke-dasharray:none;enable-background:new"
id="path3-6"
cx="82.086639"
cy="235.21448"
r="5.8994136" />
<path
style="display:inline;fill:none;stroke:#241f31;stroke-width:3;stroke-linecap:round;stroke-dasharray:none;stroke-opacity:1;enable-background:new"
d="m 32.392577,229.59423 c 0,0 25.649412,1.06983 24.62744,-3.62353"
id="path4" />
<path
style="display:inline;fill:none;stroke:#241f31;stroke-width:2.99972;stroke-linecap:round;stroke-dasharray:none;stroke-opacity:1;enable-background:new"
d="m 94.823791,229.75097 c 0,0 -25.649412,1.06983 -24.62744,-3.62353"
id="path4-5" />
<path
style="display:inline;fill:url(#linearGradient34);fill-opacity:1;stroke:none;stroke-width:3.24899;stroke-linecap:round;stroke-dasharray:none;enable-background:new"
d="m 23.60202,258.09454 c 1.243956,12.49842 3.858832,15.67625 12.858734,14.90301 8.999901,-0.77326 9.384671,10.27444 19.65082,5.37353 10.266149,-4.90093 14.08815,-3.56159 14.703102,-3.63198 0.614951,-0.0704 15.397528,10.41294 20.253656,3.89337 4.856129,-6.51955 7.043107,-1.94985 9.232508,-4.41272 2.1894,-2.46288 4.89442,-9.45966 3.87579,-16.22158"
id="path8-0" />
<path
style="display:inline;fill:#f6f5f4;fill-opacity:1;stroke:none;stroke-width:2.99884;stroke-linecap:round;stroke-dasharray:none;enable-background:new"
d="m 23.389225,256.86198 c 1.25195,10.5799 3.883629,13.26993 12.941363,12.61538 9.057733,-0.65456 9.444975,8.6973 19.777093,4.54868 10.332117,-4.14862 14.178679,-3.01487 14.797582,-3.07446 0.618903,-0.0596 15.49647,8.81454 20.383803,3.29574 4.887333,-5.5188 7.088365,-1.65056 9.291844,-3.73537 2.20346,-2.08482 4.92586,-8.00759 3.90069,-13.73155"
id="path8" />
<path
style="display:inline;fill:none;stroke:#241f31;stroke-width:2.99972;stroke-linecap:round;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1;enable-background:new"
d="m 63.095594,248.37344 c 0,0 10.15573,26.47309 21.090617,10.976"
id="path2" />
<path
style="display:inline;fill:none;stroke:#241f31;stroke-width:2.99972;stroke-linecap:round;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1;enable-background:new"
d="m 64.150661,248.40941 c 0,0 -10.15573,26.47309 -21.090617,10.976"
id="path2-6" />
<ellipse
style="display:inline;fill:#000000;stroke:none;stroke-width:1.29382;stroke-linecap:round;stroke-dasharray:none;enable-background:new"
id="path5"
cx="63.564262"
cy="248.16406"
rx="4.5169015"
ry="4.2407222" />
</g>
</g>
</svg>


View File

@@ -0,0 +1,104 @@
#
# The Python Imaging Library.
# $Id$
#
# image enhancement classes
#
# For a background, see "Image Processing By Interpolation and
# Extrapolation", Paul Haeberli and Douglas Voorhies. Available
# at http://www.graficaobscura.com/interp/index.html
#
# History:
# 1996-03-23 fl Created
# 2009-06-16 fl Fixed mean calculation
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from . import Image, ImageFilter, ImageStat
class _Enhance:
def enhance(self, factor):
"""
Returns an enhanced image.
:param factor: A floating point value controlling the enhancement.
Factor 1.0 always returns a copy of the original image,
lower factors mean less color (brightness, contrast,
etc), and higher values more. There are no restrictions
on this value.
:rtype: :py:class:`~PIL.Image.Image`
"""
return Image.blend(self.degenerate, self.image, factor)
class Color(_Enhance):
"""Adjust image color balance.
This class can be used to adjust the colour balance of an image, in
a manner similar to the controls on a colour TV set. An enhancement
factor of 0.0 gives a black and white image. A factor of 1.0 gives
the original image.
"""
def __init__(self, image):
self.image = image
self.intermediate_mode = "L"
if "A" in image.getbands():
self.intermediate_mode = "LA"
self.degenerate = image.convert(self.intermediate_mode).convert(image.mode)
class Contrast(_Enhance):
"""Adjust image contrast.
This class can be used to control the contrast of an image, similar
to the contrast control on a TV set. An enhancement factor of 0.0
gives a solid gray image. A factor of 1.0 gives the original image.
"""
def __init__(self, image):
self.image = image
mean = int(ImageStat.Stat(image.convert("L")).mean[0] + 0.5)
self.degenerate = Image.new("L", image.size, mean).convert(image.mode)
if "A" in image.getbands():
self.degenerate.putalpha(image.getchannel("A"))
class Brightness(_Enhance):
"""Adjust image brightness.
This class can be used to control the brightness of an image. An
enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the
original image.
"""
def __init__(self, image):
self.image = image
self.degenerate = Image.new(image.mode, image.size, 0)
if "A" in image.getbands():
self.degenerate.putalpha(image.getchannel("A"))
class Sharpness(_Enhance):
"""Adjust image sharpness.
This class can be used to adjust the sharpness of an image. An
enhancement factor of 0.0 gives a blurred image, a factor of 1.0 gives the
original image, and a factor of 2.0 gives a sharpened image.
"""
def __init__(self, image):
self.image = image
self.degenerate = image.filter(ImageFilter.SMOOTH)
if "A" in image.getbands():
self.degenerate.putalpha(image.getchannel("A"))
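
All four enhancers share one interface: construct from an image, then call enhance() with a factor, where 1.0 reproduces the original. A brief sketch:

from PIL import Image, ImageEnhance

im = Image.new("RGB", (64, 64), (120, 80, 40))
contrast = ImageEnhance.Contrast(im)
punchier = contrast.enhance(1.5)  # factors above 1.0 increase contrast
gray = contrast.enhance(0.0)      # 0.0 degenerates to a solid gray image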

View File

@@ -0,0 +1,159 @@
"""The match_hostname() function from Python 3.5, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
# It is modified to remove commonName support.
from __future__ import annotations
import ipaddress
import re
import typing
from ipaddress import IPv4Address, IPv6Address
if typing.TYPE_CHECKING:
from .ssl_ import _TYPE_PEER_CERT_RET_DICT
__version__ = "3.5.0.1"
class CertificateError(ValueError):
pass
def _dnsname_match(
dn: typing.Any, hostname: str, max_wildcards: int = 1
) -> typing.Match[str] | None | bool:
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r".")
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count("*")
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn)
)
# speed up common case w/o wildcards
if not wildcards:
return bool(dn.lower() == hostname.lower())
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == "*":
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append("[^.]+")
elif leftmost.startswith("xn--") or hostname.startswith("xn--"):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r"\*", "[^.]*"))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE)
return pat.match(hostname)
def _ipaddress_match(ipname: str, host_ip: IPv4Address | IPv6Address) -> bool:
"""Exact matching of IP addresses.
RFC 9110 section 4.3.5: "A reference identity of IP-ID contains the decoded
bytes of the IP address. An IP version 4 address is 4 octets, and an IP
version 6 address is 16 octets. [...] A reference identity of type IP-ID
matches if the address is identical to an iPAddress value of the
subjectAltName extension of the certificate."
"""
# OpenSSL may add a trailing newline to a subjectAltName's IP address
# Divergence from upstream: ipaddress can't handle byte str
ip = ipaddress.ip_address(ipname.rstrip())
return bool(ip.packed == host_ip.packed)
def match_hostname(
cert: _TYPE_PEER_CERT_RET_DICT | None,
hostname: str,
hostname_checks_common_name: bool = False,
) -> None:
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError(
"empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED"
)
try:
# Divergence from upstream: ipaddress can't handle byte str
#
# The ipaddress module shipped with Python < 3.9 does not support
# scoped IPv6 addresses so we unconditionally strip the Zone IDs for
# now. Once we drop support for Python 3.8 we can remove this branch.
if "%" in hostname:
host_ip = ipaddress.ip_address(hostname[: hostname.rfind("%")])
else:
host_ip = ipaddress.ip_address(hostname)
except ValueError:
# Not an IP address (common case)
host_ip = None
dnsnames = []
san: tuple[tuple[str, str], ...] = cert.get("subjectAltName", ())
key: str
value: str
for key, value in san:
if key == "DNS":
if host_ip is None and _dnsname_match(value, hostname):
return
dnsnames.append(value)
elif key == "IP Address":
if host_ip is not None and _ipaddress_match(value, host_ip):
return
dnsnames.append(value)
# We only check 'commonName' if it's enabled and we're not verifying
# an IP address. IP addresses aren't valid within 'commonName'.
if hostname_checks_common_name and host_ip is None and not dnsnames:
for sub in cert.get("subject", ()):
for key, value in sub:
if key == "commonName":
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError(
"hostname %r "
"doesn't match either of %s" % (hostname, ", ".join(map(repr, dnsnames)))
)
elif len(dnsnames) == 1:
raise CertificateError(f"hostname {hostname!r} doesn't match {dnsnames[0]!r}")
else:
raise CertificateError("no appropriate subjectAltName fields were found")

View File

@@ -0,0 +1,148 @@

#
# The Python Imaging Library
# $Id$
#
# FITS file handling
#
# Copyright (c) 1998-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import gzip
import math
from . import Image, ImageFile
def _accept(prefix: bytes) -> bool:
return prefix[:6] == b"SIMPLE"
class FitsImageFile(ImageFile.ImageFile):
format = "FITS"
format_description = "FITS"
def _open(self) -> None:
assert self.fp is not None
headers: dict[bytes, bytes] = {}
header_in_progress = False
decoder_name = ""
while True:
header = self.fp.read(80)
if not header:
msg = "Truncated FITS file"
raise OSError(msg)
keyword = header[:8].strip()
if keyword in (b"SIMPLE", b"XTENSION"):
header_in_progress = True
elif headers and not header_in_progress:
# This is now a data unit
break
elif keyword == b"END":
# Seek to the end of the header unit
self.fp.seek(math.ceil(self.fp.tell() / 2880) * 2880)
if not decoder_name:
decoder_name, offset, args = self._parse_headers(headers)
header_in_progress = False
continue
if decoder_name:
# Keep going to read past the headers
continue
value = header[8:].split(b"/")[0].strip()
if value.startswith(b"="):
value = value[1:].strip()
if not headers and (not _accept(keyword) or value != b"T"):
msg = "Not a FITS file"
raise SyntaxError(msg)
headers[keyword] = value
if not decoder_name:
msg = "No image data"
raise ValueError(msg)
offset += self.fp.tell() - 80
self.tile = [(decoder_name, (0, 0) + self.size, offset, args)]
def _get_size(
self, headers: dict[bytes, bytes], prefix: bytes
) -> tuple[int, int] | None:
naxis = int(headers[prefix + b"NAXIS"])
if naxis == 0:
return None
if naxis == 1:
return 1, int(headers[prefix + b"NAXIS1"])
else:
return int(headers[prefix + b"NAXIS1"]), int(headers[prefix + b"NAXIS2"])
def _parse_headers(
self, headers: dict[bytes, bytes]
) -> tuple[str, int, tuple[str | int, ...]]:
prefix = b""
decoder_name = "raw"
offset = 0
if (
headers.get(b"XTENSION") == b"'BINTABLE'"
and headers.get(b"ZIMAGE") == b"T"
and headers[b"ZCMPTYPE"] == b"'GZIP_1 '"
):
no_prefix_size = self._get_size(headers, prefix) or (0, 0)
number_of_bits = int(headers[b"BITPIX"])
offset = no_prefix_size[0] * no_prefix_size[1] * (number_of_bits // 8)
prefix = b"Z"
decoder_name = "fits_gzip"
size = self._get_size(headers, prefix)
if not size:
return "", 0, ()
self._size = size
number_of_bits = int(headers[prefix + b"BITPIX"])
if number_of_bits == 8:
self._mode = "L"
elif number_of_bits == 16:
self._mode = "I;16"
elif number_of_bits == 32:
self._mode = "I"
elif number_of_bits in (-32, -64):
self._mode = "F"
args = (self.mode, 0, -1) if decoder_name == "raw" else (number_of_bits,)
return decoder_name, offset, args
class FitsGzipDecoder(ImageFile.PyDecoder):
_pulls_fd = True
def decode(self, buffer):
assert self.fd is not None
value = gzip.decompress(self.fd.read())
rows = []
offset = 0
number_of_bits = min(self.args[0] // 8, 4)
for y in range(self.state.ysize):
row = bytearray()
for x in range(self.state.xsize):
row += value[offset + (4 - number_of_bits) : offset + 4]
offset += 4
rows.append(row)
self.set_as_raw(bytes([pixel for row in rows[::-1] for pixel in row]))
return -1, 0
# --------------------------------------------------------------------
# Registry
Image.register_open(FitsImageFile.format, FitsImageFile, _accept)
Image.register_decoder("fits_gzip", FitsGzipDecoder)
Image.register_extensions(FitsImageFile.format, [".fit", ".fits"])
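Once the registry calls above run, Image.open() recognizes FITS data by the b"SIMPLE" magic checked in _accept(). A short sketch, where "sample.fits" is a placeholder path:
from PIL import Image

# "sample.fits" is a hypothetical file; format/size/mode depend on its headers.
with Image.open("sample.fits") as im:
    print(im.format, im.size, im.mode)  # e.g. FITS (300, 300) F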

View File

@@ -0,0 +1,483 @@
from __future__ import annotations
import typing
from collections import OrderedDict
from enum import Enum, auto
from threading import RLock
if typing.TYPE_CHECKING:
# We can only import Protocol if TYPE_CHECKING because it's a development
# dependency, and is not available at runtime.
from typing import Protocol
from typing_extensions import Self
class HasGettableStringKeys(Protocol):
def keys(self) -> typing.Iterator[str]:
...
def __getitem__(self, key: str) -> str:
...
__all__ = ["RecentlyUsedContainer", "HTTPHeaderDict"]
# Key type
_KT = typing.TypeVar("_KT")
# Value type
_VT = typing.TypeVar("_VT")
# Default type
_DT = typing.TypeVar("_DT")
ValidHTTPHeaderSource = typing.Union[
"HTTPHeaderDict",
typing.Mapping[str, str],
typing.Iterable[typing.Tuple[str, str]],
"HasGettableStringKeys",
]
class _Sentinel(Enum):
not_passed = auto()
def ensure_can_construct_http_header_dict(
potential: object,
) -> ValidHTTPHeaderSource | None:
if isinstance(potential, HTTPHeaderDict):
return potential
elif isinstance(potential, typing.Mapping):
# Full runtime checking of the contents of a Mapping is expensive, so for the
# purposes of typechecking, we assume that any Mapping is the right shape.
return typing.cast(typing.Mapping[str, str], potential)
elif isinstance(potential, typing.Iterable):
# Similarly to Mapping, full runtime checking of the contents of an Iterable is
# expensive, so for the purposes of typechecking, we assume that any Iterable
# is the right shape.
return typing.cast(typing.Iterable[typing.Tuple[str, str]], potential)
elif hasattr(potential, "keys") and hasattr(potential, "__getitem__"):
return typing.cast("HasGettableStringKeys", potential)
else:
return None
class RecentlyUsedContainer(typing.Generic[_KT, _VT], typing.MutableMapping[_KT, _VT]):
"""
Provides a thread-safe dict-like container which maintains up to
``maxsize`` keys while throwing away the least-recently-used keys beyond
``maxsize``.
:param maxsize:
Maximum number of recent elements to retain.
:param dispose_func:
    Callback invoked with each evicted value: every time an item is
    evicted from the container, ``dispose_func(value)`` is called.
"""
_container: typing.OrderedDict[_KT, _VT]
_maxsize: int
dispose_func: typing.Callable[[_VT], None] | None
lock: RLock
def __init__(
self,
maxsize: int = 10,
dispose_func: typing.Callable[[_VT], None] | None = None,
) -> None:
super().__init__()
self._maxsize = maxsize
self.dispose_func = dispose_func
self._container = OrderedDict()
self.lock = RLock()
def __getitem__(self, key: _KT) -> _VT:
# Re-insert the item, moving it to the end of the eviction line.
with self.lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key: _KT, value: _VT) -> None:
evicted_item = None
with self.lock:
# Possibly evict the existing value of 'key'
try:
# If the key exists, we'll overwrite it, which won't change the
# size of the pool. Because accessing a key should move it to
# the end of the eviction line, we pop it out first.
evicted_item = key, self._container.pop(key)
self._container[key] = value
except KeyError:
# When the key does not exist, we insert the value first so that
# evicting works in all cases, including when self._maxsize is 0
self._container[key] = value
if len(self._container) > self._maxsize:
# If we didn't evict an existing value, and we've hit our maximum
# size, then we have to evict the least recently used item from
# the beginning of the container.
evicted_item = self._container.popitem(last=False)
# After releasing the lock on the pool, dispose of any evicted value.
if evicted_item is not None and self.dispose_func:
_, evicted_value = evicted_item
self.dispose_func(evicted_value)
def __delitem__(self, key: _KT) -> None:
with self.lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self) -> int:
with self.lock:
return len(self._container)
def __iter__(self) -> typing.NoReturn:
raise NotImplementedError(
"Iteration over this class is unlikely to be threadsafe."
)
def clear(self) -> None:
with self.lock:
# Copy pointers to all values, then wipe the mapping
values = list(self._container.values())
self._container.clear()
if self.dispose_func:
for value in values:
self.dispose_func(value)
def keys(self) -> set[_KT]: # type: ignore[override]
with self.lock:
return set(self._container.keys())
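A short sketch of the semantics documented above: reads refresh a key's position, inserts beyond maxsize evict the least recently used key, and dispose_func receives each evicted value:
evicted = []
lru = RecentlyUsedContainer(maxsize=2, dispose_func=evicted.append)
lru["a"] = 1
lru["b"] = 2
_ = lru["a"]         # touching "a" moves it to the back of the eviction line
lru["c"] = 3         # evicts "b", the least recently used key
assert evicted == [2] and lru.keys() == {"a", "c"}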
class HTTPHeaderDictItemView(typing.Set[typing.Tuple[str, str]]):
"""
HTTPHeaderDict is unusual for a Mapping[str, str] in that it has two modes of
address.
If we directly try to get an item with a particular name, we will get a string
back that is the concatenated version of all the values:
>>> d['X-Header-Name']
'Value1, Value2, Value3'
However, if we iterate over an HTTPHeaderDict's items, we will optionally combine
these values based on whether combine=True was called when building up the dictionary
>>> d = HTTPHeaderDict({"A": "1", "B": "foo"})
>>> d.add("A", "2", combine=True)
>>> d.add("B", "bar")
>>> list(d.items())
[
('A', '1, 2'),
('B', 'foo'),
('B', 'bar'),
]
This class conforms to the interface required by the MutableMapping ABC while
also giving us the nonstandard iteration behavior we want; items with duplicate
keys, ordered by time of first insertion.
"""
_headers: HTTPHeaderDict
def __init__(self, headers: HTTPHeaderDict) -> None:
self._headers = headers
def __len__(self) -> int:
return len(list(self._headers.iteritems()))
def __iter__(self) -> typing.Iterator[tuple[str, str]]:
return self._headers.iteritems()
def __contains__(self, item: object) -> bool:
if isinstance(item, tuple) and len(item) == 2:
passed_key, passed_val = item
if isinstance(passed_key, str) and isinstance(passed_val, str):
return self._headers._has_value_for_header(passed_key, passed_val)
return False
class HTTPHeaderDict(typing.MutableMapping[str, str]):
"""
A ``dict`` like container for storing HTTP Headers.
:param headers:
    An iterable of field-value pairs. Must not contain multiple field names
    when compared case-insensitively.
:param kwargs:
    Additional field-value pairs to pass in to ``dict.update``.
Field names are stored and compared case-insensitively in compliance with
RFC 7230. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
>>> headers = HTTPHeaderDict()
>>> headers.add('Set-Cookie', 'foo=bar')
>>> headers.add('set-cookie', 'baz=quxx')
>>> headers['content-length'] = '7'
>>> headers['SET-cookie']
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
"""
_container: typing.MutableMapping[str, list[str]]
def __init__(self, headers: ValidHTTPHeaderSource | None = None, **kwargs: str):
super().__init__()
self._container = {} # 'dict' is insert-ordered
if headers is not None:
if isinstance(headers, HTTPHeaderDict):
self._copy_from(headers)
else:
self.extend(headers)
if kwargs:
self.extend(kwargs)
def __setitem__(self, key: str, val: str) -> None:
# avoid a bytes/str comparison by decoding before httplib
if isinstance(key, bytes):
key = key.decode("latin-1")
self._container[key.lower()] = [key, val]
def __getitem__(self, key: str) -> str:
val = self._container[key.lower()]
return ", ".join(val[1:])
def __delitem__(self, key: str) -> None:
del self._container[key.lower()]
def __contains__(self, key: object) -> bool:
if isinstance(key, str):
return key.lower() in self._container
return False
def setdefault(self, key: str, default: str = "") -> str:
return super().setdefault(key, default)
def __eq__(self, other: object) -> bool:
maybe_constructable = ensure_can_construct_http_header_dict(other)
if maybe_constructable is None:
return False
else:
other_as_http_header_dict = type(self)(maybe_constructable)
return {k.lower(): v for k, v in self.itermerged()} == {
k.lower(): v for k, v in other_as_http_header_dict.itermerged()
}
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __len__(self) -> int:
return len(self._container)
def __iter__(self) -> typing.Iterator[str]:
# Only provide the originally cased names
for vals in self._container.values():
yield vals[0]
def discard(self, key: str) -> None:
try:
del self[key]
except KeyError:
pass
def add(self, key: str, val: str, *, combine: bool = False) -> None:
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
If this is called with combine=True, instead of adding a new header value
as a distinct item during iteration, this will instead append the value to
any existing header value with a comma. If no existing header value exists
for the key, then the value will simply be added, ignoring the combine parameter.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz'
>>> list(headers.items())
[('foo', 'bar'), ('foo', 'baz')]
>>> headers.add('foo', 'quz', combine=True)
>>> list(headers.items())
[('foo', 'bar, baz, quz')]
"""
# avoid a bytes/str comparison by decoding before httplib
if isinstance(key, bytes):
key = key.decode("latin-1")
key_lower = key.lower()
new_vals = [key, val]
# Keep the common case aka no item present as fast as possible
vals = self._container.setdefault(key_lower, new_vals)
if new_vals is not vals:
# if there are values here, then there is at least the initial
# key/value pair
assert len(vals) >= 2
if combine:
vals[-1] = vals[-1] + ", " + val
else:
vals.append(val)
def extend(self, *args: ValidHTTPHeaderSource, **kwargs: str) -> None:
"""Generic import function for any type of header-like object.
Adapted version of MutableMapping.update in order to insert items
with self.add instead of self.__setitem__
"""
if len(args) > 1:
raise TypeError(
f"extend() takes at most 1 positional arguments ({len(args)} given)"
)
other = args[0] if len(args) >= 1 else ()
if isinstance(other, HTTPHeaderDict):
for key, val in other.iteritems():
self.add(key, val)
elif isinstance(other, typing.Mapping):
for key, val in other.items():
self.add(key, val)
elif isinstance(other, typing.Iterable):
other = typing.cast(typing.Iterable[typing.Tuple[str, str]], other)
for key, value in other:
self.add(key, value)
elif hasattr(other, "keys") and hasattr(other, "__getitem__"):
# THIS IS NOT A TYPESAFE BRANCH
# In this branch, the object has a `keys` attr but is not a Mapping or any of
# the other types indicated in the method signature. We do some stuff with
# it as though it partially implements the Mapping interface, but we're not
# doing that stuff safely AT ALL.
for key in other.keys():
self.add(key, other[key])
for key, value in kwargs.items():
self.add(key, value)
@typing.overload
def getlist(self, key: str) -> list[str]:
...
@typing.overload
def getlist(self, key: str, default: _DT) -> list[str] | _DT:
...
def getlist(
self, key: str, default: _Sentinel | _DT = _Sentinel.not_passed
) -> list[str] | _DT:
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
try:
vals = self._container[key.lower()]
except KeyError:
if default is _Sentinel.not_passed:
# _DT is unbound; empty list is instance of List[str]
return []
# _DT is bound; default is instance of _DT
return default
else:
# _DT may or may not be bound; vals[1:] is instance of List[str], which
# meets our external interface requirement of `Union[List[str], _DT]`.
return vals[1:]
def _prepare_for_method_change(self) -> Self:
"""
Remove content-specific header fields before changing the request
method to GET or HEAD according to RFC 9110, Section 15.4.
"""
content_specific_headers = [
"Content-Encoding",
"Content-Language",
"Content-Location",
"Content-Type",
"Content-Length",
"Digest",
"Last-Modified",
]
for header in content_specific_headers:
self.discard(header)
return self
# Backwards compatibility for httplib
getheaders = getlist
getallmatchingheaders = getlist
iget = getlist
# Backwards compatibility for http.cookiejar
get_all = getlist
def __repr__(self) -> str:
return f"{type(self).__name__}({dict(self.itermerged())})"
def _copy_from(self, other: HTTPHeaderDict) -> None:
for key in other:
val = other.getlist(key)
self._container[key.lower()] = [key, *val]
def copy(self) -> HTTPHeaderDict:
clone = type(self)()
clone._copy_from(self)
return clone
def iteritems(self) -> typing.Iterator[tuple[str, str]]:
"""Iterate over all header lines, including duplicate ones."""
for key in self:
vals = self._container[key.lower()]
for val in vals[1:]:
yield vals[0], val
def itermerged(self) -> typing.Iterator[tuple[str, str]]:
"""Iterate over all headers, merging duplicate ones together."""
for key in self:
val = self._container[key.lower()]
yield val[0], ", ".join(val[1:])
def items(self) -> HTTPHeaderDictItemView: # type: ignore[override]
return HTTPHeaderDictItemView(self)
def _has_value_for_header(self, header_name: str, potential_value: str) -> bool:
if header_name in self:
return potential_value in self._container[header_name.lower()][1:]
return False
def __ior__(self, other: object) -> HTTPHeaderDict:
# Supports extending a header dict in-place using operator |=
# combining items with add instead of __setitem__
maybe_constructable = ensure_can_construct_http_header_dict(other)
if maybe_constructable is None:
return NotImplemented
self.extend(maybe_constructable)
return self
def __or__(self, other: object) -> HTTPHeaderDict:
# Supports merging header dicts using operator |
# combining items with add instead of __setitem__
maybe_constructable = ensure_can_construct_http_header_dict(other)
if maybe_constructable is None:
return NotImplemented
result = self.copy()
result.extend(maybe_constructable)
return result
def __ror__(self, other: object) -> HTTPHeaderDict:
# Supports merging header dicts using operator | when other is on left side
# combining items with add instead of __setitem__
maybe_constructable = ensure_can_construct_http_header_dict(other)
if maybe_constructable is None:
return NotImplemented
result = type(self)(maybe_constructable)
result.extend(self)
return result
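A sketch tying the operators together: because |, |=, and the reflected variant all funnel through extend() and add(), duplicate field names accumulate instead of overwriting:
h = HTTPHeaderDict({"Accept": "text/html"})
h |= {"Accept": "application/json"}      # __ior__ -> extend -> add
print(h["accept"])                       # 'text/html, application/json'
merged = h | [("X-Trace", "abc")]        # __or__ copies, then extends
print(list(merged.items()))
# [('Accept', 'text/html'), ('Accept', 'application/json'), ('X-Trace', 'abc')]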

View File

@@ -0,0 +1,107 @@
#
# The Python Imaging Library.
# $Id$
#
# Microsoft Image Composer support for PIL
#
# Notes:
# uses TiffImagePlugin.py to read the actual image streams
#
# History:
# 97-01-20 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import olefile
from . import Image, TiffImagePlugin
#
# --------------------------------------------------------------------
def _accept(prefix):
return prefix[:8] == olefile.MAGIC
##
# Image plugin for Microsoft's Image Composer file format.
class MicImageFile(TiffImagePlugin.TiffImageFile):
format = "MIC"
format_description = "Microsoft Image Composer"
_close_exclusive_fp_after_loading = False
def _open(self):
# read the OLE directory and see if this is a likely
# to be a Microsoft Image Composer file
try:
self.ole = olefile.OleFileIO(self.fp)
except OSError as e:
msg = "not an MIC file; invalid OLE file"
raise SyntaxError(msg) from e
# find ACI subfiles with Image members (maybe not the
# best way to identify MIC files, but what the... ;-)
self.images = [
path
for path in self.ole.listdir()
if path[1:] and path[0][-4:] == ".ACI" and path[1] == "Image"
]
# if we didn't find any images, this is probably not
# an MIC file.
if not self.images:
msg = "not an MIC file; no image entries"
raise SyntaxError(msg)
self.frame = None
self._n_frames = len(self.images)
self.is_animated = self._n_frames > 1
self.__fp = self.fp
self.seek(0)
def seek(self, frame):
if not self._seek_check(frame):
return
try:
filename = self.images[frame]
except IndexError as e:
msg = "no such frame"
raise EOFError(msg) from e
self.fp = self.ole.openstream(filename)
TiffImagePlugin.TiffImageFile._open(self)
self.frame = frame
def tell(self):
return self.frame
def close(self):
self.__fp.close()
self.ole.close()
super().close()
def __exit__(self, *args):
self.__fp.close()
self.ole.close()
super().__exit__()
#
# --------------------------------------------------------------------
Image.register_open(MicImageFile.format, MicImageFile, _accept)
Image.register_extension(MicImageFile.format, ".mic")

View File

@@ -0,0 +1,339 @@
from __future__ import annotations
import collections
import os
import sys
import warnings
import PIL
from . import Image
modules = {
"pil": ("PIL._imaging", "PILLOW_VERSION"),
"tkinter": ("PIL._tkinter_finder", "tk_version"),
"freetype2": ("PIL._imagingft", "freetype2_version"),
"littlecms2": ("PIL._imagingcms", "littlecms_version"),
"webp": ("PIL._webp", "webpdecoder_version"),
}
def check_module(feature):
"""
Checks if a module is available.
:param feature: The module to check for.
:returns: ``True`` if available, ``False`` otherwise.
:raises ValueError: If the module is not defined in this version of Pillow.
"""
if feature not in modules:
msg = f"Unknown module {feature}"
raise ValueError(msg)
module, ver = modules[feature]
try:
__import__(module)
return True
except ModuleNotFoundError:
return False
except ImportError as ex:
warnings.warn(str(ex))
return False
def version_module(feature):
"""
:param feature: The module to check for.
:returns:
The loaded version number as a string, or ``None`` if unknown or not available.
:raises ValueError: If the module is not defined in this version of Pillow.
"""
if not check_module(feature):
return None
module, ver = modules[feature]
if ver is None:
return None
return getattr(__import__(module, fromlist=[ver]), ver)
def get_supported_modules():
"""
:returns: A list of all supported modules.
"""
return [f for f in modules if check_module(f)]
codecs = {
"jpg": ("jpeg", "jpeglib"),
"jpg_2000": ("jpeg2k", "jp2klib"),
"zlib": ("zip", "zlib"),
"libtiff": ("libtiff", "libtiff"),
}
def check_codec(feature):
"""
Checks if a codec is available.
:param feature: The codec to check for.
:returns: ``True`` if available, ``False`` otherwise.
:raises ValueError: If the codec is not defined in this version of Pillow.
"""
if feature not in codecs:
msg = f"Unknown codec {feature}"
raise ValueError(msg)
codec, lib = codecs[feature]
return codec + "_encoder" in dir(Image.core)
def version_codec(feature):
"""
:param feature: The codec to check for.
:returns:
The version number as a string, or ``None`` if not available.
Checked at compile time for ``jpg``, run-time otherwise.
:raises ValueError: If the codec is not defined in this version of Pillow.
"""
if not check_codec(feature):
return None
codec, lib = codecs[feature]
version = getattr(Image.core, lib + "_version")
if feature == "libtiff":
return version.split("\n")[0].split("Version ")[1]
return version
def get_supported_codecs():
"""
:returns: A list of all supported codecs.
"""
return [f for f in codecs if check_codec(f)]
features = {
"webp_anim": ("PIL._webp", "HAVE_WEBPANIM", None),
"webp_mux": ("PIL._webp", "HAVE_WEBPMUX", None),
"transp_webp": ("PIL._webp", "HAVE_TRANSPARENCY", None),
"raqm": ("PIL._imagingft", "HAVE_RAQM", "raqm_version"),
"fribidi": ("PIL._imagingft", "HAVE_FRIBIDI", "fribidi_version"),
"harfbuzz": ("PIL._imagingft", "HAVE_HARFBUZZ", "harfbuzz_version"),
"libjpeg_turbo": ("PIL._imaging", "HAVE_LIBJPEGTURBO", "libjpeg_turbo_version"),
"libimagequant": ("PIL._imaging", "HAVE_LIBIMAGEQUANT", "imagequant_version"),
"xcb": ("PIL._imaging", "HAVE_XCB", None),
}
def check_feature(feature):
"""
Checks if a feature is available.
:param feature: The feature to check for.
:returns: ``True`` if available, ``False`` if unavailable, ``None`` if unknown.
:raises ValueError: If the feature is not defined in this version of Pillow.
"""
if feature not in features:
msg = f"Unknown feature {feature}"
raise ValueError(msg)
module, flag, ver = features[feature]
try:
imported_module = __import__(module, fromlist=["PIL"])
return getattr(imported_module, flag)
except ModuleNotFoundError:
return None
except ImportError as ex:
warnings.warn(str(ex))
return None
def version_feature(feature):
"""
:param feature: The feature to check for.
:returns: The version number as a string, or ``None`` if not available.
:raises ValueError: If the feature is not defined in this version of Pillow.
"""
if not check_feature(feature):
return None
module, flag, ver = features[feature]
if ver is None:
return None
return getattr(__import__(module, fromlist=[ver]), ver)
def get_supported_features():
"""
:returns: A list of all supported features.
"""
return [f for f in features if check_feature(f)]
def check(feature):
"""
:param feature: A module, codec, or feature name.
:returns:
``True`` if the module, codec, or feature is available,
``False`` or ``None`` otherwise.
"""
if feature in modules:
return check_module(feature)
if feature in codecs:
return check_codec(feature)
if feature in features:
return check_feature(feature)
warnings.warn(f"Unknown feature '{feature}'.", stacklevel=2)
return False
def version(feature):
"""
:param feature:
The module, codec, or feature to check for.
:returns:
The version number as a string, or ``None`` if unknown or not available.
"""
if feature in modules:
return version_module(feature)
if feature in codecs:
return version_codec(feature)
if feature in features:
return version_feature(feature)
return None
def get_supported():
"""
:returns: A list of all supported modules, features, and codecs.
"""
ret = get_supported_modules()
ret.extend(get_supported_features())
ret.extend(get_supported_codecs())
return ret
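A usage sketch of the three-tier query API above; modules, codecs, and features all answer through check() and version():
from PIL import features

print(features.check("webp"))         # True if the WebP module was built in
print(features.version("freetype2"))  # e.g. '2.13.2', or None if unavailable
print(features.get_supported())       # all available modules, features, codecs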
def pilinfo(out=None, supported_formats=True):
"""
Prints information about this installation of Pillow.
This function can be called with ``python3 -m PIL``.
It can also be called with ``python3 -m PIL.report`` or ``python3 -m PIL --report``
to have "supported_formats" set to ``False``, omitting the list of all supported
image file formats.
:param out:
The output stream to print to. Defaults to ``sys.stdout`` if ``None``.
:param supported_formats:
If ``True``, a list of all supported image file formats will be printed.
"""
if out is None:
out = sys.stdout
Image.init()
print("-" * 68, file=out)
print(f"Pillow {PIL.__version__}", file=out)
py_version_lines = sys.version.splitlines()
print(f"Python {py_version_lines[0].strip()}", file=out)
for py_version_line in py_version_lines[1:]:
    print(f"       {py_version_line.strip()}", file=out)
print("-" * 68, file=out)
print(f"Python executable is {sys.executable or 'unknown'}", file=out)
if sys.prefix != sys.base_prefix:
print(f"Environment Python files loaded from {sys.prefix}", file=out)
print(f"System Python files loaded from {sys.base_prefix}", file=out)
print("-" * 68, file=out)
print(
f"Python Pillow modules loaded from {os.path.dirname(Image.__file__)}",
file=out,
)
print(
f"Binary Pillow modules loaded from {os.path.dirname(Image.core.__file__)}",
file=out,
)
print("-" * 68, file=out)
for name, feature in [
("pil", "PIL CORE"),
("tkinter", "TKINTER"),
("freetype2", "FREETYPE2"),
("littlecms2", "LITTLECMS2"),
("webp", "WEBP"),
("transp_webp", "WEBP Transparency"),
("webp_mux", "WEBPMUX"),
("webp_anim", "WEBP Animation"),
("jpg", "JPEG"),
("jpg_2000", "OPENJPEG (JPEG2000)"),
("zlib", "ZLIB (PNG/ZIP)"),
("libtiff", "LIBTIFF"),
("raqm", "RAQM (Bidirectional Text)"),
("libimagequant", "LIBIMAGEQUANT (Quantization method)"),
("xcb", "XCB (X protocol)"),
]:
if check(name):
if name == "jpg" and check_feature("libjpeg_turbo"):
v = "libjpeg-turbo " + version_feature("libjpeg_turbo")
else:
v = version(name)
if v is not None:
version_static = name in ("pil", "jpg")
if name == "littlecms2":
# this check is also in src/_imagingcms.c:setup_module()
version_static = tuple(int(x) for x in v.split(".")) < (2, 7)
t = "compiled for" if version_static else "loaded"
if name == "raqm":
for f in ("fribidi", "harfbuzz"):
v2 = version_feature(f)
if v2 is not None:
v += f", {f} {v2}"
print("---", feature, "support ok,", t, v, file=out)
else:
print("---", feature, "support ok", file=out)
else:
print("***", feature, "support not installed", file=out)
print("-" * 68, file=out)
if supported_formats:
extensions = collections.defaultdict(list)
for ext, i in Image.EXTENSION.items():
extensions[i].append(ext)
for i in sorted(Image.ID):
line = f"{i}"
if i in Image.MIME:
line = f"{line} {Image.MIME[i]}"
print(line, file=out)
if i in extensions:
print(
"Extensions: {}".format(", ".join(sorted(extensions[i]))), file=out
)
features = []
if i in Image.OPEN:
features.append("open")
if i in Image.SAVE:
features.append("save")
if i in Image.SAVE_ALL:
features.append("save_all")
if i in Image.DECODERS:
features.append("decode")
if i in Image.ENCODERS:
features.append("encode")
print("Features: {}".format(", ".join(features)), file=out)
print("-" * 68, file=out)

View File

@@ -0,0 +1,211 @@
"""
Python HTTP library with thread-safe connection pooling, file post support, a user-friendly API, and more
"""
from __future__ import annotations
# Set default logging handler to avoid "No handler found" warnings.
import logging
import sys
import typing
import warnings
from logging import NullHandler
from . import exceptions
from ._base_connection import _TYPE_BODY
from ._collections import HTTPHeaderDict
from ._version import __version__
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
from .filepost import _TYPE_FIELDS, encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import BaseHTTPResponse, HTTPResponse
from .util.request import make_headers
from .util.retry import Retry
from .util.timeout import Timeout
# Ensure that Python is compiled with OpenSSL 1.1.1+
# If the 'ssl' module isn't available at all that's
# fine, we only care if the module is available.
try:
import ssl
except ImportError:
pass
else:
if not ssl.OPENSSL_VERSION.startswith("OpenSSL "): # Defensive:
warnings.warn(
"urllib3 v2 only supports OpenSSL 1.1.1+, currently "
f"the 'ssl' module is compiled with {ssl.OPENSSL_VERSION!r}. "
"See: https://github.com/urllib3/urllib3/issues/3020",
exceptions.NotOpenSSLWarning,
)
elif ssl.OPENSSL_VERSION_INFO < (1, 1, 1): # Defensive:
raise ImportError(
"urllib3 v2 only supports OpenSSL 1.1.1+, currently "
f"the 'ssl' module is compiled with {ssl.OPENSSL_VERSION!r}. "
"See: https://github.com/urllib3/urllib3/issues/2168"
)
__author__ = "Andrey Petrov (andrey.petrov@shazow.net)"
__license__ = "MIT"
__version__ = __version__
__all__ = (
"HTTPConnectionPool",
"HTTPHeaderDict",
"HTTPSConnectionPool",
"PoolManager",
"ProxyManager",
"HTTPResponse",
"Retry",
"Timeout",
"add_stderr_logger",
"connection_from_url",
"disable_warnings",
"encode_multipart_formdata",
"make_headers",
"proxy_from_url",
"request",
"BaseHTTPResponse",
)
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(
level: int = logging.DEBUG,
) -> logging.StreamHandler[typing.TextIO]:
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug("Added a stderr logging handler to logger: %s", __name__)
return handler
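Usage sketch, assuming the package is importable top-level as urllib3 (not vendored): one call wires the package logger to stderr while debugging, and the returned handler lets you detach it later:
import logging
import urllib3

handler = urllib3.add_stderr_logger(level=logging.DEBUG)
# ... make requests ...
logging.getLogger("urllib3").removeHandler(handler)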
# ... Clean up.
del NullHandler
# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter("always", exceptions.SecurityWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True)
def disable_warnings(category: type[Warning] = exceptions.HTTPWarning) -> None:
"""
Helper for quickly disabling all urllib3 warnings.
"""
warnings.simplefilter("ignore", category)
_DEFAULT_POOL = PoolManager()
def request(
method: str,
url: str,
*,
body: _TYPE_BODY | None = None,
fields: _TYPE_FIELDS | None = None,
headers: typing.Mapping[str, str] | None = None,
preload_content: bool | None = True,
decode_content: bool | None = True,
redirect: bool | None = True,
retries: Retry | bool | int | None = None,
timeout: Timeout | float | int | None = 3,
json: typing.Any | None = None,
) -> BaseHTTPResponse:
"""
A convenience, top-level request method. It uses a module-global ``PoolManager`` instance.
Therefore, its side effects could be shared across dependencies relying on it.
To avoid side effects create a new ``PoolManager`` instance and use it instead.
The method does not accept low-level ``**urlopen_kw`` keyword arguments.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param url:
The URL to perform the request on.
:param body:
Data to send in the request body, either :class:`str`, :class:`bytes`,
an iterable of :class:`str`/:class:`bytes`, or a file-like object.
:param fields:
Data to encode and send in the request body.
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc.
:param bool preload_content:
If True, the response's body will be preloaded into memory.
:param bool decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
If ``None`` (default) will retry 3 times, see ``Retry.DEFAULT``. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param json:
Data to encode and send as UTF-8 encoded JSON in the request body.
The ``"Content-Type"`` header will be set to ``"application/json"``
unless specified otherwise.
"""
return _DEFAULT_POOL.request(
method,
url,
body=body,
fields=fields,
headers=headers,
preload_content=preload_content,
decode_content=decode_content,
redirect=redirect,
retries=retries,
timeout=timeout,
json=json,
)
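A usage sketch of the convenience method above; httpbin.org is just a placeholder endpoint:
import urllib3

resp = urllib3.request("GET", "https://httpbin.org/get", timeout=3)
print(resp.status, resp.headers.get("Content-Type"))

resp = urllib3.request("POST", "https://httpbin.org/post", json={"tag": "latest"})
print(resp.json()["json"])  # httpbin echoes the JSON body back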
if sys.platform == "emscripten":
from .contrib.emscripten import inject_into_urllib3  # noqa: F401
inject_into_urllib3()

View File

@@ -0,0 +1,580 @@
# window.py
#
# Copyright 2024 Unknown
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# SPDX-License-Identifier: GPL-3.0-or-later
import gi
gi.require_version('GtkSource', '5')
gi.require_version('GdkPixbuf', '2.0')
from gi.repository import Adw, Gtk, Gdk, GLib, GtkSource, Gio, GdkPixbuf
import json, requests, threading, os, re, base64
from io import BytesIO
from PIL import Image
from datetime import datetime
from .connection_handler import simple_get, simple_delete, stream_post, stream_post_fake
from .available_models import available_models
@Gtk.Template(resource_path='/com/jeffser/Alpaca/window.ui')
class AlpacaWindow(Adw.ApplicationWindow):
config_dir = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.var/app/com.jeffser.Alpaca/config"))
__gtype_name__ = 'AlpacaWindow'
#Variables
ollama_url = None
local_models = []
#In the future I will add multiple chats; for now I'll save it like this so that past chats don't break later
current_chat_id="0"
chats = {"chats": {"0": {"messages": []}}}
attached_image = {"path": None, "base64": None}
#Elements
bot_message : Gtk.TextBuffer = None
bot_message_box : Gtk.Box = None
bot_message_view : Gtk.TextView = None
connection_dialog = Gtk.Template.Child()
connection_carousel = Gtk.Template.Child()
connection_previous_button = Gtk.Template.Child()
connection_next_button = Gtk.Template.Child()
connection_url_entry = Gtk.Template.Child()
main_overlay = Gtk.Template.Child()
pull_overlay = Gtk.Template.Child()
manage_models_overlay = Gtk.Template.Child()
connection_overlay = Gtk.Template.Child()
chat_container = Gtk.Template.Child()
chat_window = Gtk.Template.Child()
message_text_view = Gtk.Template.Child()
send_button = Gtk.Template.Child()
image_button = Gtk.Template.Child()
file_filter_image = Gtk.Template.Child()
model_drop_down = Gtk.Template.Child()
model_string_list = Gtk.Template.Child()
manage_models_button = Gtk.Template.Child()
manage_models_dialog = Gtk.Template.Child()
model_list_box = Gtk.Template.Child()
pull_model_dialog = Gtk.Template.Child()
pull_model_status_page = Gtk.Template.Child()
pull_model_progress_bar = Gtk.Template.Child()
toast_messages = {
"error": [
"An error occurred",
"Failed to connect to server",
"Could not list local models",
"Could not delete model",
"Could not pull model",
"Cannot open image"
],
"info": [
"Please select a model before chatting",
"Conversation cannot be cleared while receiving a message"
],
"good": [
"Model deleted successfully",
"Model pulled successfully"
]
}
def show_toast(self, message_type:str, message_id:int, overlay):
if message_type not in self.toast_messages or message_id >= len(self.toast_messages[message_type]) or message_id < 0:
message_type = "error"
message_id = 0
toast = Adw.Toast(
title=self.toast_messages[message_type][message_id],
timeout=2
)
overlay.add_toast(toast)
def show_message(self, msg:str, bot:bool, footer:str=None, image_base64:str=None):
message_text = Gtk.TextView(
editable=False,
focusable=False,
wrap_mode= Gtk.WrapMode.WORD,
margin_top=12,
margin_bottom=12,
margin_start=12,
margin_end=12,
hexpand=True,
css_classes=["flat"]
)
message_buffer = message_text.get_buffer()
message_buffer.insert(message_buffer.get_end_iter(), msg)
if footer is not None: message_buffer.insert_markup(message_buffer.get_end_iter(), footer, len(footer))
message_box = Gtk.Box(
orientation=1,
css_classes=[] if bot else ["card"]
)
message_text.set_valign(Gtk.Align.CENTER)
self.chat_container.append(message_box)
if image_base64 is not None:
image_data = base64.b64decode(image_base64)
loader = GdkPixbuf.PixbufLoader.new()
loader.write(image_data)
loader.close()
pixbuf = loader.get_pixbuf()
texture = Gdk.Texture.new_for_pixbuf(pixbuf)
image = Gtk.Image.new_from_paintable(texture)
image.set_size_request(360, 360)
image.set_margin_top(10)
image.set_margin_start(10)
image.set_margin_end(10)
image.set_hexpand(False)
image.set_css_classes(["flat"])
message_box.append(image)
message_box.append(message_text)
if bot:
self.bot_message = message_buffer
self.bot_message_view = message_text
self.bot_message_box = message_box
def verify_if_image_can_be_used(self, pspec=None, user_data=None):
if self.model_drop_down.get_selected_item() is None: return True
selected = self.model_drop_down.get_selected_item().get_string().split(":")[0]
if selected in ['llava']:
self.image_button.set_sensitive(True)
return True
else:
self.image_button.set_sensitive(False)
self.image_button.set_css_classes([])
self.image_button.get_child().set_icon_name("image-x-generic-symbolic")
self.attached_image = {"path": None, "base64": None}
return False
def update_list_local_models(self):
self.local_models = []
response = simple_get(self.ollama_url + "/api/tags")
for i in range(self.model_string_list.get_n_items() -1, -1, -1):
self.model_string_list.remove(i)
if response['status'] == 'ok':
for model in json.loads(response['text'])['models']:
self.model_string_list.append(model["name"])
self.local_models.append(model["name"])
self.model_drop_down.set_selected(0)
self.verify_if_image_can_be_used()
return
else:
self.show_connection_dialog(True)
self.show_toast("error", 2, self.connection_overlay)
def verify_connection(self):
response = simple_get(self.ollama_url)
if response['status'] == 'ok':
if "Ollama is running" in response['text']:
with open(os.path.join(self.config_dir, "server.conf"), "w+") as f: f.write(self.ollama_url)
#self.message_text_view.grab_focus_without_selecting()
self.update_list_local_models()
return True
return False
def add_code_blocks(self):
text = self.bot_message.get_text(self.bot_message.get_start_iter(), self.bot_message.get_end_iter(), True)
GLib.idle_add(self.bot_message_view.get_parent().remove, self.bot_message_view)
# Define a regular expression pattern to match code blocks
code_block_pattern = re.compile(r'```(\w+)\n(.*?)\n```', re.DOTALL)
parts = []
pos = 0
for match in code_block_pattern.finditer(text):
start, end = match.span()
if pos < start:
normal_text = text[pos:start]
parts.append({"type": "normal", "text": normal_text.strip()})
language = match.group(1)
code_text = match.group(2)
parts.append({"type": "code", "text": code_text, "language": language})
pos = end
# Extract any remaining normal text after the last code block
if pos < len(text):
normal_text = text[pos:]
if normal_text.strip():
parts.append({"type": "normal", "text": normal_text.strip()})
for part in parts:
if part['type'] == 'normal':
message_text = Gtk.TextView(
editable=False,
focusable=False,
wrap_mode= Gtk.WrapMode.WORD,
margin_top=12,
margin_bottom=12,
margin_start=12,
margin_end=12,
hexpand=True,
css_classes=["flat"]
)
message_buffer = message_text.get_buffer()
if part['text'].split("\n")[-1] == parts[-1]['text'].split("\n")[-1]:
footer = "\n<small>" + part['text'].split('\n')[-1] + "</small>"
part['text'] = '\n'.join(part['text'].split("\n")[:-1])
message_buffer.insert(message_buffer.get_end_iter(), part['text'])
message_buffer.insert_markup(message_buffer.get_end_iter(), footer, len(footer))
else:
message_buffer.insert(message_buffer.get_end_iter(), part['text'])
self.bot_message_box.append(message_text)
else:
language = GtkSource.LanguageManager.get_default().get_language(part['language'])
buffer = GtkSource.Buffer.new_with_language(language)
buffer.set_text(part['text'])
buffer.set_style_scheme(GtkSource.StyleSchemeManager.get_default().get_scheme('classic-dark'))
source_view = GtkSource.View(
auto_indent=True, indent_width=4, buffer=buffer, show_line_numbers=True
)
source_view.get_style_context().add_class("card")
self.bot_message_box.append(source_view)
self.bot_message = None
self.bot_message_box = None
def update_bot_message(self, data):
vadjustment = self.chat_window.get_vadjustment()
if vadjustment.get_value() + 50 >= vadjustment.get_upper() - vadjustment.get_page_size():
GLib.idle_add(vadjustment.set_value, vadjustment.get_upper())
if data['done']:
formated_datetime = datetime.now().strftime("%Y/%m/%d %H:%M")
text = f"\n<small>{data['model']}\t|\t{formated_datetime}</small>"
GLib.idle_add(self.bot_message.insert_markup, self.bot_message.get_end_iter(), text, len(text))
self.save_history()
else:
if self.chats["chats"][self.current_chat_id]["messages"][-1]['role'] == "user":
self.chats["chats"][self.current_chat_id]["messages"].append({
"role": "assistant",
"model": data['model'],
"date": datetime.now().strftime("%Y/%m/%d %H:%M"),
"content": ''
})
GLib.idle_add(self.bot_message.insert, self.bot_message.get_end_iter(), data['message']['content'])
self.chats["chats"][self.current_chat_id]["messages"][-1]['content'] += data['message']['content']
def run_message(self, messages, model):
response = stream_post(f"{self.ollama_url}/api/chat", data=json.dumps({"model": model, "messages": messages}), callback=self.update_bot_message)
GLib.idle_add(self.add_code_blocks)
GLib.idle_add(self.send_button.set_sensitive, True)
GLib.idle_add(self.image_button.set_sensitive, True)
GLib.idle_add(self.image_button.set_css_classes, [])
GLib.idle_add(self.image_button.get_child().set_icon_name, "image-x-generic-symbolic")
self.attached_image = {"path": None, "base64": None}
GLib.idle_add(self.message_text_view.set_sensitive, True)
if response['status'] == 'error':
GLib.idle_add(self.show_toast, 'error', 1, self.connection_overlay)
GLib.idle_add(self.show_connection_dialog, True)
def send_message(self, button):
if not self.message_text_view.get_buffer().get_text(self.message_text_view.get_buffer().get_start_iter(), self.message_text_view.get_buffer().get_end_iter(), False): return
current_model = self.model_drop_down.get_selected_item()
if current_model is None:
self.show_toast("info", 0, self.main_overlay)
return
formated_datetime = datetime.now().strftime("%Y/%m/%d %H:%M")
self.chats["chats"][self.current_chat_id]["messages"].append({
"role": "user",
"model": "User",
"date": formated_datetime,
"content": self.message_text_view.get_buffer().get_text(self.message_text_view.get_buffer().get_start_iter(), self.message_text_view.get_buffer().get_end_iter(), False)
})
data = {
"model": current_model.get_string(),
"messages": self.chats["chats"][self.current_chat_id]["messages"]
}
if self.verify_if_image_can_be_used() and self.attached_image["base64"] is not None:
data["messages"][-1]["images"] = [self.attached_image["base64"]]
self.message_text_view.set_sensitive(False)
self.send_button.set_sensitive(False)
self.image_button.set_sensitive(False)
self.show_message(self.message_text_view.get_buffer().get_text(self.message_text_view.get_buffer().get_start_iter(), self.message_text_view.get_buffer().get_end_iter(), False), False, f"\n\n<small>{formated_datetime}</small>", self.attached_image["base64"])
self.message_text_view.get_buffer().set_text("", 0)
self.show_message("", True)
thread = threading.Thread(target=self.run_message, args=(data['messages'], data['model']))
thread.start()
def delete_model(self, dialog, task, model_name, button):
if dialog.choose_finish(task) == "delete":
response = simple_delete(self.ollama_url + "/api/delete", data={"name": model_name})
if response['status'] == 'ok':
button.set_icon_name("folder-download-symbolic")
button.set_css_classes(["accent", "pull"])
self.show_toast("good", 0, self.manage_models_overlay)
for i in range(self.model_string_list.get_n_items()):
if self.model_string_list.get_string(i) == model_name:
self.model_string_list.remove(i)
self.model_drop_down.set_selected(0)
break
else:
self.show_toast("error", 3, self.connection_overlay)
self.manage_models_dialog.close()
self.show_connection_dialog(True)
def pull_model_update(self, data):
try:
GLib.idle_add(self.pull_model_progress_bar.set_text, data['status'])
if 'completed' in data:
if 'total' in data: GLib.idle_add(self.pull_model_progress_bar.set_fraction, data['completed'] / data['total'])
else: GLib.idle_add(self.pull_model_progress_bar.set_fraction, 1.0)
else:
GLib.idle_add(self.pull_model_progress_bar.set_fraction, 0.0)
except Exception as e: print(e)
def pull_model(self, dialog, task, model_name, button):
if dialog.choose_finish(task) == "pull":
data = {"name":model_name}
GLib.idle_add(self.pull_model_dialog.present, self.manage_models_dialog)
response = stream_post(f"{self.ollama_url}/api/pull", data=json.dumps(data), callback=self.pull_model_update)
GLib.idle_add(self.pull_model_dialog.force_close)
if response['status'] == 'ok':
GLib.idle_add(button.set_icon_name, "user-trash-symbolic")
GLib.idle_add(button.set_css_classes, ["error", "delete"])
GLib.idle_add(self.model_string_list.append, model_name)
GLib.idle_add(self.show_toast, "good", 1, self.manage_models_overlay)
else:
GLib.idle_add(self.show_toast, "error", 4, self.connection_overlay)
GLib.idle_add(self.manage_models_dialog.close)
GLib.idle_add(self.show_connection_dialog, True)
def pull_model_start(self, dialog, task, model_name, button):
self.pull_model_status_page.set_description(model_name)
thread = threading.Thread(target=self.pull_model, args=(dialog, task, model_name, button))
thread.start()
def model_action_button_activate(self, button, model_name):
action = list(set(button.get_css_classes()) & set(["delete", "pull"]))[0]
dialog = Adw.AlertDialog(
heading=f"{action.capitalize()} Model",
body=f"Are you sure you want to {action} '{model_name}'?",
close_response="cancel"
)
dialog.add_response("cancel", "Cancel")
dialog.add_response(action, action.capitalize())
dialog.set_response_appearance(action, Adw.ResponseAppearance.DESTRUCTIVE if action == "delete" else Adw.ResponseAppearance.SUGGESTED)
dialog.choose(
parent = self.manage_models_dialog,
cancellable = None,
callback = lambda dialog, task, model_name = model_name, button = button:
self.delete_model(dialog, task, model_name, button) if action == "delete" else self.pull_model_start(dialog, task, model_name,button)
)
def update_list_available_models(self):
self.model_list_box.remove_all()
for model_name, model_description in available_models.items():
model = Adw.ActionRow(
title = model_name,
subtitle = model_description,
)
if ":" not in model_name: model_name += ":latest"
button = Gtk.Button(
icon_name = "folder-download-symbolic" if model_name not in self.local_models else "user-trash-symbolic",
vexpand = False,
valign = 3,
css_classes = ["accent", "pull"] if model_name not in self.local_models else ["error", "delete"])
button.connect("clicked", lambda button=button, model_name=model_name: self.model_action_button_activate(button, model_name))
model.add_suffix(button)
self.model_list_box.append(model)
def manage_models_button_activate(self, button):
self.manage_models_dialog.present(self)
self.update_list_available_models()
def connection_carousel_page_changed(self, carousel, index):
if index == 0: self.connection_previous_button.set_sensitive(False)
else: self.connection_previous_button.set_sensitive(True)
if index == carousel.get_n_pages()-1: self.connection_next_button.set_label("Connect")
else: self.connection_next_button.set_label("Next")
def connection_previous_button_activate(self, button):
self.connection_carousel.scroll_to(self.connection_carousel.get_nth_page(self.connection_carousel.get_position()-1), True)
def connection_next_button_activate(self, button):
if button.get_label() == "Next": self.connection_carousel.scroll_to(self.connection_carousel.get_nth_page(self.connection_carousel.get_position()+1), True)
else:
self.ollama_url = self.connection_url_entry.get_text()
if self.verify_connection():
self.connection_dialog.force_close()
else:
self.show_connection_dialog(True)
self.show_toast("error", 1, self.connection_overlay)
def show_connection_dialog(self, error:bool=False):
self.connection_carousel.scroll_to(self.connection_carousel.get_nth_page(self.connection_carousel.get_n_pages()-1),False)
if self.ollama_url is not None: self.connection_url_entry.set_text(self.ollama_url)
if error: self.connection_url_entry.set_css_classes(["error"])
else: self.connection_url_entry.set_css_classes([])
self.connection_dialog.present(self)
def clear_conversation(self):
for widget in list(self.chat_container): self.chat_container.remove(widget)
self.chats["chats"][self.current_chat_id]["messages"] = []
def clear_conversation_dialog_response(self, dialog, task):
if dialog.choose_finish(task) == "empty":
self.clear_conversation()
self.save_history()
def clear_conversation_dialog(self):
if self.bot_message is not None:
self.show_toast("info", 1, self.main_overlay)
return
dialog = Adw.AlertDialog(
heading=f"Clear Conversation",
body=f"Are you sure you want to clear the conversation?",
close_response="cancel"
)
dialog.add_response("cancel", "Cancel")
dialog.add_response("empty", "Empty")
dialog.set_response_appearance("empty", Adw.ResponseAppearance.DESTRUCTIVE)
dialog.choose(
parent = self,
cancellable = None,
callback = self.clear_conversation_dialog_response
)
def save_history(self):
with open(os.path.join(self.config_dir, "chats.json"), "w+") as f:
json.dump(self.chats, f, indent=4)
def load_history(self):
if os.path.exists(os.path.join(self.config_dir, "chats.json")):
self.clear_conversation()
try:
with open(os.path.join(self.config_dir, "chats.json"), "r") as f:
self.chats = json.load(f)
except Exception as e:
self.chats = {"chats": {"0": {"messages": []}}}
for message in self.chats['chats'][self.current_chat_id]['messages']:
if message['role'] == 'user':
self.show_message(message['content'], False, f"\n\n<small>{message['date']}</small>", message['images'][0] if 'images' in message and len(message['images']) > 0 else None)
else:
self.show_message(message['content'], True, f"\n\n<small>{message['model']}\t|\t{message['date']}</small>")
self.add_code_blocks()
self.bot_message = None
def closing_connection_dialog_response(self, dialog, task):
result = dialog.choose_finish(task)
if result == "cancel": return
if result == "save":
self.ollama_url = self.connection_url_entry.get_text()
elif result == "discard" and self.ollama_url is None: self.destroy()
self.connection_dialog.force_close()
if self.ollama_url is None or self.verify_connection() == False:
self.show_connection_dialog(True)
self.show_toast("error", 1, self.connection_overlay)
def closing_connection_dialog(self, dialog):
if self.ollama_url is None: self.destroy()
if self.ollama_url == self.connection_url_entry.get_text():
self.connection_dialog.force_close()
if self.ollama_url is None or self.verify_connection() == False:
self.show_connection_dialog(True)
self.show_toast("error", 1, self.connection_overlay)
return
dialog = Adw.AlertDialog(
heading=f"Save Changes?",
body=f"Do you want to save the URL change?",
close_response="cancel"
)
dialog.add_response("cancel", "Cancel")
dialog.add_response("discard", "Discard")
dialog.add_response("save", "Save")
dialog.set_response_appearance("discard", Adw.ResponseAppearance.DESTRUCTIVE)
dialog.set_response_appearance("save", Adw.ResponseAppearance.SUGGESTED)
dialog.choose(
parent = self,
cancellable = None,
callback = self.closing_connection_dialog_response
)
def load_image(self, file_dialog, result):
try: file = file_dialog.open_finish(result)
except Exception: return
try:
self.attached_image["path"] = file.get_path()
'''with open(self.attached_image["path"], "rb") as image_file:
self.attached_image["base64"] = base64.b64encode(image_file.read()).decode("utf-8")'''
with Image.open(self.attached_image["path"]) as img:
width, height = img.size
max_size = 240
if width > height:
new_width = max_size
new_height = int((max_size / width) * height)
else:
new_height = max_size
new_width = int((max_size / height) * width)
resized_img = img.resize((new_width, new_height), Image.LANCZOS)
with BytesIO() as output:
resized_img.save(output, format="JPEG")
image_data = output.getvalue()
self.attached_image["base64"] = base64.b64encode(image_data).decode("utf-8")
self.image_button.set_css_classes(["destructive-action"])
self.image_button.get_child().set_icon_name("edit-delete-symbolic")
except Exception as e:
print(e)
self.show_toast("error", 5, self.main_overlay)
def remove_image(self, dialog, task):
if dialog.choose_finish(task) == 'remove':
self.image_button.set_css_classes([])
self.image_button.get_child().set_icon_name("image-x-generic-symbolic")
self.attached_image = {"path": None, "base64": None}
def open_image(self, button):
if "destructive-action" in button.get_css_classes():
dialog = Adw.AlertDialog(
heading=f"Remove Image?",
body=f"Are you sure you want to remove image?",
close_response="cancel"
)
dialog.add_response("cancel", "Cancel")
dialog.add_response("remove", "Remove")
dialog.set_response_appearance("remove", Adw.ResponseAppearance.DESTRUCTIVE)
dialog.choose(
parent = self,
cancellable = None,
callback = self.remove_image
)
else:
file_dialog = Gtk.FileDialog(default_filter=self.file_filter_image)
file_dialog.open(self, None, self.load_image)
def __init__(self, **kwargs):
super().__init__(**kwargs)
GtkSource.init()
self.manage_models_button.connect("clicked", self.manage_models_button_activate)
self.send_button.connect("clicked", self.send_message)
self.image_button.connect("clicked", self.open_image)
self.set_default_widget(self.send_button)
self.model_drop_down.connect("notify", self.verify_if_image_can_be_used)
#self.message_text_view.set_activates_default(self.send_button)
self.connection_carousel.connect("page-changed", self.connection_carousel_page_changed)
self.connection_previous_button.connect("clicked", self.connection_previous_button_activate)
self.connection_next_button.connect("clicked", self.connection_next_button_activate)
self.connection_url_entry.connect("changed", lambda entry: entry.set_css_classes([]))
self.connection_dialog.connect("close-attempt", self.closing_connection_dialog)
self.load_history()
if os.path.exists(os.path.join(self.config_dir, "server.conf")):
with open(os.path.join(self.config_dir, "server.conf"), "r") as f:
self.ollama_url = f.read()
if self.verify_connection() is False: self.show_connection_dialog(True)
else: self.connection_dialog.present(self)

View File

@ -0,0 +1,381 @@
#
# The Python Imaging Library.
# $Id$
#
# EXIF tags
#
# Copyright (c) 2003 by Secret Labs AB
#
# See the README file for information on usage and redistribution.
#
"""
This module provides constants and clear-text names for various
well-known EXIF tags.
"""
from __future__ import annotations
from enum import IntEnum
class Base(IntEnum):
# possibly incomplete
InteropIndex = 0x0001
ProcessingSoftware = 0x000B
NewSubfileType = 0x00FE
SubfileType = 0x00FF
ImageWidth = 0x0100
ImageLength = 0x0101
BitsPerSample = 0x0102
Compression = 0x0103
PhotometricInterpretation = 0x0106
Thresholding = 0x0107
CellWidth = 0x0108
CellLength = 0x0109
FillOrder = 0x010A
DocumentName = 0x010D
ImageDescription = 0x010E
Make = 0x010F
Model = 0x0110
StripOffsets = 0x0111
Orientation = 0x0112
SamplesPerPixel = 0x0115
RowsPerStrip = 0x0116
StripByteCounts = 0x0117
MinSampleValue = 0x0118
MaxSampleValue = 0x0119
XResolution = 0x011A
YResolution = 0x011B
PlanarConfiguration = 0x011C
PageName = 0x011D
FreeOffsets = 0x0120
FreeByteCounts = 0x0121
GrayResponseUnit = 0x0122
GrayResponseCurve = 0x0123
T4Options = 0x0124
T6Options = 0x0125
ResolutionUnit = 0x0128
PageNumber = 0x0129
TransferFunction = 0x012D
Software = 0x0131
DateTime = 0x0132
Artist = 0x013B
HostComputer = 0x013C
Predictor = 0x013D
WhitePoint = 0x013E
PrimaryChromaticities = 0x013F
ColorMap = 0x0140
HalftoneHints = 0x0141
TileWidth = 0x0142
TileLength = 0x0143
TileOffsets = 0x0144
TileByteCounts = 0x0145
SubIFDs = 0x014A
InkSet = 0x014C
InkNames = 0x014D
NumberOfInks = 0x014E
DotRange = 0x0150
TargetPrinter = 0x0151
ExtraSamples = 0x0152
SampleFormat = 0x0153
SMinSampleValue = 0x0154
SMaxSampleValue = 0x0155
TransferRange = 0x0156
ClipPath = 0x0157
XClipPathUnits = 0x0158
YClipPathUnits = 0x0159
Indexed = 0x015A
JPEGTables = 0x015B
OPIProxy = 0x015F
JPEGProc = 0x0200
JpegIFOffset = 0x0201
JpegIFByteCount = 0x0202
JpegRestartInterval = 0x0203
JpegLosslessPredictors = 0x0205
JpegPointTransforms = 0x0206
JpegQTables = 0x0207
JpegDCTables = 0x0208
JpegACTables = 0x0209
YCbCrCoefficients = 0x0211
YCbCrSubSampling = 0x0212
YCbCrPositioning = 0x0213
ReferenceBlackWhite = 0x0214
XMLPacket = 0x02BC
RelatedImageFileFormat = 0x1000
RelatedImageWidth = 0x1001
RelatedImageLength = 0x1002
Rating = 0x4746
RatingPercent = 0x4749
ImageID = 0x800D
CFARepeatPatternDim = 0x828D
BatteryLevel = 0x828F
Copyright = 0x8298
ExposureTime = 0x829A
FNumber = 0x829D
IPTCNAA = 0x83BB
ImageResources = 0x8649
ExifOffset = 0x8769
InterColorProfile = 0x8773
ExposureProgram = 0x8822
SpectralSensitivity = 0x8824
GPSInfo = 0x8825
ISOSpeedRatings = 0x8827
OECF = 0x8828
Interlace = 0x8829
TimeZoneOffset = 0x882A
SelfTimerMode = 0x882B
SensitivityType = 0x8830
StandardOutputSensitivity = 0x8831
RecommendedExposureIndex = 0x8832
ISOSpeed = 0x8833
ISOSpeedLatitudeyyy = 0x8834
ISOSpeedLatitudezzz = 0x8835
ExifVersion = 0x9000
DateTimeOriginal = 0x9003
DateTimeDigitized = 0x9004
OffsetTime = 0x9010
OffsetTimeOriginal = 0x9011
OffsetTimeDigitized = 0x9012
ComponentsConfiguration = 0x9101
CompressedBitsPerPixel = 0x9102
ShutterSpeedValue = 0x9201
ApertureValue = 0x9202
BrightnessValue = 0x9203
ExposureBiasValue = 0x9204
MaxApertureValue = 0x9205
SubjectDistance = 0x9206
MeteringMode = 0x9207
LightSource = 0x9208
Flash = 0x9209
FocalLength = 0x920A
Noise = 0x920D
ImageNumber = 0x9211
SecurityClassification = 0x9212
ImageHistory = 0x9213
TIFFEPStandardID = 0x9216
MakerNote = 0x927C
UserComment = 0x9286
SubsecTime = 0x9290
SubsecTimeOriginal = 0x9291
SubsecTimeDigitized = 0x9292
AmbientTemperature = 0x9400
Humidity = 0x9401
Pressure = 0x9402
WaterDepth = 0x9403
Acceleration = 0x9404
CameraElevationAngle = 0x9405
XPTitle = 0x9C9B
XPComment = 0x9C9C
XPAuthor = 0x9C9D
XPKeywords = 0x9C9E
XPSubject = 0x9C9F
FlashPixVersion = 0xA000
ColorSpace = 0xA001
ExifImageWidth = 0xA002
ExifImageHeight = 0xA003
RelatedSoundFile = 0xA004
ExifInteroperabilityOffset = 0xA005
FlashEnergy = 0xA20B
SpatialFrequencyResponse = 0xA20C
FocalPlaneXResolution = 0xA20E
FocalPlaneYResolution = 0xA20F
FocalPlaneResolutionUnit = 0xA210
SubjectLocation = 0xA214
ExposureIndex = 0xA215
SensingMethod = 0xA217
FileSource = 0xA300
SceneType = 0xA301
CFAPattern = 0xA302
CustomRendered = 0xA401
ExposureMode = 0xA402
WhiteBalance = 0xA403
DigitalZoomRatio = 0xA404
FocalLengthIn35mmFilm = 0xA405
SceneCaptureType = 0xA406
GainControl = 0xA407
Contrast = 0xA408
Saturation = 0xA409
Sharpness = 0xA40A
DeviceSettingDescription = 0xA40B
SubjectDistanceRange = 0xA40C
ImageUniqueID = 0xA420
CameraOwnerName = 0xA430
BodySerialNumber = 0xA431
LensSpecification = 0xA432
LensMake = 0xA433
LensModel = 0xA434
LensSerialNumber = 0xA435
CompositeImage = 0xA460
CompositeImageCount = 0xA461
CompositeImageExposureTimes = 0xA462
Gamma = 0xA500
PrintImageMatching = 0xC4A5
DNGVersion = 0xC612
DNGBackwardVersion = 0xC613
UniqueCameraModel = 0xC614
LocalizedCameraModel = 0xC615
CFAPlaneColor = 0xC616
CFALayout = 0xC617
LinearizationTable = 0xC618
BlackLevelRepeatDim = 0xC619
BlackLevel = 0xC61A
BlackLevelDeltaH = 0xC61B
BlackLevelDeltaV = 0xC61C
WhiteLevel = 0xC61D
DefaultScale = 0xC61E
DefaultCropOrigin = 0xC61F
DefaultCropSize = 0xC620
ColorMatrix1 = 0xC621
ColorMatrix2 = 0xC622
CameraCalibration1 = 0xC623
CameraCalibration2 = 0xC624
ReductionMatrix1 = 0xC625
ReductionMatrix2 = 0xC626
AnalogBalance = 0xC627
AsShotNeutral = 0xC628
AsShotWhiteXY = 0xC629
BaselineExposure = 0xC62A
BaselineNoise = 0xC62B
BaselineSharpness = 0xC62C
BayerGreenSplit = 0xC62D
LinearResponseLimit = 0xC62E
CameraSerialNumber = 0xC62F
LensInfo = 0xC630
ChromaBlurRadius = 0xC631
AntiAliasStrength = 0xC632
ShadowScale = 0xC633
DNGPrivateData = 0xC634
MakerNoteSafety = 0xC635
CalibrationIlluminant1 = 0xC65A
CalibrationIlluminant2 = 0xC65B
BestQualityScale = 0xC65C
RawDataUniqueID = 0xC65D
OriginalRawFileName = 0xC68B
OriginalRawFileData = 0xC68C
ActiveArea = 0xC68D
MaskedAreas = 0xC68E
AsShotICCProfile = 0xC68F
AsShotPreProfileMatrix = 0xC690
CurrentICCProfile = 0xC691
CurrentPreProfileMatrix = 0xC692
ColorimetricReference = 0xC6BF
CameraCalibrationSignature = 0xC6F3
ProfileCalibrationSignature = 0xC6F4
AsShotProfileName = 0xC6F6
NoiseReductionApplied = 0xC6F7
ProfileName = 0xC6F8
ProfileHueSatMapDims = 0xC6F9
ProfileHueSatMapData1 = 0xC6FA
ProfileHueSatMapData2 = 0xC6FB
ProfileToneCurve = 0xC6FC
ProfileEmbedPolicy = 0xC6FD
ProfileCopyright = 0xC6FE
ForwardMatrix1 = 0xC714
ForwardMatrix2 = 0xC715
PreviewApplicationName = 0xC716
PreviewApplicationVersion = 0xC717
PreviewSettingsName = 0xC718
PreviewSettingsDigest = 0xC719
PreviewColorSpace = 0xC71A
PreviewDateTime = 0xC71B
RawImageDigest = 0xC71C
OriginalRawFileDigest = 0xC71D
SubTileBlockSize = 0xC71E
RowInterleaveFactor = 0xC71F
ProfileLookTableDims = 0xC725
ProfileLookTableData = 0xC726
OpcodeList1 = 0xC740
OpcodeList2 = 0xC741
OpcodeList3 = 0xC74E
NoiseProfile = 0xC761
"""Maps EXIF tags to tag names."""
TAGS = {
**{i.value: i.name for i in Base},
0x920C: "SpatialFrequencyResponse",
0x9214: "SubjectLocation",
0x9215: "ExposureIndex",
0x828E: "CFAPattern",
0x920B: "FlashEnergy",
0x9216: "TIFF/EPStandardID",
}
class GPS(IntEnum):
GPSVersionID = 0
GPSLatitudeRef = 1
GPSLatitude = 2
GPSLongitudeRef = 3
GPSLongitude = 4
GPSAltitudeRef = 5
GPSAltitude = 6
GPSTimeStamp = 7
GPSSatellites = 8
GPSStatus = 9
GPSMeasureMode = 10
GPSDOP = 11
GPSSpeedRef = 12
GPSSpeed = 13
GPSTrackRef = 14
GPSTrack = 15
GPSImgDirectionRef = 16
GPSImgDirection = 17
GPSMapDatum = 18
GPSDestLatitudeRef = 19
GPSDestLatitude = 20
GPSDestLongitudeRef = 21
GPSDestLongitude = 22
GPSDestBearingRef = 23
GPSDestBearing = 24
GPSDestDistanceRef = 25
GPSDestDistance = 26
GPSProcessingMethod = 27
GPSAreaInformation = 28
GPSDateStamp = 29
GPSDifferential = 30
GPSHPositioningError = 31
"""Maps EXIF GPS tags to tag names."""
GPSTAGS = {i.value: i.name for i in GPS}
class Interop(IntEnum):
InteropIndex = 1
InteropVersion = 2
RelatedImageFileFormat = 4096
RelatedImageWidth = 4097
RelatedImageHeight = 4098
class IFD(IntEnum):
Exif = 34665
GPSInfo = 34853
Makernote = 37500
Interop = 40965
IFD1 = -1
class LightSource(IntEnum):
Unknown = 0
Daylight = 1
Fluorescent = 2
Tungsten = 3
Flash = 4
Fine = 9
Cloudy = 10
Shade = 11
DaylightFluorescent = 12
DayWhiteFluorescent = 13
CoolWhiteFluorescent = 14
WhiteFluorescent = 15
StandardLightA = 17
StandardLightB = 18
StandardLightC = 19
D55 = 20
D65 = 21
D75 = 22
D50 = 23
ISO = 24
Other = 255
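# Usage sketch for the tables above, assuming Pillow is installed and
# "photo.jpg" stands in for any JPEG carrying EXIF data: getexif() yields raw
# numeric tag IDs, and TAGS / GPSTAGS translate them to the names defined here.
def _dump_exif(path="photo.jpg"):
    from PIL import Image

    exif = Image.open(path).getexif()
    for tag_id, value in exif.items():
        print(TAGS.get(tag_id, hex(tag_id)), value)
    # GPS tags live in their own IFD and use the GPSTAGS table instead
    for tag_id, value in exif.get_ifd(IFD.GPSInfo).items():
        print(GPSTAGS.get(tag_id, hex(tag_id)), value)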

View File

@ -0,0 +1,565 @@
#
# The Python Imaging Library.
# $Id$
#
# standard filters
#
# History:
# 1995-11-27 fl Created
# 2002-06-08 fl Added rank and mode filters
# 2003-09-15 fl Fixed rank calculation in rank filter; added expand call
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2002 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import functools
class Filter:
pass
class MultibandFilter(Filter):
pass
class BuiltinFilter(MultibandFilter):
def filter(self, image):
if image.mode == "P":
msg = "cannot filter palette images"
raise ValueError(msg)
return image.filter(*self.filterargs)
class Kernel(BuiltinFilter):
"""
Create a convolution kernel. This only supports 3x3 and 5x5 integer and floating
point kernels.
Kernels can only be applied to "L" and "RGB" images.
:param size: Kernel size, given as (width, height). This must be (3,3) or (5,5).
:param kernel: A sequence containing kernel weights. The kernel will be flipped
vertically before being applied to the image.
:param scale: Scale factor. If given, the result for each pixel is divided by this
value. The default is the sum of the kernel weights.
:param offset: Offset. If given, this value is added to the result, after it has
been divided by the scale factor.
"""
name = "Kernel"
def __init__(self, size, kernel, scale=None, offset=0):
if scale is None:
# default scale is sum of kernel
scale = functools.reduce(lambda a, b: a + b, kernel)
if size[0] * size[1] != len(kernel):
msg = "not enough coefficients in kernel"
raise ValueError(msg)
self.filterargs = size, scale, offset, kernel
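# Sketch of a custom 3x3 sharpen using the Kernel class above; "input.png" is
# a placeholder path, and scale is left to default to the kernel sum (here 1).
def _sharpen_example(path="input.png"):
    from PIL import Image

    im = Image.open(path).convert("RGB")
    return im.filter(Kernel((3, 3), (
        0, -1, 0,
        -1, 5, -1,
        0, -1, 0,
    )))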
class RankFilter(Filter):
"""
Create a rank filter. The rank filter sorts all pixels in
a window of the given size, and returns the ``rank``'th value.
:param size: The kernel size, in pixels.
:param rank: What pixel value to pick. Use 0 for a min filter,
``size * size / 2`` for a median filter, ``size * size - 1``
for a max filter, etc.
"""
name = "Rank"
def __init__(self, size, rank):
self.size = size
self.rank = rank
def filter(self, image):
if image.mode == "P":
msg = "cannot filter palette images"
raise ValueError(msg)
image = image.expand(self.size // 2, self.size // 2)
return image.rankfilter(self.size, self.rank)
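# The rank extremes described above, sketched on a grayscale image;
# "scan.png" is a placeholder path and 5 an arbitrary window size.
def _rank_examples(path="scan.png", size=5):
    from PIL import Image

    im = Image.open(path).convert("L")
    minimum = im.filter(RankFilter(size, 0))                # min filter
    median = im.filter(RankFilter(size, size * size // 2))  # median filter
    maximum = im.filter(RankFilter(size, size * size - 1))  # max filter
    return minimum, median, maximum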
class MedianFilter(RankFilter):
"""
Create a median filter. Picks the median pixel value in a window with the
given size.
:param size: The kernel size, in pixels.
"""
name = "Median"
def __init__(self, size=3):
self.size = size
self.rank = size * size // 2
class MinFilter(RankFilter):
"""
Create a min filter. Picks the lowest pixel value in a window with the
given size.
:param size: The kernel size, in pixels.
"""
name = "Min"
def __init__(self, size=3):
self.size = size
self.rank = 0
class MaxFilter(RankFilter):
"""
Create a max filter. Picks the largest pixel value in a window with the
given size.
:param size: The kernel size, in pixels.
"""
name = "Max"
def __init__(self, size=3):
self.size = size
self.rank = size * size - 1
class ModeFilter(Filter):
"""
Create a mode filter. Picks the most frequent pixel value in a box with the
given size. Pixel values that occur only once or twice are ignored; if no
pixel value occurs more than twice, the original pixel value is preserved.
:param size: The kernel size, in pixels.
"""
name = "Mode"
def __init__(self, size=3):
self.size = size
def filter(self, image):
return image.modefilter(self.size)
class GaussianBlur(MultibandFilter):
"""Blurs the image with a sequence of extended box filters, which
approximates a Gaussian kernel. For details on accuracy see
<https://www.mia.uni-saarland.de/Publications/gwosdek-ssvm11.pdf>
:param radius: Standard deviation of the Gaussian kernel. Either a sequence of two
numbers for x and y, or a single number for both.
"""
name = "GaussianBlur"
def __init__(self, radius=2):
self.radius = radius
def filter(self, image):
xy = self.radius
if not isinstance(xy, (tuple, list)):
xy = (xy, xy)
if xy == (0, 0):
return image.copy()
return image.gaussian_blur(xy)
class BoxBlur(MultibandFilter):
"""Blurs the image by setting each pixel to the average value of the pixels
in a square box extending radius pixels in each direction.
Supports float radius of arbitrary size. Uses an optimized implementation
which runs in linear time relative to the size of the image
for any radius value.
:param radius: Size of the box in a direction. Either a sequence of two numbers for
x and y, or a single number for both.
Radius 0 does not blur, returns an identical image.
Radius 1 takes 1 pixel in each direction, i.e. 9 pixels in total.
"""
name = "BoxBlur"
def __init__(self, radius):
xy = radius
if not isinstance(xy, (tuple, list)):
xy = (xy, xy)
if xy[0] < 0 or xy[1] < 0:
msg = "radius must be >= 0"
raise ValueError(msg)
self.radius = radius
def filter(self, image):
xy = self.radius
if not isinstance(xy, (tuple, list)):
xy = (xy, xy)
if xy == (0, 0):
return image.copy()
return image.box_blur(xy)
class UnsharpMask(MultibandFilter):
"""Unsharp mask filter.
See Wikipedia's entry on `digital unsharp masking`_ for an explanation of
the parameters.
:param radius: Blur Radius
:param percent: Unsharp strength, in percent
:param threshold: Threshold controls the minimum brightness change that
will be sharpened
.. _digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking
"""
name = "UnsharpMask"
def __init__(self, radius=2, percent=150, threshold=3):
self.radius = radius
self.percent = percent
self.threshold = threshold
def filter(self, image):
return image.unsharp_mask(self.radius, self.percent, self.threshold)
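# Sketch of the unsharp-mask parameters described above, using the documented
# defaults; "input.png" is a placeholder path.
def _unsharp_example(path="input.png"):
    from PIL import Image

    return Image.open(path).filter(UnsharpMask(radius=2, percent=150, threshold=3))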
class BLUR(BuiltinFilter):
name = "Blur"
# fmt: off
filterargs = (5, 5), 16, 0, (
1, 1, 1, 1, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 1, 1, 1, 1,
)
# fmt: on
class CONTOUR(BuiltinFilter):
name = "Contour"
# fmt: off
filterargs = (3, 3), 1, 255, (
-1, -1, -1,
-1, 8, -1,
-1, -1, -1,
)
# fmt: on
class DETAIL(BuiltinFilter):
name = "Detail"
# fmt: off
filterargs = (3, 3), 6, 0, (
0, -1, 0,
-1, 10, -1,
0, -1, 0,
)
# fmt: on
class EDGE_ENHANCE(BuiltinFilter):
name = "Edge-enhance"
# fmt: off
filterargs = (3, 3), 2, 0, (
-1, -1, -1,
-1, 10, -1,
-1, -1, -1,
)
# fmt: on
class EDGE_ENHANCE_MORE(BuiltinFilter):
name = "Edge-enhance More"
# fmt: off
filterargs = (3, 3), 1, 0, (
-1, -1, -1,
-1, 9, -1,
-1, -1, -1,
)
# fmt: on
class EMBOSS(BuiltinFilter):
name = "Emboss"
# fmt: off
filterargs = (3, 3), 1, 128, (
-1, 0, 0,
0, 1, 0,
0, 0, 0,
)
# fmt: on
class FIND_EDGES(BuiltinFilter):
name = "Find Edges"
# fmt: off
filterargs = (3, 3), 1, 0, (
-1, -1, -1,
-1, 8, -1,
-1, -1, -1,
)
# fmt: on
class SHARPEN(BuiltinFilter):
name = "Sharpen"
# fmt: off
filterargs = (3, 3), 16, 0, (
-2, -2, -2,
-2, 32, -2,
-2, -2, -2,
)
# fmt: on
class SMOOTH(BuiltinFilter):
name = "Smooth"
# fmt: off
filterargs = (3, 3), 13, 0, (
1, 1, 1,
1, 5, 1,
1, 1, 1,
)
# fmt: on
class SMOOTH_MORE(BuiltinFilter):
name = "Smooth More"
# fmt: off
filterargs = (5, 5), 100, 0, (
1, 1, 1, 1, 1,
1, 5, 5, 5, 1,
1, 5, 44, 5, 1,
1, 5, 5, 5, 1,
1, 1, 1, 1, 1,
)
# fmt: on
class Color3DLUT(MultibandFilter):
"""Three-dimensional color lookup table.
Transforms 3-channel pixels using the values of the channels as coordinates
in the 3D lookup table and interpolating the nearest elements.
This method allows you to apply almost any color transformation
in constant time by using pre-calculated decimated tables.
.. versionadded:: 5.2.0
:param size: Size of the table. One int or tuple of (int, int, int).
Minimal size in any dimension is 2, maximum is 65.
:param table: Flat lookup table. A list of ``channels * size**3``
float elements or a list of ``size**3`` channels-sized
tuples with floats. Channels are changed first,
then first dimension, then second, then third.
Value 0.0 corresponds to the lowest output value, 1.0 to the highest.
:param channels: Number of channels in the table. Could be 3 or 4.
Default is 3.
:param target_mode: A mode for the result image. Should have at least
``channels`` channels. Default is ``None``,
which means the mode won't be changed.
"""
name = "Color 3D LUT"
def __init__(self, size, table, channels=3, target_mode=None, **kwargs):
if channels not in (3, 4):
msg = "Only 3 or 4 output channels are supported"
raise ValueError(msg)
self.size = size = self._check_size(size)
self.channels = channels
self.mode = target_mode
# Hidden flag `_copy_table=False` could be used to avoid extra copying
# of the table if the table is specially made for the constructor.
copy_table = kwargs.get("_copy_table", True)
items = size[0] * size[1] * size[2]
wrong_size = False
numpy = None
if hasattr(table, "shape"):
try:
import numpy
except ImportError:
pass
if numpy and isinstance(table, numpy.ndarray):
if copy_table:
table = table.copy()
if table.shape in [
(items * channels,),
(items, channels),
(size[2], size[1], size[0], channels),
]:
table = table.reshape(items * channels)
else:
wrong_size = True
else:
if copy_table:
table = list(table)
# Convert to a flat list
if table and isinstance(table[0], (list, tuple)):
table, raw_table = [], table
for pixel in raw_table:
if len(pixel) != channels:
msg = (
"The elements of the table should "
f"have a length of {channels}."
)
raise ValueError(msg)
table.extend(pixel)
if wrong_size or len(table) != items * channels:
msg = (
"The table should have either channels * size**3 float items "
"or size**3 items of channels-sized tuples with floats. "
f"Table should be: {channels}x{size[0]}x{size[1]}x{size[2]}. "
f"Actual length: {len(table)}"
)
raise ValueError(msg)
self.table = table
@staticmethod
def _check_size(size):
try:
_, _, _ = size
except ValueError as e:
msg = "Size should be either an integer or a tuple of three integers."
raise ValueError(msg) from e
except TypeError:
size = (size, size, size)
size = [int(x) for x in size]
for size_1d in size:
if not 2 <= size_1d <= 65:
msg = "Size should be in [2, 65] range."
raise ValueError(msg)
return size
@classmethod
def generate(cls, size, callback, channels=3, target_mode=None):
"""Generates new LUT using provided callback.
:param size: Size of the table. Passed to the constructor.
:param callback: Function with three parameters which correspond to
the three color channels. Will be called ``size**3``
times with values from 0.0 to 1.0 and should return
a tuple with ``channels`` elements.
:param channels: The number of channels the callback should return.
:param target_mode: Passed to the constructor of the resulting
lookup table.
"""
size_1d, size_2d, size_3d = cls._check_size(size)
if channels not in (3, 4):
msg = "Only 3 or 4 output channels are supported"
raise ValueError(msg)
table = [0] * (size_1d * size_2d * size_3d * channels)
idx_out = 0
for b in range(size_3d):
for g in range(size_2d):
for r in range(size_1d):
table[idx_out : idx_out + channels] = callback(
r / (size_1d - 1), g / (size_2d - 1), b / (size_3d - 1)
)
idx_out += channels
return cls(
(size_1d, size_2d, size_3d),
table,
channels=channels,
target_mode=target_mode,
_copy_table=False,
)
def transform(self, callback, with_normals=False, channels=None, target_mode=None):
"""Transforms the table values using provided callback and returns
a new LUT with altered values.
:param callback: A function which takes old lookup table values
and returns a new set of values. The number
of arguments which function should take is
``self.channels`` or ``3 + self.channels``
if ``with_normals`` flag is set.
Should return a tuple of ``self.channels`` or
``channels`` elements if it is set.
:param with_normals: If true, ``callback`` will be called with
coordinates in the color cube as the first
three arguments. Otherwise, ``callback``
will be called only with actual color values.
:param channels: The number of channels in the resulting lookup table.
:param target_mode: Passed to the constructor of the resulting
lookup table.
"""
if channels not in (None, 3, 4):
msg = "Only 3 or 4 output channels are supported"
raise ValueError(msg)
ch_in = self.channels
ch_out = channels or ch_in
size_1d, size_2d, size_3d = self.size
table = [0] * (size_1d * size_2d * size_3d * ch_out)
idx_in = 0
idx_out = 0
for b in range(size_3d):
for g in range(size_2d):
for r in range(size_1d):
values = self.table[idx_in : idx_in + ch_in]
if with_normals:
values = callback(
r / (size_1d - 1),
g / (size_2d - 1),
b / (size_3d - 1),
*values,
)
else:
values = callback(*values)
table[idx_out : idx_out + ch_out] = values
idx_in += ch_in
idx_out += ch_out
return type(self)(
self.size,
table,
channels=ch_out,
target_mode=target_mode or self.mode,
_copy_table=False,
)
def __repr__(self):
r = [
f"{self.__class__.__name__} from {self.table.__class__.__name__}",
"size={:d}x{:d}x{:d}".format(*self.size),
f"channels={self.channels:d}",
]
if self.mode:
r.append(f"target_mode={self.mode}")
return "<{}>".format(" ".join(r))
def filter(self, image):
from . import Image
return image.color_lut_3d(
self.mode or image.mode,
Image.Resampling.BILINEAR,
self.channels,
self.size[0],
self.size[1],
self.size[2],
self.table,
)
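# Sketch of Color3DLUT.generate() above: build a channel-inverting LUT and
# apply it; assumes an RGB source at the placeholder path "input.png".
def _invert_via_lut(path="input.png"):
    from PIL import Image

    lut = Color3DLUT.generate(9, lambda r, g, b: (1.0 - r, 1.0 - g, 1.0 - b))
    return Image.open(path).convert("RGB").filter(lut)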

View File

@ -0,0 +1,418 @@
"""
Support for streaming http requests in emscripten.
A few caveats -
Firstly, you can't do streaming http in the main UI thread, because atomics.wait isn't allowed.
Streaming only works if you're running pyodide in a web worker.
Secondly, this uses an extra web worker and SharedArrayBuffer to do the asynchronous fetch
operation, so it requires that you have crossOriginIsolation enabled, by serving over https
(or from localhost) with the two headers below set:
Cross-Origin-Opener-Policy: same-origin
Cross-Origin-Embedder-Policy: require-corp
You can tell if cross origin isolation is successfully enabled by looking at the global crossOriginIsolated variable in
javascript console. If it isn't, streaming requests will fallback to XMLHttpRequest, i.e. getting the whole
request into a buffer and then returning it. it shows a warning in the javascript console in this case.
Finally, the webworker which does the streaming fetch is created on initial import, but will only be started once
control is returned to javascript. Call `await wait_for_streaming_ready()` to wait for streaming fetch.
NB: in this code, there are a lot of javascript objects. They are named js_*
to make it clear what type of object they are.
"""
from __future__ import annotations
import io
import json
from email.parser import Parser
from importlib.resources import files
from typing import TYPE_CHECKING, Any
import js # type: ignore[import-not-found]
from pyodide.ffi import ( # type: ignore[import-not-found]
JsArray,
JsException,
JsProxy,
to_js,
)
if TYPE_CHECKING:
from typing_extensions import Buffer
from .request import EmscriptenRequest
from .response import EmscriptenResponse
"""
There are some headers that trigger unintended CORS preflight requests.
See also https://github.com/koenvo/pyodide-http/issues/22
"""
HEADERS_TO_IGNORE = ("user-agent",)
SUCCESS_HEADER = -1
SUCCESS_EOF = -2
ERROR_TIMEOUT = -3
ERROR_EXCEPTION = -4
_STREAMING_WORKER_CODE = (
files(__package__)
.joinpath("emscripten_fetch_worker.js")
.read_text(encoding="utf-8")
)
class _RequestError(Exception):
def __init__(
self,
message: str | None = None,
*,
request: EmscriptenRequest | None = None,
response: EmscriptenResponse | None = None,
):
self.request = request
self.response = response
self.message = message
super().__init__(self.message)
class _StreamingError(_RequestError):
pass
class _TimeoutError(_RequestError):
pass
def _obj_from_dict(dict_val: dict[str, Any]) -> JsProxy:
return to_js(dict_val, dict_converter=js.Object.fromEntries)
class _ReadStream(io.RawIOBase):
def __init__(
self,
int_buffer: JsArray,
byte_buffer: JsArray,
timeout: float,
worker: JsProxy,
connection_id: int,
request: EmscriptenRequest,
):
self.int_buffer = int_buffer
self.byte_buffer = byte_buffer
self.read_pos = 0
self.read_len = 0
self.connection_id = connection_id
self.worker = worker
self.timeout = int(1000 * timeout) if timeout > 0 else None
self.is_live = True
self._is_closed = False
self.request: EmscriptenRequest | None = request
def __del__(self) -> None:
self.close()
# this is compatible with _base_connection
def is_closed(self) -> bool:
return self._is_closed
# for compatibility with RawIOBase
@property
def closed(self) -> bool:
return self.is_closed()
def close(self) -> None:
if not self.is_closed():
self.read_len = 0
self.read_pos = 0
self.int_buffer = None
self.byte_buffer = None
self._is_closed = True
self.request = None
if self.is_live:
self.worker.postMessage(_obj_from_dict({"close": self.connection_id}))
self.is_live = False
super().close()
def readable(self) -> bool:
return True
def writable(self) -> bool:
return False
def seekable(self) -> bool:
return False
def readinto(self, byte_obj: Buffer) -> int:
if not self.int_buffer:
raise _StreamingError(
"No buffer for stream in _ReadStream.readinto",
request=self.request,
response=None,
)
if self.read_len == 0:
# wait for the worker to send something
js.Atomics.store(self.int_buffer, 0, ERROR_TIMEOUT)
self.worker.postMessage(_obj_from_dict({"getMore": self.connection_id}))
if (
js.Atomics.wait(self.int_buffer, 0, ERROR_TIMEOUT, self.timeout)
== "timed-out"
):
raise _TimeoutError
data_len = self.int_buffer[0]
if data_len > 0:
self.read_len = data_len
self.read_pos = 0
elif data_len == ERROR_EXCEPTION:
string_len = self.int_buffer[1]
# decode the error string
js_decoder = js.TextDecoder.new()
json_str = js_decoder.decode(self.byte_buffer.slice(0, string_len))
raise _StreamingError(
f"Exception thrown in fetch: {json_str}",
request=self.request,
response=None,
)
else:
# EOF, free the buffers and return zero
# and free the request
self.is_live = False
self.close()
return 0
# copy from int32array to python bytes
ret_length = min(self.read_len, len(memoryview(byte_obj)))
subarray = self.byte_buffer.subarray(
self.read_pos, self.read_pos + ret_length
).to_py()
memoryview(byte_obj)[0:ret_length] = subarray
self.read_len -= ret_length
self.read_pos += ret_length
return ret_length
class _StreamingFetcher:
def __init__(self) -> None:
# make web-worker and data buffer on startup
self.streaming_ready = False
js_data_blob = js.Blob.new(
[_STREAMING_WORKER_CODE], _obj_from_dict({"type": "application/javascript"})
)
def promise_resolver(js_resolve_fn: JsProxy, js_reject_fn: JsProxy) -> None:
def onMsg(e: JsProxy) -> None:
self.streaming_ready = True
js_resolve_fn(e)
def onErr(e: JsProxy) -> None:
js_reject_fn(e) # Defensive: never happens in ci
self.js_worker.onmessage = onMsg
self.js_worker.onerror = onErr
js_data_url = js.URL.createObjectURL(js_data_blob)
self.js_worker = js.globalThis.Worker.new(js_data_url)
self.js_worker_ready_promise = js.globalThis.Promise.new(promise_resolver)
def send(self, request: EmscriptenRequest) -> EmscriptenResponse:
headers = {
k: v for k, v in request.headers.items() if k not in HEADERS_TO_IGNORE
}
body = request.body
fetch_data = {"headers": headers, "body": to_js(body), "method": request.method}
# start the request off in the worker
timeout = int(1000 * request.timeout) if request.timeout > 0 else None
js_shared_buffer = js.SharedArrayBuffer.new(1048576)
js_int_buffer = js.Int32Array.new(js_shared_buffer)
js_byte_buffer = js.Uint8Array.new(js_shared_buffer, 8)
js.Atomics.store(js_int_buffer, 0, ERROR_TIMEOUT)
js.Atomics.notify(js_int_buffer, 0)
js_absolute_url = js.URL.new(request.url, js.location).href
self.js_worker.postMessage(
_obj_from_dict(
{
"buffer": js_shared_buffer,
"url": js_absolute_url,
"fetchParams": fetch_data,
}
)
)
# wait for the worker to send something
js.Atomics.wait(js_int_buffer, 0, ERROR_TIMEOUT, timeout)
if js_int_buffer[0] == ERROR_TIMEOUT:
raise _TimeoutError(
"Timeout connecting to streaming request",
request=request,
response=None,
)
elif js_int_buffer[0] == SUCCESS_HEADER:
# got response
# header length is in second int of intBuffer
string_len = js_int_buffer[1]
# decode the rest to a JSON string
js_decoder = js.TextDecoder.new()
# this does a copy (the slice) because decode can't work on shared array
# for some silly reason
json_str = js_decoder.decode(js_byte_buffer.slice(0, string_len))
# get it as an object
response_obj = json.loads(json_str)
return EmscriptenResponse(
request=request,
status_code=response_obj["status"],
headers=response_obj["headers"],
body=_ReadStream(
js_int_buffer,
js_byte_buffer,
request.timeout,
self.js_worker,
response_obj["connectionID"],
request,
),
)
elif js_int_buffer[0] == ERROR_EXCEPTION:
string_len = js_int_buffer[1]
# decode the error string
js_decoder = js.TextDecoder.new()
json_str = js_decoder.decode(js_byte_buffer.slice(0, string_len))
raise _StreamingError(
f"Exception thrown in fetch: {json_str}", request=request, response=None
)
else:
raise _StreamingError(
f"Unknown status from worker in fetch: {js_int_buffer[0]}",
request=request,
response=None,
)
# check if we are in a worker or not
def is_in_browser_main_thread() -> bool:
return hasattr(js, "window") and hasattr(js, "self") and js.self == js.window
def is_cross_origin_isolated() -> bool:
return hasattr(js, "crossOriginIsolated") and js.crossOriginIsolated
def is_in_node() -> bool:
return (
hasattr(js, "process")
and hasattr(js.process, "release")
and hasattr(js.process.release, "name")
and js.process.release.name == "node"
)
def is_worker_available() -> bool:
return hasattr(js, "Worker") and hasattr(js, "Blob")
_fetcher: _StreamingFetcher | None = None
if is_worker_available() and (
(is_cross_origin_isolated() and not is_in_browser_main_thread())
and (not is_in_node())
):
_fetcher = _StreamingFetcher()
else:
_fetcher = None
def send_streaming_request(request: EmscriptenRequest) -> EmscriptenResponse | None:
if _fetcher and streaming_ready():
return _fetcher.send(request)
else:
_show_streaming_warning()
return None
_SHOWN_TIMEOUT_WARNING = False
def _show_timeout_warning() -> None:
global _SHOWN_TIMEOUT_WARNING
if not _SHOWN_TIMEOUT_WARNING:
_SHOWN_TIMEOUT_WARNING = True
message = "Warning: Timeout is not available on main browser thread"
js.console.warn(message)
_SHOWN_STREAMING_WARNING = False
def _show_streaming_warning() -> None:
global _SHOWN_STREAMING_WARNING
if not _SHOWN_STREAMING_WARNING:
_SHOWN_STREAMING_WARNING = True
message = "Can't stream HTTP requests because: \n"
if not is_cross_origin_isolated():
message += " Page is not cross-origin isolated\n"
if is_in_browser_main_thread():
message += " Python is running in main browser thread\n"
if not is_worker_available():
message += " Worker or Blob classes are not available in this environment." # Defensive: this is always False in browsers that we test in
if streaming_ready() is False:
message += """ Streaming fetch worker isn't ready. If you want to be sure that streaming fetch
is working, you need to call: 'await urllib3.contrib.emscripten.fetch.wait_for_streaming_ready()`"""
from js import console
console.warn(message)
def send_request(request: EmscriptenRequest) -> EmscriptenResponse:
try:
js_xhr = js.XMLHttpRequest.new()
if not is_in_browser_main_thread():
js_xhr.responseType = "arraybuffer"
if request.timeout:
js_xhr.timeout = int(request.timeout * 1000)
else:
js_xhr.overrideMimeType("text/plain; charset=ISO-8859-15")
if request.timeout:
# timeout isn't available on the main thread - show a warning in console
# if it is set
_show_timeout_warning()
js_xhr.open(request.method, request.url, False)
for name, value in request.headers.items():
if name.lower() not in HEADERS_TO_IGNORE:
js_xhr.setRequestHeader(name, value)
js_xhr.send(to_js(request.body))
headers = dict(Parser().parsestr(js_xhr.getAllResponseHeaders()))
if not is_in_browser_main_thread():
body = js_xhr.response.to_py().tobytes()
else:
body = js_xhr.response.encode("ISO-8859-15")
return EmscriptenResponse(
status_code=js_xhr.status, headers=headers, body=body, request=request
)
except JsException as err:
if err.name == "TimeoutError":
raise _TimeoutError(err.message, request=request)
elif err.name == "NetworkError":
raise _RequestError(err.message, request=request)
else:
# general http error
raise _RequestError(err.message, request=request)
def streaming_ready() -> bool | None:
if _fetcher:
return _fetcher.streaming_ready
else:
return None # no fetcher, return None to signify that
async def wait_for_streaming_ready() -> bool:
if _fetcher:
await _fetcher.js_worker_ready_promise
return True
else:
return False
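# Usage sketch tying the helpers above together. It assumes it runs under
# pyodide in a web worker, and that EmscriptenRequest (from .request) can be
# constructed with just method= and url=, with headers/body/timeout defaulted;
# the URL is a placeholder.
async def _example_fetch() -> bytes:
    from .request import EmscriptenRequest

    await wait_for_streaming_ready()
    request = EmscriptenRequest(method="GET", url="https://example.com/data.bin")
    response = send_streaming_request(request) or send_request(request)  # XHR fallback
    body = response.body
    # streaming responses carry a readable stream; the XHR fallback carries bytes
    return body if isinstance(body, bytes) else body.read()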

Binary file not shown.

(image, 12 KiB)

View File

@ -0,0 +1,200 @@
#
# The Python Imaging Library.
#
# MSP file handling
#
# This is the format used by the Paint program in Windows 1 and 2.
#
# History:
# 95-09-05 fl Created
# 97-01-03 fl Read/write MSP images
# 17-02-21 es Fixed RLE interpretation
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1995-97.
# Copyright (c) Eric Soroos 2017.
#
# See the README file for information on usage and redistribution.
#
# More info on this format: https://archive.org/details/gg243631
# Page 313:
# Figure 205. Windows Paint Version 1: "DanM" Format
# Figure 206. Windows Paint Version 2: "LinS" Format. Used in Windows V2.03
#
# See also: https://www.fileformat.info/format/mspaint/egff.htm
from __future__ import annotations
import io
import struct
from typing import IO
from . import Image, ImageFile
from ._binary import i16le as i16
from ._binary import o16le as o16
#
# read MSP files
def _accept(prefix: bytes) -> bool:
return prefix[:4] in [b"DanM", b"LinS"]
##
# Image plugin for Windows MSP images. This plugin supports both
# uncompressed (Windows 1.0) and RLE-compressed (Windows 2.0) images
# for reading; only uncompressed images can be written.
class MspImageFile(ImageFile.ImageFile):
format = "MSP"
format_description = "Windows Paint"
def _open(self) -> None:
# Header
assert self.fp is not None
s = self.fp.read(32)
if not _accept(s):
msg = "not an MSP file"
raise SyntaxError(msg)
# Header checksum
checksum = 0
for i in range(0, 32, 2):
checksum = checksum ^ i16(s, i)
if checksum != 0:
msg = "bad MSP checksum"
raise SyntaxError(msg)
self._mode = "1"
self._size = i16(s, 4), i16(s, 6)
if s[:4] == b"DanM":
self.tile = [("raw", (0, 0) + self.size, 32, ("1", 0, 1))]
else:
self.tile = [("MSP", (0, 0) + self.size, 32, None)]
class MspDecoder(ImageFile.PyDecoder):
# The algo for the MSP decoder is from
# https://www.fileformat.info/format/mspaint/egff.htm
# cc-by-attribution -- the material on that page is taken from the
# Encyclopedia of Graphics File Formats and is licensed by
# O'Reilly under the Creative Commons Attribution license
#
# For RLE encoded files, the 32byte header is followed by a scan
# line map, encoded as one 16bit word of encoded byte length per
# line.
#
# NOTE: the encoded length of the line can be 0. This was not
# handled in the previous version of this decoder, and there's no
# mention of how to handle it in the documentation. From the few
# examples I've seen, I've assumed that it is a fill of the
# background color, in this case, white.
#
#
# Pseudocode of the decoder:
# Read a BYTE value as the RunType
# If the RunType value is zero
# Read next byte as the RunCount
# Read the next byte as the RunValue
# Write the RunValue byte RunCount times
# If the RunType value is non-zero
# Use this value as the RunCount
# Read and write the next RunCount bytes literally
#
# e.g.:
# 0x00 03 ff 05 00 01 02 03 04
# would yield the bytes:
# 0xff ff ff 00 01 02 03 04
#
# which are then interpreted as a bit packed mode '1' image
_pulls_fd = True
def decode(self, buffer: bytes) -> tuple[int, int]:
assert self.fd is not None
img = io.BytesIO()
blank_line = bytearray((0xFF,) * ((self.state.xsize + 7) // 8))
try:
self.fd.seek(32)
rowmap = struct.unpack_from(
f"<{self.state.ysize}H", self.fd.read(self.state.ysize * 2)
)
except struct.error as e:
msg = "Truncated MSP file in row map"
raise OSError(msg) from e
for x, rowlen in enumerate(rowmap):
try:
if rowlen == 0:
img.write(blank_line)
continue
row = self.fd.read(rowlen)
if len(row) != rowlen:
msg = f"Truncated MSP file, expected {rowlen} bytes on row {x}"
raise OSError(msg)
idx = 0
while idx < rowlen:
runtype = row[idx]
idx += 1
if runtype == 0:
(runcount, runval) = struct.unpack_from("Bc", row, idx)
img.write(runval * runcount)
idx += 2
else:
runcount = runtype
img.write(row[idx : idx + runcount])
idx += runcount
except struct.error as e:
msg = f"Corrupted MSP file in row {x}"
raise OSError(msg) from e
self.set_as_raw(img.getvalue(), ("1", 0, 1))
return -1, 0
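# Standalone sketch of the RLE scheme documented above, operating on one
# already-extracted row (not part of the plugin API):
def _decode_msp_row(row: bytes) -> bytes:
    out = bytearray()
    idx = 0
    while idx < len(row):
        runtype = row[idx]
        idx += 1
        if runtype == 0:  # RLE run: a count byte, then the value byte
            runcount, runval = row[idx], row[idx + 1 : idx + 2]
            out += runval * runcount
            idx += 2
        else:  # literal run: runtype bytes copied verbatim
            out += row[idx : idx + runtype]
            idx += runtype
    return bytes(out)

# _decode_msp_row(bytes.fromhex("0003ff050001020304")) == bytes.fromhex("ffffff0001020304")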
Image.register_decoder("MSP", MspDecoder)
#
# write MSP files (uncompressed only)
def _save(im: Image.Image, fp: IO[bytes], filename: str) -> None:
if im.mode != "1":
msg = f"cannot write mode {im.mode} as MSP"
raise OSError(msg)
# create MSP header
header = [0] * 16
header[0], header[1] = i16(b"Da"), i16(b"nM") # version 1
header[2], header[3] = im.size
header[4], header[5] = 1, 1
header[6], header[7] = 1, 1
header[8], header[9] = im.size
checksum = 0
for h in header:
checksum = checksum ^ h
header[12] = checksum # FIXME: is this the right field?
# header
for h in header:
fp.write(o16(h))
# image body
ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 32, ("1", 0, 1))])
#
# registry
Image.register_open(MspImageFile.format, MspImageFile, _accept)
Image.register_save(MspImageFile.format, _save)
Image.register_extension(MspImageFile.format, ".msp")

View File

@ -0,0 +1,284 @@
#
# The Python Imaging Library.
# $Id$
#
# a Tk display interface
#
# History:
# 96-04-08 fl Created
# 96-09-06 fl Added getimage method
# 96-11-01 fl Rewritten, removed image attribute and crop method
# 97-05-09 fl Use PyImagingPaste method instead of image type
# 97-05-12 fl Minor tweaks to match the IFUNC95 interface
# 97-05-17 fl Support the "pilbitmap" booster patch
# 97-06-05 fl Added file= and data= argument to image constructors
# 98-03-09 fl Added width and height methods to Image classes
# 98-07-02 fl Use default mode for "P" images without palette attribute
# 98-07-02 fl Explicitly destroy Tkinter image objects
# 99-07-24 fl Support multiple Tk interpreters (from Greg Couch)
# 99-07-26 fl Automatically hook into Tkinter (if possible)
# 99-08-15 fl Hook uses _imagingtk instead of _imaging
#
# Copyright (c) 1997-1999 by Secret Labs AB
# Copyright (c) 1996-1997 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import tkinter
from io import BytesIO
from . import Image
# --------------------------------------------------------------------
# Check for Tkinter interface hooks
_pilbitmap_ok = None
def _pilbitmap_check():
global _pilbitmap_ok
if _pilbitmap_ok is None:
try:
im = Image.new("1", (1, 1))
tkinter.BitmapImage(data=f"PIL:{im.im.id}")
_pilbitmap_ok = 1
except tkinter.TclError:
_pilbitmap_ok = 0
return _pilbitmap_ok
def _get_image_from_kw(kw):
source = None
if "file" in kw:
source = kw.pop("file")
elif "data" in kw:
source = BytesIO(kw.pop("data"))
if source:
return Image.open(source)
def _pyimagingtkcall(command, photo, id):
tk = photo.tk
try:
tk.call(command, photo, id)
except tkinter.TclError:
# activate Tkinter hook
# may raise an error if it cannot attach to Tkinter
from . import _imagingtk
_imagingtk.tkinit(tk.interpaddr())
tk.call(command, photo, id)
# --------------------------------------------------------------------
# PhotoImage
class PhotoImage:
"""
A Tkinter-compatible photo image. This can be used
everywhere Tkinter expects an image object. If the image is an RGBA
image, pixels having alpha 0 are treated as transparent.
The constructor takes either a PIL image, or a mode and a size.
Alternatively, you can use the ``file`` or ``data`` options to initialize
the photo image object.
:param image: Either a PIL image, or a mode string. If a mode string is
used, a size must also be given.
:param size: If the first argument is a mode string, this defines the size
of the image.
:keyword file: A filename to load the image from (using
``Image.open(file)``).
:keyword data: An 8-bit string containing image data (as loaded from an
image file).
"""
def __init__(self, image=None, size=None, **kw):
# Tk compatibility: file or data
if image is None:
image = _get_image_from_kw(kw)
if hasattr(image, "mode") and hasattr(image, "size"):
# got an image instead of a mode
mode = image.mode
if mode == "P":
# palette mapped data
image.apply_transparency()
image.load()
try:
mode = image.palette.mode
except AttributeError:
mode = "RGB" # default
size = image.size
kw["width"], kw["height"] = size
else:
mode = image
image = None
if mode not in ["1", "L", "RGB", "RGBA"]:
mode = Image.getmodebase(mode)
self.__mode = mode
self.__size = size
self.__photo = tkinter.PhotoImage(**kw)
self.tk = self.__photo.tk
if image:
self.paste(image)
def __del__(self):
name = self.__photo.name
self.__photo.name = None
try:
self.__photo.tk.call("image", "delete", name)
except Exception:
pass # ignore internal errors
def __str__(self):
"""
Get the Tkinter photo image identifier. This method is automatically
called by Tkinter whenever a PhotoImage object is passed to a Tkinter
method.
:return: A Tkinter photo image identifier (a string).
"""
return str(self.__photo)
def width(self):
"""
Get the width of the image.
:return: The width, in pixels.
"""
return self.__size[0]
def height(self):
"""
Get the height of the image.
:return: The height, in pixels.
"""
return self.__size[1]
def paste(self, im):
"""
Paste a PIL image into the photo image. Note that this can
be very slow if the photo image is displayed.
:param im: A PIL image. The size must match the target region. If the
mode does not match, the image is converted to the mode of
the bitmap image.
"""
# convert to blittable
im.load()
image = im.im
if image.isblock() and im.mode == self.__mode:
block = image
else:
block = image.new_block(self.__mode, im.size)
image.convert2(block, image) # convert directly between buffers
_pyimagingtkcall("PyImagingPhoto", self.__photo, block.id)
# --------------------------------------------------------------------
# BitmapImage
class BitmapImage:
"""
A Tkinter-compatible bitmap image. This can be used everywhere Tkinter
expects an image object.
The given image must have mode "1". Pixels having value 0 are treated as
transparent. Options, if any, are passed on to Tkinter. The most commonly
used option is ``foreground``, which is used to specify the color for the
non-transparent parts. See the Tkinter documentation for information on
how to specify colours.
:param image: A PIL image.
"""
def __init__(self, image=None, **kw):
# Tk compatibility: file or data
if image is None:
image = _get_image_from_kw(kw)
self.__mode = image.mode
self.__size = image.size
if _pilbitmap_check():
# fast way (requires the pilbitmap booster patch)
image.load()
kw["data"] = f"PIL:{image.im.id}"
self.__im = image # must keep a reference
else:
# slow but safe way
kw["data"] = image.tobitmap()
self.__photo = tkinter.BitmapImage(**kw)
def __del__(self):
name = self.__photo.name
self.__photo.name = None
try:
self.__photo.tk.call("image", "delete", name)
except Exception:
pass # ignore internal errors
def width(self):
"""
Get the width of the image.
:return: The width, in pixels.
"""
return self.__size[0]
def height(self):
"""
Get the height of the image.
:return: The height, in pixels.
"""
return self.__size[1]
def __str__(self):
"""
Get the Tkinter bitmap image identifier. This method is automatically
called by Tkinter whenever a BitmapImage object is passed to a Tkinter
method.
:return: A Tkinter bitmap image identifier (a string).
"""
return str(self.__photo)
def getimage(photo):
"""Copies the contents of a PhotoImage to a PIL image memory."""
im = Image.new("RGBA", (photo.width(), photo.height()))
block = im.im
_pyimagingtkcall("PyImagingPhotoGet", photo, block.id)
return im
def _show(image, title):
"""Helper for the Image.show method."""
class UI(tkinter.Label):
def __init__(self, master, im):
if im.mode == "1":
self.image = BitmapImage(im, foreground="white", master=master)
else:
self.image = PhotoImage(im, master=master)
super().__init__(master, image=self.image, bg="black", bd=0)
if not tkinter._default_root:
msg = "tkinter not initialized"
raise OSError(msg)
top = tkinter.Toplevel()
if title:
top.title(title)
UI(top, image).pack()

View File

@ -0,0 +1,74 @@
#
# The Python Imaging Library
# $Id$
#
# BUFR stub adapter
#
# Copyright (c) 1996-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from . import Image, ImageFile
_handler = None
def register_handler(handler):
"""
Install application-specific BUFR image handler.
:param handler: Handler object.
"""
global _handler
_handler = handler
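# Hypothetical handler sketch. From the code below, the stub calls
# handler.open(im) during _open() and handler.save(im, fp, filename) during
# _save(); any further loading interface is assumed to follow
# ImageFile.StubImageFile.
class _PrintingBufrHandler:
    def open(self, im):
        # a real handler would parse im.fp here and set im._mode / im._size
        print("opening BUFR stub:", im)

    def save(self, im, fp, filename):
        raise OSError("saving BUFR is not implemented in this sketch")

# register_handler(_PrintingBufrHandler())  # would install it module-wide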
# --------------------------------------------------------------------
# Image adapter
def _accept(prefix):
return prefix[:4] == b"BUFR" or prefix[:4] == b"ZCZC"
class BufrStubImageFile(ImageFile.StubImageFile):
format = "BUFR"
format_description = "BUFR"
def _open(self):
offset = self.fp.tell()
if not _accept(self.fp.read(4)):
msg = "Not a BUFR file"
raise SyntaxError(msg)
self.fp.seek(offset)
# make something up
self._mode = "F"
self._size = 1, 1
loader = self._load()
if loader:
loader.open(self)
def _load(self):
return _handler
def _save(im, fp, filename):
if _handler is None or not hasattr(_handler, "save"):
msg = "BUFR save handler not installed"
raise OSError(msg)
_handler.save(im, fp, filename)
# --------------------------------------------------------------------
# Registry
Image.register_open(BufrStubImageFile.format, BufrStubImageFile, _accept)
Image.register_save(BufrStubImageFile.format, _save)
Image.register_extension(BufrStubImageFile.format, ".bufr")

View File

@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.43.0)
Root-Is-Purelib: false
Tag: cp311-cp311-linux_x86_64

View File

@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.40.0)
Root-Is-Purelib: true
Tag: py3-none-any

View File

@ -0,0 +1,74 @@
#
# The Python Imaging Library
# $Id$
#
# HDF5 stub adapter
#
# Copyright (c) 2000-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from . import Image, ImageFile
_handler = None
def register_handler(handler):
"""
Install application-specific HDF5 image handler.
:param handler: Handler object.
"""
global _handler
_handler = handler
# --------------------------------------------------------------------
# Image adapter
def _accept(prefix):
return prefix[:8] == b"\x89HDF\r\n\x1a\n"
class HDF5StubImageFile(ImageFile.StubImageFile):
format = "HDF5"
format_description = "HDF5"
def _open(self):
offset = self.fp.tell()
if not _accept(self.fp.read(8)):
msg = "Not an HDF file"
raise SyntaxError(msg)
self.fp.seek(offset)
# make something up
self._mode = "F"
self._size = 1, 1
loader = self._load()
if loader:
loader.open(self)
def _load(self):
return _handler
def _save(im, fp, filename):
if _handler is None or not hasattr(_handler, "save"):
msg = "HDF5 save handler not installed"
raise OSError(msg)
_handler.save(im, fp, filename)
# --------------------------------------------------------------------
# Registry
Image.register_open(HDF5StubImageFile.format, HDF5StubImageFile, _accept)
Image.register_save(HDF5StubImageFile.format, _save)
Image.register_extensions(HDF5StubImageFile.format, [".h5", ".hdf"])

View File

@ -0,0 +1,303 @@
#
# The Python Imaging Library.
# $Id$
#
# PDF (Acrobat) file handling
#
# History:
# 1996-07-16 fl Created
# 1997-01-18 fl Fixed header
# 2004-02-21 fl Fixes for 1/L/CMYK images, etc.
# 2004-02-24 fl Fixes for 1 and P images.
#
# Copyright (c) 1997-2004 by Secret Labs AB. All rights reserved.
# Copyright (c) 1996-1997 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
##
# Image plugin for PDF images (output only).
##
from __future__ import annotations
import io
import math
import os
import time
from . import Image, ImageFile, ImageSequence, PdfParser, __version__, features
#
# --------------------------------------------------------------------
# object ids:
# 1. catalogue
# 2. pages
# 3. image
# 4. page
# 5. page contents
def _save_all(im, fp, filename):
_save(im, fp, filename, save_all=True)
##
# (Internal) Image save plugin for the PDF format.
def _write_image(im, filename, existing_pdf, image_refs):
# FIXME: Should replace ASCIIHexDecode with RunLengthDecode
# (packbits) or LZWDecode (tiff/lzw compression). Note that
# PDF 1.2 also supports Flatedecode (zip compression).
params = None
decode = None
#
# Get image characteristics
width, height = im.size
dict_obj = {"BitsPerComponent": 8}
if im.mode == "1":
if features.check("libtiff"):
filter = "CCITTFaxDecode"
dict_obj["BitsPerComponent"] = 1
params = PdfParser.PdfArray(
[
PdfParser.PdfDict(
{
"K": -1,
"BlackIs1": True,
"Columns": width,
"Rows": height,
}
)
]
)
else:
filter = "DCTDecode"
dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceGray")
procset = "ImageB" # grayscale
elif im.mode == "L":
filter = "DCTDecode"
# params = f"<< /Predictor 15 /Columns {width-2} >>"
dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceGray")
procset = "ImageB" # grayscale
elif im.mode == "LA":
filter = "JPXDecode"
# params = f"<< /Predictor 15 /Columns {width-2} >>"
procset = "ImageB" # grayscale
dict_obj["SMaskInData"] = 1
elif im.mode == "P":
filter = "ASCIIHexDecode"
palette = im.getpalette()
dict_obj["ColorSpace"] = [
PdfParser.PdfName("Indexed"),
PdfParser.PdfName("DeviceRGB"),
len(palette) // 3 - 1,
PdfParser.PdfBinary(palette),
]
procset = "ImageI" # indexed color
if "transparency" in im.info:
smask = im.convert("LA").getchannel("A")
smask.encoderinfo = {}
image_ref = _write_image(smask, filename, existing_pdf, image_refs)[0]
dict_obj["SMask"] = image_ref
elif im.mode == "RGB":
filter = "DCTDecode"
dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceRGB")
procset = "ImageC" # color images
elif im.mode == "RGBA":
filter = "JPXDecode"
procset = "ImageC" # color images
dict_obj["SMaskInData"] = 1
elif im.mode == "CMYK":
filter = "DCTDecode"
dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceCMYK")
procset = "ImageC" # color images
decode = [1, 0, 1, 0, 1, 0, 1, 0]
else:
msg = f"cannot save mode {im.mode}"
raise ValueError(msg)
#
# image
op = io.BytesIO()
if filter == "ASCIIHexDecode":
ImageFile._save(im, op, [("hex", (0, 0) + im.size, 0, im.mode)])
elif filter == "CCITTFaxDecode":
im.save(
op,
"TIFF",
compression="group4",
# use a single strip
strip_size=math.ceil(width / 8) * height,
)
elif filter == "DCTDecode":
Image.SAVE["JPEG"](im, op, filename)
elif filter == "JPXDecode":
del dict_obj["BitsPerComponent"]
Image.SAVE["JPEG2000"](im, op, filename)
else:
msg = f"unsupported PDF filter ({filter})"
raise ValueError(msg)
stream = op.getvalue()
if filter == "CCITTFaxDecode":
stream = stream[8:]
filter = PdfParser.PdfArray([PdfParser.PdfName(filter)])
else:
filter = PdfParser.PdfName(filter)
image_ref = image_refs.pop(0)
existing_pdf.write_obj(
image_ref,
stream=stream,
Type=PdfParser.PdfName("XObject"),
Subtype=PdfParser.PdfName("Image"),
Width=width, # * 72.0 / x_resolution,
Height=height, # * 72.0 / y_resolution,
Filter=filter,
Decode=decode,
DecodeParms=params,
**dict_obj,
)
return image_ref, procset
def _save(im, fp, filename, save_all=False):
is_appending = im.encoderinfo.get("append", False)
if is_appending:
existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode="r+b")
else:
existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode="w+b")
dpi = im.encoderinfo.get("dpi")
if dpi:
x_resolution = dpi[0]
y_resolution = dpi[1]
else:
x_resolution = y_resolution = im.encoderinfo.get("resolution", 72.0)
info = {
"title": (
None if is_appending else os.path.splitext(os.path.basename(filename))[0]
),
"author": None,
"subject": None,
"keywords": None,
"creator": None,
"producer": None,
"creationDate": None if is_appending else time.gmtime(),
"modDate": None if is_appending else time.gmtime(),
}
for k, default in info.items():
v = im.encoderinfo.get(k) if k in im.encoderinfo else default
if v:
existing_pdf.info[k[0].upper() + k[1:]] = v
#
# make sure image data is available
im.load()
existing_pdf.start_writing()
existing_pdf.write_header()
existing_pdf.write_comment(f"created by Pillow {__version__} PDF driver")
#
# pages
ims = [im]
if save_all:
append_images = im.encoderinfo.get("append_images", [])
for append_im in append_images:
append_im.encoderinfo = im.encoderinfo.copy()
ims.append(append_im)
number_of_pages = 0
image_refs = []
page_refs = []
contents_refs = []
for im in ims:
im_number_of_pages = 1
if save_all:
try:
im_number_of_pages = im.n_frames
except AttributeError:
# Image format does not have n_frames.
# It is a single frame image
pass
number_of_pages += im_number_of_pages
for i in range(im_number_of_pages):
image_refs.append(existing_pdf.next_object_id(0))
if im.mode == "P" and "transparency" in im.info:
image_refs.append(existing_pdf.next_object_id(0))
page_refs.append(existing_pdf.next_object_id(0))
contents_refs.append(existing_pdf.next_object_id(0))
existing_pdf.pages.append(page_refs[-1])
#
# catalog and list of pages
existing_pdf.write_catalog()
page_number = 0
for im_sequence in ims:
im_pages = ImageSequence.Iterator(im_sequence) if save_all else [im_sequence]
for im in im_pages:
image_ref, procset = _write_image(im, filename, existing_pdf, image_refs)
#
# page
existing_pdf.write_page(
page_refs[page_number],
Resources=PdfParser.PdfDict(
ProcSet=[PdfParser.PdfName("PDF"), PdfParser.PdfName(procset)],
XObject=PdfParser.PdfDict(image=image_ref),
),
MediaBox=[
0,
0,
im.width * 72.0 / x_resolution,
im.height * 72.0 / y_resolution,
],
Contents=contents_refs[page_number],
)
#
# page contents
page_contents = b"q %f 0 0 %f 0 0 cm /image Do Q\n" % (
im.width * 72.0 / x_resolution,
im.height * 72.0 / y_resolution,
)
existing_pdf.write_obj(contents_refs[page_number], stream=page_contents)
page_number += 1
#
# trailer
existing_pdf.write_xref_and_trailer()
if hasattr(fp, "flush"):
fp.flush()
existing_pdf.close()
#
# --------------------------------------------------------------------
Image.register_save("PDF", _save)
Image.register_save_all("PDF", _save_all)
Image.register_extension("PDF", ".pdf")
Image.register_mime("PDF", "application/pdf")

View File

@ -0,0 +1,5 @@
[Runtime]
name=com.jeffser.Alpaca.Debug
[ExtensionOf]
ref=app/com.jeffser.Alpaca/x86_64/master

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16"><g color="#000" fill="#2e3436"><path d="M7.188 2.281c-.094.056-.192.125-.29.19L5.566 3.803a1.684 1.684 0 11-2.17 2.17L2.332 7.037c.506-.069 1.017-.136 1.2.026.242.214.139 1.031.155 1.656.213.088.427.171.657.219.04.008.085-.007.125 0 .337-.525.683-1.288 1-1.344.322-.057.905.562 1.406.937a3.67 3.67 0 00.656-.468c-.195-.595-.594-1.369-.437-1.657.158-.29 1.019-.37 1.625-.531.028-.183.062-.371.062-.562 0-.075-.027-.146-.031-.22-.587-.217-1.435-.385-1.562-.687-.128-.302.34-1.021.593-1.593a3.722 3.722 0 00-.593-.532zm3.875 3.25c-.165.475-.305 1.086-.47 1.563-.43.047-.84.14-1.218.312-.38-.322-.787-.773-1.156-1.093a5.562 5.562 0 00-.688.468c.177.46.453 1.001.625 1.469-.298.309-.531.67-.719 1.063-.494 0-1.102-.084-1.593-.094a5.68 5.68 0 00-.219.812c.435.24 1.006.468 1.438.72-.006.093-.032.185-.032.28 0 .333.049.66.125.97-.382.304-.898.63-1.28.937.015.044.04.083.058.127l.613.613c.417-.1.868-.223 1.266-.303.248.343.532.626.875.875-.027.135-.068.283-.104.428.174-.063.34-.155.482-.297l1.432-1.432a1.994 1.994 0 01.533-3.918c.919 0 1.684.623 1.918 1.467l1.338-1.338c.06-.06.11-.124.156-.191-.035-.062-.06-.13-.1-.188.096-.152.205-.31.315-.47.017-.348-.1-.7-.37-.971l-.177-.176c-.28.192-.561.387-.83.555-.345-.233-.746-.383-1.156-.5-.077-.507-.107-1.132-.187-1.625a5.44 5.44 0 00-.875-.063zm-9.247.608c-.087.068-.173.138-.254.205l.014.035z" style="marker:none" overflow="visible"/><path d="M8.707.293a1 1 0 00-1.415 0l-6.999 7a1 1 0 000 1.413l7 7.001a1 1 0 001.415 0l7-7a1 1 0 000-1.413zm-.708 2.121l5.587 5.587L8 13.586 2.414 7.999z" style="line-height:normal;font-variant-ligatures:normal;font-variant-position:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-alternates:normal;font-feature-settings:normal;text-indent:0;text-align:start;text-decoration-line:none;text-decoration-style:solid;text-decoration-color:#000;text-transform:none;text-orientation:mixed;shape-padding:0;isolation:auto;mix-blend-mode:normal;marker:none" font-weight="400" font-family="sans-serif" overflow="visible"/></g></svg>

(SVG image, 2.0 KiB)

Some files were not shown because too many files have changed in this diff.