Added support for multiple tags on a single model

jeffser 2024-05-18 15:52:50 -06:00
parent 8ddce304b2
commit 02acbb2d70
571 changed files with 76910 additions and 127 deletions

0
.flatpak-builder/cache/.lock vendored Normal file

4
.flatpak-builder/cache/config vendored Normal file

@@ -0,0 +1,4 @@
[core]
repo_version=1
mode=bare-user-only
min-free-space-percent=0


@@ -0,0 +1,30 @@
The Python Imaging Library (PIL) is
Copyright © 1997-2011 by Secret Labs AB
Copyright © 1995-2011 by Fredrik Lundh and contributors
Pillow is the friendly PIL fork. It is
Copyright © 2010-2024 by Jeffrey A. Clark and contributors
Like PIL, Pillow is licensed under the open source HPND License:
By obtaining, using, and/or copying this software and/or its associated
documentation, you agree that you have read, understood, and will comply
with the following terms and conditions:
Permission to use, copy, modify and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appears in all copies, and that
both that copyright notice and this permission notice appear in supporting
documentation, and that the name of Secret Labs AB or the author not be
used in advertising or publicity pertaining to distribution of the software
without specific, written prior permission.
SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.


@@ -0,0 +1,626 @@
import logging
from os import PathLike
from typing import BinaryIO, List, Optional, Set, Union
from .cd import (
coherence_ratio,
encoding_languages,
mb_encoding_languages,
merge_coherence_ratios,
)
from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
from .md import mess_ratio
from .models import CharsetMatch, CharsetMatches
from .utils import (
any_specified_encoding,
cut_sequence_chunks,
iana_name,
identify_sig_or_bom,
is_cp_similar,
is_multi_byte_encoding,
should_strip_sig_or_bom,
)
# Will most likely be controversial
# logging.addLevelName(TRACE, "TRACE")
logger = logging.getLogger("charset_normalizer")
explain_handler = logging.StreamHandler()
explain_handler.setFormatter(
logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
)
def from_bytes(
sequences: Union[bytes, bytearray],
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.2,
cp_isolation: Optional[List[str]] = None,
cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
language_threshold: float = 0.1,
enable_fallback: bool = True,
) -> CharsetMatches:
"""
Given a raw bytes sequence, return the best possibles charset usable to render str objects.
If there is no results, it is a strong indicator that the source is binary/not text.
By default, the process will extract 5 blocks of 512o each to assess the mess and coherence of a given sequence.
And will give up a particular code page after 20% of measured mess. Those criteria are customizable at will.
The preemptive behavior DOES NOT replace the traditional detection workflow, it prioritize a particular code page
but never take it for granted. Can improve the performance.
You may want to focus your attention to some code page or/and not others, use cp_isolation and cp_exclusion for that
purpose.
This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32.
By default the library does not setup any handler other than the NullHandler, if you choose to set the 'explain'
toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging.
Custom logging format and handler can be set manually.
"""
if not isinstance(sequences, (bytearray, bytes)):
raise TypeError(
"Expected object of type bytes or bytearray, got: {0}".format(
type(sequences)
)
)
if explain:
previous_logger_level: int = logger.level
logger.addHandler(explain_handler)
logger.setLevel(TRACE)
length: int = len(sequences)
if length == 0:
logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level or logging.WARNING)
return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])
if cp_isolation is not None:
logger.log(
TRACE,
"cp_isolation is set. use this flag for debugging purpose. "
"limited list of encoding allowed : %s.",
", ".join(cp_isolation),
)
cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
else:
cp_isolation = []
if cp_exclusion is not None:
logger.log(
TRACE,
"cp_exclusion is set. use this flag for debugging purpose. "
"limited list of encoding excluded : %s.",
", ".join(cp_exclusion),
)
cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
else:
cp_exclusion = []
if length <= (chunk_size * steps):
logger.log(
TRACE,
"override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
steps,
chunk_size,
length,
)
steps = 1
chunk_size = length
if steps > 1 and length / steps < chunk_size:
chunk_size = int(length / steps)
is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE
is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE
if is_too_small_sequence:
logger.log(
TRACE,
"Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
length
),
)
elif is_too_large_sequence:
logger.log(
TRACE,
"Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
length
),
)
prioritized_encodings: List[str] = []
specified_encoding: Optional[str] = (
any_specified_encoding(sequences) if preemptive_behaviour else None
)
if specified_encoding is not None:
prioritized_encodings.append(specified_encoding)
logger.log(
TRACE,
"Detected declarative mark in sequence. Priority +1 given for %s.",
specified_encoding,
)
tested: Set[str] = set()
tested_but_hard_failure: List[str] = []
tested_but_soft_failure: List[str] = []
fallback_ascii: Optional[CharsetMatch] = None
fallback_u8: Optional[CharsetMatch] = None
fallback_specified: Optional[CharsetMatch] = None
results: CharsetMatches = CharsetMatches()
sig_encoding, sig_payload = identify_sig_or_bom(sequences)
if sig_encoding is not None:
prioritized_encodings.append(sig_encoding)
logger.log(
TRACE,
"Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
len(sig_payload),
sig_encoding,
)
prioritized_encodings.append("ascii")
if "utf_8" not in prioritized_encodings:
prioritized_encodings.append("utf_8")
for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
if cp_isolation and encoding_iana not in cp_isolation:
continue
if cp_exclusion and encoding_iana in cp_exclusion:
continue
if encoding_iana in tested:
continue
tested.add(encoding_iana)
decoded_payload: Optional[str] = None
bom_or_sig_available: bool = sig_encoding == encoding_iana
strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom(
encoding_iana
)
if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
logger.log(
TRACE,
"Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
encoding_iana,
)
continue
if encoding_iana in {"utf_7"} and not bom_or_sig_available:
logger.log(
TRACE,
"Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.",
encoding_iana,
)
continue
try:
is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana)
except (ModuleNotFoundError, ImportError):
logger.log(
TRACE,
"Encoding %s does not provide an IncrementalDecoder",
encoding_iana,
)
continue
try:
if is_too_large_sequence and is_multi_byte_decoder is False:
str(
sequences[: int(50e4)]
if strip_sig_or_bom is False
else sequences[len(sig_payload) : int(50e4)],
encoding=encoding_iana,
)
else:
decoded_payload = str(
sequences
if strip_sig_or_bom is False
else sequences[len(sig_payload) :],
encoding=encoding_iana,
)
except (UnicodeDecodeError, LookupError) as e:
if not isinstance(e, LookupError):
logger.log(
TRACE,
"Code page %s does not fit given bytes sequence at ALL. %s",
encoding_iana,
str(e),
)
tested_but_hard_failure.append(encoding_iana)
continue
similar_soft_failure_test: bool = False
for encoding_soft_failed in tested_but_soft_failure:
if is_cp_similar(encoding_iana, encoding_soft_failed):
similar_soft_failure_test = True
break
if similar_soft_failure_test:
logger.log(
TRACE,
"%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
encoding_iana,
encoding_soft_failed,
)
continue
r_ = range(
0 if not bom_or_sig_available else len(sig_payload),
length,
int(length / steps),
)
multi_byte_bonus: bool = (
is_multi_byte_decoder
and decoded_payload is not None
and len(decoded_payload) < length
)
if multi_byte_bonus:
logger.log(
TRACE,
"Code page %s is a multi byte encoding table and it appear that at least one character "
"was encoded using n-bytes.",
encoding_iana,
)
max_chunk_gave_up: int = int(len(r_) / 4)
max_chunk_gave_up = max(max_chunk_gave_up, 2)
early_stop_count: int = 0
lazy_str_hard_failure = False
md_chunks: List[str] = []
md_ratios = []
try:
for chunk in cut_sequence_chunks(
sequences,
encoding_iana,
r_,
chunk_size,
bom_or_sig_available,
strip_sig_or_bom,
sig_payload,
is_multi_byte_decoder,
decoded_payload,
):
md_chunks.append(chunk)
md_ratios.append(
mess_ratio(
chunk,
threshold,
explain is True and 1 <= len(cp_isolation) <= 2,
)
)
if md_ratios[-1] >= threshold:
early_stop_count += 1
if (early_stop_count >= max_chunk_gave_up) or (
bom_or_sig_available and strip_sig_or_bom is False
):
break
except (
UnicodeDecodeError
) as e: # Lazy str loading may have missed something there
logger.log(
TRACE,
"LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
encoding_iana,
str(e),
)
early_stop_count = max_chunk_gave_up
lazy_str_hard_failure = True
# We might want to check the sequence again with the whole content,
# but only if the initial MD tests pass.
if (
not lazy_str_hard_failure
and is_too_large_sequence
and not is_multi_byte_decoder
):
try:
sequences[int(50e3) :].decode(encoding_iana, errors="strict")
except UnicodeDecodeError as e:
logger.log(
TRACE,
"LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
encoding_iana,
str(e),
)
tested_but_hard_failure.append(encoding_iana)
continue
mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
tested_but_soft_failure.append(encoding_iana)
logger.log(
TRACE,
"%s was excluded because of initial chaos probing. Gave up %i time(s). "
"Computed mean chaos is %f %%.",
encoding_iana,
early_stop_count,
round(mean_mess_ratio * 100, ndigits=3),
)
# Preparing those fallbacks in case we got nothing.
if (
enable_fallback
and encoding_iana in ["ascii", "utf_8", specified_encoding]
and not lazy_str_hard_failure
):
fallback_entry = CharsetMatch(
sequences, encoding_iana, threshold, False, [], decoded_payload
)
if encoding_iana == specified_encoding:
fallback_specified = fallback_entry
elif encoding_iana == "ascii":
fallback_ascii = fallback_entry
else:
fallback_u8 = fallback_entry
continue
logger.log(
TRACE,
"%s passed initial chaos probing. Mean measured chaos is %f %%",
encoding_iana,
round(mean_mess_ratio * 100, ndigits=3),
)
if not is_multi_byte_decoder:
target_languages: List[str] = encoding_languages(encoding_iana)
else:
target_languages = mb_encoding_languages(encoding_iana)
if target_languages:
logger.log(
TRACE,
"{} should target any language(s) of {}".format(
encoding_iana, str(target_languages)
),
)
cd_ratios = []
# We shall skip the CD when it's about ASCII;
# most of the time it's not relevant to run "language-detection" on it.
if encoding_iana != "ascii":
for chunk in md_chunks:
chunk_languages = coherence_ratio(
chunk,
language_threshold,
",".join(target_languages) if target_languages else None,
)
cd_ratios.append(chunk_languages)
cd_ratios_merged = merge_coherence_ratios(cd_ratios)
if cd_ratios_merged:
logger.log(
TRACE,
"We detected language {} using {}".format(
cd_ratios_merged, encoding_iana
),
)
results.append(
CharsetMatch(
sequences,
encoding_iana,
mean_mess_ratio,
bom_or_sig_available,
cd_ratios_merged,
decoded_payload,
)
)
if (
encoding_iana in [specified_encoding, "ascii", "utf_8"]
and mean_mess_ratio < 0.1
):
logger.debug(
"Encoding detection: %s is most likely the one.", encoding_iana
)
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level)
return CharsetMatches([results[encoding_iana]])
if encoding_iana == sig_encoding:
logger.debug(
"Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
"the beginning of the sequence.",
encoding_iana,
)
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level)
return CharsetMatches([results[encoding_iana]])
if len(results) == 0:
if fallback_u8 or fallback_ascii or fallback_specified:
logger.log(
TRACE,
"Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
)
if fallback_specified:
logger.debug(
"Encoding detection: %s will be used as a fallback match",
fallback_specified.encoding,
)
results.append(fallback_specified)
elif (
(fallback_u8 and fallback_ascii is None)
or (
fallback_u8
and fallback_ascii
and fallback_u8.fingerprint != fallback_ascii.fingerprint
)
or (fallback_u8 is not None)
):
logger.debug("Encoding detection: utf_8 will be used as a fallback match")
results.append(fallback_u8)
elif fallback_ascii:
logger.debug("Encoding detection: ascii will be used as a fallback match")
results.append(fallback_ascii)
if results:
logger.debug(
"Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
results.best().encoding, # type: ignore
len(results) - 1,
)
else:
logger.debug("Encoding detection: Unable to determine any suitable charset.")
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level)
return results
def from_fp(
fp: BinaryIO,
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.20,
cp_isolation: Optional[List[str]] = None,
cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
language_threshold: float = 0.1,
enable_fallback: bool = True,
) -> CharsetMatches:
"""
Same thing than the function from_bytes but using a file pointer that is already ready.
Will not close the file pointer.
"""
return from_bytes(
fp.read(),
steps,
chunk_size,
threshold,
cp_isolation,
cp_exclusion,
preemptive_behaviour,
explain,
language_threshold,
enable_fallback,
)
def from_path(
path: Union[str, bytes, PathLike], # type: ignore[type-arg]
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.20,
cp_isolation: Optional[List[str]] = None,
cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
language_threshold: float = 0.1,
enable_fallback: bool = True,
) -> CharsetMatches:
"""
Same thing than the function from_bytes but with one extra step. Opening and reading given file path in binary mode.
Can raise IOError.
"""
with open(path, "rb") as fp:
return from_fp(
fp,
steps,
chunk_size,
threshold,
cp_isolation,
cp_exclusion,
preemptive_behaviour,
explain,
language_threshold,
enable_fallback,
)
def is_binary(
fp_or_path_or_payload: Union[PathLike, str, BinaryIO, bytes], # type: ignore[type-arg]
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.20,
cp_isolation: Optional[List[str]] = None,
cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
language_threshold: float = 0.1,
enable_fallback: bool = False,
) -> bool:
"""
Detect if the given input (file, bytes, or path) points to a binary file. aka. not a string.
Based on the same main heuristic algorithms and default kwargs at the sole exception that fallbacks match
are disabled to be stricter around ASCII-compatible but unlikely to be a string.
"""
if isinstance(fp_or_path_or_payload, (str, PathLike)):
guesses = from_path(
fp_or_path_or_payload,
steps=steps,
chunk_size=chunk_size,
threshold=threshold,
cp_isolation=cp_isolation,
cp_exclusion=cp_exclusion,
preemptive_behaviour=preemptive_behaviour,
explain=explain,
language_threshold=language_threshold,
enable_fallback=enable_fallback,
)
elif isinstance(
fp_or_path_or_payload,
(
bytes,
bytearray,
),
):
guesses = from_bytes(
fp_or_path_or_payload,
steps=steps,
chunk_size=chunk_size,
threshold=threshold,
cp_isolation=cp_isolation,
cp_exclusion=cp_exclusion,
preemptive_behaviour=preemptive_behaviour,
explain=explain,
language_threshold=language_threshold,
enable_fallback=enable_fallback,
)
else:
guesses = from_fp(
fp_or_path_or_payload,
steps=steps,
chunk_size=chunk_size,
threshold=threshold,
cp_isolation=cp_isolation,
cp_exclusion=cp_exclusion,
preemptive_behaviour=preemptive_behaviour,
explain=explain,
language_threshold=language_threshold,
enable_fallback=enable_fallback,
)
return not guesses
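
A minimal usage sketch (not part of the vendored module) showing how from_bytes() is typically driven; the sample payload is an assumption for illustration:

from charset_normalizer import from_bytes

payload = "Déjà vu: encoding detection on raw bytes".encode("utf_8")
best_guess = from_bytes(payload).best()
if best_guess is None:
    print("Probably binary content")
else:
    print(best_guess.encoding)  # the winning code page, e.g. "utf_8"
    print(str(best_guess))      # the payload decoded with that code page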


@@ -0,0 +1,78 @@
#
# The Python Imaging Library.
# $Id$
#
# Basic McIdas support for PIL
#
# History:
# 1997-05-05 fl Created (8-bit images only)
# 2009-03-08 fl Added 16/32-bit support.
#
# Thanks to Richard Jones and Craig Swank for specs and samples.
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import struct
from . import Image, ImageFile
def _accept(prefix: bytes) -> bool:
return prefix[:8] == b"\x00\x00\x00\x00\x00\x00\x00\x04"
##
# Image plugin for McIdas area images.
class McIdasImageFile(ImageFile.ImageFile):
format = "MCIDAS"
format_description = "McIdas area file"
def _open(self) -> None:
# parse area file directory
assert self.fp is not None
s = self.fp.read(256)
if not _accept(s) or len(s) != 256:
msg = "not an McIdas area file"
raise SyntaxError(msg)
self.area_descriptor_raw = s
self.area_descriptor = w = [0] + list(struct.unpack("!64i", s))
# get mode
if w[11] == 1:
mode = rawmode = "L"
elif w[11] == 2:
# FIXME: add memory map support
mode = "I"
rawmode = "I;16B"
elif w[11] == 4:
# FIXME: add memory map support
mode = "I"
rawmode = "I;32B"
else:
msg = "unsupported McIdas format"
raise SyntaxError(msg)
self._mode = mode
self._size = w[10], w[9]
offset = w[34] + w[15]
stride = w[15] + w[10] * w[11] * w[14]
self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride, 1))]
# --------------------------------------------------------------------
# registry
Image.register_open(McIdasImageFile.format, McIdasImageFile, _accept)
# no default extension
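
Because the plugin registers itself with Image.register_open(), an area file opens through the normal Image.open() entry point; a hypothetical sketch (file name assumed):

from PIL import Image

with Image.open("goes12.area") as im:   # hypothetical McIdas area file
    print(im.format, im.mode, im.size)  # "MCIDAS", "L" or "I", (width, height)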


@@ -0,0 +1,101 @@
from __future__ import annotations
import http.client as httplib
from email.errors import MultipartInvariantViolationDefect, StartBoundaryNotFoundDefect
from ..exceptions import HeaderParsingError
def is_fp_closed(obj: object) -> bool:
"""
Checks whether a given file-like object is closed.
:param obj:
The file-like object to check.
"""
try:
# Check `isclosed()` first, in case Python3 doesn't set `closed`.
# GH Issue #928
return obj.isclosed() # type: ignore[no-any-return, attr-defined]
except AttributeError:
pass
try:
# Check via the official file-like-object way.
return obj.closed # type: ignore[no-any-return, attr-defined]
except AttributeError:
pass
try:
# Check if the object is a container for another file-like object that
# gets released on exhaustion (e.g. HTTPResponse).
return obj.fp is None # type: ignore[attr-defined]
except AttributeError:
pass
raise ValueError("Unable to determine whether fp is closed.")
def assert_header_parsing(headers: httplib.HTTPMessage) -> None:
"""
Asserts whether all headers have been successfully parsed.
Extracts encountered errors from the result of parsing headers.
Only works on Python 3.
:param http.client.HTTPMessage headers: Headers to verify.
:raises urllib3.exceptions.HeaderParsingError:
If parsing errors are found.
"""
# This will fail silently if we pass in the wrong kind of parameter.
# To make debugging easier add an explicit check.
if not isinstance(headers, httplib.HTTPMessage):
raise TypeError(f"expected httplib.Message, got {type(headers)}.")
unparsed_data = None
# get_payload is actually email.message.Message.get_payload;
# we're only interested in the result if it's not a multipart message
if not headers.is_multipart():
payload = headers.get_payload()
if isinstance(payload, (bytes, str)):
unparsed_data = payload
# httplib is assuming a response body is available
# when parsing headers even when httplib only sends
# header data to parse_headers() This results in
# defects on multipart responses in particular.
# See: https://github.com/urllib3/urllib3/issues/800
# So we ignore the following defects:
# - StartBoundaryNotFoundDefect:
# The claimed start boundary was never found.
# - MultipartInvariantViolationDefect:
# A message claimed to be a multipart but no subparts were found.
defects = [
defect
for defect in headers.defects
if not isinstance(
defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)
)
]
if defects or unparsed_data:
raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
def is_response_to_head(response: httplib.HTTPResponse) -> bool:
"""
Checks whether the request of a response has been a HEAD-request.
:param http.client.HTTPResponse response:
Response to check if the originating request
used 'HEAD' as a method.
"""
# FIXME: Can we do this somehow without accessing private httplib _method?
method_str = response._method # type: str # type: ignore[attr-defined]
return method_str.upper() == "HEAD"
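
A small sketch of is_fp_closed() against a plain BytesIO object, assuming the module is importable as urllib3.util.response as in current urllib3 releases:

import io
from urllib3.util.response import is_fp_closed

buf = io.BytesIO(b"payload")
print(is_fp_closed(buf))  # False: the buffer is still open
buf.close()
print(is_fp_closed(buf))  # True: .closed is now set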


@@ -0,0 +1,135 @@
#
# The Python Imaging Library.
# $Id$
#
# transform wrappers
#
# History:
# 2002-04-08 fl Created
#
# Copyright (c) 2002 by Secret Labs AB
# Copyright (c) 2002 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from typing import Sequence
from . import Image
class Transform(Image.ImageTransformHandler):
"""Base class for other transforms defined in :py:mod:`~PIL.ImageTransform`."""
method: Image.Transform
def __init__(self, data: Sequence[int]) -> None:
self.data = data
def getdata(self) -> tuple[Image.Transform, Sequence[int]]:
return self.method, self.data
def transform(
self,
size: tuple[int, int],
image: Image.Image,
**options: dict[str, str | int | tuple[int, ...] | list[int]],
) -> Image.Image:
"""Perform the transform. Called from :py:meth:`.Image.transform`."""
# can be overridden
method, data = self.getdata()
return image.transform(size, method, data, **options)
class AffineTransform(Transform):
"""
Define an affine image transform.
This function takes a 6-tuple (a, b, c, d, e, f) which contain the first
two rows from an affine transform matrix. For each pixel (x, y) in the
output image, the new value is taken from a position (a x + b y + c,
d x + e y + f) in the input image, rounded to nearest pixel.
This function can be used to scale, translate, rotate, and shear the
original image.
See :py:meth:`.Image.transform`
:param matrix: A 6-tuple (a, b, c, d, e, f) containing the first two rows
from an affine transform matrix.
"""
method = Image.Transform.AFFINE
class PerspectiveTransform(Transform):
"""
Define a perspective image transform.
This function takes an 8-tuple (a, b, c, d, e, f, g, h). For each pixel
(x, y) in the output image, the new value is taken from a position
((a x + b y + c) / (g x + h y + 1), (d x + e y + f) / (g x + h y + 1)) in
the input image, rounded to nearest pixel.
This function can be used to scale, translate, rotate, and shear the
original image.
See :py:meth:`.Image.transform`
:param matrix: An 8-tuple (a, b, c, d, e, f, g, h).
"""
method = Image.Transform.PERSPECTIVE
class ExtentTransform(Transform):
"""
Define a transform to extract a subregion from an image.
Maps a rectangle (defined by two corners) from the image to a rectangle of
the given size. The resulting image will contain data sampled from between
the corners, such that (x0, y0) in the input image will end up at (0,0) in
the output image, and (x1, y1) at size.
This method can be used to crop, stretch, shrink, or mirror an arbitrary
rectangle in the current image. It is slightly slower than crop, but about
as fast as a corresponding resize operation.
See :py:meth:`.Image.transform`
:param bbox: A 4-tuple (x0, y0, x1, y1) which specifies two points in the
input image's coordinate system. See :ref:`coordinate-system`.
"""
method = Image.Transform.EXTENT
class QuadTransform(Transform):
"""
Define a quad image transform.
Maps a quadrilateral (a region defined by four corners) from the image to a
rectangle of the given size.
See :py:meth:`.Image.transform`
:param xy: An 8-tuple (x0, y0, x1, y1, x2, y2, x3, y3) which contain the
upper left, lower left, lower right, and upper right corner of the
source quadrilateral.
"""
method = Image.Transform.QUAD
class MeshTransform(Transform):
"""
Define a mesh image transform. A mesh transform consists of one or more
individual quad transforms.
See :py:meth:`.Image.transform`
:param data: A list of (bbox, quad) tuples.
"""
method = Image.Transform.MESH
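
An illustrative use of ExtentTransform with Image.transform(); the input file name and region are assumptions:

from PIL import Image, ImageTransform

with Image.open("photo.png") as im:  # hypothetical source image
    # resample the upper-left 64x64 region of the source into a 128x128 output
    zoomed = im.transform((128, 128), ImageTransform.ExtentTransform((0, 0, 64, 64)))
    zoomed.save("zoomed.png")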


@@ -0,0 +1,110 @@
let Status = {
SUCCESS_HEADER: -1,
SUCCESS_EOF: -2,
ERROR_TIMEOUT: -3,
ERROR_EXCEPTION: -4,
};
let connections = {};
let nextConnectionID = 1;
const encoder = new TextEncoder();
self.addEventListener("message", async function (event) {
if (event.data.close) {
let connectionID = event.data.close;
delete connections[connectionID];
return;
} else if (event.data.getMore) {
let connectionID = event.data.getMore;
let { curOffset, value, reader, intBuffer, byteBuffer } =
connections[connectionID];
// if we still have some in buffer, then just send it back straight away
if (!value || curOffset >= value.length) {
// read another buffer if required
try {
let readResponse = await reader.read();
if (readResponse.done) {
// read everything - clear connection and return
delete connections[connectionID];
Atomics.store(intBuffer, 0, Status.SUCCESS_EOF);
Atomics.notify(intBuffer, 0);
// finished reading successfully
// return from event handler
return;
}
curOffset = 0;
connections[connectionID].value = readResponse.value;
value = readResponse.value;
} catch (error) {
console.log("Request exception:", error);
let errorBytes = encoder.encode(error.message);
let written = errorBytes.length;
byteBuffer.set(errorBytes);
intBuffer[1] = written;
Atomics.store(intBuffer, 0, Status.ERROR_EXCEPTION);
Atomics.notify(intBuffer, 0);
}
}
// send as much buffer as we can
let curLen = value.length - curOffset;
if (curLen > byteBuffer.length) {
curLen = byteBuffer.length;
}
byteBuffer.set(value.subarray(curOffset, curOffset + curLen), 0);
Atomics.store(intBuffer, 0, curLen); // store current length in bytes
Atomics.notify(intBuffer, 0);
curOffset += curLen;
connections[connectionID].curOffset = curOffset;
return;
} else {
// start fetch
let connectionID = nextConnectionID;
nextConnectionID += 1;
const intBuffer = new Int32Array(event.data.buffer);
const byteBuffer = new Uint8Array(event.data.buffer, 8);
try {
const response = await fetch(event.data.url, event.data.fetchParams);
// return the headers first via textencoder
var headers = [];
for (const pair of response.headers.entries()) {
headers.push([pair[0], pair[1]]);
}
let headerObj = {
headers: headers,
status: response.status,
connectionID,
};
const headerText = JSON.stringify(headerObj);
let headerBytes = encoder.encode(headerText);
let written = headerBytes.length;
byteBuffer.set(headerBytes);
intBuffer[1] = written;
// make a connection
connections[connectionID] = {
reader: response.body.getReader(),
intBuffer: intBuffer,
byteBuffer: byteBuffer,
value: undefined,
curOffset: 0,
};
// set header ready
Atomics.store(intBuffer, 0, Status.SUCCESS_HEADER);
Atomics.notify(intBuffer, 0);
// all fetching after this goes through a new postmessage call with getMore
// this allows for parallel requests
} catch (error) {
console.log("Request exception:", error);
let errorBytes = encoder.encode(error.message);
let written = errorBytes.length;
byteBuffer.set(errorBytes);
intBuffer[1] = written;
Atomics.store(intBuffer, 0, Status.ERROR_EXCEPTION);
Atomics.notify(intBuffer, 0);
}
}
});
self.postMessage({ inited: true });


@@ -0,0 +1,43 @@
requests-2.31.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
requests-2.31.0.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
requests-2.31.0.dist-info/METADATA,sha256=eCPokOnbb0FROLrfl0R5EpDvdufsb9CaN4noJH__54I,4634
requests-2.31.0.dist-info/RECORD,,
requests-2.31.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
requests-2.31.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
requests-2.31.0.dist-info/top_level.txt,sha256=fMSVmHfb5rbGOo6xv-O_tUX6j-WyixssE-SnwcDRxNQ,9
requests/__init__.py,sha256=LvmKhjIz8mHaKXthC2Mv5ykZ1d92voyf3oJpd-VuAig,4963
requests/__pycache__/__init__.cpython-311.pyc,,
requests/__pycache__/__version__.cpython-311.pyc,,
requests/__pycache__/_internal_utils.cpython-311.pyc,,
requests/__pycache__/adapters.cpython-311.pyc,,
requests/__pycache__/api.cpython-311.pyc,,
requests/__pycache__/auth.cpython-311.pyc,,
requests/__pycache__/certs.cpython-311.pyc,,
requests/__pycache__/compat.cpython-311.pyc,,
requests/__pycache__/cookies.cpython-311.pyc,,
requests/__pycache__/exceptions.cpython-311.pyc,,
requests/__pycache__/help.cpython-311.pyc,,
requests/__pycache__/hooks.cpython-311.pyc,,
requests/__pycache__/models.cpython-311.pyc,,
requests/__pycache__/packages.cpython-311.pyc,,
requests/__pycache__/sessions.cpython-311.pyc,,
requests/__pycache__/status_codes.cpython-311.pyc,,
requests/__pycache__/structures.cpython-311.pyc,,
requests/__pycache__/utils.cpython-311.pyc,,
requests/__version__.py,sha256=ssI3Ezt7PaxgkOW45GhtwPUclo_SO_ygtIm4A74IOfw,435
requests/_internal_utils.py,sha256=nMQymr4hs32TqVo5AbCrmcJEhvPUh7xXlluyqwslLiQ,1495
requests/adapters.py,sha256=v_FmjU5KZ76k-YttShZYB5RprIzhhL8Y3zgW9p4eBQ8,19553
requests/api.py,sha256=q61xcXq4tmiImrvcSVLTbFyCiD2F-L_-hWKGbz4y8vg,6449
requests/auth.py,sha256=h-HLlVx9j8rKV5hfSAycP2ApOSglTz77R0tz7qCbbEE,10187
requests/certs.py,sha256=Z9Sb410Anv6jUFTyss0jFFhU6xst8ctELqfy8Ev23gw,429
requests/compat.py,sha256=yxntVOSEHGMrn7FNr_32EEam1ZNAdPRdSE13_yaHzTk,1451
requests/cookies.py,sha256=kD3kNEcCj-mxbtf5fJsSaT86eGoEYpD3X0CSgpzl7BM,18560
requests/exceptions.py,sha256=DhveFBclVjTRxhRduVpO-GbMYMID2gmjdLfNEqNpI_U,3811
requests/help.py,sha256=gPX5d_H7Xd88aDABejhqGgl9B1VFRTt5BmiYvL3PzIQ,3875
requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733
requests/models.py,sha256=-DlKi0or8gFAM6VzutobXvvBW_2wrJuOF5NfndTIddA,35223
requests/packages.py,sha256=DXgv-FJIczZITmv0vEBAhWj4W-5CGCIN_ksvgR17Dvs,957
requests/sessions.py,sha256=-LvTzrPtetSTrR3buxu4XhdgMrJFLB1q5D7P--L2Xhw,30373
requests/status_codes.py,sha256=FvHmT5uH-_uimtRz5hH9VCbt7VV-Nei2J9upbej6j8g,4235
requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912
requests/utils.py,sha256=6sx2X3cIVA8BgWOg8odxFy-_lbWDFETU8HI4fU4Rmqw,33448


@@ -0,0 +1,124 @@
#
# The Python Imaging Library.
# $Id$
#
# WAL file handling
#
# History:
# 2003-04-23 fl created
#
# Copyright (c) 2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
"""
This reader is based on the specification available from:
https://www.flipcode.com/archives/Quake_2_BSP_File_Format.shtml
and has been tested with a few sample files found using google.
.. note::
This format cannot be automatically recognized, so the reader
is not registered for use with :py:func:`PIL.Image.open()`.
To open a WAL file, use the :py:func:`PIL.WalImageFile.open()` function instead.
"""
from __future__ import annotations
from . import Image, ImageFile
from ._binary import i32le as i32
class WalImageFile(ImageFile.ImageFile):
format = "WAL"
format_description = "Quake2 Texture"
def _open(self):
self._mode = "P"
# read header fields
header = self.fp.read(32 + 24 + 32 + 12)
self._size = i32(header, 32), i32(header, 36)
Image._decompression_bomb_check(self.size)
# load pixel data
offset = i32(header, 40)
self.fp.seek(offset)
# strings are null-terminated
self.info["name"] = header[:32].split(b"\0", 1)[0]
next_name = header[56 : 56 + 32].split(b"\0", 1)[0]
if next_name:
self.info["next_name"] = next_name
def load(self):
if not self.im:
self.im = Image.core.new(self.mode, self.size)
self.frombytes(self.fp.read(self.size[0] * self.size[1]))
self.putpalette(quake2palette)
return Image.Image.load(self)
def open(filename):
"""
Load texture from a Quake2 WAL texture file.
By default, a Quake2 standard palette is attached to the texture.
To override the palette, use the :py:func:`PIL.Image.Image.putpalette()` method.
:param filename: WAL file name, or an opened file handle.
:returns: An image instance.
"""
return WalImageFile(filename)
quake2palette = (
# default palette taken from piffo 0.93 by Hans Häggström
b"\x01\x01\x01\x0b\x0b\x0b\x12\x12\x12\x17\x17\x17\x1b\x1b\x1b\x1e"
b"\x1e\x1e\x22\x22\x22\x26\x26\x26\x29\x29\x29\x2c\x2c\x2c\x2f\x2f"
b"\x2f\x32\x32\x32\x35\x35\x35\x37\x37\x37\x3a\x3a\x3a\x3c\x3c\x3c"
b"\x24\x1e\x13\x22\x1c\x12\x20\x1b\x12\x1f\x1a\x10\x1d\x19\x10\x1b"
b"\x17\x0f\x1a\x16\x0f\x18\x14\x0d\x17\x13\x0d\x16\x12\x0d\x14\x10"
b"\x0b\x13\x0f\x0b\x10\x0d\x0a\x0f\x0b\x0a\x0d\x0b\x07\x0b\x0a\x07"
b"\x23\x23\x26\x22\x22\x25\x22\x20\x23\x21\x1f\x22\x20\x1e\x20\x1f"
b"\x1d\x1e\x1d\x1b\x1c\x1b\x1a\x1a\x1a\x19\x19\x18\x17\x17\x17\x16"
b"\x16\x14\x14\x14\x13\x13\x13\x10\x10\x10\x0f\x0f\x0f\x0d\x0d\x0d"
b"\x2d\x28\x20\x29\x24\x1c\x27\x22\x1a\x25\x1f\x17\x38\x2e\x1e\x31"
b"\x29\x1a\x2c\x25\x17\x26\x20\x14\x3c\x30\x14\x37\x2c\x13\x33\x28"
b"\x12\x2d\x24\x10\x28\x1f\x0f\x22\x1a\x0b\x1b\x14\x0a\x13\x0f\x07"
b"\x31\x1a\x16\x30\x17\x13\x2e\x16\x10\x2c\x14\x0d\x2a\x12\x0b\x27"
b"\x0f\x0a\x25\x0f\x07\x21\x0d\x01\x1e\x0b\x01\x1c\x0b\x01\x1a\x0b"
b"\x01\x18\x0a\x01\x16\x0a\x01\x13\x0a\x01\x10\x07\x01\x0d\x07\x01"
b"\x29\x23\x1e\x27\x21\x1c\x26\x20\x1b\x25\x1f\x1a\x23\x1d\x19\x21"
b"\x1c\x18\x20\x1b\x17\x1e\x19\x16\x1c\x18\x14\x1b\x17\x13\x19\x14"
b"\x10\x17\x13\x0f\x14\x10\x0d\x12\x0f\x0b\x0f\x0b\x0a\x0b\x0a\x07"
b"\x26\x1a\x0f\x23\x19\x0f\x20\x17\x0f\x1c\x16\x0f\x19\x13\x0d\x14"
b"\x10\x0b\x10\x0d\x0a\x0b\x0a\x07\x33\x22\x1f\x35\x29\x26\x37\x2f"
b"\x2d\x39\x35\x34\x37\x39\x3a\x33\x37\x39\x30\x34\x36\x2b\x31\x34"
b"\x27\x2e\x31\x22\x2b\x2f\x1d\x28\x2c\x17\x25\x2a\x0f\x20\x26\x0d"
b"\x1e\x25\x0b\x1c\x22\x0a\x1b\x20\x07\x19\x1e\x07\x17\x1b\x07\x14"
b"\x18\x01\x12\x16\x01\x0f\x12\x01\x0b\x0d\x01\x07\x0a\x01\x01\x01"
b"\x2c\x21\x21\x2a\x1f\x1f\x29\x1d\x1d\x27\x1c\x1c\x26\x1a\x1a\x24"
b"\x18\x18\x22\x17\x17\x21\x16\x16\x1e\x13\x13\x1b\x12\x12\x18\x10"
b"\x10\x16\x0d\x0d\x12\x0b\x0b\x0d\x0a\x0a\x0a\x07\x07\x01\x01\x01"
b"\x2e\x30\x29\x2d\x2e\x27\x2b\x2c\x26\x2a\x2a\x24\x28\x29\x23\x27"
b"\x27\x21\x26\x26\x1f\x24\x24\x1d\x22\x22\x1c\x1f\x1f\x1a\x1c\x1c"
b"\x18\x19\x19\x16\x17\x17\x13\x13\x13\x10\x0f\x0f\x0d\x0b\x0b\x0a"
b"\x30\x1e\x1b\x2d\x1c\x19\x2c\x1a\x17\x2a\x19\x14\x28\x17\x13\x26"
b"\x16\x10\x24\x13\x0f\x21\x12\x0d\x1f\x10\x0b\x1c\x0f\x0a\x19\x0d"
b"\x0a\x16\x0b\x07\x12\x0a\x07\x0f\x07\x01\x0a\x01\x01\x01\x01\x01"
b"\x28\x29\x38\x26\x27\x36\x25\x26\x34\x24\x24\x31\x22\x22\x2f\x20"
b"\x21\x2d\x1e\x1f\x2a\x1d\x1d\x27\x1b\x1b\x25\x19\x19\x21\x17\x17"
b"\x1e\x14\x14\x1b\x13\x12\x17\x10\x0f\x13\x0d\x0b\x0f\x0a\x07\x07"
b"\x2f\x32\x29\x2d\x30\x26\x2b\x2e\x24\x29\x2c\x21\x27\x2a\x1e\x25"
b"\x28\x1c\x23\x26\x1a\x21\x25\x18\x1e\x22\x14\x1b\x1f\x10\x19\x1c"
b"\x0d\x17\x1a\x0a\x13\x17\x07\x10\x13\x01\x0d\x0f\x01\x0a\x0b\x01"
b"\x01\x3f\x01\x13\x3c\x0b\x1b\x39\x10\x20\x35\x14\x23\x31\x17\x23"
b"\x2d\x18\x23\x29\x18\x3f\x3f\x3f\x3f\x3f\x39\x3f\x3f\x31\x3f\x3f"
b"\x2a\x3f\x3f\x20\x3f\x3f\x14\x3f\x3c\x12\x3f\x39\x0f\x3f\x35\x0b"
b"\x3f\x32\x07\x3f\x2d\x01\x3d\x2a\x01\x3b\x26\x01\x39\x21\x01\x37"
b"\x1d\x01\x34\x1a\x01\x32\x16\x01\x2f\x12\x01\x2d\x0f\x01\x2a\x0b"
b"\x01\x27\x07\x01\x23\x01\x01\x1d\x01\x01\x17\x01\x01\x10\x01\x01"
b"\x3d\x01\x01\x19\x19\x3f\x3f\x01\x01\x01\x01\x3f\x16\x16\x13\x10"
b"\x10\x0f\x0d\x0d\x0b\x3c\x2e\x2a\x36\x27\x20\x30\x21\x18\x29\x1b"
b"\x10\x3c\x39\x37\x37\x32\x2f\x31\x2c\x28\x2b\x26\x21\x30\x22\x20"
)
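
A usage sketch, since WAL files are not auto-recognized and must be opened through WalImageFile.open() directly; the texture path is an assumption:

from PIL import WalImageFile

im = WalImageFile.open("textures/wall03.wal")  # hypothetical Quake2 texture
im.convert("RGB").save("wall03.png")           # apply the default Quake2 palette and export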


@@ -0,0 +1,112 @@
#
# The Python Imaging Library.
# $Id$
#
# Binary input/output support routines.
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1995-2003 by Fredrik Lundh
# Copyright (c) 2012 by Brian Crowell
#
# See the README file for information on usage and redistribution.
#
"""Binary input/output support routines."""
from __future__ import annotations
from struct import pack, unpack_from
def i8(c: bytes) -> int:
return c[0]
def o8(i: int) -> bytes:
return bytes((i & 255,))
# Input, le = little endian, be = big endian
def i16le(c: bytes, o: int = 0) -> int:
"""
Converts a 2-bytes (16 bits) string to an unsigned integer.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
"""
return unpack_from("<H", c, o)[0]
def si16le(c: bytes, o: int = 0) -> int:
"""
Converts a 2-bytes (16 bits) string to a signed integer.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
"""
return unpack_from("<h", c, o)[0]
def si16be(c: bytes, o: int = 0) -> int:
"""
Converts a 2-bytes (16 bits) string to a signed integer, big endian.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
"""
return unpack_from(">h", c, o)[0]
def i32le(c: bytes, o: int = 0) -> int:
"""
Converts a 4-bytes (32 bits) string to an unsigned integer.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
"""
return unpack_from("<I", c, o)[0]
def si32le(c: bytes, o: int = 0) -> int:
"""
Converts a 4-bytes (32 bits) string to a signed integer.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
"""
return unpack_from("<i", c, o)[0]
def si32be(c: bytes, o: int = 0) -> int:
"""
Converts a 4-bytes (32 bits) string to a signed integer, big endian.
:param c: string containing bytes to convert
:param o: offset of bytes to convert in string
"""
return unpack_from(">i", c, o)[0]
def i16be(c: bytes, o: int = 0) -> int:
return unpack_from(">H", c, o)[0]
def i32be(c: bytes, o: int = 0) -> int:
return unpack_from(">I", c, o)[0]
# Output, le = little endian, be = big endian
def o16le(i: int) -> bytes:
return pack("<H", i)
def o32le(i: int) -> bytes:
return pack("<I", i)
def o16be(i: int) -> bytes:
return pack(">H", i)
def o32be(i: int) -> bytes:
return pack(">I", i)
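
A few self-checking examples of the helpers above, assuming the module is importable as PIL._binary:

from PIL._binary import i16le, i32be, o8, o16le

assert o8(0x41) == b"A"
assert o16le(0x1234) == b"\x34\x12"      # little-endian output
assert i16le(b"\x34\x12") == 0x1234      # little-endian input
assert i32be(b"\x00\x00\x00\x04") == 4   # big-endian input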


@@ -0,0 +1,2 @@
[console_scripts]
normalizer = charset_normalizer.cli:cli_detect


@@ -0,0 +1,61 @@
# connectionhandler.py
import json
import requests
from time import sleep
def simple_get(connection_url:str) -> dict:
try:
response = requests.get(connection_url)
if response.status_code == 200:
return {"status": "ok", "text": response.text, "status_code": response.status_code}
else:
return {"status": "error", "status_code": response.status_code}
except Exception as e:
return {"status": "error", "status_code": 0}
def simple_delete(connection_url:str, data) -> dict:
try:
response = requests.delete(connection_url, json=data)
if response.status_code == 200:
return {"status": "ok", "status_code": response.status_code}
else:
return {"status": "error", "text": "Failed to delete", "status_code": response.status_code}
except Exception as e:
return {"status": "error", "status_code": 0}
def stream_post(connection_url:str, data, callback:callable) -> dict:
try:
headers = {
"Content-Type": "application/json"
}
response = requests.post(connection_url, headers=headers, data=data, stream=True)
if response.status_code == 200:
for line in response.iter_lines():
if line:
callback(json.loads(line.decode("utf-8")))
return {"status": "ok", "status_code": response.status_code}
else:
return {"status": "error", "status_code": response.status_code}
except Exception as e:
return {"status": "error", "status_code": 0}
def stream_post_fake(connection_url:str, data, callback:callable) -> dict:
data = {
"status": "pulling manifest"
}
callback(data)
for i in range(2):
for a in range(11):
sleep(.1)
data = {
"status": f"downloading digestname {i}",
"digest": f"digestname {i}",
"total": 500,
"completed": a * 50
}
callback(data)
for msg in ["verifying sha256 digest", "writing manifest", "removing any unused layers", "success"]:
sleep(.1)
data = {"status": msg}
callback(data)
return {"status": "ok", "status_code": 200}


@@ -0,0 +1,5 @@
from __future__ import annotations
from .features import pilinfo
pilinfo(supported_formats=False)


@@ -0,0 +1,6 @@
from .__main__ import cli_detect, query_yes_no
__all__ = (
"cli_detect",
"query_yes_no",
)
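
This maps to the normalizer console script declared in the entry points above; a rough Python-side equivalent, with the file name assumed:

from charset_normalizer.cli import cli_detect

exit_code = cli_detect(["unknown_encoding.txt"])  # same as running: normalizer unknown_encoding.txt
print(exit_code)                                  # 0 on success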


@@ -0,0 +1,272 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
width="128"
height="128"
id="svg11300"
version="1.0"
style="display:inline;enable-background:new"
viewBox="0 0 128 128"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:dc="http://purl.org/dc/elements/1.1/">
<title
id="title4162">Adwaita Icon Template</title>
<defs
id="defs3">
<linearGradient
id="linearGradient35">
<stop
style="stop-color:#3d3846;stop-opacity:1;"
offset="0"
id="stop35" />
<stop
style="stop-color:#241f31;stop-opacity:1;"
offset="1"
id="stop36" />
</linearGradient>
<linearGradient
id="linearGradient33">
<stop
style="stop-color:#99c1ff;stop-opacity:1;"
offset="0"
id="stop33" />
<stop
style="stop-color:#62a0ea;stop-opacity:1;"
offset="1"
id="stop34" />
</linearGradient>
<linearGradient
id="linearGradient31">
<stop
style="stop-color:#deddda;stop-opacity:1;"
offset="0"
id="stop31" />
<stop
style="stop-color:#f6f5f4;stop-opacity:1;"
offset="1"
id="stop32" />
</linearGradient>
<linearGradient
id="linearGradient24">
<stop
style="stop-color:#f6f5f4;stop-opacity:1;"
offset="0"
id="stop24" />
<stop
style="stop-color:#deddda;stop-opacity:1;"
offset="1"
id="stop23" />
</linearGradient>
<linearGradient
id="linearGradient12">
<stop
style="stop-color:#000000;stop-opacity:0;"
offset="0"
id="stop12" />
<stop
style="stop-color:#000000;stop-opacity:0;"
offset="1"
id="stop13" />
</linearGradient>
<linearGradient
xlink:href="#linearGradient12"
id="linearGradient13"
x1="40.888428"
y1="205.03607"
x2="40.90649"
y2="212.09515"
gradientUnits="userSpaceOnUse" />
<linearGradient
xlink:href="#linearGradient24"
id="linearGradient23"
x1="40.888428"
y1="205.03607"
x2="40.90649"
y2="212.09515"
gradientUnits="userSpaceOnUse" />
<linearGradient
xlink:href="#linearGradient24"
id="linearGradient29"
gradientUnits="userSpaceOnUse"
x1="40.888428"
y1="205.03607"
x2="40.90649"
y2="212.09515"
gradientTransform="matrix(-1,0,0,1,127.14843,2.8384866e-4)" />
<linearGradient
xlink:href="#linearGradient12"
id="linearGradient30"
gradientUnits="userSpaceOnUse"
x1="40.888428"
y1="205.03607"
x2="40.90649"
y2="212.09515"
gradientTransform="matrix(-1,0,0,1,127.14843,2.8384866e-4)" />
<linearGradient
xlink:href="#linearGradient31"
id="linearGradient32"
x1="63.552597"
y1="214.19464"
x2="63.552597"
y2="241.24492"
gradientUnits="userSpaceOnUse" />
<linearGradient
xlink:href="#linearGradient33"
id="linearGradient34"
x1="64.683159"
y1="267.04626"
x2="64.895935"
y2="278.69958"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.99245101,0,0,1.1818075,0.48386604,-51.63542)" />
<linearGradient
xlink:href="#linearGradient35"
id="linearGradient36"
x1="45.111782"
y1="235.32567"
x2="45.111782"
y2="229.17581"
gradientUnits="userSpaceOnUse" />
<linearGradient
xlink:href="#linearGradient35"
id="linearGradient37"
gradientUnits="userSpaceOnUse"
x1="45.111782"
y1="235.32567"
x2="45.111782"
y2="229.17581"
gradientTransform="translate(36.957243,0.15686125)" />
</defs>
<metadata
id="metadata4">
<rdf:RDF>
<cc:Work
rdf:about="">