| Current File : /home/mmdealscpanel/yummmdeals.com/util.py.tar |
lib64/python3.6/ctypes/util.py 0000644 00000026745 15033050200 0012074 0 ustar 00 import os
import shutil
import subprocess
import sys
# find_library(name) returns the pathname of a library, or None.
if os.name == "nt":
    def _get_build_version():
        """Return the version of MSVC that was used to build Python.

        For Python 2.3 and up, the version number is included in
        sys.version.  For earlier versions, assume the compiler is MSVC 6.
        """
        # This function was copied from Lib/distutils/msvccompiler.py
        prefix = "MSC v."
        i = sys.version.find(prefix)
        if i == -1:
            # No MSC marker in sys.version: assume the oldest supported compiler.
            return 6
        i = i + len(prefix)
        s, rest = sys.version[i:].split(" ", 1)
        # "MSC v.1400" -> major 14 - 6 = 8 etc.
        majorVersion = int(s[:-2]) - 6
        if majorVersion >= 13:
            # v13 was skipped by Microsoft; shift later versions up by one.
            majorVersion += 1
        minorVersion = int(s[2:3]) / 10.0
        # I don't think paths are affected by minor version in version 6
        if majorVersion == 6:
            minorVersion = 0
        if majorVersion >= 6:
            return majorVersion + minorVersion
        # else we don't know what version of the compiler this is
        return None

    def find_msvcrt():
        """Return the name of the VC runtime dll"""
        version = _get_build_version()
        if version is None:
            # better be safe than sorry
            return None
        if version <= 6:
            clibname = 'msvcrt'
        elif version <= 13:
            clibname = 'msvcr%d' % (version * 10)
        else:
            # CRT is no longer directly loadable. See issue23606 for the
            # discussion about alternative approaches.
            return None

        # If python was built with in debug mode
        import importlib.machinery
        if '_d.pyd' in importlib.machinery.EXTENSION_SUFFIXES:
            clibname += 'd'
        return clibname+'.dll'

    def find_library(name):
        # The C runtime is special-cased; everything else is resolved by
        # scanning PATH for "<name>" and then "<name>.dll".
        if name in ('c', 'm'):
            return find_msvcrt()
        # See MSDN for the REAL search order.
        for directory in os.environ['PATH'].split(os.pathsep):
            fname = os.path.join(directory, name)
            if os.path.isfile(fname):
                return fname
            if fname.lower().endswith(".dll"):
                continue
            fname = fname + ".dll"
            if os.path.isfile(fname):
                return fname
        return None
if os.name == "posix" and sys.platform == "darwin":
    from ctypes.macholib.dyld import dyld_find as _dyld_find
    def find_library(name):
        # Try the conventional macOS spellings of a library name and let
        # dyld's search rules resolve the first one that exists.
        possible = ['lib%s.dylib' % name,
                    '%s.dylib' % name,
                    '%s.framework/%s' % (name, name)]
        for name in possible:
            try:
                return _dyld_find(name)
            except ValueError:
                continue
        return None

elif os.name == "posix":
    # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump
    import re, tempfile

    def _findLib_gcc(name):
        # Run GCC's linker with the -t (aka --trace) option and examine the
        # library name it prints out. The GCC command will fail because we
        # haven't supplied a proper program with main(), but that does not
        # matter.
        expr = os.fsencode(r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name))

        c_compiler = shutil.which('gcc')
        if not c_compiler:
            c_compiler = shutil.which('cc')
        if not c_compiler:
            # No C compiler available, give up
            return None

        temp = tempfile.NamedTemporaryFile()
        try:
            args = [c_compiler, '-Wl,-t', '-o', temp.name, '-l' + name]

            # Force English tool output so the regex below matches.
            env = dict(os.environ)
            env['LC_ALL'] = 'C'
            env['LANG'] = 'C'
            try:
                proc = subprocess.Popen(args,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT,
                                        env=env)
            except OSError:  # E.g. bad executable
                return None
            with proc:
                trace = proc.stdout.read()
        finally:
            try:
                temp.close()
            except FileNotFoundError:
                # Raised if the file was already removed, which is the normal
                # behaviour of GCC if linking fails
                pass
        res = re.search(expr, trace)
        if not res:
            return None
        return os.fsdecode(res.group(0))


    if sys.platform == "sunos5":
        # use /usr/ccs/bin/dump on solaris
        def _get_soname(f):
            # Extract the SONAME dynamic tag from the library at path *f*.
            if not f:
                return None

            try:
                proc = subprocess.Popen(("/usr/ccs/bin/dump", "-Lpv", f),
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.DEVNULL)
            except OSError:  # E.g. command not found
                return None
            with proc:
                data = proc.stdout.read()
            res = re.search(br'\[.*\]\sSONAME\s+([^\s]+)', data)
            if not res:
                return None
            return os.fsdecode(res.group(1))
    else:
        def _get_soname(f):
            # assuming GNU binutils / ELF
            if not f:
                return None
            objdump = shutil.which('objdump')
            if not objdump:
                # objdump is not available, give up
                return None

            try:
                proc = subprocess.Popen((objdump, '-p', '-j', '.dynamic', f),
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.DEVNULL)
            except OSError:  # E.g. bad executable
                return None
            with proc:
                dump = proc.stdout.read()
            res = re.search(br'\sSONAME\s+([^\s]+)', dump)
            if not res:
                return None
            return os.fsdecode(res.group(1))

    if sys.platform.startswith(("freebsd", "openbsd", "dragonfly")):

        def _num_version(libname):
            # "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ]
            parts = libname.split(b".")
            nums = []
            try:
                while parts:
                    nums.insert(0, int(parts.pop()))
            except ValueError:
                pass
            # Unversioned names sort last so the highest version wins.
            return nums or [sys.maxsize]

        def find_library(name):
            ename = re.escape(name)
            expr = r':-l%s\.\S+ => \S*/(lib%s\.\S+)' % (ename, ename)
            expr = os.fsencode(expr)

            try:
                proc = subprocess.Popen(('/sbin/ldconfig', '-r'),
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.DEVNULL)
            except OSError:  # E.g. command not found
                data = b''
            else:
                with proc:
                    data = proc.stdout.read()

            res = re.findall(expr, data)
            if not res:
                # Fall back to asking the compiler's linker.
                return _get_soname(_findLib_gcc(name))

            res.sort(key=_num_version)
            return os.fsdecode(res[-1])

    elif sys.platform == "sunos5":

        def _findLib_crle(name, is64):
            # Search the runtime linker's default library path (crle output).
            if not os.path.exists('/usr/bin/crle'):
                return None

            env = dict(os.environ)
            env['LC_ALL'] = 'C'

            if is64:
                args = ('/usr/bin/crle', '-64')
            else:
                args = ('/usr/bin/crle',)

            paths = None
            try:
                proc = subprocess.Popen(args,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.DEVNULL,
                                        env=env)
            except OSError:  # E.g. bad executable
                return None
            with proc:
                for line in proc.stdout:
                    line = line.strip()
                    if line.startswith(b'Default Library Path (ELF):'):
                        paths = os.fsdecode(line).split()[4]

            if not paths:
                return None

            for dir in paths.split(":"):
                libfile = os.path.join(dir, "lib%s.so" % name)
                if os.path.exists(libfile):
                    return libfile

            return None

        def find_library(name, is64 = False):
            return _get_soname(_findLib_crle(name, is64) or _findLib_gcc(name))

    else:

        def _findSoname_ldconfig(name):
            import struct
            # Pick the ABI tag matching this interpreter's word size.
            if struct.calcsize('l') == 4:
                machine = os.uname().machine + '-32'
            else:
                machine = os.uname().machine + '-64'
            mach_map = {
                'x86_64-64': 'libc6,x86-64',
                'ppc64-64': 'libc6,64bit',
                'sparc64-64': 'libc6,64bit',
                's390x-64': 'libc6,64bit',
                'ia64-64': 'libc6,IA-64',
                }
            abi_type = mach_map.get(machine, 'libc6')
            # XXX assuming GLIBC's ldconfig (with option -p)
            regex = r'\s+(lib%s\.[^\s]+)\s+\(%s'
            regex = os.fsencode(regex % (re.escape(name), abi_type))
            try:
                with subprocess.Popen(['/sbin/ldconfig', '-p'],
                                      stdin=subprocess.DEVNULL,
                                      stderr=subprocess.DEVNULL,
                                      stdout=subprocess.PIPE,
                                      env={'LC_ALL': 'C', 'LANG': 'C'}) as p:
                    res = re.search(regex, p.stdout.read())
                    if res:
                        return os.fsdecode(res.group(1))
            except OSError:
                pass

        def _findLib_ld(name):
            # See issue #9998 for why this is needed
            expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
            cmd = ['ld', '-t']
            libpath = os.environ.get('LD_LIBRARY_PATH')
            if libpath:
                for d in libpath.split(':'):
                    cmd.extend(['-L', d])
            cmd.extend(['-o', os.devnull, '-l%s' % name])
            result = None
            try:
                p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     universal_newlines=True)
                out, _ = p.communicate()
                res = re.search(expr, os.fsdecode(out))
                if res:
                    result = res.group(0)
            except Exception as e:
                pass  # result will be None
            return result

        def find_library(name):
            # See issue #9998
            return _findSoname_ldconfig(name) or \
                   _get_soname(_findLib_gcc(name) or _findLib_ld(name))
################################################################
# test code
def test():
    # Manual smoke test: exercise find_library() and library loading on the
    # host platform; prints results rather than asserting.
    from ctypes import cdll
    if os.name == "nt":
        print(cdll.msvcrt)
        print(cdll.load("msvcrt"))
        print(find_library("msvcrt"))

    if os.name == "posix":
        # find and load_version
        print(find_library("m"))
        print(find_library("c"))
        print(find_library("bz2"))

        # getattr
##        print cdll.m
##        print cdll.bz2

        # load
        if sys.platform == "darwin":
            print(cdll.LoadLibrary("libm.dylib"))
            print(cdll.LoadLibrary("libcrypto.dylib"))
            print(cdll.LoadLibrary("libSystem.dylib"))
            print(cdll.LoadLibrary("System.framework/System"))
        else:
            print(cdll.LoadLibrary("libm.so"))
            print(cdll.LoadLibrary("libcrypt.so"))
            print(find_library("crypt"))

if __name__ == "__main__":
    test()
lib/python3.6/site-packages/dnf/util.py 0000644 00000047627 15040155177 0013725 0 ustar 00 # util.py
# Basic dnf utils.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from .pycomp import PY3, basestring
from dnf.i18n import _, ucd
import argparse
import dnf
import dnf.callback
import dnf.const
import dnf.pycomp
import errno
import functools
import hawkey
import itertools
import locale
import logging
import os
import pwd
import shutil
import sys
import tempfile
import time
import libdnf.repo
import libdnf.transaction
logger = logging.getLogger('dnf')

# argparse derives .prog from sys.argv[0]; report "yum" only when invoked
# through the yum compatibility entry point, otherwise "dnf".  Build the
# parser once instead of twice.
_prog = argparse.ArgumentParser().prog
MAIN_PROG = "yum" if _prog == "yum" else "dnf"
MAIN_PROG_UPPER = MAIN_PROG.upper()

"""DNF Utilities."""
def _parse_specs(namespace, values):
    """
    Categorize :param values list into packages, groups and filenames

    :param namespace: argparse.Namespace, where specs will be stored
    :param values: list of specs, whether packages ('foo') or groups/modules ('@bar')
        or filenames ('*.rpm', 'http://*', ...)

    To access packages use: specs.pkg_specs,
    to access groups use: specs.grp_specs,
    to access filenames use: specs.filenames
    """
    namespace.filenames = []
    namespace.grp_specs = []
    namespace.pkg_specs = []
    seen = set()
    for spec in values:
        # Skip duplicates while keeping first-occurrence order.
        if spec in seen:
            continue
        seen.add(spec)
        scheme = dnf.pycomp.urlparse.urlparse(spec)[0]
        if spec.endswith('.rpm'):
            namespace.filenames.append(spec)
        elif scheme and scheme in ('http', 'ftp', 'file', 'https'):
            namespace.filenames.append(spec)
        elif spec.startswith('@'):
            # '@name' denotes a group/module spec; strip the marker.
            namespace.grp_specs.append(spec[1:])
        else:
            namespace.pkg_specs.append(spec)
def _urlopen_progress(url, conf, progress=None):
    # Download *url* (an rpm) via libdnf/librepo while reporting *progress*;
    # returns the local path of the downloaded (or already cached) file.
    if progress is None:
        progress = dnf.callback.NullDownloadProgress()
    pload = dnf.repo.RemoteRPMPayload(url, conf, progress)
    if os.path.exists(pload.local_path):
        # Already downloaded earlier; reuse the cached copy.
        return pload.local_path
    est_remote_size = sum([pload.download_size])
    progress.start(1, est_remote_size)
    targets = [pload._librepo_target()]
    try:
        libdnf.repo.PackageTarget.downloadPackages(libdnf.repo.VectorPPackageTarget(targets), True)
    except RuntimeError as e:
        # In strict mode a download failure is fatal; otherwise just log it.
        if conf.strict:
            raise IOError(str(e))
        logger.error(str(e))
    return pload.local_path

def _urlopen(url, conf=None, repo=None, mode='w+b', **kwargs):
    """
    Open the specified absolute url, return a file object
    which respects proxy setting even for non-repo downloads
    """
    if PY3 and 'b' not in mode:
        # Text-mode tempfile needs an explicit encoding under Python 3.
        kwargs.setdefault('encoding', 'utf-8')
    fo = tempfile.NamedTemporaryFile(mode, **kwargs)
    try:
        if repo:
            # Use the repository's configured downloader (proxy, auth, ...).
            repo._repo.downloadUrl(url, fo.fileno())
        else:
            libdnf.repo.Downloader.downloadURL(conf._config if conf else None, url, fo.fileno())
    except RuntimeError as e:
        raise IOError(str(e))
    fo.seek(0)  # rewind so the caller reads from the beginning
    return fo
def rtrim(s, r):
    """Return *s* with one trailing occurrence of *r* removed, if present.

    Bug fix: with an empty suffix the old code did ``s[:-len('')]`` which is
    ``s[:0]`` and wrongly returned the empty string; an empty *r* now leaves
    *s* untouched.
    """
    if r and s.endswith(r):
        s = s[:-len(r)]
    return s

def am_i_root():
    """Return True when running with effective UID 0 (root)."""
    # used by ansible (lib/ansible/modules/packaging/os/dnf.py)
    return os.geteuid() == 0

def clear_dir(path):
    """Remove all files and dirs under `path`

    Also see rm_rf()
    """
    for entry in os.listdir(path):
        contained_path = os.path.join(path, entry)
        rm_rf(contained_path)
def ensure_dir(dname):
    """Create directory *dname* (mode 0o755) unless it already exists.

    Used by ansible (lib/ansible/modules/packaging/os/dnf.py).
    """
    try:
        os.makedirs(dname, mode=0o755)
    except OSError as e:
        # Tolerate only "already exists as a directory"; anything else is real.
        already_a_dir = e.errno == errno.EEXIST and os.path.isdir(dname)
        if not already_a_dir:
            raise e

def split_path(path):
    """
    Split path by path separators.

    Use os.path.join() to join the path back to string.
    """
    components = []
    remainder = path
    while True:
        remainder, tail = os.path.split(remainder)
        if not tail:
            # Keep the root (or "" for relative/empty paths) so that
            # os.path.join(*components) can always be called.
            if remainder or not components:
                components.insert(0, remainder)
            break
        components.insert(0, tail)
    return components
def empty(iterable):
    """Return True when *iterable* yields no items.

    Note: a plain iterator is materialized (and therefore consumed) in order
    to measure its length.
    """
    try:
        count = len(iterable)
    except TypeError:
        count = len(list(iterable))
    return count == 0

def first(iterable):
    """Returns the first item from an iterable or None if it has no elements."""
    return next(iter(iterable), None)

def first_not_none(iterable):
    """Return the first item that is not None, or None when there is none."""
    return next((item for item in iterable if item is not None), None)

def file_age(fn):
    """Return the age of file *fn* in seconds (now minus mtime)."""
    return time.time() - file_timestamp(fn)

def file_timestamp(fn):
    """Return the modification time of *fn* as a POSIX timestamp."""
    return os.stat(fn).st_mtime

def get_effective_login():
    """Return the login name for the effective UID, or "UID: n" as fallback."""
    euid = os.geteuid()
    try:
        return pwd.getpwuid(euid)[0]
    except KeyError:
        return "UID: %s" % euid

def get_in(dct, keys, not_found):
    """Like dict.get() for nested dicts."""
    current = dct
    for key in keys:
        current = current.get(key)
        if current is None:
            return not_found
    return current
def group_by_filter(fn, iterable):
    """Split *iterable* into ([items where fn(item) is truthy],
    [items where it is falsy]), preserving order."""
    matched, unmatched = [], []
    for item in iterable:
        (matched if fn(item) else unmatched).append(item)
    return (matched, unmatched)

def insert_if(item, iterable, condition):
    """Insert an item into an iterable by a condition."""
    for element in iterable:
        if condition(element):
            yield item
        yield element

def is_exhausted(iterator):
    """Test whether an iterator is exhausted (consumes one item if not)."""
    sentinel = object()
    return next(iterator, sentinel) is sentinel

def is_glob_pattern(pattern):
    """True when *pattern* (a string or list of strings) contains glob chars."""
    if is_string_type(pattern):
        pattern = [pattern]
    return (isinstance(pattern, list) and any(set(p) & set("*[?") for p in pattern))

def is_string_type(obj):
    """True for str (and, under Python 2, any basestring)."""
    return isinstance(obj, str if PY3 else basestring)

def lazyattr(attrname):
    """Decorator to get lazy attribute initialization.

    Composes with @property. Force reinitialization by deleting the <attrname>.
    """
    def get_decorated(fn):
        def cached_getter(obj):
            try:
                # Fast path: the attribute was computed earlier.
                return getattr(obj, attrname)
            except AttributeError:
                value = fn(obj)
                setattr(obj, attrname, value)
                return value
        return cached_getter
    return get_decorated
def mapall(fn, *seq):
    """Like functools.map(), but return a list instead of an iterator.

    This means all side effects of fn take place even without iterating the
    result.
    """
    return [*map(fn, *seq)]

def normalize_time(timestamp):
    """Convert time into locale aware datetime string object."""
    text = time.strftime("%c", time.localtime(timestamp))
    if dnf.pycomp.PY3:
        return text
    # Python 2: strftime() returns encoded bytes; decode per current locale.
    encoding = locale.getlocale()[1]
    if encoding:
        text = text.decode(encoding)
    return text
def on_ac_power():
    """Decide whether we are on line power.

    Returns True if we are on line power, False if not, None if it can not be
    decided.
    """
    try:
        # Linux sysfs: "AC*" power-supply nodes expose an "online" flag.
        ps_folder = "/sys/class/power_supply"
        ac_nodes = [node for node in os.listdir(ps_folder) if node.startswith("AC")]
        if len(ac_nodes) > 0:
            ac_node = ac_nodes[0]
            with open("{}/{}/online".format(ps_folder, ac_node)) as ac_status:
                data = ac_status.read()
            return int(data) == 1
        return None
    except (IOError, ValueError):
        # Missing sysfs entries or unparsable contents: undecidable.
        return None

def on_metered_connection():
    """Decide whether we are on metered connection.

    Returns:
        True: if on metered connection
        False: if not
        None: if it can not be decided
    """
    try:
        import dbus
    except ImportError:
        # No python-dbus available: undecidable.
        return None
    try:
        bus = dbus.SystemBus()
        proxy = bus.get_object("org.freedesktop.NetworkManager",
                               "/org/freedesktop/NetworkManager")
        iface = dbus.Interface(proxy, "org.freedesktop.DBus.Properties")
        metered = iface.Get("org.freedesktop.NetworkManager", "Metered")
    except dbus.DBusException:
        # NetworkManager not running / not reachable.
        return None
    if metered == 0:  # NM_METERED_UNKNOWN
        return None
    elif metered in (1, 3):  # NM_METERED_YES, NM_METERED_GUESS_YES
        return True
    elif metered in (2, 4):  # NM_METERED_NO, NM_METERED_GUESS_NO
        return False
    else:  # Something undocumented (at least at this moment)
        raise ValueError("Unknown value for metered property: %r", metered)
def partition(pred, iterable):
    """Use a predicate to partition entries into false entries and true entries.

    Credit: Python library itertools' documentation.
    """
    left, right = itertools.tee(iterable)
    return dnf.pycomp.filterfalse(pred, left), filter(pred, right)

def rm_rf(path):
    """Recursively remove *path*; all OS-level errors are ignored."""
    try:
        shutil.rmtree(path)
    except OSError:
        # Best effort: missing paths and permission problems are tolerated.
        pass

def split_by(iterable, condition):
    """Split an iterable into tuples by a condition.

    Inserts a separator before each item which meets the condition and then
    cuts the iterable by these separators.
    """
    separator = object()  # A unique object.

    def next_subsequence(it):
        # Consume items up to (but excluding) the next separator.
        return tuple(itertools.takewhile(lambda e: e != separator, it))

    # Mark each place where the condition is met by the separator.
    marked = insert_if(separator, iterable, condition)

    # The 1st subsequence may be empty if the 1st item meets the condition.
    yield next_subsequence(marked)

    while True:
        chunk = next_subsequence(marked)
        if not chunk:
            break
        yield chunk

def strip_prefix(s, prefix):
    """Return *s* without *prefix*, or None when *s* does not start with it."""
    return s[len(prefix):] if s.startswith(prefix) else None
def touch(path, no_create=False):
"""Create an empty file if it doesn't exist or bump it's timestamps.
If no_create is True only bumps the timestamps.
"""
if no_create or os.access(path, os.F_OK):
return os.utime(path, None)
with open(path, 'a'):
pass
def _terminal_messenger(tp='write', msg="", out=sys.stdout):
try:
if tp == 'write':
out.write(msg)
elif tp == 'flush':
out.flush()
elif tp == 'write_flush':
out.write(msg)
out.flush()
elif tp == 'print':
print(msg, file=out)
else:
raise ValueError('Unsupported type: ' + tp)
except IOError as e:
logger.critical('{}: {}'.format(type(e).__name__, ucd(e)))
pass
def _format_resolve_problems(resolve_problems):
"""
Format string about problems in resolve
:param resolve_problems: list with list of strings (output of goal.problem_rules())
:return: string
"""
msg = ""
count_problems = (len(resolve_problems) > 1)
for i, rs in enumerate(resolve_problems, start=1):
if count_problems:
msg += "\n " + _("Problem") + " %d: " % i
else:
msg += "\n " + _("Problem") + ": "
msg += "\n - ".join(rs)
return msg
def _te_nevra(te):
nevra = te.N() + '-'
if te.E() is not None and te.E() != '0':
nevra += te.E() + ':'
return nevra + te.V() + '-' + te.R() + '.' + te.A()
def _log_rpm_trans_with_swdb(rpm_transaction, swdb_transaction):
    # Debug helper: dump both the rpm transaction elements and the software
    # database (swdb) items so mismatches between them can be diagnosed.
    logger.debug("Logging transaction elements")
    for rpm_el in rpm_transaction:
        tsi = rpm_el.Key()
        tsi_state = None
        if tsi is not None:
            tsi_state = tsi.state
        msg = "RPM element: '{}', Key(): '{}', Key state: '{}', Failed() '{}': ".format(
            _te_nevra(rpm_el), tsi, tsi_state, rpm_el.Failed())
        logger.debug(msg)
    for tsi in swdb_transaction:
        msg = "SWDB element: '{}', State: '{}', Action: '{}', From repo: '{}', Reason: '{}', " \
              "Get reason: '{}'".format(str(tsi), tsi.state, tsi.action, tsi.from_repo, tsi.reason,
                                        tsi.get_reason())
        logger.debug(msg)

def _sync_rpm_trans_with_swdb(rpm_transaction, swdb_transaction):
    # Propagate per-element success/failure from the executed rpm transaction
    # into the swdb items, matching primarily via Key() and falling back to
    # NEVRA-string comparison for "revert" actions whose key has no package.
    revert_actions = {libdnf.transaction.TransactionItemAction_DOWNGRADED,
                      libdnf.transaction.TransactionItemAction_OBSOLETED,
                      libdnf.transaction.TransactionItemAction_REMOVE,
                      libdnf.transaction.TransactionItemAction_UPGRADED,
                      libdnf.transaction.TransactionItemAction_REINSTALLED}
    cached_tsi = [tsi for tsi in swdb_transaction]
    el_not_found = False
    error = False
    for rpm_el in rpm_transaction:
        te_nevra = _te_nevra(rpm_el)
        tsi = rpm_el.Key()
        if tsi is None or not hasattr(tsi, "pkg"):
            # Key lookup failed: try to find a still-unresolved revert item
            # with an identical NEVRA string.
            for tsi_candidate in cached_tsi:
                if tsi_candidate.state != libdnf.transaction.TransactionItemState_UNKNOWN:
                    continue
                if tsi_candidate.action not in revert_actions:
                    continue
                if str(tsi_candidate) == te_nevra:
                    tsi = tsi_candidate
                    break
        if tsi is None or not hasattr(tsi, "pkg"):
            logger.critical(_("TransactionItem not found for key: {}").format(te_nevra))
            el_not_found = True
            continue
        if rpm_el.Failed():
            tsi.state = libdnf.transaction.TransactionItemState_ERROR
            error = True
        else:
            tsi.state = libdnf.transaction.TransactionItemState_DONE
    for tsi in cached_tsi:
        # Anything still UNKNOWN never matched an rpm element.
        if tsi.state == libdnf.transaction.TransactionItemState_UNKNOWN:
            logger.critical(_("TransactionSWDBItem not found for key: {}").format(str(tsi)))
            el_not_found = True
    if error:
        logger.debug(_('Errors occurred during transaction.'))
    if el_not_found:
        # Dump both sides for post-mortem debugging.
        _log_rpm_trans_with_swdb(rpm_transaction, cached_tsi)
class tmpdir(object):
    """Context manager providing a fresh temporary directory, removed on exit."""
    # used by subscription-manager (src/dnf-plugins/product-id.py)
    def __init__(self):
        # Prefix the directory name (e.g. "dnf-") for easy identification.
        prefix = '%s-' % dnf.const.PREFIX
        self.path = tempfile.mkdtemp(prefix=prefix)

    def __enter__(self):
        return self.path

    def __exit__(self, exc_type, exc_value, traceback):
        # Always clean up, even when the with-body raised.
        rm_rf(self.path)
class Bunch(dict):
    """Dictionary with attribute accessing syntax.

    In DNF, prefer using this over dnf.yum.misc.GenericHolder.

    Credit: Alex Martelli, Doug Hudgeon
    """

    def __init__(self, *args, **kwargs):
        super(Bunch, self).__init__(*args, **kwargs)
        # Attribute access and item access share the very same storage.
        self.__dict__ = self

    def __hash__(self):
        # dict itself is unhashable; identity hashing lets a Bunch be used
        # as a set member or dict key.
        return id(self)
class MultiCallList(list):
    """A list that broadcasts attribute access to all its elements.

    Calling m.method(x) invokes method(x) on every item and returns the list
    of results; assigning m.attr = v sets the attribute on every item.
    """

    def __init__(self, iterable):
        super(MultiCallList, self).__init__()
        self.extend(iterable)

    def __getattr__(self, what):
        # Only reached for names list itself does not define.
        def fn(*args, **kwargs):
            return [getattr(member, what)(*args, **kwargs) for member in self]
        return fn

    def __setattr__(self, what, val):
        def setter(item):
            setattr(item, what, val)
        return list(map(setter, self))
def _make_lists(transaction):
    # Bucket transaction items by the operation performed on them so that
    # reporting code can render each category separately.
    b = Bunch({
        'downgraded': [],
        'erased': [],
        'erased_clean': [],
        'erased_dep': [],
        'installed': [],
        'installed_group': [],
        'installed_dep': [],
        'installed_weak': [],
        'reinstalled': [],
        'upgraded': [],
        'failed': [],
    })

    for tsi in transaction:
        if tsi.state == libdnf.transaction.TransactionItemState_ERROR:
            # Failed items go to 'failed' regardless of their action.
            b.failed.append(tsi)
        elif tsi.action == libdnf.transaction.TransactionItemAction_DOWNGRADE:
            b.downgraded.append(tsi)
        elif tsi.action == libdnf.transaction.TransactionItemAction_INSTALL:
            # Sub-classify installs by the reason they were pulled in.
            if tsi.reason == libdnf.transaction.TransactionItemReason_GROUP:
                b.installed_group.append(tsi)
            elif tsi.reason == libdnf.transaction.TransactionItemReason_DEPENDENCY:
                b.installed_dep.append(tsi)
            elif tsi.reason == libdnf.transaction.TransactionItemReason_WEAK_DEPENDENCY:
                b.installed_weak.append(tsi)
            else:
                # TransactionItemReason_USER
                b.installed.append(tsi)
        elif tsi.action == libdnf.transaction.TransactionItemAction_REINSTALL:
            b.reinstalled.append(tsi)
        elif tsi.action == libdnf.transaction.TransactionItemAction_REMOVE:
            # Sub-classify removals by reason as well.
            if tsi.reason == libdnf.transaction.TransactionItemReason_CLEAN:
                b.erased_clean.append(tsi)
            elif tsi.reason == libdnf.transaction.TransactionItemReason_DEPENDENCY:
                b.erased_dep.append(tsi)
            else:
                b.erased.append(tsi)
        elif tsi.action == libdnf.transaction.TransactionItemAction_UPGRADE:
            b.upgraded.append(tsi)

    return b
def _post_transaction_output(base, transaction, action_callback):
    """Returns a human-readable summary of the results of the
    transaction.

    :param action_callback: function generating output for specific action. It
        takes two parameters - action as a string and list of affected packages for
        this action
    :return: a list of lines containing a human-readable summary of the
        results of the transaction
    """
    def _tsi_or_pkg_nevra_cmp(item1, item2):
        """Compares two transaction items or packages by nevra.
           Used as a fallback when tsi does not contain package object.
        """
        # Order by name first, then by epoch:version-release, then by arch.
        ret = (item1.name > item2.name) - (item1.name < item2.name)
        if ret != 0:
            return ret
        nevra1 = hawkey.NEVRA(name=item1.name, epoch=item1.epoch, version=item1.version,
                              release=item1.release, arch=item1.arch)
        nevra2 = hawkey.NEVRA(name=item2.name, epoch=item2.epoch, version=item2.version,
                              release=item2.release, arch=item2.arch)
        ret = nevra1.evr_cmp(nevra2, base.sack)
        if ret != 0:
            return ret
        return (item1.arch > item2.arch) - (item1.arch < item2.arch)

    list_bunch = dnf.util._make_lists(transaction)
    # Include packages left out due to conflicts / broken dependencies.
    skipped_conflicts, skipped_broken = base._skipped_packages(
        report_problems=False, transaction=transaction)
    skipped = skipped_conflicts.union(skipped_broken)

    out = []
    for (action, tsis) in [(_('Upgraded'), list_bunch.upgraded),
                           (_('Downgraded'), list_bunch.downgraded),
                           (_('Installed'), list_bunch.installed +
                            list_bunch.installed_group +
                            list_bunch.installed_weak +
                            list_bunch.installed_dep),
                           (_('Reinstalled'), list_bunch.reinstalled),
                           (_('Skipped'), skipped),
                           (_('Removed'), list_bunch.erased +
                            list_bunch.erased_dep +
                            list_bunch.erased_clean),
                           (_('Failed'), list_bunch.failed)]:
        out.extend(action_callback(
            action, sorted(tsis, key=functools.cmp_to_key(_tsi_or_pkg_nevra_cmp))))

    return out
lib64/python3.8/wsgiref/util.py 0000644 00000013333 15040173635 0012242 0 ustar 00 """Miscellaneous WSGI-related Utilities"""
import posixpath
__all__ = [
'FileWrapper', 'guess_scheme', 'application_uri', 'request_uri',
'shift_path_info', 'setup_testing_defaults',
]
class FileWrapper:
    """Wrapper to convert file-like objects to iterables"""

    def __init__(self, filelike, blksize=8192):
        self.filelike = filelike
        self.blksize = blksize
        # Expose the wrapped object's close() so servers can close us.
        if hasattr(filelike, 'close'):
            self.close = filelike.close

    def __getitem__(self, key):
        # Legacy sequence-protocol iteration; kept only for compatibility.
        import warnings
        warnings.warn(
            "FileWrapper's __getitem__ method ignores 'key' parameter. "
            "Use iterator protocol instead.",
            DeprecationWarning,
            stacklevel=2
        )
        chunk = self.filelike.read(self.blksize)
        if not chunk:
            raise IndexError
        return chunk

    def __iter__(self):
        return self

    def __next__(self):
        chunk = self.filelike.read(self.blksize)
        if not chunk:
            raise StopIteration
        return chunk
def guess_scheme(environ):
    """Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
    """
    # Many servers set the HTTPS CGI variable to flag a TLS connection.
    return 'https' if environ.get("HTTPS") in ('yes', 'on', '1') else 'http'
def application_uri(environ):
    """Return the application's base URI (no PATH_INFO or QUERY_STRING)"""
    from urllib.parse import quote
    scheme = environ['wsgi.url_scheme']
    url = scheme + '://'

    if environ.get('HTTP_HOST'):
        # The Host header already carries host[:port].
        url += environ['HTTP_HOST']
    else:
        url += environ['SERVER_NAME']
        # Append the port only when it is not the scheme's default.
        default_port = '443' if scheme == 'https' else '80'
        if environ['SERVER_PORT'] != default_port:
            url += ':' + environ['SERVER_PORT']

    url += quote(environ.get('SCRIPT_NAME') or '/', encoding='latin1')
    return url
def request_uri(environ, include_query=True):
    """Return the full request URI, optionally including the query string"""
    from urllib.parse import quote
    url = application_uri(environ)
    path_info = quote(environ.get('PATH_INFO', ''), safe='/;=,', encoding='latin1')
    if environ.get('SCRIPT_NAME'):
        url += path_info
    else:
        # application_uri() already ends with '/', so drop PATH_INFO's
        # leading slash to avoid doubling it.
        url += path_info[1:]
    if include_query and environ.get('QUERY_STRING'):
        url += '?' + environ['QUERY_STRING']
    return url
def shift_path_info(environ):
    """Shift a name from PATH_INFO to SCRIPT_NAME, returning it

    If there are no remaining path segments in PATH_INFO, return None.
    Note: 'environ' is modified in-place; use a copy if you need to keep
    the original PATH_INFO or SCRIPT_NAME.

    Note: when PATH_INFO is just a '/', this returns '' and appends a trailing
    '/' to SCRIPT_NAME, even though empty path segments are normally ignored,
    and SCRIPT_NAME doesn't normally end in a '/'.  This is intentional
    behavior, to ensure that an application can tell the difference between
    '/x' and '/x/' when traversing to objects.
    """
    path_info = environ.get('PATH_INFO', '')
    if not path_info:
        return None

    segments = path_info.split('/')
    # Drop empty and '.' segments, but never the first or the last element.
    segments[1:-1] = [seg for seg in segments[1:-1] if seg and seg != '.']
    name = segments[1]
    del segments[1]

    script_name = environ.get('SCRIPT_NAME', '')
    script_name = posixpath.normpath(script_name + '/' + name)
    if script_name.endswith('/'):
        script_name = script_name[:-1]
    if not name and not script_name.endswith('/'):
        # PATH_INFO was just '/': keep a trailing '/' on SCRIPT_NAME so an
        # application can distinguish '/x' from '/x/'.
        script_name += '/'

    environ['SCRIPT_NAME'] = script_name
    environ['PATH_INFO'] = '/'.join(segments)

    # Special case: '/.' on PATH_INFO doesn't get stripped,
    # because we don't strip the last element of PATH_INFO
    # if there's only one path part left.  Instead of fixing this
    # above, we fix it here so that PATH_INFO gets normalized to
    # an empty string in the environ.
    if name == '.':
        name = None
    return name
def setup_testing_defaults(environ):
    """Update 'environ' with trivial defaults for testing purposes

    This adds various parameters required for WSGI, including HTTP_HOST,
    SERVER_NAME, SERVER_PORT, REQUEST_METHOD, SCRIPT_NAME, PATH_INFO,
    and all of the wsgi.* variables.  It only supplies default values,
    and does not replace any existing settings for these variables.

    This routine is intended to make it easier for unit tests of WSGI
    servers and applications to set up dummy environments.  It should *not*
    be used by actual WSGI servers or applications, since the data is fake!
    """
    from io import StringIO, BytesIO

    environ.setdefault('SERVER_NAME', '127.0.0.1')
    environ.setdefault('SERVER_PROTOCOL', 'HTTP/1.0')
    environ.setdefault('HTTP_HOST', environ['SERVER_NAME'])
    environ.setdefault('REQUEST_METHOD', 'GET')

    # Default the path pair only when neither half was supplied.
    if 'SCRIPT_NAME' not in environ and 'PATH_INFO' not in environ:
        environ.setdefault('SCRIPT_NAME', '')
        environ.setdefault('PATH_INFO', '/')

    environ.setdefault('wsgi.version', (1, 0))
    environ.setdefault('wsgi.run_once', 0)
    environ.setdefault('wsgi.multithread', 0)
    environ.setdefault('wsgi.multiprocess', 0)
    environ.setdefault('wsgi.input', BytesIO())
    environ.setdefault('wsgi.errors', StringIO())
    environ.setdefault('wsgi.url_scheme', guess_scheme(environ))

    # Default port matches whichever scheme ended up in the environ.
    if environ['wsgi.url_scheme'] == 'http':
        environ.setdefault('SERVER_PORT', '80')
    elif environ['wsgi.url_scheme'] == 'https':
        environ.setdefault('SERVER_PORT', '443')
# Lowercased names of the HTTP/1.1 hop-by-hop headers (RFC 2616 section 13.5.1).
_hoppish = frozenset((
    'connection', 'keep-alive', 'proxy-authenticate',
    'proxy-authorization', 'te', 'trailers', 'transfer-encoding',
    'upgrade',
)).__contains__


def is_hop_by_hop(header_name):
    """Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
    return _hoppish(header_name.lower())
lib64/python2.7/wsgiref/util.py 0000644 00000012710 15045766770 0012253 0 ustar 00 """Miscellaneous WSGI-related Utilities"""
import posixpath
__all__ = [
'FileWrapper', 'guess_scheme', 'application_uri', 'request_uri',
'shift_path_info', 'setup_testing_defaults',
]
class FileWrapper:
    """Wrapper to convert file-like objects to iterables"""

    def __init__(self, filelike, blksize=8192):
        self.filelike = filelike
        self.blksize = blksize
        # Expose the wrapped object's close() so servers can close us.
        if hasattr(filelike,'close'):
            self.close = filelike.close

    def __getitem__(self,key):
        # Legacy sequence-protocol iteration: 'key' is ignored; each call
        # simply returns the next chunk (IndexError ends the loop).
        data = self.filelike.read(self.blksize)
        if data:
            return data
        raise IndexError

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol (next, not __next__).
        data = self.filelike.read(self.blksize)
        if data:
            return data
        raise StopIteration
def guess_scheme(environ):
    """Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
    """
    # Many servers set the HTTPS CGI variable to flag a TLS connection.
    return 'https' if environ.get("HTTPS") in ('yes', 'on', '1') else 'http'
def application_uri(environ):
    """Return the application's base URI (no PATH_INFO or QUERY_STRING)"""
    url = environ['wsgi.url_scheme']+'://'
    from urllib import quote

    if environ.get('HTTP_HOST'):
        # The Host header already carries host[:port].
        url += environ['HTTP_HOST']
    else:
        url += environ['SERVER_NAME']

        # Append the port only when it differs from the scheme's default.
        if environ['wsgi.url_scheme'] == 'https':
            if environ['SERVER_PORT'] != '443':
                url += ':' + environ['SERVER_PORT']
        else:
            if environ['SERVER_PORT'] != '80':
                url += ':' + environ['SERVER_PORT']

    url += quote(environ.get('SCRIPT_NAME') or '/')
    return url

def request_uri(environ, include_query=1):
    """Return the full request URI, optionally including the query string"""
    url = application_uri(environ)
    from urllib import quote
    path_info = quote(environ.get('PATH_INFO',''),safe='/;=,')
    if not environ.get('SCRIPT_NAME'):
        # application_uri() already ends in '/': skip PATH_INFO's leading '/'.
        url += path_info[1:]
    else:
        url += path_info
    if include_query and environ.get('QUERY_STRING'):
        url += '?' + environ['QUERY_STRING']
    return url
def shift_path_info(environ):
    """Shift a name from PATH_INFO to SCRIPT_NAME, returning it

    If there are no remaining path segments in PATH_INFO, return None.
    Note: 'environ' is modified in-place; use a copy if you need to keep
    the original PATH_INFO or SCRIPT_NAME.

    Note: when PATH_INFO is just a '/', this returns '' and appends a trailing
    '/' to SCRIPT_NAME, even though empty path segments are normally ignored,
    and SCRIPT_NAME doesn't normally end in a '/'.  This is intentional
    behavior, to ensure that an application can tell the difference between
    '/x' and '/x/' when traversing to objects.
    """
    path_info = environ.get('PATH_INFO', '')
    if not path_info:
        return None

    parts = path_info.split('/')
    # Drop empty and '.' segments from the interior; the first and last
    # entries are kept so leading/trailing slashes survive the round trip.
    kept = []
    for segment in parts[1:-1]:
        if segment and segment != '.':
            kept.append(segment)
    parts[1:-1] = kept

    # The segment being shifted is always the one right after the leading ''.
    name = parts.pop(1)

    base = environ.get('SCRIPT_NAME', '')
    script_name = posixpath.normpath(base + '/' + name)
    if script_name.endswith('/'):
        script_name = script_name[:-1]
    if not name and not script_name.endswith('/'):
        # PATH_INFO was a bare '/': preserve the trailing-slash distinction.
        script_name += '/'

    environ['SCRIPT_NAME'] = script_name
    environ['PATH_INFO'] = '/'.join(parts)

    # A lone '.' normalizes away entirely; report it as "nothing shifted"
    # so PATH_INFO ends up empty in the environ.
    if name == '.':
        name = None
    return name
def setup_testing_defaults(environ):
    """Update 'environ' with trivial defaults for testing purposes

    This adds various parameters required for WSGI, including HTTP_HOST,
    SERVER_NAME, SERVER_PORT, REQUEST_METHOD, SCRIPT_NAME, PATH_INFO,
    and all of the wsgi.* variables.  It only supplies default values,
    and does not replace any existing settings for these variables.

    This routine is intended to make it easier for unit tests of WSGI
    servers and applications to set up dummy environments.  It should *not*
    be used by actual WSGI servers or applications, since the data is fake!
    """
    environ.setdefault('SERVER_NAME','127.0.0.1')
    environ.setdefault('SERVER_PROTOCOL','HTTP/1.0')

    # HTTP_HOST mirrors whatever SERVER_NAME ended up being.
    environ.setdefault('HTTP_HOST',environ['SERVER_NAME'])
    environ.setdefault('REQUEST_METHOD','GET')

    # Only default the path pair when neither key exists, so a caller that
    # set just one of them is not handed an inconsistent combination.
    if 'SCRIPT_NAME' not in environ and 'PATH_INFO' not in environ:
        environ.setdefault('SCRIPT_NAME','')
        environ.setdefault('PATH_INFO','/')

    environ.setdefault('wsgi.version', (1,0))
    environ.setdefault('wsgi.run_once', 0)
    environ.setdefault('wsgi.multithread', 0)
    environ.setdefault('wsgi.multiprocess', 0)

    # Python 2: in-memory StringIO objects stand in for input/error streams.
    from StringIO import StringIO
    environ.setdefault('wsgi.input', StringIO(""))
    environ.setdefault('wsgi.errors', StringIO())

    environ.setdefault('wsgi.url_scheme',guess_scheme(environ))

    # SERVER_PORT default must agree with the (possibly just-guessed) scheme.
    if environ['wsgi.url_scheme']=='http':
        environ.setdefault('SERVER_PORT', '80')
    elif environ['wsgi.url_scheme']=='https':
        environ.setdefault('SERVER_PORT', '443')
# Membership test for the RFC 2616 hop-by-hop header names (lower-cased).
_hoppish = frozenset([
    'connection', 'keep-alive', 'proxy-authenticate',
    'proxy-authorization', 'te', 'trailers', 'transfer-encoding',
    'upgrade',
]).__contains__

def is_hop_by_hop(header_name):
    """Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
    return _hoppish(header_name.lower())
lib64/python3.6/wsgiref/util.py 0000644 00000013002 15045767203 0012236 0 ustar 00 """Miscellaneous WSGI-related Utilities"""
import posixpath
__all__ = [
'FileWrapper', 'guess_scheme', 'application_uri', 'request_uri',
'shift_path_info', 'setup_testing_defaults',
]
class FileWrapper:
    """Iterable adapter turning a file-like object into a response body.

    Yields chunks of at most ``blksize`` bytes and, when the wrapped object
    has a ``close`` method, re-exports it so servers can release it.
    """

    def __init__(self, filelike, blksize=8192):
        self.filelike = filelike
        self.blksize = blksize
        close = getattr(filelike, 'close', None)
        if close is not None:
            self.close = close

    def __getitem__(self, key):
        # Legacy sequence protocol: *key* is ignored; chunks are handed out
        # sequentially and exhaustion is signalled with IndexError.
        chunk = self.filelike.read(self.blksize)
        if not chunk:
            raise IndexError
        return chunk

    def __iter__(self):
        return self

    def __next__(self):
        chunk = self.filelike.read(self.blksize)
        if not chunk:
            raise StopIteration
        return chunk
def guess_scheme(environ):
    """Guess whether 'wsgi.url_scheme' should be 'http' or 'https'.

    CGI-style servers conventionally set HTTPS to 'yes', 'on' or '1' when
    serving over SSL; any other value (or its absence) means plain HTTP.
    """
    return 'https' if environ.get("HTTPS") in ('yes', 'on', '1') else 'http'
def application_uri(environ):
    """Return the application's base URI (no PATH_INFO or QUERY_STRING)"""
    from urllib.parse import quote

    scheme = environ['wsgi.url_scheme']
    url = scheme + '://'

    host = environ.get('HTTP_HOST')
    if host:
        url += host
    else:
        url += environ['SERVER_NAME']
        # Append the port only when it differs from the scheme's default.
        port = environ['SERVER_PORT']
        default_port = '443' if scheme == 'https' else '80'
        if port != default_port:
            url += ':' + port

    # WSGI environ strings are latin-1-decoded bytes; quote them back the
    # same way.  An empty SCRIPT_NAME means the app is mounted at the root.
    url += quote(environ.get('SCRIPT_NAME') or '/', encoding='latin1')
    return url
def request_uri(environ, include_query=True):
    """Return the full request URI, optionally including the query string"""
    from urllib.parse import quote

    url = application_uri(environ)
    # '/' and a few sub-delimiters stay unescaped so the path keeps its shape.
    path = quote(environ.get('PATH_INFO', ''), safe='/;=,', encoding='latin1')
    if environ.get('SCRIPT_NAME'):
        url += path
    else:
        # application_uri already ends in '/'; drop PATH_INFO's leading one.
        url += path[1:]

    query = environ.get('QUERY_STRING')
    if include_query and query:
        url += '?' + query
    return url
def shift_path_info(environ):
    """Shift a name from PATH_INFO to SCRIPT_NAME, returning it

    If there are no remaining path segments in PATH_INFO, return None.
    Note: 'environ' is modified in-place; use a copy if you need to keep
    the original PATH_INFO or SCRIPT_NAME.

    Note: when PATH_INFO is just a '/', this returns '' and appends a trailing
    '/' to SCRIPT_NAME, even though empty path segments are normally ignored,
    and SCRIPT_NAME doesn't normally end in a '/'.  This is intentional
    behavior, to ensure that an application can tell the difference between
    '/x' and '/x/' when traversing to objects.
    """
    path_info = environ.get('PATH_INFO', '')
    if not path_info:
        return None

    parts = path_info.split('/')
    # Drop empty and '.' segments from the interior; the first and last
    # entries are kept so leading/trailing slashes survive the round trip.
    kept = []
    for segment in parts[1:-1]:
        if segment and segment != '.':
            kept.append(segment)
    parts[1:-1] = kept

    # The segment being shifted is always the one right after the leading ''.
    name = parts.pop(1)

    base = environ.get('SCRIPT_NAME', '')
    script_name = posixpath.normpath(base + '/' + name)
    if script_name.endswith('/'):
        script_name = script_name[:-1]
    if not name and not script_name.endswith('/'):
        # PATH_INFO was a bare '/': preserve the trailing-slash distinction.
        script_name += '/'

    environ['SCRIPT_NAME'] = script_name
    environ['PATH_INFO'] = '/'.join(parts)

    # A lone '.' normalizes away entirely; report it as "nothing shifted"
    # so PATH_INFO ends up empty in the environ.
    if name == '.':
        name = None
    return name
def setup_testing_defaults(environ):
    """Update 'environ' with trivial defaults for testing purposes

    This adds various parameters required for WSGI, including HTTP_HOST,
    SERVER_NAME, SERVER_PORT, REQUEST_METHOD, SCRIPT_NAME, PATH_INFO,
    and all of the wsgi.* variables.  It only supplies default values,
    and does not replace any existing settings for these variables.

    This routine is intended to make it easier for unit tests of WSGI
    servers and applications to set up dummy environments.  It should *not*
    be used by actual WSGI servers or applications, since the data is fake!
    """
    setdefault = environ.setdefault

    setdefault('SERVER_NAME', '127.0.0.1')
    setdefault('SERVER_PROTOCOL', 'HTTP/1.0')

    # HTTP_HOST mirrors whatever SERVER_NAME ended up being.
    setdefault('HTTP_HOST', environ['SERVER_NAME'])
    setdefault('REQUEST_METHOD', 'GET')

    # Only default the path pair when neither key exists, so a caller that
    # set just one of them is not handed an inconsistent combination.
    if 'SCRIPT_NAME' not in environ and 'PATH_INFO' not in environ:
        setdefault('SCRIPT_NAME', '')
        setdefault('PATH_INFO', '/')

    setdefault('wsgi.version', (1, 0))
    setdefault('wsgi.run_once', 0)
    setdefault('wsgi.multithread', 0)
    setdefault('wsgi.multiprocess', 0)

    # Dummy streams: request body is bytes, error log is text.
    from io import StringIO, BytesIO
    setdefault('wsgi.input', BytesIO())
    setdefault('wsgi.errors', StringIO())

    setdefault('wsgi.url_scheme', guess_scheme(environ))

    # SERVER_PORT default must agree with the (possibly just-guessed) scheme.
    scheme = environ['wsgi.url_scheme']
    if scheme == 'http':
        setdefault('SERVER_PORT', '80')
    elif scheme == 'https':
        setdefault('SERVER_PORT', '443')
# Membership test for the RFC 2616 hop-by-hop header names (lower-cased).
_hoppish = frozenset([
    'connection', 'keep-alive', 'proxy-authenticate',
    'proxy-authorization', 'te', 'trailers', 'transfer-encoding',
    'upgrade',
]).__contains__

def is_hop_by_hop(header_name):
    """Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
    return _hoppish(header_name.lower())
lib64/python2.7/ctypes/util.py 0000644 00000025037 15047030245 0012101 0 ustar 00 import os
import subprocess
import sys
# find_library(name) returns the pathname of a library, or None.
if os.name == "nt":

    def _get_build_version():
        """Return the version of MSVC that was used to build Python.

        For Python 2.3 and up, the version number is included in
        sys.version.  For earlier versions, assume the compiler is MSVC 6.
        """
        # This function was copied from Lib/distutils/msvccompiler.py
        prefix = "MSC v."
        i = sys.version.find(prefix)
        if i == -1:
            return 6
        i = i + len(prefix)
        s, rest = sys.version[i:].split(" ", 1)
        # "MSC v.1500" -> major 9; internal MSC number is offset by 6.
        majorVersion = int(s[:-2]) - 6
        minorVersion = int(s[2:3]) / 10.0
        # I don't think paths are affected by minor version in version 6
        if majorVersion == 6:
            minorVersion = 0
        if majorVersion >= 6:
            return majorVersion + minorVersion
        # else we don't know what version of the compiler this is
        return None

    def find_msvcrt():
        """Return the name of the VC runtime dll"""
        version = _get_build_version()
        if version is None:
            # better be safe than sorry
            return None
        if version <= 6:
            clibname = 'msvcrt'
        else:
            # e.g. version 9.0 -> 'msvcr90'
            clibname = 'msvcr%d' % (version * 10)

        # If python was built with in debug mode
        import imp
        if imp.get_suffixes()[0][0] == '_d.pyd':
            clibname += 'd'
        return clibname+'.dll'

    def find_library(name):
        """Windows: search PATH for *name*, trying both bare and '.dll' forms."""
        if name in ('c', 'm'):
            return find_msvcrt()
        # See MSDN for the REAL search order.
        for directory in os.environ['PATH'].split(os.pathsep):
            fname = os.path.join(directory, name)
            if os.path.isfile(fname):
                return fname
            if fname.lower().endswith(".dll"):
                continue
            fname = fname + ".dll"
            if os.path.isfile(fname):
                return fname
        return None
if os.name == "ce":
    # search path according to MSDN:
    # - absolute path specified by filename
    # - The .exe launch directory
    # - the Windows directory
    # - ROM dll files (where are they?)
    # - OEM specified search path: HKLM\Loader\SystemPath
    def find_library(name):
        # Windows CE: the loader performs the search itself, so the bare
        # name is already the correct thing to hand to LoadLibrary.
        return name
if os.name == "posix" and sys.platform == "darwin":
    from ctypes.macholib.dyld import dyld_find as _dyld_find
    def find_library(name):
        """Mac OS X: try the usual dylib/framework spellings via dyld."""
        possible = ['lib%s.dylib' % name,
                    '%s.dylib' % name,
                    '%s.framework/%s' % (name, name)]
        for name in possible:
            try:
                return _dyld_find(name)
            except ValueError:
                # dyld_find raises ValueError when the image isn't found;
                # fall through to the next candidate spelling.
                continue
        return None
elif os.name == "posix":
    # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump
    import re, tempfile, errno

    def _findLib_gcc(name):
        """Locate 'lib<name>.*' via the C compiler's link trace, or None."""
        # Run GCC's linker with the -t (aka --trace) option and examine the
        # library name it prints out. The GCC command will fail because we
        # haven't supplied a proper program with main(), but that does not
        # matter.
        expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
        cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit; fi;' \
              'LANG=C LC_ALL=C $CC -Wl,-t -o "$2" 2>&1 -l"$1"'

        # Temp file receives the (never-used) link output "$2".
        temp = tempfile.NamedTemporaryFile()
        try:
            proc = subprocess.Popen((cmd, '_findLib_gcc', name, temp.name),
                                    shell=True,
                                    stdout=subprocess.PIPE)
            [trace, _] = proc.communicate()
        finally:
            try:
                temp.close()
            except OSError, e:
                # ENOENT is raised if the file was already removed, which is
                # the normal behaviour of GCC if linking fails
                if e.errno != errno.ENOENT:
                    raise
        res = re.search(expr, trace)
        if not res:
            return None
        return res.group(0)
    if sys.platform == "sunos5":
        # use /usr/ccs/bin/dump on solaris
        def _get_soname(f):
            """Return the SONAME recorded in ELF file *f*, or None."""
            if not f:
                return None

            null = open(os.devnull, "wb")
            try:
                with null:
                    proc = subprocess.Popen(("/usr/ccs/bin/dump", "-Lpv", f),
                                            stdout=subprocess.PIPE,
                                            stderr=null)
            except OSError:  # E.g. command not found
                return None
            [data, _] = proc.communicate()
            res = re.search(br'\[.*\]\sSONAME\s+([^\s]+)', data)
            if not res:
                return None
            return res.group(1)
    else:
        def _get_soname(f):
            """Return the SONAME of ELF file *f* via objdump, or None."""
            # assuming GNU binutils / ELF
            if not f:
                return None
            # Exit code 10 marks "objdump unavailable", distinct from failure.
            cmd = 'if ! type objdump >/dev/null 2>&1; then exit 10; fi;' \
                  'objdump -p -j .dynamic 2>/dev/null "$1"'
            proc = subprocess.Popen((cmd, '_get_soname', f), shell=True,
                                    stdout=subprocess.PIPE)
            [dump, _] = proc.communicate()
            if proc.returncode == 10:
                return os.path.basename(f)    # This is good for GLibc, I think,
                                              # and a dep on binutils is big (for
                                              # live CDs).
            res = re.search(br'\sSONAME\s+([^\s]+)', dump)
            if not res:
                return None
            return res.group(1)
    if (sys.platform.startswith("freebsd")
        or sys.platform.startswith("openbsd")
        or sys.platform.startswith("dragonfly")):

        def _num_version(libname):
            # "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ]
            parts = libname.split(b".")
            nums = []
            try:
                while parts:
                    nums.insert(0, int(parts.pop()))
            except ValueError:
                pass
            # sys.maxint makes unversioned names sort after versioned ones.
            return nums or [sys.maxint]

        def find_library(name):
            """BSD: scan 'ldconfig -r' output for the newest lib<name>."""
            ename = re.escape(name)
            expr = r':-l%s\.\S+ => \S*/(lib%s\.\S+)' % (ename, ename)

            null = open(os.devnull, 'wb')
            try:
                with null:
                    proc = subprocess.Popen(('/sbin/ldconfig', '-r'),
                                            stdout=subprocess.PIPE,
                                            stderr=null)
            except OSError:  # E.g. command not found
                data = b''
            else:
                [data, _] = proc.communicate()
            res = re.findall(expr, data)
            if not res:
                # Fall back to the compiler trace + SONAME extraction.
                return _get_soname(_findLib_gcc(name))
            res.sort(key=_num_version)
            return res[-1]
    elif sys.platform == "sunos5":

        def _findLib_crle(name, is64):
            """Search crle(1)'s default library path for lib<name>.so."""
            if not os.path.exists('/usr/bin/crle'):
                return None

            env = dict(os.environ)
            env['LC_ALL'] = 'C'

            if is64:
                args = ('/usr/bin/crle', '-64')
            else:
                args = ('/usr/bin/crle',)

            paths = None
            null = open(os.devnull, 'wb')
            try:
                with null:
                    proc = subprocess.Popen(args,
                                            stdout=subprocess.PIPE,
                                            stderr=null,
                                            env=env)
            except OSError:  # E.g. bad executable
                return None
            try:
                for line in proc.stdout:
                    line = line.strip()
                    if line.startswith(b'Default Library Path (ELF):'):
                        # Fifth whitespace-separated field is the ':'-joined path list.
                        paths = line.split()[4]
            finally:
                proc.stdout.close()
                proc.wait()

            if not paths:
                return None

            for dir in paths.split(":"):
                libfile = os.path.join(dir, "lib%s.so" % name)
                if os.path.exists(libfile):
                    return libfile

            return None

        def find_library(name, is64 = False):
            # Try crle's path list first, falling back to the gcc link trace.
            return _get_soname(_findLib_crle(name, is64) or _findLib_gcc(name))
    else:

        def _findSoname_ldconfig(name):
            """Linux/glibc: look up lib<name> in the 'ldconfig -p' cache."""
            import struct
            # Pointer size distinguishes 32- vs 64-bit ABI cache entries.
            if struct.calcsize('l') == 4:
                machine = os.uname()[4] + '-32'
            else:
                machine = os.uname()[4] + '-64'
            mach_map = {
                'x86_64-64': 'libc6,x86-64',
                'ppc64-64': 'libc6,64bit',
                'sparc64-64': 'libc6,64bit',
                's390x-64': 'libc6,64bit',
                'ia64-64': 'libc6,IA-64',
                }
            abi_type = mach_map.get(machine, 'libc6')
            # XXX assuming GLIBC's ldconfig (with option -p)
            expr = r'\s+(lib%s\.[^\s]+)\s+\(%s' % (re.escape(name), abi_type)

            # Force the C locale so the output format is predictable.
            env = dict(os.environ)
            env['LC_ALL'] = 'C'
            env['LANG'] = 'C'
            null = open(os.devnull, 'wb')
            try:
                with null:
                    p = subprocess.Popen(['/sbin/ldconfig', '-p'],
                                         stderr=null,
                                         stdout=subprocess.PIPE,
                                         env=env)
            except OSError:  # E.g. command not found
                return None
            [data, _] = p.communicate()
            res = re.search(expr, data)
            if not res:
                return None
            return res.group(1)

        def find_library(name):
            # Prefer the ldconfig cache; fall back to the gcc trace + SONAME.
            return _findSoname_ldconfig(name) or _get_soname(_findLib_gcc(name))
################################################################
# test code

def test():
    """Ad-hoc smoke test: print the results of library lookups (Python 2)."""
    from ctypes import cdll
    if os.name == "nt":
        print cdll.msvcrt
        print cdll.load("msvcrt")
        print find_library("msvcrt")

    if os.name == "posix":
        # find and load_version
        print find_library("m")
        print find_library("c")
        print find_library("bz2")

        # getattr
##        print cdll.m
##        print cdll.bz2

        # load
        if sys.platform == "darwin":
            print cdll.LoadLibrary("libm.dylib")
            print cdll.LoadLibrary("libcrypto.dylib")
            print cdll.LoadLibrary("libSystem.dylib")
            print cdll.LoadLibrary("System.framework/System")
        else:
            print cdll.LoadLibrary("libm.so")
            print cdll.LoadLibrary("libcrypt.so")
            print find_library("crypt")

if __name__ == "__main__":
    test()
usr/lib64/python3.8/unittest/util.py 0000644 00000012137 15053076045 0013266 0 ustar 00 """Various utility functions."""
from collections import namedtuple, Counter
from os.path import commonprefix
# Tells unittest's traceback machinery to hide frames from this module.
__unittest = True

# Cap on the repr length shown in failure messages.
_MAX_LENGTH = 80
# Space reserved for an elision placeholder like '[NNN chars]' — TODO confirm
# the exact derivation; 12 covers the format plus a few digits.
_PLACEHOLDER_LEN = 12
# Minimum characters always kept at the start/end of a shortened string.
_MIN_BEGIN_LEN = 5
_MIN_END_LEN = 5
# Minimum shared-prefix characters kept when common text is elided.
_MIN_COMMON_LEN = 5
# Budget left for the differing middle once both placeholders and the
# begin/common/end minima are accounted for.
_MIN_DIFF_LEN = _MAX_LENGTH - \
               (_MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN +
                _PLACEHOLDER_LEN + _MIN_END_LEN)
assert _MIN_DIFF_LEN >= 0
def _shorten(s, prefixlen, suffixlen):
    """Elide the middle of *s*, keeping *prefixlen* leading and *suffixlen*
    trailing characters, when the elision actually saves space."""
    elided = len(s) - prefixlen - suffixlen
    # Only worthwhile when the removed run outweighs the placeholder text.
    if elided > _PLACEHOLDER_LEN:
        return '%s[%d chars]%s' % (s[:prefixlen], elided, s[len(s) - suffixlen:])
    return s
def _common_shorten_repr(*args):
    """Return reprs of *args*, shortened in lockstep so they stay comparable.

    All reprs share any elision of their common prefix, which keeps the
    point of divergence visible in assertion failure messages.
    """
    args = tuple(map(safe_repr, args))
    maxlen = max(map(len, args))
    if maxlen <= _MAX_LENGTH:
        # Everything already fits; return the full reprs unchanged.
        return args

    prefix = commonprefix(args)
    prefixlen = len(prefix)

    common_len = _MAX_LENGTH - \
                 (maxlen - prefixlen + _MIN_BEGIN_LEN + _PLACEHOLDER_LEN)
    if common_len > _MIN_COMMON_LEN:
        # The differing tails fit within budget: elide only the shared prefix.
        assert _MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN + \
               (maxlen - prefixlen) < _MAX_LENGTH
        prefix = _shorten(prefix, _MIN_BEGIN_LEN, common_len)
        return tuple(prefix + s[prefixlen:] for s in args)

    # Tails are long too: shorten the prefix and each differing suffix.
    prefix = _shorten(prefix, _MIN_BEGIN_LEN, _MIN_COMMON_LEN)
    return tuple(prefix + _shorten(s[prefixlen:], _MIN_DIFF_LEN, _MIN_END_LEN)
                 for s in args)
def safe_repr(obj, short=False):
    """repr(obj) that never raises; optionally truncated near _MAX_LENGTH."""
    try:
        result = repr(obj)
    except Exception:
        # A broken __repr__ must not break error reporting.
        result = object.__repr__(obj)
    if short and len(result) >= _MAX_LENGTH:
        return result[:_MAX_LENGTH] + ' [truncated]...'
    return result
def strclass(cls):
    """Return the dotted 'module.Qualname' label for class *cls*."""
    return f"{cls.__module__}.{cls.__qualname__}"
def sorted_list_difference(expected, actual):
    """Finds elements in only one or the other of two, sorted input lists.

    Returns a two-element tuple of lists.  The first list contains those
    elements in the "expected" list but not in the "actual" list, and the
    second contains those elements in the "actual" list but not in the
    "expected" list.  Duplicate elements in either input list are ignored.
    """
    i = j = 0
    missing = []
    unexpected = []
    while True:
        try:
            e = expected[i]
            a = actual[j]
            if e < a:
                # 'e' precedes everything left in 'actual': it is missing.
                missing.append(e)
                i += 1
                # Skip duplicates of 'e' (may run off the end -> IndexError).
                while expected[i] == e:
                    i += 1
            elif e > a:
                # 'a' precedes everything left in 'expected': unexpected.
                unexpected.append(a)
                j += 1
                while actual[j] == a:
                    j += 1
            else:
                # Matched: advance both past the element and its duplicates.
                i += 1
                try:
                    while expected[i] == e:
                        i += 1
                finally:
                    # Runs even when the 'expected' scan raised IndexError,
                    # so 'actual' duplicates are still consumed.
                    j += 1
                    while actual[j] == a:
                        j += 1
        except IndexError:
            # A list (or a duplicate scan) ran off the end; whatever remains
            # in the other list goes straight to its result bucket.
            missing.extend(expected[i:])
            unexpected.extend(actual[j:])
            break
    return missing, unexpected
def unorderable_list_difference(expected, actual):
    """Same behavior as sorted_list_difference but
    for lists of unorderable items (like dicts).

    As it does a linear search per item (remove)
    it has O(n*n) performance.
    """
    missing = []
    # Consume 'expected' destructively: each item not found in 'actual' is
    # missing, and every match removes one occurrence from 'actual'.
    while expected:
        candidate = expected.pop()
        try:
            actual.remove(candidate)
        except ValueError:
            missing.append(candidate)

    # anything left in actual is unexpected
    return missing, actual
def three_way_cmp(x, y):
    """Return -1 if x < y, 0 if x == y and 1 if x > y"""
    # Boolean subtraction: True/False act as 1/0 integers.
    greater = x > y
    lesser = x < y
    return int(greater) - int(lesser)
_Mismatch = namedtuple('Mismatch', 'actual expected value')
def _count_diff_all_purpose(actual, expected):
'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'
# elements need not be hashable
s, t = list(actual), list(expected)
m, n = len(s), len(t)
NULL = object()
result = []
for i, elem in enumerate(s):
if elem is NULL:
continue
cnt_s = cnt_t = 0
for j in range(i, m):
if s[j] == elem:
cnt_s += 1
s[j] = NULL
for j, other_elem in enumerate(t):
if other_elem == elem:
cnt_t += 1
t[j] = NULL
if cnt_s != cnt_t:
diff = _Mismatch(cnt_s, cnt_t, elem)
result.append(diff)
for i, elem in enumerate(t):
if elem is NULL:
continue
cnt_t = 0
for j in range(i, n):
if t[j] == elem:
cnt_t += 1
t[j] = NULL
diff = _Mismatch(0, cnt_t, elem)
result.append(diff)
return result
def _count_diff_hashable(actual, expected):
    'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'
    # elements must be hashable
    actual_counts = Counter(actual)
    expected_counts = Counter(expected)
    result = []
    # Elements seen in 'actual': report any count disagreement.
    for elem, cnt_act in actual_counts.items():
        cnt_exp = expected_counts.get(elem, 0)
        if cnt_act != cnt_exp:
            result.append(_Mismatch(cnt_act, cnt_exp, elem))
    # Elements that only ever appeared in 'expected'.
    for elem, cnt_exp in expected_counts.items():
        if elem not in actual_counts:
            result.append(_Mismatch(0, cnt_exp, elem))
    return result