| filename | patch | parent_content | id |
|---|---|---|---|
pipenv/core.py
|
@@ -17,7 +17,7 @@
from pipenv.patched import crayons
from pipenv.utils import (
cmd_list_to_shell, convert_deps_to_pip, create_spinner, download_file,
- find_python, get_canonical_names, get_source_list, is_pinned,
+ find_python, get_canonical_names, get_host_and_port, get_source_list, is_pinned,
is_python_command, is_required_version, is_star, is_valid_url,
parse_indexes, pep423_name, prepare_pip_source_args, proper_case,
python_version, run_command, subprocess_run, venv_resolve_deps
@@ -169,7 +169,7 @@ def import_requirements(project, r=None, dev=False):
if extra_index:
indexes.append(extra_index)
if trusted_host:
- trusted_hosts.append(trusted_host)
+ trusted_hosts.append(get_host_and_port(trusted_host))
indexes = sorted(set(indexes))
trusted_hosts = sorted(set(trusted_hosts))
reqs = [install_req_from_parsed_requirement(f) for f in parse_requirements(r, session=pip_requests)]
@@ -185,8 +185,13 @@ def import_requirements(project, r=None, dev=False):
else:
project.add_package_to_pipfile(str(package.req), dev=dev)
for index in indexes:
- trusted = index in trusted_hosts
- project.add_index_to_pipfile(index, verify_ssl=trusted)
+ # don't require HTTPS for trusted hosts (see: https://pip.pypa.io/en/stable/cli/pip/#cmdoption-trusted-host)
+ host_and_port = get_host_and_port(index)
+ require_valid_https = not any((v in trusted_hosts for v in (
+ host_and_port,
+ host_and_port.partition(':')[0], # also check if hostname without port is in trusted_hosts
+ )))
+ project.add_index_to_pipfile(index, verify_ssl=require_valid_https)
project.recase_pipfile()
|
import json as simplejson
import logging
import os
from pathlib import Path
from posixpath import expandvars
import sys
import time
import warnings
import click
import dotenv
import pipfile
import vistir
from pipenv import environments, exceptions, pep508checker, progress
from pipenv._compat import decode_for_output, fix_utf8
from pipenv.patched import crayons
from pipenv.utils import (
cmd_list_to_shell, convert_deps_to_pip, create_spinner, download_file,
find_python, get_canonical_names, get_source_list, is_pinned,
is_python_command, is_required_version, is_star, is_valid_url,
parse_indexes, pep423_name, prepare_pip_source_args, proper_case,
python_version, run_command, subprocess_run, venv_resolve_deps
)
if environments.is_type_checking():
from typing import Dict, List, Optional, Union
from pipenv.project import Project
from pipenv.vendor.requirementslib.models.requirements import Requirement
TSourceDict = Dict[str, Union[str, bool]]
# Packages that should be ignored later.
BAD_PACKAGES = (
"distribute",
"packaging",
"pip",
"pkg-resources",
"setuptools",
"wheel",
)
FIRST_PACKAGES = ("cython",)
if not environments.PIPENV_HIDE_EMOJIS:
now = time.localtime()
# Halloween easter-egg.
if ((now.tm_mon == 10) and (now.tm_mday == 30)) or (
(now.tm_mon == 10) and (now.tm_mday == 31)
):
INSTALL_LABEL = "🎃 "
# Christmas easter-egg.
elif ((now.tm_mon == 12) and (now.tm_mday == 24)) or (
(now.tm_mon == 12) and (now.tm_mday == 25)
):
INSTALL_LABEL = "🎅 "
else:
INSTALL_LABEL = "🐍 "
INSTALL_LABEL2 = crayons.normal("☤ ", bold=True)
STARTING_LABEL = " "
else:
INSTALL_LABEL = " "
INSTALL_LABEL2 = " "
STARTING_LABEL = " "
# Disable colors, for the color blind and others who do not prefer colors.
if environments.PIPENV_COLORBLIND:
crayons.disable()
def do_clear(project):
click.echo(crayons.normal(fix_utf8("Clearing caches..."), bold=True))
try:
from pip._internal import locations
except ImportError: # pip 9.
from pip import locations
try:
vistir.path.rmtree(project.s.PIPENV_CACHE_DIR, onerror=vistir.path.handle_remove_readonly)
# Other processes may be writing into this directory simultaneously.
vistir.path.rmtree(
locations.USER_CACHE_DIR,
ignore_errors=environments.PIPENV_IS_CI,
onerror=vistir.path.handle_remove_readonly
)
except OSError as e:
# Ignore FileNotFoundError. This is needed for Python 2.7.
import errno
if e.errno == errno.ENOENT:
pass
raise
def load_dot_env(project, as_dict=False):
"""Loads .env file into sys.environ."""
if not project.s.PIPENV_DONT_LOAD_ENV:
# If the project doesn't exist yet, check current directory for a .env file
project_directory = project.project_directory or "."
dotenv_file = project.s.PIPENV_DOTENV_LOCATION or os.sep.join(
[project_directory, ".env"]
)
if os.path.isfile(dotenv_file):
click.echo(
crayons.normal(fix_utf8("Loading .env environment variables..."), bold=True),
err=True,
)
else:
if project.s.PIPENV_DOTENV_LOCATION:
click.echo(
"{}: file {}={} does not exist!!\n{}".format(
crayons.red("Warning", bold=True),
crayons.normal("PIPENV_DOTENV_LOCATION", bold=True),
crayons.normal(project.s.PIPENV_DOTENV_LOCATION, bold=True),
crayons.red("Not loading environment variables.", bold=True),
),
err=True,
)
if as_dict:
return dotenv.dotenv_values(dotenv_file)
else:
dotenv.load_dotenv(dotenv_file, override=True)
project.s.initialize()
def cleanup_virtualenv(project, bare=True):
"""Removes the virtualenv directory from the system."""
if not bare:
click.echo(crayons.red("Environment creation aborted."))
try:
# Delete the virtualenv.
vistir.path.rmtree(project.virtualenv_location)
except OSError as e:
click.echo(
"{} An error occurred while removing {}!".format(
crayons.red("Error: ", bold=True),
crayons.green(project.virtualenv_location),
),
err=True,
)
click.echo(crayons.cyan(e), err=True)
def import_requirements(project, r=None, dev=False):
from pipenv.patched.notpip._vendor import requests as pip_requests
from pipenv.patched.notpip._internal.req.constructors import install_req_from_parsed_requirement
from pipenv.vendor.pip_shims.shims import parse_requirements
# Parse requirements.txt file with Pip's parser.
# Pip requires a `PipSession` which is a subclass of requests.Session.
# Since we're not making any network calls, it's initialized to nothing.
if r:
assert os.path.isfile(r)
# Default path, if none is provided.
if r is None:
r = project.requirements_location
with open(r) as f:
contents = f.read()
indexes = []
trusted_hosts = []
# Find and add extra indexes.
for line in contents.split("\n"):
index, extra_index, trusted_host, _ = parse_indexes(line.strip(), strict=True)
if index:
indexes = [index]
if extra_index:
indexes.append(extra_index)
if trusted_host:
trusted_hosts.append(trusted_host)
indexes = sorted(set(indexes))
trusted_hosts = sorted(set(trusted_hosts))
reqs = [install_req_from_parsed_requirement(f) for f in parse_requirements(r, session=pip_requests)]
for package in reqs:
if package.name not in BAD_PACKAGES:
if package.link is not None:
package_string = (
f"-e {package.link}"
if package.editable
else str(package.link)
)
project.add_package_to_pipfile(package_string, dev=dev)
else:
project.add_package_to_pipfile(str(package.req), dev=dev)
for index in indexes:
trusted = index in trusted_hosts
project.add_index_to_pipfile(index, verify_ssl=trusted)
project.recase_pipfile()
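# Illustrative note on the trusted-host handling changed by the patch above: the
# patched code derives a "host:port" value from each URL before comparing it with
# trusted_hosts, assuming get_host_and_port() from pipenv.utils behaves roughly like
#   get_host_and_port("https://internal.host:8080/simple")  -> "internal.host:8080"
# so that, for example, with trusted_hosts == ["internal.host"]:
#   host_and_port = "internal.host:8080"
#   require_valid_https = not any(v in trusted_hosts for v in (host_and_port, host_and_port.partition(":")[0]))
#   # -> False, so the index is written to the Pipfile with verify_ssl=False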
def ensure_environment():
# Skip this on Windows...
if os.name != "nt":
if "LANG" not in os.environ:
click.echo(
"{}: the environment variable {} is not set!"
"\nWe recommend setting this in {} (or equivalent) for "
"proper expected behavior.".format(
crayons.red("Warning", bold=True),
crayons.normal("LANG", bold=True),
crayons.green("~/.profile"),
),
err=True,
)
def import_from_code(path="."):
from pipreqs import pipreqs
rs = []
try:
for r in pipreqs.get_all_imports(
path, encoding="utf-8", extra_ignore_dirs=[".venv"]
):
if r not in BAD_PACKAGES:
rs.append(r)
pkg_names = pipreqs.get_pkg_names(rs)
return [proper_case(r) for r in pkg_names]
except Exception:
return []
def ensure_pipfile(project, validate=True, skip_requirements=False, system=False):
"""Creates a Pipfile for the project, if it doesn't exist."""
# Assert Pipfile exists.
python = project._which("python") if not (project.s.USING_DEFAULT_PYTHON or system) else None
if project.pipfile_is_empty:
# Show an error message and exit if system is passed and no pipfile exists
if system and not project.s.PIPENV_VIRTUALENV:
raise exceptions.PipenvOptionsError(
"--system",
"--system is intended to be used for pre-existing Pipfile "
"installation, not installation of specific packages. Aborting."
)
# If there's a requirements file, but no Pipfile...
if project.requirements_exists and not skip_requirements:
click.echo(
crayons.normal(
fix_utf8("requirements.txt found, instead of Pipfile! Converting..."),
bold=True,
)
)
# Create a Pipfile...
project.create_pipfile(python=python)
with create_spinner("Importing requirements...", project.s) as sp:
# Import requirements.txt.
try:
import_requirements(project)
except Exception:
sp.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format("Failed..."))
else:
sp.ok(environments.PIPENV_SPINNER_OK_TEXT.format("Success!"))
# Warn the user of side-effects.
click.echo(
"{0}: Your {1} now contains pinned versions, if your {2} did. \n"
"We recommend updating your {1} to specify the {3} version, instead."
"".format(
crayons.red("Warning", bold=True),
crayons.normal("Pipfile", bold=True),
crayons.normal("requirements.txt", bold=True),
crayons.normal('"*"', bold=True),
)
)
else:
click.echo(
crayons.normal(fix_utf8("Creating a Pipfile for this project..."), bold=True),
err=True,
)
# Create the pipfile if it doesn't exist.
project.create_pipfile(python=python)
# Validate the Pipfile's contents.
if validate and project.virtualenv_exists and not project.s.PIPENV_SKIP_VALIDATION:
# Ensure that Pipfile is using proper casing.
p = project.parsed_pipfile
changed = project.ensure_proper_casing()
# Write changes out to disk.
if changed:
click.echo(
crayons.normal("Fixing package names in Pipfile...", bold=True), err=True
)
project.write_toml(p)
def find_a_system_python(line):
"""Find a Python installation from a given line.
This tries to parse the line in various ways:
* Looks like an absolute path? Use it directly.
* Looks like a py.exe call? Use py.exe to get the executable.
* Starts with "py" something? Looks like a python command. Try to find it
in PATH, and use it directly.
* Search for "python" and "pythonX.Y" executables in PATH to find a match.
* Nothing fits, return None.
"""
from .vendor.pythonfinder import Finder
finder = Finder(system=False, global_search=True)
if not line:
return next(iter(finder.find_all_python_versions()), None)
# Use the windows finder executable
if (line.startswith("py ") or line.startswith("py.exe ")) and os.name == "nt":
line = line.split(" ", 1)[1].lstrip("-")
python_entry = find_python(finder, line)
return python_entry
def ensure_python(project, three=None, python=None):
# Runtime import is necessary due to the possibility that the environments module may have been reloaded.
if project.s.PIPENV_PYTHON and python is False and three is None:
python = project.s.PIPENV_PYTHON
def abort(msg=''):
click.echo(
"{}\nYou can specify specific versions of Python with:\n{}".format(
crayons.red(msg),
crayons.yellow(
"$ pipenv --python {}".format(
os.sep.join(("path", "to", "python"))
)
)
),
err=True,
)
sys.exit(1)
project.s.USING_DEFAULT_PYTHON = three is None and not python
# Find out which python is desired.
if not python:
python = convert_three_to_python(three, python)
if not python:
python = project.required_python_version
if not python:
python = project.s.PIPENV_DEFAULT_PYTHON_VERSION
path_to_python = find_a_system_python(python)
if project.s.is_verbose():
click.echo(f"Using python: {python}", err=True)
click.echo(f"Path to python: {path_to_python}", err=True)
if not path_to_python and python is not None:
# We need to install Python.
click.echo(
"{}: Python {} {}".format(
crayons.red("Warning", bold=True),
crayons.cyan(python),
fix_utf8("was not found on your system..."),
),
err=True,
)
# check for python installers
from .installers import Asdf, InstallerError, InstallerNotFound, Pyenv
# prefer pyenv if both pyenv and asdf are installed as it's
# dedicated to python installs so probably the preferred
# method of the user for new python installs.
installer = None
if not project.s.PIPENV_DONT_USE_PYENV:
try:
installer = Pyenv(project)
except InstallerNotFound:
pass
if installer is None and not project.s.PIPENV_DONT_USE_ASDF:
try:
installer = Asdf(project)
except InstallerNotFound:
pass
if not installer:
abort("Neither 'pyenv' nor 'asdf' could be found to install Python.")
else:
if environments.SESSION_IS_INTERACTIVE or project.s.PIPENV_YES:
try:
version = installer.find_version_to_install(python)
except ValueError:
abort()
except InstallerError as e:
abort(f'Something went wrong while installing Python:\n{e.err}')
s = "{} {} {}".format(
"Would you like us to install",
crayons.green(f"CPython {version}"),
f"with {installer}?",
)
# Prompt the user to continue...
if not (project.s.PIPENV_YES or click.confirm(s, default=True)):
abort()
else:
# Tell the user we're installing Python.
click.echo(
"{} {} {} {}{}".format(
crayons.normal("Installing", bold=True),
crayons.green(f"CPython {version}", bold=True),
crayons.normal(f"with {installer.cmd}", bold=True),
crayons.normal("(this may take a few minutes)"),
crayons.normal("...", bold=True),
)
)
with create_spinner("Installing python...", project.s) as sp:
try:
c = installer.install(version)
except InstallerError as e:
sp.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format(
"Failed...")
)
click.echo(fix_utf8("Something went wrong..."), err=True)
click.echo(crayons.cyan(e.err), err=True)
else:
sp.ok(environments.PIPENV_SPINNER_OK_TEXT.format("Success!"))
# Print the results, in a beautiful blue...
click.echo(crayons.cyan(c.stdout), err=True)
# Clear the pythonfinder caches
from .vendor.pythonfinder import Finder
finder = Finder(system=False, global_search=True)
finder.find_python_version.cache_clear()
finder.find_all_python_versions.cache_clear()
# Find the newly installed Python, hopefully.
version = str(version)
path_to_python = find_a_system_python(version)
try:
assert python_version(path_to_python) == version
except AssertionError:
click.echo(
"{}: The Python you just installed is not available on your {}, apparently."
"".format(
crayons.red("Warning", bold=True),
crayons.normal("PATH", bold=True),
),
err=True,
)
sys.exit(1)
return path_to_python
def ensure_virtualenv(project, three=None, python=None, site_packages=None, pypi_mirror=None):
"""Creates a virtualenv, if one doesn't exist."""
def abort():
sys.exit(1)
if not project.virtualenv_exists:
try:
# Ensure environment variables are set properly.
ensure_environment()
# Ensure Python is available.
python = ensure_python(project, three=three, python=python)
if python is not None and not isinstance(python, str):
python = python.path.as_posix()
# Create the virtualenv.
# Abort if --system (or running in a virtualenv).
if project.s.PIPENV_USE_SYSTEM:
click.echo(
crayons.red(
"You are attempting to re–create a virtualenv that "
"Pipenv did not create. Aborting."
)
)
sys.exit(1)
do_create_virtualenv(
project, python=python, site_packages=site_packages, pypi_mirror=pypi_mirror
)
except KeyboardInterrupt:
# If interrupted, cleanup the virtualenv.
cleanup_virtualenv(project, bare=False)
sys.exit(1)
# If --three, --two, or --python were passed...
elif (python) or (three is not None) or (site_packages is not None):
project.s.USING_DEFAULT_PYTHON = False
# Ensure python is installed before deleting existing virtual env
python = ensure_python(project, three=three, python=python)
if python is not None and not isinstance(python, str):
python = python.path.as_posix()
click.echo(crayons.red("Virtualenv already exists!"), err=True)
# If VIRTUAL_ENV is set, there is a possibility that we are
# going to remove the active virtualenv that the user cares
# about, so confirm first.
if "VIRTUAL_ENV" in os.environ:
if not (
project.s.PIPENV_YES or click.confirm("Remove existing virtualenv?", default=True)
):
abort()
click.echo(
crayons.normal(fix_utf8("Removing existing virtualenv..."), bold=True), err=True
)
# Remove the virtualenv.
cleanup_virtualenv(project, bare=True)
# Call this function again.
ensure_virtualenv(
project,
three=three,
python=python,
site_packages=site_packages,
pypi_mirror=pypi_mirror,
)
def ensure_project(
project,
three=None,
python=None,
validate=True,
system=False,
warn=True,
site_packages=None,
deploy=False,
skip_requirements=False,
pypi_mirror=None,
clear=False,
):
"""Ensures both Pipfile and virtualenv exist for the project."""
# Automatically use an activated virtualenv.
if project.s.PIPENV_USE_SYSTEM:
system = True
if not project.pipfile_exists and deploy:
raise exceptions.PipfileNotFound
# Skip virtualenv creation when --system was used.
if not system:
ensure_virtualenv(
project,
three=three,
python=python,
site_packages=site_packages,
pypi_mirror=pypi_mirror,
)
if warn:
# Warn users if they are using the wrong version of Python.
if project.required_python_version:
path_to_python = project._which("python") or project._which("py")
if path_to_python and project.required_python_version not in (
python_version(path_to_python) or ""
):
click.echo(
"{}: Your Pipfile requires {} {}, "
"but you are using {} ({}).".format(
crayons.red("Warning", bold=True),
crayons.normal("python_version", bold=True),
crayons.cyan(project.required_python_version),
crayons.cyan(python_version(path_to_python) or "unknown"),
crayons.green(shorten_path(path_to_python)),
),
err=True,
)
click.echo(
" {} and rebuilding the virtual environment "
"may resolve the issue.".format(crayons.green("$ pipenv --rm")),
err=True,
)
if not deploy:
click.echo(
" {} will surely fail."
"".format(crayons.yellow("$ pipenv check")),
err=True,
)
else:
raise exceptions.DeployException
# Ensure the Pipfile exists.
ensure_pipfile(
project, validate=validate, skip_requirements=skip_requirements, system=system
)
def shorten_path(location, bold=False):
"""Returns a visually shorter representation of a given system path."""
original = location
short = os.sep.join(
[s[0] if len(s) > (len("2long4")) else s for s in location.split(os.sep)]
)
short = short.split(os.sep)
short[-1] = original.split(os.sep)[-1]
if bold:
short[-1] = str(crayons.normal(short[-1], bold=True))
return os.sep.join(short)
# return short
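# For example (illustrative, on a POSIX system where os.sep == "/"): every path
# component longer than six characters is collapsed to its first letter, except
# the final component, which is kept intact:
#   shorten_path("/home/username/projects/myproject")  -> "/home/u/p/myproject"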
def do_where(project, virtualenv=False, bare=True):
"""Executes the where functionality."""
if not virtualenv:
if not project.pipfile_exists:
click.echo(
"No Pipfile present at project home. Consider running "
"{} first to automatically generate a Pipfile for you."
"".format(crayons.green("`pipenv install`")),
err=True,
)
return
location = project.pipfile_location
# Shorten the virtual display of the path to the virtualenv.
if not bare:
location = shorten_path(location)
click.echo(
"Pipfile found at {}.\n Considering this to be the project home."
"".format(crayons.green(location)),
err=True,
)
else:
click.echo(project.project_directory)
else:
location = project.virtualenv_location
if not bare:
click.echo(
f"Virtualenv location: {crayons.green(location)}", err=True
)
else:
click.echo(location)
def _cleanup_procs(project, procs, failed_deps_queue, retry=True):
while not procs.empty():
c = procs.get()
try:
out, err = c.communicate()
except AttributeError:
out, err = c.stdout, c.stderr
failed = c.returncode != 0
if "Ignoring" in out:
click.echo(crayons.yellow(out.strip()))
elif project.s.is_verbose():
click.echo(crayons.cyan(out.strip() or err.strip()))
# The Installation failed...
if failed:
# If there is a mismatch in installed locations or the install fails
# due to wrongful disabling of pep517, we should allow for
# additional passes at installation
if "does not match installed location" in err:
project.environment.expand_egg_links()
click.echo("{}".format(
crayons.yellow(
"Failed initial installation: Failed to overwrite existing "
"package, likely due to path aliasing. Expanding and trying "
"again!"
)
))
dep = c.dep.copy()
dep.use_pep517 = True
elif "Disabling PEP 517 processing is invalid" in err:
dep = c.dep.copy()
dep.use_pep517 = True
elif not retry:
# The Installation failed...
# We echo both c.stdout and c.stderr because pip returns error details on out.
err = err.strip().splitlines() if err else []
out = out.strip().splitlines() if out else []
err_lines = [line for message in [out, err] for line in message]
# Return the subprocess' return code.
raise exceptions.InstallError(c.dep.name, extra=err_lines)
else:
# Alert the user.
dep = c.dep.copy()
dep.use_pep517 = False
click.echo(
"{} {}! Will try again.".format(
crayons.red("An error occurred while installing"),
crayons.green(dep.as_line()),
), err=True
)
# Save the Failed Dependency for later.
failed_deps_queue.put(dep)
def batch_install(project, deps_list, procs, failed_deps_queue,
requirements_dir, no_deps=True, ignore_hashes=False,
allow_global=False, blocking=False, pypi_mirror=None,
retry=True, sequential_deps=None):
from .vendor.requirementslib.models.utils import (
strip_extras_markers_from_requirement
)
if sequential_deps is None:
sequential_deps = []
failed = (not retry)
install_deps = not no_deps
if not failed:
label = INSTALL_LABEL if not environments.PIPENV_HIDE_EMOJIS else ""
else:
label = INSTALL_LABEL2
deps_to_install = deps_list[:]
deps_to_install.extend(sequential_deps)
deps_to_install = [
dep for dep in deps_to_install if not project.environment.is_satisfied(dep)
]
sequential_dep_names = [d.name for d in sequential_deps]
deps_list_bar = progress.bar(
deps_to_install, width=32,
label=label
)
trusted_hosts = []
# Install these because
for dep in deps_list_bar:
extra_indexes = []
if dep.req.req:
dep.req.req = strip_extras_markers_from_requirement(dep.req.req)
if dep.markers:
dep.markers = str(strip_extras_markers_from_requirement(dep.get_markers()))
# Install the module.
is_artifact = False
if dep.is_file_or_url and (dep.is_direct_url or any(
dep.req.uri.endswith(ext) for ext in ["zip", "tar.gz"]
)):
is_artifact = True
elif dep.is_vcs:
is_artifact = True
if not project.s.PIPENV_RESOLVE_VCS and is_artifact and not dep.editable:
install_deps = True
no_deps = False
with vistir.contextmanagers.temp_environ():
if not allow_global:
os.environ["PIP_USER"] = vistir.compat.fs_str("0")
if "PYTHONHOME" in os.environ:
del os.environ["PYTHONHOME"]
if "GIT_CONFIG" in os.environ and dep.is_vcs:
del os.environ["GIT_CONFIG"]
use_pep517 = True
if failed and not dep.is_vcs:
use_pep517 = getattr(dep, "use_pep517", False)
is_sequential = sequential_deps and dep.name in sequential_dep_names
is_blocking = any([dep.editable, dep.is_vcs, blocking, is_sequential])
c = pip_install(
project,
dep,
ignore_hashes=any([ignore_hashes, dep.editable, dep.is_vcs]),
allow_global=allow_global,
no_deps=not install_deps,
block=is_blocking,
index=dep.index,
requirements_dir=requirements_dir,
pypi_mirror=pypi_mirror,
trusted_hosts=trusted_hosts,
extra_indexes=extra_indexes,
use_pep517=use_pep517,
)
c.dep = dep
procs.put(c)
if procs.full() or procs.qsize() == len(deps_list) or is_sequential:
_cleanup_procs(project, procs, failed_deps_queue, retry=retry)
def do_install_dependencies(
project,
dev=False,
dev_only=False,
bare=False,
emit_requirements=False,
allow_global=False,
ignore_hashes=False,
skip_lock=False,
concurrent=True,
requirements_dir=None,
pypi_mirror=None,
):
""""
Executes the install functionality.
If emit_requirements is True, simply spits out a requirements format to stdout.
"""
import queue
if emit_requirements:
bare = True
# Load the lockfile if it exists, or if dev_only is being used.
if skip_lock or not project.lockfile_exists:
if not bare:
click.echo(
crayons.normal(fix_utf8("Installing dependencies from Pipfile..."), bold=True)
)
# skip_lock should completely bypass the lockfile (broken in 4dac1676)
lockfile = project.get_or_create_lockfile(from_pipfile=True)
else:
lockfile = project.get_or_create_lockfile()
if not bare:
click.echo(
crayons.normal(
fix_utf8("Installing dependencies from Pipfile.lock ({})...".format(
lockfile["_meta"].get("hash", {}).get("sha256")[-6:]
)),
bold=True,
)
)
# Allow pip to resolve dependencies when in skip-lock mode.
no_deps = not skip_lock # skip_lock true, no_deps False, pip resolves deps
dev = dev or dev_only
deps_list = list(lockfile.get_requirements(dev=dev, only=dev_only))
if emit_requirements:
index_args = prepare_pip_source_args(
get_source_list(project, pypi_mirror=pypi_mirror)
)
index_args = " ".join(index_args).replace(" -", "\n-")
deps = [
req.as_line(sources=False, include_hashes=False) for req in deps_list
]
click.echo(index_args)
click.echo("\n".join(sorted(deps)))
sys.exit(0)
if concurrent:
nprocs = project.s.PIPENV_MAX_SUBPROCESS
else:
nprocs = 1
procs = queue.Queue(maxsize=nprocs)
failed_deps_queue = queue.Queue()
if skip_lock:
ignore_hashes = True
editable_or_vcs_deps = [dep for dep in deps_list if (dep.editable or dep.vcs)]
normal_deps = [dep for dep in deps_list if not (dep.editable or dep.vcs)]
install_kwargs = {
"no_deps": no_deps, "ignore_hashes": ignore_hashes, "allow_global": allow_global,
"blocking": not concurrent, "pypi_mirror": pypi_mirror,
"sequential_deps": editable_or_vcs_deps
}
batch_install(
project, normal_deps, procs, failed_deps_queue, requirements_dir, **install_kwargs
)
if not procs.empty():
_cleanup_procs(project, procs, failed_deps_queue)
# click.echo(crayons.normal(
# decode_for_output("Installing editable and vcs dependencies..."), bold=True
# ))
# install_kwargs.update({"blocking": True})
# # XXX: All failed and editable/vcs deps should be installed in sequential mode!
# procs = queue.Queue(maxsize=1)
# batch_install(
# editable_or_vcs_deps, procs, failed_deps_queue, requirements_dir,
# **install_kwargs
# )
# Iterate over the hopefully-poorly-packaged dependencies...
if not failed_deps_queue.empty():
click.echo(
crayons.normal(fix_utf8("Installing initially failed dependencies..."), bold=True)
)
retry_list = []
while not failed_deps_queue.empty():
failed_dep = failed_deps_queue.get()
retry_list.append(failed_dep)
install_kwargs.update({"retry": False})
batch_install(
project, retry_list, procs, failed_deps_queue, requirements_dir, **install_kwargs
)
if not procs.empty():
_cleanup_procs(project, procs, failed_deps_queue, retry=False)
def convert_three_to_python(three, python):
"""Converts a Three flag into a Python flag, and raises customer warnings
in the process, if needed.
"""
if not python:
if three is False:
return "2"
elif three is True:
return "3"
else:
return python
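# For example (illustrative): an explicit python argument wins, otherwise the
# boolean three flag is mapped to a major version string:
#   convert_three_to_python(None, "3.8")  -> "3.8"
#   convert_three_to_python(True, None)   -> "3"
#   convert_three_to_python(False, None)  -> "2"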
def do_create_virtualenv(project, python=None, site_packages=None, pypi_mirror=None):
"""Creates a virtualenv."""
click.echo(
crayons.normal(fix_utf8("Creating a virtualenv for this project..."), bold=True), err=True
)
click.echo(
f"Pipfile: {crayons.yellow(project.pipfile_location, bold=True)}",
err=True,
)
# Default to using sys.executable, if Python wasn't provided.
using_string = "Using"
if not python:
python = sys.executable
using_string = "Using default python from"
click.echo(
"{0} {1} {3} {2}".format(
crayons.normal(using_string, bold=True),
crayons.yellow(python, bold=True),
crayons.normal(fix_utf8("to create virtualenv..."), bold=True),
crayons.green(f"({python_version(python)})"),
),
err=True,
)
cmd = [
Path(sys.executable).absolute().as_posix(),
"-m",
"virtualenv",
f"--prompt={project.name}",
f"--python={python}",
project.get_location_for_virtualenv(),
]
# Pass site-packages flag to virtualenv, if desired...
if site_packages:
click.echo(
crayons.normal(fix_utf8("Making site-packages available..."), bold=True), err=True
)
cmd.append("--system-site-packages")
if pypi_mirror:
pip_config = {"PIP_INDEX_URL": vistir.misc.fs_str(pypi_mirror)}
else:
pip_config = {}
# Actually create the virtualenv.
error = None
with create_spinner("Creating virtual environment...", project.s) as sp:
c = subprocess_run(cmd, env=pip_config)
click.echo(crayons.cyan(f"{c.stdout}"), err=True)
if c.returncode != 0:
error = c.stderr if project.s.is_verbose() else exceptions.prettify_exc(c.stderr)
sp.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format("Failed creating virtual environment"))
else:
sp.green.ok(environments.PIPENV_SPINNER_OK_TEXT.format("Successfully created virtual environment!"))
if error is not None:
raise exceptions.VirtualenvCreationException(
extra=crayons.red(f"{error}")
)
# Associate project directory with the environment.
# This mimics Pew's "setproject".
project_file_name = os.path.join(project.virtualenv_location, ".project")
with open(project_file_name, "w") as f:
f.write(vistir.misc.fs_str(project.project_directory))
from .environment import Environment
sources = project.pipfile_sources
# project.get_location_for_virtualenv is only for if we are creating a new virtualenv
# whereas virtualenv_location is for the current path to the runtime
project._environment = Environment(
prefix=project.virtualenv_location,
is_venv=True,
sources=sources,
pipfile=project.parsed_pipfile,
project=project
)
project._environment.add_dist("pipenv")
# Say where the virtualenv is.
do_where(project, virtualenv=True, bare=False)
def parse_download_fname(fname, name):
fname, fextension = os.path.splitext(fname)
if fextension == ".whl":
fname = "-".join(fname.split("-")[:-3])
if fname.endswith(".tar"):
fname, _ = os.path.splitext(fname)
# Substring out package name (plus dash) from file name to get version.
version = fname[len(name) + 1 :]
# Ignore implicit post releases in version number.
if "-" in version and version.split("-")[1].isdigit():
version = version.split("-")[0]
return version
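# For example (illustrative): the version is whatever remains after stripping the
# extension, the wheel tags, and the "name-" prefix; implicit post-release digits
# are dropped:
#   parse_download_fname("requests-2.25.1-py2.py3-none-any.whl", "requests")  -> "2.25.1"
#   parse_download_fname("requests-2.25.1.tar.gz", "requests")                -> "2.25.1"
#   parse_download_fname("requests-2.25.1-1.tar.gz", "requests")              -> "2.25.1"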
def get_downloads_info(project, names_map, section):
from .vendor.requirementslib.models.requirements import Requirement
info = []
p = project.parsed_pipfile
for fname in os.listdir(project.download_location):
# Get name from filename mapping.
name = Requirement.from_line(names_map[fname]).name
# Get the version info from the filenames.
version = parse_download_fname(fname, name)
# Get the hash of each file.
cmd = [
which_pip(project),
"hash",
os.sep.join([project.download_location, fname]),
]
c = subprocess_run(cmd)
hash = c.stdout.split("--hash=")[1].strip()
# Verify we're adding the correct version from Pipfile
# and not one from a dependency.
specified_version = p[section].get(name, "")
if is_required_version(version, specified_version):
info.append(dict(name=name, version=version, hash=hash))
return info
def overwrite_dev(prod, dev):
dev_keys = set(list(dev.keys()))
prod_keys = set(list(prod.keys()))
for pkg in dev_keys & prod_keys:
dev[pkg] = prod[pkg]
return dev
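# For example (illustrative): pinned default entries win over the dev section for
# packages present in both, while dev-only packages are left untouched:
#   overwrite_dev({"requests": "==2.25.1"}, {"requests": "*", "pytest": "*"})
#   -> {"requests": "==2.25.1", "pytest": "*"}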
def do_lock(
project,
ctx=None,
system=False,
clear=False,
pre=False,
keep_outdated=False,
write=True,
pypi_mirror=None,
):
"""Executes the freeze functionality."""
cached_lockfile = {}
if not pre:
pre = project.settings.get("allow_prereleases")
if keep_outdated:
if not project.lockfile_exists:
raise exceptions.PipenvOptionsError(
"--keep-outdated", ctx=ctx,
message="Pipfile.lock must exist to use --keep-outdated!"
)
cached_lockfile = project.lockfile_content
# Create the lockfile.
lockfile = project._lockfile
# Cleanup lockfile.
for section in ("default", "develop"):
for k, v in lockfile[section].copy().items():
if not hasattr(v, "keys"):
del lockfile[section][k]
# Ensure that develop inherits from default.
dev_packages = project.dev_packages.copy()
dev_packages = overwrite_dev(project.packages, dev_packages)
# Resolve dev-package dependencies, with pip-tools.
for is_dev in [True, False]:
pipfile_section = "dev-packages" if is_dev else "packages"
if project.pipfile_exists:
packages = project.parsed_pipfile.get(pipfile_section, {})
else:
packages = getattr(project, pipfile_section.replace("-", "_"))
if write:
# Alert the user of progress.
click.echo(
"{} {} {}".format(
crayons.normal("Locking"),
crayons.yellow("[{}]".format(pipfile_section.replace("_", "-"))),
crayons.normal(fix_utf8("dependencies...")),
),
err=True,
)
# Mutates the lockfile
venv_resolve_deps(
packages,
which=project._which,
project=project,
dev=is_dev,
clear=clear,
pre=pre,
allow_global=system,
pypi_mirror=pypi_mirror,
pipfile=packages,
lockfile=lockfile,
keep_outdated=keep_outdated
)
# Support for --keep-outdated...
if keep_outdated:
from pipenv.vendor.packaging.utils import canonicalize_name
for section_name, section in (
("default", project.packages),
("develop", project.dev_packages),
):
for package_specified in section.keys():
if not is_pinned(section[package_specified]):
canonical_name = canonicalize_name(package_specified)
if canonical_name in cached_lockfile[section_name]:
lockfile[section_name][canonical_name] = cached_lockfile[
section_name
][canonical_name].copy()
for key in ["default", "develop"]:
packages = set(cached_lockfile[key].keys())
new_lockfile = set(lockfile[key].keys())
missing = packages - new_lockfile
for missing_pkg in missing:
lockfile[key][missing_pkg] = cached_lockfile[key][missing_pkg].copy()
# Overwrite any develop packages with default packages.
lockfile["develop"].update(overwrite_dev(lockfile.get("default", {}), lockfile["develop"]))
if write:
project.write_lockfile(lockfile)
click.echo(
"{}".format(
crayons.normal(
"Updated Pipfile.lock ({})!".format(
lockfile["_meta"].get("hash", {}).get("sha256")[-6:]
),
bold=True,
)
),
err=True,
)
else:
return lockfile
def do_purge(project, bare=False, downloads=False, allow_global=False):
"""Executes the purge functionality."""
if downloads:
if not bare:
click.echo(crayons.normal(fix_utf8("Clearing out downloads directory..."), bold=True))
vistir.path.rmtree(project.download_location)
return
# Remove comments from the output, if any.
installed = {
pep423_name(pkg.project_name) for pkg in project.environment.get_installed_packages()
}
bad_pkgs = {pep423_name(pkg) for pkg in BAD_PACKAGES}
# Remove setuptools, pip, etc from targets for removal
to_remove = installed - bad_pkgs
# Skip purging if there are no packages that need to be removed
if not to_remove:
if not bare:
click.echo("Found 0 installed package, skip purging.")
click.echo(crayons.green("Environment now purged and fresh!"))
return installed
if not bare:
click.echo(
fix_utf8(f"Found {len(to_remove)} installed package(s), purging...")
)
command = [
which_pip(project, allow_global=allow_global),
"uninstall", "-y",
] + list(to_remove)
if project.s.is_verbose():
click.echo(f"$ {cmd_list_to_shell(command)}")
c = subprocess_run(command)
if c.returncode != 0:
raise exceptions.UninstallError(installed, cmd_list_to_shell(command), c.stdout + c.stderr, c.returncode)
if not bare:
click.echo(crayons.cyan(c.stdout))
click.echo(crayons.green("Environment now purged and fresh!"))
return installed
def do_init(
project,
dev=False,
dev_only=False,
emit_requirements=False,
allow_global=False,
ignore_pipfile=False,
skip_lock=False,
system=False,
concurrent=True,
deploy=False,
pre=False,
keep_outdated=False,
requirements_dir=None,
pypi_mirror=None,
):
"""Executes the init functionality."""
python = None
if project.s.PIPENV_PYTHON is not None:
python = project.s.PIPENV_PYTHON
elif project.s.PIPENV_DEFAULT_PYTHON_VERSION is not None:
python = project.s.PIPENV_DEFAULT_PYTHON_VERSION
if not system and not project.s.PIPENV_USE_SYSTEM:
if not project.virtualenv_exists:
try:
do_create_virtualenv(project, python=python, three=None, pypi_mirror=pypi_mirror)
except KeyboardInterrupt:
cleanup_virtualenv(project, bare=False)
sys.exit(1)
# Ensure the Pipfile exists.
if not deploy:
ensure_pipfile(project, system=system)
if not requirements_dir:
requirements_dir = vistir.path.create_tracked_tempdir(
suffix="-requirements", prefix="pipenv-"
)
# Write out the lockfile if it doesn't exist, but not if the Pipfile is being ignored
if (project.lockfile_exists and not ignore_pipfile) and not skip_lock:
old_hash = project.get_lockfile_hash()
new_hash = project.calculate_pipfile_hash()
if new_hash != old_hash:
if deploy:
click.echo(
crayons.red(
"Your Pipfile.lock ({}) is out of date. Expected: ({}).".format(
old_hash[-6:], new_hash[-6:]
)
)
)
raise exceptions.DeployException
sys.exit(1)
elif (system or allow_global) and not (project.s.PIPENV_VIRTUALENV):
click.echo(
crayons.yellow(fix_utf8(
"Pipfile.lock ({}) out of date, but installation "
"uses {} re-building lockfile must happen in "
"isolation. Please rebuild lockfile in a virtualenv. "
"Continuing anyway...".format(
old_hash[-6:], "--system"
))
),
err=True,
)
else:
if old_hash:
msg = fix_utf8("Pipfile.lock ({0}) out of date, updating to ({1})...")
else:
msg = fix_utf8("Pipfile.lock is corrupted, replaced with ({1})...")
click.echo(
crayons.yellow(msg.format(old_hash[-6:], new_hash[-6:]), bold=True),
err=True,
)
do_lock(
project,
system=system,
pre=pre,
keep_outdated=keep_outdated,
write=True,
pypi_mirror=pypi_mirror,
)
# Write out the lockfile if it doesn't exist.
if not project.lockfile_exists and not skip_lock:
# Unless we're in a virtualenv not managed by pipenv, abort if we're
# using the system's python.
if (system or allow_global) and not (project.s.PIPENV_VIRTUALENV):
raise exceptions.PipenvOptionsError(
"--system",
"--system is intended to be used for Pipfile installation, "
"not installation of specific packages. Aborting.\n"
"See also: --deploy flag."
)
else:
click.echo(
crayons.normal(fix_utf8("Pipfile.lock not found, creating..."), bold=True),
err=True,
)
do_lock(
project,
system=system,
pre=pre,
keep_outdated=keep_outdated,
write=True,
pypi_mirror=pypi_mirror,
)
do_install_dependencies(
project,
dev=dev,
dev_only=dev_only,
emit_requirements=emit_requirements,
allow_global=allow_global,
skip_lock=skip_lock,
concurrent=concurrent,
requirements_dir=requirements_dir,
pypi_mirror=pypi_mirror,
)
# Hint the user what to do to activate the virtualenv.
if not allow_global and not deploy and "PIPENV_ACTIVE" not in os.environ:
click.echo(
"To activate this project's virtualenv, run {}.\n"
"Alternatively, run a command "
"inside the virtualenv with {}.".format(
crayons.yellow("pipenv shell"), crayons.yellow("pipenv run")
)
)
def get_pip_args(
project,
pre=False, # type: bool
verbose=False, # type: bool
upgrade=False, # type: bool
require_hashes=False, # type: bool
no_build_isolation=False, # type: bool
no_use_pep517=False, # type: bool
no_deps=False, # type: bool
selective_upgrade=False, # type: bool
src_dir=None, # type: Optional[str]
):
# type: (...) -> List[str]
from .vendor.packaging.version import parse as parse_version
arg_map = {
"pre": ["--pre"],
"verbose": ["--verbose"],
"upgrade": ["--upgrade"],
"require_hashes": ["--require-hashes"],
"no_build_isolation": ["--no-build-isolation"],
"no_use_pep517": [],
"no_deps": ["--no-deps"],
"selective_upgrade": [
"--upgrade-strategy=only-if-needed",
"--exists-action={}".format(project.s.PIP_EXISTS_ACTION or "i")
],
"src_dir": src_dir,
}
if project.environment.pip_version >= parse_version("19.0"):
arg_map["no_use_pep517"].append("--no-use-pep517")
if project.environment.pip_version < parse_version("19.1"):
arg_map["no_use_pep517"].append("--no-build-isolation")
arg_set = []
for key in arg_map.keys():
if key in locals() and locals().get(key):
arg_set.extend(arg_map.get(key))
elif key == "selective_upgrade" and not locals().get(key):
arg_set.append("--exists-action=i")
return list(vistir.misc.dedup(arg_set))
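# For example (illustrative; the exact flags emitted for no_use_pep517 depend on
# the environment's pip version, per the checks above):
#   get_pip_args(project, pre=True, no_deps=True)
#   -> ["--pre", "--no-deps", "--exists-action=i"]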
def get_requirement_line(
requirement, # type: Requirement
src_dir=None, # type: Optional[str]
include_hashes=True, # type: bool
format_for_file=False, # type: bool
):
# type: (...) -> Union[List[str], str]
line = None
if requirement.vcs or requirement.is_file_or_url:
if src_dir and requirement.line_instance.wheel_kwargs:
requirement.line_instance._wheel_kwargs.update({
"src_dir": src_dir
})
requirement.line_instance.vcsrepo
line = requirement.line_instance.line
if requirement.line_instance.markers:
line = f'{line}; {requirement.line_instance.markers}'
if not format_for_file:
line = f'"{line}"'
if requirement.editable:
if not format_for_file:
return ["-e", line]
return f'-e {line}'
if not format_for_file:
return [line]
return line
return requirement.as_line(include_hashes=include_hashes, as_list=not format_for_file)
def write_requirement_to_file(
project, # type: Project
requirement, # type: Requirement
requirements_dir=None, # type: Optional[str]
src_dir=None, # type: Optional[str]
include_hashes=True # type: bool
):
# type: (...) -> str
if not requirements_dir:
requirements_dir = vistir.path.create_tracked_tempdir(
prefix="pipenv", suffix="requirements")
line = requirement.line_instance.get_line(
with_prefix=True, with_hashes=include_hashes, with_markers=True, as_list=False
)
f = vistir.compat.NamedTemporaryFile(
prefix="pipenv-", suffix="-requirement.txt", dir=requirements_dir,
delete=False
)
if project.s.is_verbose():
click.echo(
f"Writing supplied requirement line to temporary file: {line!r}",
err=True
)
f.write(vistir.misc.to_bytes(line))
r = f.name
f.close()
return r
def pip_install(
project,
requirement=None,
r=None,
allow_global=False,
ignore_hashes=False,
no_deps=None,
block=True,
index=None,
pre=False,
selective_upgrade=False,
requirements_dir=None,
extra_indexes=None,
pypi_mirror=None,
trusted_hosts=None,
use_pep517=True
):
piplogger = logging.getLogger("pipenv.patched.notpip._internal.commands.install")
src_dir = None
if not trusted_hosts:
trusted_hosts = []
trusted_hosts.extend(os.environ.get("PIP_TRUSTED_HOSTS", []))
if not allow_global:
src_dir = os.getenv("PIP_SRC", os.getenv("PIP_SRC_DIR", project.virtualenv_src_location))
else:
src_dir = os.getenv("PIP_SRC", os.getenv("PIP_SRC_DIR"))
if requirement:
if requirement.editable or not requirement.hashes:
ignore_hashes = True
elif not (requirement.is_vcs or requirement.editable or requirement.vcs):
ignore_hashes = False
line = None
# Try installing for each source in project.sources.
if not index and requirement.index:
index = requirement.index
if index and not extra_indexes:
extra_indexes = list(project.sources)
if requirement and requirement.vcs or requirement.editable:
requirement.index = None
# Install dependencies when a package is a non-editable VCS dependency.
# Don't specify a source directory when using --system.
if not requirement.editable and no_deps is not True:
# Leave this off because old lockfiles don't have all deps included
# TODO: When can it be turned back on?
no_deps = False
elif requirement.editable and no_deps is None:
no_deps = True
r = write_requirement_to_file(
project, requirement, requirements_dir=requirements_dir, src_dir=src_dir,
include_hashes=not ignore_hashes
)
sources = get_source_list(
project, index, extra_indexes=extra_indexes, trusted_hosts=trusted_hosts,
pypi_mirror=pypi_mirror
)
if r:
with open(r, "r") as fh:
if "--hash" not in fh.read():
ignore_hashes = True
if project.s.is_verbose():
piplogger.setLevel(logging.WARN)
if requirement:
click.echo(
crayons.normal(f"Installing {requirement.name!r}", bold=True),
err=True,
)
pip_command = [project._which("python", allow_global=allow_global), "-m", "pip", "install"]
pip_args = get_pip_args(
project, pre=pre, verbose=project.s.is_verbose(), upgrade=True,
selective_upgrade=selective_upgrade, no_use_pep517=not use_pep517,
no_deps=no_deps, require_hashes=not ignore_hashes,
)
pip_command.extend(pip_args)
if r:
pip_command.extend(["-r", vistir.path.normalize_path(r)])
elif line:
pip_command.extend(line)
pip_command.extend(prepare_pip_source_args(sources))
if project.s.is_verbose():
click.echo(f"$ {cmd_list_to_shell(pip_command)}", err=True)
cache_dir = Path(project.s.PIPENV_CACHE_DIR)
DEFAULT_EXISTS_ACTION = "w"
if selective_upgrade:
DEFAULT_EXISTS_ACTION = "i"
exists_action = vistir.misc.fs_str(project.s.PIP_EXISTS_ACTION or DEFAULT_EXISTS_ACTION)
pip_config = {
"PIP_CACHE_DIR": vistir.misc.fs_str(cache_dir.as_posix()),
"PIP_WHEEL_DIR": vistir.misc.fs_str(cache_dir.joinpath("wheels").as_posix()),
"PIP_DESTINATION_DIR": vistir.misc.fs_str(
cache_dir.joinpath("pkgs").as_posix()
),
"PIP_EXISTS_ACTION": exists_action,
"PATH": vistir.misc.fs_str(os.environ.get("PATH")),
}
if src_dir:
if project.s.is_verbose():
click.echo(f"Using source directory: {src_dir!r}", err=True)
pip_config.update(
{"PIP_SRC": vistir.misc.fs_str(src_dir)}
)
c = subprocess_run(pip_command, block=block, env=pip_config)
c.env = pip_config
return c
def pip_download(project, package_name):
cache_dir = Path(project.s.PIPENV_CACHE_DIR)
pip_config = {
"PIP_CACHE_DIR": vistir.misc.fs_str(cache_dir.as_posix()),
"PIP_WHEEL_DIR": vistir.misc.fs_str(cache_dir.joinpath("wheels").as_posix()),
"PIP_DESTINATION_DIR": vistir.misc.fs_str(
cache_dir.joinpath("pkgs").as_posix()
),
}
for source in project.sources:
cmd = [
which_pip(project),
"download",
package_name,
"-i", source["url"],
"-d", project.download_location,
]
c = subprocess_run(cmd, env=pip_config)
if c.returncode == 0:
break
return c
def fallback_which(command, location=None, allow_global=False, system=False):
"""
A fallback implementation of the `which` utility command that relies exclusively on
searching the path for commands.
:param str command: The command to search for, optional
:param str location: The search location to prioritize (prepend to path), defaults to None
:param bool allow_global: Whether to search the global path, defaults to False
:param bool system: Whether to use the system python instead of pipenv's python, defaults to False
:raises ValueError: Raised if no command is provided
:raises TypeError: Raised if the command provided is not a string
:return: A path to the discovered command location
:rtype: str
"""
from .vendor.pythonfinder import Finder
if not command:
raise ValueError("fallback_which: Must provide a command to search for...")
if not isinstance(command, str):
raise TypeError(f"Provided command must be a string, received {command!r}")
global_search = system or allow_global
if location is None:
global_search = True
finder = Finder(system=False, global_search=global_search, path=location)
if is_python_command(command):
result = find_python(finder, command)
if result:
return result
result = finder.which(command)
if result:
return result.path.as_posix()
return ""
def which_pip(project, allow_global=False):
"""Returns the location of virtualenv-installed pip."""
location = None
if "VIRTUAL_ENV" in os.environ:
location = os.environ["VIRTUAL_ENV"]
if allow_global:
if location:
pip = project._which("pip", location=location)
if pip:
return pip
for p in ("pip", "pip3", "pip2"):
where = system_which(p)
if where:
return where
pip = project._which("pip")
if not pip:
pip = fallback_which("pip", allow_global=allow_global, location=location)
return pip
def system_which(command, path=None):
"""Emulates the system's which. Returns None if not found."""
import shutil
result = shutil.which(command, path=path)
if result is None:
_which = "where" if os.name == "nt" else "which -a"
env = {'PATH': path} if path else None
c = subprocess_run(f"{_which} {command}", shell=True, env=env)
if c.returncode == 127:
click.echo(
"{}: the {} system utility is required for Pipenv to find Python installations properly."
"\n Please install it.".format(
crayons.red("Warning", bold=True), crayons.yellow(_which)
),
err=True,
)
if c.returncode == 0:
result = next(iter(c.stdout.splitlines()), None)
return result
def format_help(help):
"""Formats the help string."""
help = help.replace("Options:", str(crayons.normal("Options:", bold=True)))
help = help.replace(
"Usage: pipenv", str("Usage: {}".format(crayons.normal("pipenv", bold=True)))
)
help = help.replace(" check", str(crayons.red(" check", bold=True)))
help = help.replace(" clean", str(crayons.red(" clean", bold=True)))
help = help.replace(" graph", str(crayons.red(" graph", bold=True)))
help = help.replace(" install", str(crayons.magenta(" install", bold=True)))
help = help.replace(" lock", str(crayons.green(" lock", bold=True)))
help = help.replace(" open", str(crayons.red(" open", bold=True)))
help = help.replace(" run", str(crayons.yellow(" run", bold=True)))
help = help.replace(" shell", str(crayons.yellow(" shell", bold=True)))
help = help.replace(" scripts", str(crayons.yellow(" scripts", bold=True)))
help = help.replace(" sync", str(crayons.green(" sync", bold=True)))
help = help.replace(" uninstall", str(crayons.magenta(" uninstall", bold=True)))
help = help.replace(" update", str(crayons.green(" update", bold=True)))
additional_help = """
Usage Examples:
Create a new project using Python 3.7, specifically:
$ {}
Remove project virtualenv (inferred from current directory):
$ {}
Install all dependencies for a project (including dev):
$ {}
Create a lockfile containing pre-releases:
$ {}
Show a graph of your installed dependencies:
$ {}
Check your installed dependencies for security vulnerabilities:
$ {}
Install a local setup.py into your virtual environment/Pipfile:
$ {}
Use a lower-level pip command:
$ {}
Commands:""".format(
crayons.yellow("pipenv --python 3.7"),
crayons.yellow("pipenv --rm"),
crayons.yellow("pipenv install --dev"),
crayons.yellow("pipenv lock --pre"),
crayons.yellow("pipenv graph"),
crayons.yellow("pipenv check"),
crayons.yellow("pipenv install -e ."),
crayons.yellow("pipenv run pip freeze"),
)
help = help.replace("Commands:", additional_help)
return help
def format_pip_error(error):
error = error.replace("Expected", str(crayons.green("Expected", bold=True)))
error = error.replace("Got", str(crayons.red("Got", bold=True)))
error = error.replace(
"THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS FILE",
str(
crayons.red(
"THESE PACKAGES DO NOT MATCH THE HASHES FROM Pipfile.lock!", bold=True
)
),
)
error = error.replace(
"someone may have tampered with them",
str(crayons.red("someone may have tampered with them")),
)
error = error.replace("option to pip install", "option to 'pipenv install'")
return error
def format_pip_output(out, r=None):
def gen(out):
for line in out.split("\n"):
# Remove requirements file information from pip9 output.
if "(from -r" in line:
yield line[: line.index("(from -r")]
else:
yield line
out = "\n".join([line for line in gen(out)])
return out
def warn_in_virtualenv(project):
# Only warn if pipenv isn't already active.
if environments.is_in_virtualenv() and not project.s.is_quiet():
click.echo(
"{}: Pipenv found itself running within a virtual environment, "
"so it will automatically use that environment, instead of "
"creating its own for any project. You can set "
"{} to force pipenv to ignore that environment and create "
"its own instead. You can set {} to suppress this "
"warning.".format(
crayons.green("Courtesy Notice"),
crayons.normal("PIPENV_IGNORE_VIRTUALENVS=1", bold=True),
crayons.normal("PIPENV_VERBOSITY=-1", bold=True),
),
err=True,
)
def ensure_lockfile(project, keep_outdated=False, pypi_mirror=None):
"""Ensures that the lockfile is up-to-date."""
if not keep_outdated:
keep_outdated = project.settings.get("keep_outdated")
# Write out the lockfile if it doesn't exist, but not if the Pipfile is being ignored
if project.lockfile_exists:
old_hash = project.get_lockfile_hash()
new_hash = project.calculate_pipfile_hash()
if new_hash != old_hash:
click.echo(
crayons.yellow(
fix_utf8("Pipfile.lock ({}) out of date, updating to ({})...".format(
old_hash[-6:], new_hash[-6:]
)),
bold=True,
),
err=True,
)
do_lock(project, keep_outdated=keep_outdated, pypi_mirror=pypi_mirror)
else:
do_lock(project, keep_outdated=keep_outdated, pypi_mirror=pypi_mirror)
def do_py(project, ctx=None, system=False):
if not project.virtualenv_exists:
click.echo(
"{}({}){}".format(
crayons.red("No virtualenv has been created for this project "),
crayons.yellow(project.project_directory, bold=True),
crayons.red(" yet!")
),
err=True,
)
ctx.abort()
try:
click.echo(project._which("python", allow_global=system))
except AttributeError:
click.echo(crayons.red("No project found!"))
def do_outdated(project, pypi_mirror=None, pre=False, clear=False):
# TODO: Allow --skip-lock here?
from collections import namedtuple
from .vendor.packaging.utils import canonicalize_name
from .vendor.requirementslib.models.requirements import Requirement
from .vendor.requirementslib.models.utils import get_version
from .vendor.vistir.compat import Mapping
packages = {}
package_info = namedtuple("PackageInfo", ["name", "installed", "available"])
installed_packages = project.environment.get_installed_packages()
outdated_packages = {
canonicalize_name(pkg.project_name): package_info
(pkg.project_name, pkg.parsed_version, pkg.latest_version)
for pkg in project.environment.get_outdated_packages()
}
reverse_deps = {
canonicalize_name(name): deps
for name, deps in project.environment.reverse_dependencies().items()
}
for result in installed_packages:
dep = Requirement.from_line(str(result.as_requirement()))
packages.update(dep.as_pipfile())
updated_packages = {}
lockfile = do_lock(project, clear=clear, pre=pre, write=False, pypi_mirror=pypi_mirror)
for section in ("develop", "default"):
for package in lockfile[section]:
try:
updated_packages[package] = lockfile[section][package]["version"]
except KeyError:
pass
outdated = []
skipped = []
for package in packages:
norm_name = pep423_name(package)
if norm_name in updated_packages:
if updated_packages[norm_name] != packages[package]:
outdated.append(
package_info(package, updated_packages[norm_name], packages[package])
)
elif canonicalize_name(package) in outdated_packages:
skipped.append(outdated_packages[canonicalize_name(package)])
for package, old_version, new_version in skipped:
name_in_pipfile = project.get_package_name_in_pipfile(package)
pipfile_version_text = ""
required = ""
version = None
if name_in_pipfile:
version = get_version(project.packages[name_in_pipfile])
rdeps = reverse_deps.get(canonicalize_name(package))
if isinstance(rdeps, Mapping) and "required" in rdeps:
required = " {} required".format(rdeps["required"])
if version:
pipfile_version_text = f" ({version} set in Pipfile)"
else:
pipfile_version_text = " (Unpinned in Pipfile)"
click.echo(
crayons.yellow(
"Skipped Update of Package {!s}: {!s} installed,{!s}{!s}, "
"{!s} available.".format(
package, old_version, required, pipfile_version_text, new_version
)
), err=True
)
if not outdated:
click.echo(crayons.green("All packages are up to date!", bold=True))
sys.exit(0)
for package, new_version, old_version in outdated:
click.echo(
"Package {!r} out-of-date: {!r} installed, {!r} available.".format(
package, old_version, new_version
)
)
sys.exit(bool(outdated))
def do_install(
project,
packages=False,
editable_packages=False,
index_url=False,
extra_index_url=False,
dev=False,
three=False,
python=False,
pypi_mirror=None,
system=False,
lock=True,
ignore_pipfile=False,
skip_lock=False,
requirementstxt=False,
sequential=False,
pre=False,
code=False,
deploy=False,
keep_outdated=False,
selective_upgrade=False,
site_packages=None,
):
from .vendor.pip_shims.shims import PipError
requirements_directory = vistir.path.create_tracked_tempdir(
suffix="-requirements", prefix="pipenv-"
)
warnings.filterwarnings("default", category=vistir.compat.ResourceWarning)
if selective_upgrade:
keep_outdated = True
packages = packages if packages else []
editable_packages = editable_packages if editable_packages else []
package_args = [p for p in packages if p] + [p for p in editable_packages if p]
skip_requirements = False
# Don't search for requirements.txt files if the user provides one
if requirementstxt or package_args or project.pipfile_exists:
skip_requirements = True
concurrent = not sequential
# Ensure that the virtualenv and Pipfile are available
ensure_project(
project,
three=three,
python=python,
system=system,
warn=True,
deploy=deploy,
skip_requirements=skip_requirements,
pypi_mirror=pypi_mirror,
site_packages=site_packages,
)
# Don't attempt to install develop and default packages if Pipfile is missing
if not project.pipfile_exists and not (package_args or dev) and not code:
if not (ignore_pipfile or deploy):
raise exceptions.PipfileNotFound(project.path_to("Pipfile"))
elif ((skip_lock and deploy) or ignore_pipfile) and not project.lockfile_exists:
raise exceptions.LockfileNotFound(project.path_to("Pipfile.lock"))
# Load the --pre settings from the Pipfile.
if not pre:
pre = project.settings.get("allow_prereleases")
if not keep_outdated:
keep_outdated = project.settings.get("keep_outdated")
remote = requirementstxt and is_valid_url(requirementstxt)
# Warn and exit if --system is used without a pipfile.
if (system and package_args) and not project.s.PIPENV_VIRTUALENV:
raise exceptions.SystemUsageError
# Automatically use an activated virtualenv.
if project.s.PIPENV_USE_SYSTEM:
system = True
if system:
project.s.PIPENV_USE_SYSTEM = True
os.environ["PIPENV_USE_SYSTEM"] = "1"
# Check if the file is remote or not
if remote:
click.echo(
crayons.normal(
fix_utf8("Remote requirements file provided! Downloading..."), bold=True
),
err=True,
)
fd = vistir.path.create_tracked_tempfile(
prefix="pipenv-", suffix="-requirement.txt", dir=requirements_directory
)
temp_reqs = fd.name
requirements_url = requirementstxt
# Download requirements file
try:
download_file(requirements_url, temp_reqs, project.s.PIPENV_MAX_RETRIES)
except OSError:
fd.close()
os.unlink(temp_reqs)
click.echo(
crayons.red(
"Unable to find requirements file at {}.".format(
crayons.normal(requirements_url)
)
),
err=True,
)
sys.exit(1)
finally:
fd.close()
# Replace the url with the temporary requirements file
requirementstxt = temp_reqs
remote = True
if requirementstxt:
error, traceback = None, None
click.echo(
crayons.normal(
fix_utf8("Requirements file provided! Importing into Pipfile..."), bold=True
),
err=True,
)
try:
import_requirements(project, r=project.path_to(requirementstxt), dev=dev)
except (UnicodeDecodeError, PipError) as e:
# Don't print the temp file path if remote since it will be deleted.
req_path = requirements_url if remote else project.path_to(requirementstxt)
error = (
"Unexpected syntax in {}. Are you sure this is a "
"requirements.txt style file?".format(req_path)
)
traceback = e
except AssertionError as e:
error = (
"Requirements file doesn't appear to exist. Please ensure the file exists in your "
"project directory or you provided the correct path."
)
traceback = e
finally:
# If requirements file was provided by remote url delete the temporary file
if remote:
fd.close() # Close for windows to allow file cleanup.
os.remove(temp_reqs)
if error and traceback:
click.echo(crayons.red(error))
click.echo(crayons.yellow(str(traceback)), err=True)
sys.exit(1)
if code:
click.echo(
crayons.normal(fix_utf8("Discovering imports from local codebase..."), bold=True)
)
for req in import_from_code(code):
click.echo(f" Found {crayons.green(req)}!")
project.add_package_to_pipfile(req)
# Allow more than one package to be provided.
package_args = [p for p in packages] + [
f"-e {pkg}" for pkg in editable_packages
]
# Support for --selective-upgrade.
# We should do this part first to make sure that we actually do selectively upgrade
# the items specified
if selective_upgrade:
from .vendor.requirementslib.models.requirements import Requirement
for i, package in enumerate(package_args[:]):
section = project.packages if not dev else project.dev_packages
package = Requirement.from_line(package)
package__name, package__val = package.pipfile_entry
try:
if not is_star(section[package__name]) and is_star(package__val):
# Support for VCS dependencies.
package_args[i] = convert_deps_to_pip(
{package__name: section[package__name]}, project=project, r=False
)[0]
except KeyError:
pass
# Install all dependencies, if none was provided.
# This basically ensures that we have a pipfile and lockfile, then it locks and
# installs from the lockfile
if not packages and not editable_packages:
# Update project settings with pre preference.
if pre:
project.update_settings({"allow_prereleases": pre})
do_init(
project,
dev=dev,
allow_global=system,
ignore_pipfile=ignore_pipfile,
system=system,
skip_lock=skip_lock,
concurrent=concurrent,
deploy=deploy,
pre=pre,
requirements_dir=requirements_directory,
pypi_mirror=pypi_mirror,
keep_outdated=keep_outdated
)
    # Otherwise, the user passed in specific dependencies; install them and record them in the Pipfile.
else:
from .vendor.requirementslib.models.requirements import Requirement
# make a tuple of (display_name, entry)
pkg_list = packages + [f'-e {pkg}' for pkg in editable_packages]
if not system and not project.virtualenv_exists:
do_init(
project,
dev=dev,
system=system,
allow_global=system,
concurrent=concurrent,
keep_outdated=keep_outdated,
requirements_dir=requirements_directory,
deploy=deploy,
pypi_mirror=pypi_mirror,
skip_lock=skip_lock,
)
pip_shims_module = os.environ.pop("PIP_SHIMS_BASE_MODULE", None)
for pkg_line in pkg_list:
click.echo(
crayons.normal(
fix_utf8(f"Installing {crayons.green(pkg_line, bold=True)}..."),
bold=True,
)
)
# pip install:
with vistir.contextmanagers.temp_environ(), create_spinner("Installing...", project.s) as sp:
if not system:
os.environ["PIP_USER"] = vistir.compat.fs_str("0")
if "PYTHONHOME" in os.environ:
del os.environ["PYTHONHOME"]
sp.text = f"Resolving {pkg_line}..."
try:
pkg_requirement = Requirement.from_line(pkg_line)
except ValueError as e:
sp.write_err(vistir.compat.fs_str("{}: {}".format(crayons.red("WARNING"), e)))
sp.red.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format("Installation Failed"))
sys.exit(1)
no_deps = False
sp.text = "Installing..."
try:
sp.text = f"Installing {pkg_requirement.name}..."
if project.s.is_verbose():
sp.hide_and_write(f"Installing package: {pkg_requirement.as_line(include_hashes=False)}")
c = pip_install(
project,
pkg_requirement,
ignore_hashes=True,
allow_global=system,
selective_upgrade=selective_upgrade,
no_deps=no_deps,
pre=pre,
requirements_dir=requirements_directory,
index=index_url,
extra_indexes=extra_index_url,
pypi_mirror=pypi_mirror,
)
if c.returncode:
sp.write_err(
"{} An error occurred while installing {}!".format(
crayons.red("Error: ", bold=True), crayons.green(pkg_line)
),
)
sp.write_err(
vistir.compat.fs_str(f"Error text: {c.stdout}")
)
sp.write_err(crayons.cyan(vistir.compat.fs_str(format_pip_error(c.stderr))))
if project.s.is_verbose():
sp.write_err(crayons.cyan(vistir.compat.fs_str(format_pip_output(c.stdout))))
if "setup.py egg_info" in c.stderr:
sp.write_err(vistir.compat.fs_str(
"This is likely caused by a bug in {}. "
"Report this to its maintainers.".format(
crayons.green(pkg_requirement.name)
)
))
sp.red.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format("Installation Failed"))
sys.exit(1)
except (ValueError, RuntimeError) as e:
sp.write_err(vistir.compat.fs_str(
"{}: {}".format(crayons.red("WARNING"), e),
))
sp.red.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format(
"Installation Failed",
))
sys.exit(1)
# Warn if --editable wasn't passed.
if pkg_requirement.is_vcs and not pkg_requirement.editable and not project.s.PIPENV_RESOLVE_VCS:
sp.write_err(
"{}: You installed a VCS dependency in non-editable mode. "
"This will work fine, but sub-dependencies will not be resolved by {}."
"\n To enable this sub-dependency functionality, specify that this dependency is editable."
"".format(
crayons.red("Warning", bold=True),
crayons.yellow("$ pipenv lock"),
)
)
sp.write(vistir.compat.fs_str(
"{} {} {} {}{}".format(
crayons.normal("Adding", bold=True),
crayons.green(f"{pkg_requirement.name}", bold=True),
crayons.normal("to Pipfile's", bold=True),
crayons.yellow("[dev-packages]" if dev else "[packages]", bold=True),
crayons.normal(fix_utf8("..."), bold=True),
)
))
# Add the package to the Pipfile.
indexes = list(filter(None, [index_url, *extra_index_url]))
for index in indexes:
index_name = project.add_index_to_pipfile(
index, verify_ssl=index.startswith("https:")
)
if index_url and not extra_index_url:
pkg_requirement.index = index_name
try:
project.add_package_to_pipfile(pkg_requirement, dev)
except ValueError:
import traceback
sp.write_err(
"{} {}".format(
crayons.red("Error:", bold=True), traceback.format_exc()
)
)
sp.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format(
"Failed adding package to Pipfile"
))
sp.ok(environments.PIPENV_SPINNER_OK_TEXT.format("Installation Succeeded"))
# Update project settings with pre preference.
if pre:
project.update_settings({"allow_prereleases": pre})
if pip_shims_module:
os.environ["PIP_SHIMS_BASE_MODULE"] = pip_shims_module
do_init(
project,
dev=dev,
system=system,
allow_global=system,
concurrent=concurrent,
keep_outdated=keep_outdated,
requirements_dir=requirements_directory,
deploy=deploy,
pypi_mirror=pypi_mirror,
skip_lock=skip_lock,
)
sys.exit(0)
def do_uninstall(
project,
packages=False,
editable_packages=False,
three=None,
python=False,
system=False,
lock=False,
all_dev=False,
all=False,
keep_outdated=False,
pypi_mirror=None,
ctx=None
):
from .vendor.packaging.utils import canonicalize_name
from .vendor.requirementslib.models.requirements import Requirement
# Automatically use an activated virtualenv.
if project.s.PIPENV_USE_SYSTEM:
system = True
# Ensure that virtualenv is available.
# TODO: We probably shouldn't ensure a project exists if the outcome will be to just
# install things in order to remove them... maybe tell the user to install first?
ensure_project(project, three=three, python=python, pypi_mirror=pypi_mirror)
    # Require at least one package, or one of --all / --all-dev.
if not any([packages, editable_packages, all_dev, all]):
raise exceptions.PipenvUsageError("No package provided!", ctx=ctx)
editable_pkgs = [
Requirement.from_line(f"-e {p}").name for p in editable_packages if p
]
packages += editable_pkgs
package_names = {p for p in packages if p}
package_map = {
canonicalize_name(p): p for p in packages if p
}
installed_package_names = project.installed_package_names
# Intelligently detect if --dev should be used or not.
lockfile_packages = set()
if project.lockfile_exists:
project_pkg_names = project.lockfile_package_names
else:
project_pkg_names = project.pipfile_package_names
pipfile_remove = True
# Uninstall [dev-packages], if --dev was provided.
if all_dev:
if "dev-packages" not in project.parsed_pipfile and not project_pkg_names["dev"]:
click.echo(
crayons.normal(
"No {} to uninstall.".format(crayons.yellow("[dev-packages]")),
bold=True,
)
)
return
click.echo(
crayons.normal(
fix_utf8("Un-installing {}...".format(crayons.yellow("[dev-packages]"))), bold=True
)
)
package_names = set(project_pkg_names["dev"]) - set(project_pkg_names["default"])
# Remove known "bad packages" from the list.
bad_pkgs = get_canonical_names(BAD_PACKAGES)
ignored_packages = bad_pkgs & set(list(package_map.keys()))
for ignored_pkg in ignored_packages:
if project.s.is_verbose():
click.echo(f"Ignoring {ignored_pkg}.", err=True)
package_names.discard(package_map[ignored_pkg])
used_packages = project_pkg_names["combined"] & installed_package_names
failure = False
if all:
click.echo(
crayons.normal(
fix_utf8("Un-installing all {} and {}...".format(
crayons.yellow("[dev-packages]"),
crayons.yellow("[packages]"),
)), bold=True
)
)
do_purge(project, bare=False, allow_global=system)
sys.exit(0)
selected_pkg_map = {
canonicalize_name(p): p for p in package_names
}
packages_to_remove = [
p for normalized, p in selected_pkg_map.items()
if normalized in (used_packages - bad_pkgs)
]
pip_path = None
for normalized, package_name in selected_pkg_map.items():
click.echo(
crayons.normal(
fix_utf8(f"Uninstalling {crayons.green(package_name)}..."), bold=True
)
)
# Uninstall the package.
if package_name in packages_to_remove:
with project.environment.activated():
if pip_path is None:
pip_path = which_pip(project, allow_global=system)
cmd = [pip_path, "uninstall", package_name, "-y"]
c = run_command(cmd, is_verbose=project.s.is_verbose())
click.echo(crayons.cyan(c.stdout))
if c.returncode != 0:
failure = True
if not failure and pipfile_remove:
in_packages = project.get_package_name_in_pipfile(package_name, dev=False)
in_dev_packages = project.get_package_name_in_pipfile(
package_name, dev=True
)
if normalized in lockfile_packages:
click.echo("{} {} {} {}".format(
crayons.cyan("Removing"),
crayons.green(package_name),
crayons.cyan("from"),
crayons.white(fix_utf8("Pipfile.lock...")))
)
lockfile = project.get_or_create_lockfile()
if normalized in lockfile.default:
del lockfile.default[normalized]
if normalized in lockfile.develop:
del lockfile.develop[normalized]
lockfile.write()
if not (in_dev_packages or in_packages):
if normalized in lockfile_packages:
continue
click.echo(
"No package {} to remove from Pipfile.".format(
crayons.green(package_name)
)
)
continue
click.echo(
fix_utf8(f"Removing {crayons.green(package_name)} from Pipfile...")
)
# Remove package from both packages and dev-packages.
if in_dev_packages:
project.remove_package_from_pipfile(package_name, dev=True)
if in_packages:
project.remove_package_from_pipfile(package_name, dev=False)
if lock:
do_lock(project, system=system, keep_outdated=keep_outdated, pypi_mirror=pypi_mirror)
sys.exit(int(failure))
def do_shell(project, three=None, python=False, fancy=False, shell_args=None, pypi_mirror=None):
# Ensure that virtualenv is available.
ensure_project(
project, three=three, python=python, validate=False, pypi_mirror=pypi_mirror,
)
# Support shell compatibility mode.
if project.s.PIPENV_SHELL_FANCY:
fancy = True
from .shells import choose_shell
shell = choose_shell(project)
click.echo(fix_utf8("Launching subshell in virtual environment..."), err=True)
fork_args = (
project.virtualenv_location,
project.project_directory,
shell_args,
)
# Set an environment variable, so we know we're in the environment.
# Only set PIPENV_ACTIVE after finishing reading virtualenv_location
# otherwise its value will be changed
os.environ["PIPENV_ACTIVE"] = vistir.misc.fs_str("1")
os.environ.pop("PIP_SHIMS_BASE_MODULE", None)
if fancy:
shell.fork(*fork_args)
return
try:
shell.fork_compat(*fork_args)
except (AttributeError, ImportError):
click.echo(fix_utf8(
"Compatibility mode not supported. "
"Trying to continue as well-configured shell..."),
err=True,
)
shell.fork(*fork_args)
def _inline_activate_virtualenv(project):
try:
activate_this = project._which("activate_this.py")
if not activate_this or not os.path.exists(activate_this):
raise exceptions.VirtualenvActivationException()
with open(activate_this) as f:
code = compile(f.read(), activate_this, "exec")
exec(code, dict(__file__=activate_this))
# Catch all errors, just in case.
except Exception:
click.echo(
"{}: There was an unexpected error while activating your "
"virtualenv. Continuing anyway...".format(
crayons.red("Warning", bold=True)
),
err=True,
)
def _inline_activate_venv(project):
"""Built-in venv doesn't have activate_this.py, but doesn't need it anyway.
As long as we find the correct executable, built-in venv sets up the
environment automatically.
See: https://bugs.python.org/issue21496#msg218455
"""
components = []
for name in ("bin", "Scripts"):
bindir = os.path.join(project.virtualenv_location, name)
if os.path.exists(bindir):
components.append(bindir)
if "PATH" in os.environ:
components.append(os.environ["PATH"])
os.environ["PATH"] = os.pathsep.join(components)
def inline_activate_virtual_environment(project):
root = project.virtualenv_location
if os.path.exists(os.path.join(root, "pyvenv.cfg")):
_inline_activate_venv(project)
else:
_inline_activate_virtualenv(project)
if "VIRTUAL_ENV" not in os.environ:
os.environ["VIRTUAL_ENV"] = vistir.misc.fs_str(root)
def _launch_windows_subprocess(script, env):
import subprocess
path = env.get("PATH", "")
command = system_which(script.command, path=path)
options = {"universal_newlines": True, "env": env}
script.cmd_args[1:] = [expandvars(arg) for arg in script.args]
# Command not found, maybe this is a shell built-in?
if not command:
return subprocess.Popen(script.cmdify(), shell=True, **options)
# Try to use CreateProcess directly if possible. Specifically catch
# Windows error 193 "Command is not a valid Win32 application" to handle
# a "command" that is non-executable. See pypa/pipenv#2727.
try:
return subprocess.Popen([command] + script.args, **options)
except OSError as e:
if e.winerror != 193:
raise
# Try shell mode to use Windows's file association for file launch.
return subprocess.Popen(script.cmdify(), shell=True, **options)
def do_run_nt(project, script, env):
p = _launch_windows_subprocess(script, env)
p.communicate()
sys.exit(p.returncode)
def do_run_posix(project, script, command, env):
path = env.get("PATH")
command_path = system_which(script.command, path=path)
if not command_path:
if project.has_script(command):
click.echo(
"{}: the command {} (from {}) could not be found within {}."
"".format(
crayons.red("Error", bold=True),
crayons.yellow(script.command),
crayons.normal(command, bold=True),
crayons.normal("PATH", bold=True),
),
err=True,
)
else:
click.echo(
"{}: the command {} could not be found within {} or Pipfile's {}."
"".format(
crayons.red("Error", bold=True),
crayons.yellow(command),
crayons.normal("PATH", bold=True),
crayons.normal("[scripts]", bold=True),
),
err=True,
)
sys.exit(1)
os.execve(
command_path,
[command_path, *(os.path.expandvars(arg) for arg in script.args)],
env
)
def do_run(project, command, args, three=None, python=False, pypi_mirror=None):
"""Attempt to run command either pulling from project or interpreting as executable.
    Args are appended to the command in the [scripts] section of the project, if found.
"""
from .cmdparse import ScriptEmptyError
# Ensure that virtualenv is available.
ensure_project(
project, three=three, python=python, validate=False, pypi_mirror=pypi_mirror,
)
env = os.environ.copy()
env.update(load_dot_env(project, as_dict=True) or {})
env.pop("PIP_SHIMS_BASE_MODULE", None)
path = env.get('PATH', '')
if project.virtualenv_location:
new_path = os.path.join(project.virtualenv_location, 'Scripts' if os.name == 'nt' else 'bin')
paths = path.split(os.pathsep)
paths.insert(0, new_path)
path = os.pathsep.join(paths)
env["VIRTUAL_ENV"] = project.virtualenv_location
env["PATH"] = path
# Set an environment variable, so we know we're in the environment.
# Only set PIPENV_ACTIVE after finishing reading virtualenv_location
# such as in inline_activate_virtual_environment
# otherwise its value will be changed
env["PIPENV_ACTIVE"] = vistir.misc.fs_str("1")
env.pop("PIP_SHIMS_BASE_MODULE", None)
try:
script = project.build_script(command, args)
cmd_string = cmd_list_to_shell([script.command] + script.args)
if project.s.is_verbose():
click.echo(crayons.normal(f"$ {cmd_string}"), err=True)
except ScriptEmptyError:
click.echo("Can't run script {0!r}-it's empty?", err=True)
run_args = [project, script]
run_kwargs = {'env': env}
# We're using `do_run_nt` on CI (even if we're running on a non-nt machine)
# as a workaround for https://github.com/pypa/pipenv/issues/4909.
if os.name == "nt" or environments.PIPENV_IS_CI:
run_fn = do_run_nt
else:
run_fn = do_run_posix
run_kwargs.update({"command": command})
run_fn(*run_args, **run_kwargs)
def do_check(
project,
three=None,
python=False,
system=False,
unused=False,
db=None,
ignore=None,
output="default",
key=None,
quiet=False,
args=None,
pypi_mirror=None
):
from pipenv.vendor.first import first
from pipenv.vendor.vistir.compat import JSONDecodeError
if not system:
# Ensure that virtualenv is available.
ensure_project(
project,
three=three,
python=python,
validate=False,
warn=False,
pypi_mirror=pypi_mirror,
)
if not args:
args = []
if unused:
deps_required = [k.lower() for k in project.packages.keys()]
deps_needed = [k.lower() for k in import_from_code(unused)]
for dep in deps_needed:
try:
deps_required.remove(dep)
except ValueError:
pass
if deps_required:
if not quiet and not project.s.is_quiet():
click.echo(
crayons.normal(
"The following dependencies appear unused, and may be safe for removal:"
)
)
for dep in deps_required:
click.echo(f" - {crayons.green(dep)}")
sys.exit(1)
else:
sys.exit(0)
if not quiet and not project.s.is_quiet():
click.echo(crayons.normal(decode_for_output("Checking PEP 508 requirements..."), bold=True))
pep508checker_path = pep508checker.__file__.rstrip("cdo")
safety_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "patched", "safety"
)
if not system:
python = project._which("python")
else:
python = first(system_which(p) for p in ("python", "python3", "python2"))
if not python:
click.echo(crayons.red("The Python interpreter can't be found."), err=True)
sys.exit(1)
_cmd = [Path(python).as_posix()]
# Run the PEP 508 checker in the virtualenv.
cmd = _cmd + [Path(pep508checker_path).as_posix()]
c = run_command(cmd, is_verbose=project.s.is_verbose())
if c.returncode is not None:
try:
results = simplejson.loads(c.stdout.strip())
except JSONDecodeError:
click.echo("{}\n{}\n{}".format(
crayons.white(decode_for_output("Failed parsing pep508 results: "), bold=True),
c.stdout.strip(),
c.stderr.strip()
))
sys.exit(1)
# Load the pipfile.
p = pipfile.Pipfile.load(project.pipfile_location)
failed = False
# Assert each specified requirement.
for marker, specifier in p.data["_meta"]["requires"].items():
if marker in results:
try:
assert results[marker] == specifier
except AssertionError:
failed = True
click.echo(
"Specifier {} does not match {} ({})."
"".format(
crayons.green(marker),
crayons.cyan(specifier),
crayons.yellow(results[marker]),
),
err=True,
)
if failed:
click.echo(crayons.red("Failed!"), err=True)
sys.exit(1)
else:
if not quiet and not project.s.is_quiet():
click.echo(crayons.green("Passed!"))
if not quiet and not project.s.is_quiet():
click.echo(crayons.normal(
decode_for_output("Checking installed package safety..."), bold=True)
)
if ignore:
if not isinstance(ignore, (tuple, list)):
ignore = [ignore]
ignored = [["--ignore", cve] for cve in ignore]
if not quiet and not project.s.is_quiet():
click.echo(
crayons.normal(
"Notice: Ignoring CVE(s) {}".format(crayons.yellow(", ".join(ignore)))
),
err=True,
)
else:
ignored = []
switch = output
if output == "default":
switch = "json"
cmd = _cmd + [safety_path, "check", f"--{switch}"]
if db:
if not quiet and not project.s.is_quiet():
click.echo(crayons.normal(f"Using local database {db}"))
cmd.append(f"--db={db}")
elif key or project.s.PIPENV_PYUP_API_KEY:
cmd = cmd + [f"--key={key or project.s.PIPENV_PYUP_API_KEY}"]
if ignored:
for cve in ignored:
cmd += cve
c = run_command(cmd, catch_exceptions=False, is_verbose=project.s.is_verbose())
if output == "default":
try:
results = simplejson.loads(c.stdout)
except (ValueError, JSONDecodeError):
raise exceptions.JSONParseError(c.stdout, c.stderr)
except Exception:
raise exceptions.PipenvCmdError(cmd_list_to_shell(c.args), c.stdout, c.stderr, c.returncode)
for (package, resolved, installed, description, vuln, *_) in results:
click.echo(
"{}: {} {} resolved ({} installed)!".format(
crayons.normal(vuln, bold=True),
crayons.green(package),
crayons.yellow(resolved, bold=False),
crayons.yellow(installed, bold=True),
)
)
click.echo(f"{description}")
click.echo()
if c.returncode == 0:
click.echo(crayons.green("All good!"))
sys.exit(0)
else:
sys.exit(1)
else:
click.echo(c.stdout)
sys.exit(c.returncode)
def do_graph(project, bare=False, json=False, json_tree=False, reverse=False):
from pipenv.vendor import pipdeptree
from pipenv.vendor.vistir.compat import JSONDecodeError
pipdeptree_path = pipdeptree.__file__.rstrip("cdo")
try:
python_path = project._which("python")
except AttributeError:
click.echo(
"{}: {}".format(
crayons.red("Warning", bold=True),
"Unable to display currently-installed dependency graph information here. "
"Please run within a Pipenv project.",
),
err=True,
)
sys.exit(1)
except RuntimeError:
pass
else:
if not os.name == 'nt': # bugfix #4388
python_path = Path(python_path).as_posix()
pipdeptree_path = Path(pipdeptree_path).as_posix()
if reverse and json:
click.echo(
"{}: {}".format(
crayons.red("Warning", bold=True),
"Using both --reverse and --json together is not supported. "
"Please select one of the two options.",
),
err=True,
)
sys.exit(1)
if reverse and json_tree:
click.echo(
"{}: {}".format(
crayons.red("Warning", bold=True),
"Using both --reverse and --json-tree together is not supported. "
"Please select one of the two options.",
),
err=True,
)
sys.exit(1)
if json and json_tree:
click.echo(
"{}: {}".format(
crayons.red("Warning", bold=True),
"Using both --json and --json-tree together is not supported. "
"Please select one of the two options.",
),
err=True,
)
sys.exit(1)
flag = ""
if json:
flag = "--json"
if json_tree:
flag = "--json-tree"
if reverse:
flag = "--reverse"
if not project.virtualenv_exists:
click.echo(
"{}: No virtualenv has been created for this project yet! Consider "
"running {} first to automatically generate one for you or see "
"{} for further instructions.".format(
crayons.red("Warning", bold=True),
crayons.green("`pipenv install`"),
crayons.green("`pipenv install --help`"),
),
err=True,
)
sys.exit(1)
cmd_args = [python_path, pipdeptree_path, "-l"]
if flag:
cmd_args.append(flag)
c = run_command(cmd_args, is_verbose=project.s.is_verbose())
# Run dep-tree.
if not bare:
if json:
data = []
try:
parsed = simplejson.loads(c.stdout.strip())
except JSONDecodeError:
raise exceptions.JSONParseError(c.stdout, c.stderr)
else:
for d in parsed:
if d["package"]["key"] not in BAD_PACKAGES:
data.append(d)
click.echo(simplejson.dumps(data, indent=4))
sys.exit(0)
elif json_tree:
def traverse(obj):
if isinstance(obj, list):
return [
traverse(package)
for package in obj
if package["key"] not in BAD_PACKAGES
]
else:
obj["dependencies"] = traverse(obj["dependencies"])
return obj
try:
parsed = simplejson.loads(c.stdout.strip())
except JSONDecodeError:
raise exceptions.JSONParseError(c.stdout, c.stderr)
else:
data = traverse(parsed)
click.echo(simplejson.dumps(data, indent=4))
sys.exit(0)
else:
for line in c.stdout.strip().split("\n"):
# Ignore bad packages as top level.
                # TODO: this should probably check '"==" in line' and use line.partition("==")
if line.split("==")[0] in BAD_PACKAGES and not reverse:
continue
# Bold top-level packages.
if not line.startswith(" "):
click.echo(crayons.normal(line, bold=True))
# Echo the rest.
else:
click.echo(crayons.normal(line, bold=False))
else:
click.echo(c.stdout)
if c.returncode != 0:
click.echo(
"{} {}".format(
crayons.red("ERROR: ", bold=True),
crayons.white(f"{c.stderr}"),
),
err=True,
)
# Return its return code.
sys.exit(c.returncode)
def do_sync(
project,
dev=False,
three=None,
python=None,
bare=False,
dont_upgrade=False,
user=False,
clear=False,
unused=False,
sequential=False,
pypi_mirror=None,
system=False,
deploy=False,
):
# The lock file needs to exist because sync won't write to it.
if not project.lockfile_exists:
raise exceptions.LockfileNotFound("Pipfile.lock")
# Ensure that virtualenv is available if not system.
ensure_project(
project,
three=three,
python=python,
validate=False,
system=system,
deploy=deploy,
pypi_mirror=pypi_mirror,
clear=clear,
)
# Install everything.
requirements_dir = vistir.path.create_tracked_tempdir(
suffix="-requirements", prefix="pipenv-"
)
if system:
project.s.PIPENV_USE_SYSTEM = True
os.environ["PIPENV_USE_SYSTEM"] = "1"
do_init(
project,
dev=dev,
allow_global=system,
concurrent=(not sequential),
requirements_dir=requirements_dir,
ignore_pipfile=True, # Don't check if Pipfile and lock match.
pypi_mirror=pypi_mirror,
deploy=deploy,
system=system,
)
if not bare:
click.echo(crayons.green("All dependencies are now up-to-date!"))
def do_clean(
project, three=None, python=None, dry_run=False, bare=False, pypi_mirror=None,
system=False
):
# Ensure that virtualenv is available.
from packaging.utils import canonicalize_name
ensure_project(project, three=three, python=python, validate=False, pypi_mirror=pypi_mirror)
ensure_lockfile(project, pypi_mirror=pypi_mirror)
# Make sure that the virtualenv's site packages are configured correctly
# otherwise we may end up removing from the global site packages directory
installed_package_names = project.installed_package_names.copy()
# Remove known "bad packages" from the list.
for bad_package in BAD_PACKAGES:
if canonicalize_name(bad_package) in installed_package_names:
if project.s.is_verbose():
click.echo(f"Ignoring {bad_package}.", err=True)
installed_package_names.remove(canonicalize_name(bad_package))
# Intelligently detect if --dev should be used or not.
locked_packages = {
canonicalize_name(pkg) for pkg in project.lockfile_package_names["combined"]
}
for used_package in locked_packages:
if used_package in installed_package_names:
installed_package_names.remove(used_package)
failure = False
cmd = [which_pip(project, allow_global=system), "uninstall", "-y", "-qq"]
for apparent_bad_package in installed_package_names:
if dry_run and not bare:
click.echo(apparent_bad_package)
else:
if not bare:
click.echo(
crayons.white(
fix_utf8(f"Uninstalling {apparent_bad_package}..."), bold=True
)
)
# Uninstall the package.
cmd = [which_pip(project), "uninstall", apparent_bad_package, "-y"]
c = run_command(cmd, is_verbose=project.s.is_verbose())
if c.returncode != 0:
failure = True
sys.exit(int(failure))
|
GHSA-qc9x-gjcv-465w
|
pipenv/utils.py
|
@@ -1643,6 +1643,30 @@ def get_url_name(url):
return urllib3_util.parse_url(url).host
+def get_host_and_port(url):
+ """Get the host, or the host:port pair if port is explicitly included, for the given URL.
+
+ Examples:
+ >>> get_host_and_port('example.com')
+ 'example.com'
+ >>> get_host_and_port('example.com:443')
+ 'example.com:443'
+ >>> get_host_and_port('http://example.com')
+ 'example.com'
+ >>> get_host_and_port('https://example.com/')
+ 'example.com'
+ >>> get_host_and_port('https://example.com:8081')
+ 'example.com:8081'
+ >>> get_host_and_port('ssh://example.com')
+ 'example.com'
+
+ :param url: the URL string to parse
+ :return: a string with the host:port pair if the URL includes port number explicitly; otherwise, returns host only
+ """
+ url = urllib3_util.parse_url(url)
+ return '{}:{}'.format(url.host, url.port) if url.port else url.host
+
+
def get_canonical_names(packages):
"""Canonicalize a list of packages and return a set of canonical names"""
from .vendor.packaging.utils import canonicalize_name
|
import contextlib
import errno
import logging
import os
import posixpath
import re
import shlex
import hashlib
import shutil
import signal
import stat
import subprocess
import sys
import warnings
from contextlib import contextmanager
from distutils.spawn import find_executable
from pathlib import Path
from urllib.parse import urlparse
import crayons
import parse
import toml
import tomlkit
from click import echo as click_echo
from pipenv import environments
from pipenv.exceptions import (
PipenvCmdError, PipenvUsageError, RequirementError, ResolutionFailure
)
from pipenv.pep508checker import lookup
from pipenv.vendor.packaging.markers import Marker
from pipenv.vendor.urllib3 import util as urllib3_util
from pipenv.vendor.vistir.compat import (
Mapping, ResourceWarning, Sequence, Set, TemporaryDirectory, lru_cache
)
from pipenv.vendor.vistir.misc import fs_str, run
from pipenv.vendor.vistir.contextmanagers import open_file
if environments.MYPY_RUNNING:
from typing import Any, Dict, List, Optional, Text, Tuple, Union
from pipenv.project import Project, TSource
from pipenv.vendor.requirementslib.models.pipfile import Pipfile
from pipenv.vendor.requirementslib.models.requirements import (
Line, Requirement
)
logging.basicConfig(level=logging.ERROR)
specifiers = [k for k in lookup.keys()]
# List of version control systems we support.
VCS_LIST = ("git", "svn", "hg", "bzr")
SCHEME_LIST = ("http://", "https://", "ftp://", "ftps://", "file://")
requests_session = None # type: ignore
def _get_requests_session(max_retries=1):
"""Load requests lazily."""
global requests_session
if requests_session is not None:
return requests_session
import requests
requests_session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=max_retries)
requests_session.mount("https://pypi.org/pypi", adapter)
return requests_session
def cleanup_toml(tml):
toml = tml.split("\n")
new_toml = []
# Remove all empty lines from TOML.
for line in toml:
if line.strip():
new_toml.append(line)
toml = "\n".join(new_toml)
new_toml = []
# Add newlines between TOML sections.
for i, line in enumerate(toml.split("\n")):
# Skip the first line.
if line.startswith("["):
if i > 0:
# Insert a newline before the heading.
new_toml.append("")
new_toml.append(line)
# adding new line at the end of the TOML file
new_toml.append("")
toml = "\n".join(new_toml)
return toml
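def _example_cleanup_toml():
    # Illustrative sketch only, not part of the original module: cleanup_toml()
    # drops blank lines and then re-inserts exactly one blank line before each
    # section heading, plus a trailing newline.
    dirty = '[packages]\n\nrequests = "*"\n[dev-packages]\npytest = "*"\n'
    assert cleanup_toml(dirty) == (
        '[packages]\nrequests = "*"\n\n[dev-packages]\npytest = "*"\n'
    )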
def convert_toml_outline_tables(parsed):
"""Converts all outline tables to inline tables."""
def convert_tomlkit_table(section):
if isinstance(section, tomlkit.items.Table):
body = section.value._body
else:
body = section._body
for key, value in body:
if not key:
continue
if hasattr(value, "keys") and not isinstance(value, tomlkit.items.InlineTable):
table = tomlkit.inline_table()
table.update(value.value)
section[key.key] = table
def convert_toml_table(section):
for package, value in section.items():
if hasattr(value, "keys") and not isinstance(value, toml.decoder.InlineTableDict):
table = toml.TomlDecoder().get_empty_inline_table()
table.update(value)
section[package] = table
is_tomlkit_parsed = isinstance(parsed, tomlkit.container.Container)
for section in ("packages", "dev-packages"):
table_data = parsed.get(section, {})
if not table_data:
continue
if is_tomlkit_parsed:
convert_tomlkit_table(table_data)
else:
convert_toml_table(table_data)
return parsed
def run_command(cmd, *args, is_verbose=False, **kwargs):
"""
Take an input command and run it, handling exceptions and error codes and returning
its stdout and stderr.
:param cmd: The list of command and arguments.
:type cmd: list
    :returns: The result object for the finished command, with ``returncode``,
        ``stdout`` and ``stderr`` attributes.
:raises: exceptions.PipenvCmdError
"""
from ._compat import decode_for_output
from .cmdparse import Script
catch_exceptions = kwargs.pop("catch_exceptions", True)
if isinstance(cmd, ((str,), list, tuple)):
cmd = Script.parse(cmd)
if not isinstance(cmd, Script):
raise TypeError("Command input must be a string, list or tuple")
if "env" not in kwargs:
kwargs["env"] = os.environ.copy()
kwargs["env"]["PYTHONIOENCODING"] = "UTF-8"
command = [cmd.command, *cmd.args]
if is_verbose:
click_echo(f"Running command: $ {cmd.cmdify()}")
c = subprocess_run(command, *args, **kwargs)
if is_verbose:
click_echo("Command output: {}".format(
crayons.cyan(decode_for_output(c.stdout))
), err=True)
if c.returncode and catch_exceptions:
raise PipenvCmdError(cmd.cmdify(), c.stdout, c.stderr, c.returncode)
return c
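def _example_run_command():
    # Illustrative sketch only, not part of the original module: run_command()
    # accepts a string, list or tuple, and returns the finished command's
    # result object; with catch_exceptions left enabled, a non-zero exit
    # status raises PipenvCmdError instead of returning.
    c = run_command([sys.executable, "--version"])
    return c.returncode, c.stdout.strip()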
def parse_python_version(output):
"""Parse a Python version output returned by `python --version`.
Return a dict with three keys: major, minor, and micro. Each value is a
string containing a version part.
Note: The micro part would be `'0'` if it's missing from the input string.
"""
version_line = output.split("\n", 1)[0]
version_pattern = re.compile(
r"""
^ # Beginning of line.
Python # Literally "Python".
\s # Space.
(?P<major>\d+) # Major = one or more digits.
\. # Dot.
(?P<minor>\d+) # Minor = one or more digits.
(?: # Unnamed group for dot-micro.
\. # Dot.
            (?P<micro>\d+)  # Micro = one or more digits.
)? # Micro is optional because pypa/pipenv#1893.
.* # Trailing garbage.
$ # End of line.
""",
re.VERBOSE,
)
match = version_pattern.match(version_line)
if not match:
return None
return match.groupdict(default="0")
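def _example_parse_python_version():
    # Illustrative sketch only, not part of the original module: every part is
    # returned as a string, and a missing micro part defaults to "0"
    # (see pypa/pipenv#1893).
    assert parse_python_version("Python 3.9.1") == {
        "major": "3", "minor": "9", "micro": "1"
    }
    assert parse_python_version("Python 3.10") == {
        "major": "3", "minor": "10", "micro": "0"
    }
    assert parse_python_version("not a version banner") is None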
def python_version(path_to_python):
from .vendor.pythonfinder.utils import get_python_version
if not path_to_python:
return None
try:
version = get_python_version(path_to_python)
except Exception:
return None
return version
def escape_grouped_arguments(s):
"""Prepares a string for the shell (on Windows too!)
Only for use on grouped arguments (passed as a string to Popen)
"""
if s is None:
return None
# Additional escaping for windows paths
if os.name == "nt":
s = "{}".format(s.replace("\\", "\\\\"))
return '"' + s.replace("'", "'\\''") + '"'
def clean_pkg_version(version):
"""Uses pip to prepare a package version string, from our internal version."""
return pep440_version(str(version).replace("==", ""))
class HackedPythonVersion:
"""A Beautiful hack, which allows us to tell pip which version of Python we're using."""
def __init__(self, python_version, python_path):
self.python_version = python_version
self.python_path = python_path
def __enter__(self):
# Only inject when the value is valid
if self.python_version:
os.environ["PIPENV_REQUESTED_PYTHON_VERSION"] = str(self.python_version)
if self.python_path:
os.environ["PIP_PYTHON_PATH"] = str(self.python_path)
def __exit__(self, *args):
# Restore original Python version information.
try:
del os.environ["PIPENV_REQUESTED_PYTHON_VERSION"]
except KeyError:
pass
def prepare_pip_source_args(sources, pip_args=None):
if pip_args is None:
pip_args = []
if sources:
# Add the source to notpip.
package_url = sources[0].get("url")
if not package_url:
raise PipenvUsageError("[[source]] section does not contain a URL.")
pip_args.extend(["-i", package_url])
# Trust the host if it's not verified.
if not sources[0].get("verify_ssl", True):
url_parts = urllib3_util.parse_url(package_url)
url_port = f":{url_parts.port}" if url_parts.port else ""
pip_args.extend(
["--trusted-host", f"{url_parts.host}{url_port}"]
)
# Add additional sources as extra indexes.
if len(sources) > 1:
for source in sources[1:]:
url = source.get("url")
            if not url:  # a missing URL is not harmless here, but skip this source rather than fail
continue
pip_args.extend(["--extra-index-url", url])
# Trust the host if it's not verified.
if not source.get("verify_ssl", True):
url_parts = urllib3_util.parse_url(url)
url_port = f":{url_parts.port}" if url_parts.port else ""
pip_args.extend(
["--trusted-host", f"{url_parts.host}{url_port}"]
)
return pip_args
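def _example_prepare_pip_source_args():
    # Illustrative sketch only, not part of the original module: a source with
    # verify_ssl disabled yields both an index URL and a --trusted-host entry
    # that keeps the explicit port, matching the host:port form produced by the
    # get_host_and_port() helper added in the patch above.
    sources = [{"url": "https://private.example.com:8081/simple", "verify_ssl": False}]
    assert prepare_pip_source_args(sources) == [
        "-i", "https://private.example.com:8081/simple",
        "--trusted-host", "private.example.com:8081",
    ]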
def get_project_index(project, index=None, trusted_hosts=None):
# type: (Optional[Union[str, TSource]], Optional[List[str]], Optional[Project]) -> TSource
from .project import SourceNotFound
if trusted_hosts is None:
trusted_hosts = []
if isinstance(index, Mapping):
return project.find_source(index.get("url"))
try:
source = project.find_source(index)
except SourceNotFound:
index_url = urllib3_util.parse_url(index)
src_name = project.src_name_from_url(index)
verify_ssl = index_url.host not in trusted_hosts
source = {"url": index, "verify_ssl": verify_ssl, "name": src_name}
return source
def get_source_list(
project, # type: Project
index=None, # type: Optional[Union[str, TSource]]
extra_indexes=None, # type: Optional[List[str]]
trusted_hosts=None, # type: Optional[List[str]]
pypi_mirror=None, # type: Optional[str]
):
# type: (...) -> List[TSource]
sources = [] # type: List[TSource]
if index:
sources.append(get_project_index(project, index))
if extra_indexes:
if isinstance(extra_indexes, str):
extra_indexes = [extra_indexes]
for source in extra_indexes:
extra_src = get_project_index(project, source)
if not sources or extra_src["url"] != sources[0]["url"]:
sources.append(extra_src)
else:
for source in project.pipfile_sources:
if not sources or source["url"] != sources[0]["url"]:
sources.append(source)
if not sources:
sources = project.pipfile_sources[:]
if pypi_mirror:
sources = [
create_mirror_source(pypi_mirror) if is_pypi_url(source["url"]) else source
for source in sources
]
return sources
def get_indexes_from_requirement(req, project, index=None, extra_indexes=None, trusted_hosts=None, pypi_mirror=None):
# type: (Requirement, Project, Optional[Text], Optional[List[Text]], Optional[List[Text]], Optional[Text]) -> Tuple[TSource, List[TSource], List[Text]]
index_sources = [] # type: List[TSource]
if not trusted_hosts:
trusted_hosts = [] # type: List[Text]
if extra_indexes is None:
extra_indexes = []
project_indexes = project.pipfile_sources[:]
indexes = []
if req.index:
indexes.append(req.index)
if getattr(req, "extra_indexes", None):
if not isinstance(req.extra_indexes, list):
indexes.append(req.extra_indexes)
else:
indexes.extend(req.extra_indexes)
indexes.extend(project_indexes)
if len(indexes) > 1:
index, extra_indexes = indexes[0], indexes[1:]
index_sources = get_source_list(project, index=index, extra_indexes=extra_indexes, trusted_hosts=trusted_hosts, pypi_mirror=pypi_mirror)
if len(index_sources) > 1:
index_source, extra_index_sources = index_sources[0], index_sources[1:]
else:
index_source, extra_index_sources = index_sources[0], []
return index_source, extra_index_sources
@lru_cache()
def get_pipenv_sitedir():
# type: () -> Optional[str]
import pkg_resources
site_dir = next(
iter(d for d in pkg_resources.working_set if d.key.lower() == "pipenv"), None
)
if site_dir is not None:
return site_dir.location
return None
class HashCacheMixin:
"""Caches hashes of PyPI artifacts so we do not need to re-download them.
Hashes are only cached when the URL appears to contain a hash in it and the
    cache key includes the hash value returned from the server. This ought to
avoid issues where the location on the server changes.
"""
def __init__(self, directory, session):
self.session = session
if not os.path.isdir(directory):
os.makedirs(directory, exist_ok=True)
super().__init__(directory=directory)
def get_hash(self, link):
# If there is no link hash (i.e., md5, sha256, etc.), we don't want
# to store it.
hash_value = self.get(link.url)
if not hash_value:
hash_value = self._get_file_hash(link).encode()
self.set(link.url, hash_value)
return hash_value.decode("utf8")
def _get_file_hash(self, link):
from pipenv.vendor.pip_shims import shims
h = hashlib.new(shims.FAVORITE_HASH)
with open_file(link.url, self.session) as fp:
for chunk in iter(lambda: fp.read(8096), b""):
h.update(chunk)
return ":".join([h.name, h.hexdigest()])
class Resolver:
def __init__(
self, constraints, req_dir, project, sources, index_lookup=None,
markers_lookup=None, skipped=None, clear=False, pre=False
):
self.initial_constraints = constraints
self.req_dir = req_dir
self.project = project
self.sources = sources
self.resolved_tree = set()
self.hashes = {}
self.clear = clear
self.pre = pre
self.results = None
self.markers_lookup = markers_lookup if markers_lookup is not None else {}
self.index_lookup = index_lookup if index_lookup is not None else {}
self.skipped = skipped if skipped is not None else {}
self.markers = {}
self.requires_python_markers = {}
self._pip_args = None
self._constraints = None
self._parsed_constraints = None
self._resolver = None
self._finder = None
self._ignore_compatibility_finder = None
self._session = None
self._constraint_file = None
self._pip_options = None
self._pip_command = None
self._retry_attempts = 0
self._hash_cache = None
def __repr__(self):
return (
"<Resolver (constraints={self.initial_constraints}, req_dir={self.req_dir}, "
"sources={self.sources})>".format(self=self)
)
@staticmethod
@lru_cache()
def _get_pip_command():
from pipenv.vendor.pip_shims import shims
return shims.InstallCommand()
@property
def hash_cache(self):
from pipenv.vendor.pip_shims import shims
if not self._hash_cache:
self._hash_cache = type("HashCache", (HashCacheMixin, shims.SafeFileCache), {})(
os.path.join(self.project.s.PIPENV_CACHE_DIR, "hashes"), self.session
)
return self._hash_cache
@classmethod
def get_metadata(
cls,
deps, # type: List[str]
index_lookup, # type: Dict[str, str]
markers_lookup, # type: Dict[str, str]
project, # type: Project
sources, # type: Dict[str, str]
req_dir=None, # type: Optional[str]
pre=False, # type: bool
clear=False, # type: bool
):
# type: (...) -> Tuple[Set[str], Dict[str, Dict[str, Union[str, bool, List[str]]]], Dict[str, str], Dict[str, str]]
constraints = set() # type: Set[str]
skipped = dict() # type: Dict[str, Dict[str, Union[str, bool, List[str]]]]
if index_lookup is None:
index_lookup = {}
if markers_lookup is None:
markers_lookup = {}
if not req_dir:
from .vendor.vistir.path import create_tracked_tempdir
req_dir = create_tracked_tempdir(prefix="pipenv-", suffix="-reqdir")
transient_resolver = cls(
[], req_dir, project, sources, index_lookup=index_lookup,
markers_lookup=markers_lookup, clear=clear, pre=pre
)
for dep in deps:
if not dep:
continue
req, req_idx, markers_idx = cls.parse_line(
dep, index_lookup=index_lookup, markers_lookup=markers_lookup, project=project
)
index_lookup.update(req_idx)
markers_lookup.update(markers_idx)
# Add dependencies of any file (e.g. wheels/tarballs), source, or local
# directories into the initial constraint pool to be resolved with the
# rest of the dependencies, while adding the files/vcs deps/paths themselves
# to the lockfile directly
constraint_update, lockfile_update = cls.get_deps_from_req(
req, resolver=transient_resolver, resolve_vcs=project.s.PIPENV_RESOLVE_VCS
)
constraints |= constraint_update
skipped.update(lockfile_update)
return constraints, skipped, index_lookup, markers_lookup
@classmethod
def parse_line(
cls,
line, # type: str
index_lookup=None, # type: Dict[str, str]
markers_lookup=None, # type: Dict[str, str]
project=None # type: Optional[Project]
):
# type: (...) -> Tuple[Requirement, Dict[str, str], Dict[str, str]]
from .vendor.requirementslib.models.requirements import Requirement
from .vendor.requirementslib.models.utils import DIRECT_URL_RE
if index_lookup is None:
index_lookup = {}
if markers_lookup is None:
markers_lookup = {}
if project is None:
from .project import Project
project = Project()
index, extra_index, trust_host, remainder = parse_indexes(line)
line = " ".join(remainder)
req = None # type: Requirement
try:
req = Requirement.from_line(line)
except ValueError:
direct_url = DIRECT_URL_RE.match(line)
if direct_url:
line = "{}#egg={}".format(line, direct_url.groupdict()["name"])
try:
req = Requirement.from_line(line)
except ValueError:
raise ResolutionFailure(f"Failed to resolve requirement from line: {line!s}")
else:
raise ResolutionFailure(f"Failed to resolve requirement from line: {line!s}")
if index:
try:
index_lookup[req.normalized_name] = project.get_source(
url=index, refresh=True).get("name")
except TypeError:
pass
try:
req.normalized_name
except TypeError:
raise RequirementError(req=req)
# strip the marker and re-add it later after resolution
# but we will need a fallback in case resolution fails
# eg pypiwin32
if req.markers:
markers_lookup[req.normalized_name] = req.markers.replace('"', "'")
return req, index_lookup, markers_lookup
@classmethod
def get_deps_from_req(cls, req, resolver=None, resolve_vcs=True):
# type: (Requirement, Optional["Resolver"], bool) -> Tuple[Set[str], Dict[str, Dict[str, Union[str, bool, List[str]]]]]
from .vendor.requirementslib.models.requirements import Requirement
from .vendor.requirementslib.models.utils import (
_requirement_to_str_lowercase_name
)
from .vendor.requirementslib.utils import is_installable_dir
# TODO: this is way too complex, refactor this
constraints = set() # type: Set[str]
locked_deps = dict() # type: Dict[str, Dict[str, Union[str, bool, List[str]]]]
if (req.is_file_or_url or req.is_vcs) and not req.is_wheel:
# for local packages with setup.py files and potential direct url deps:
if req.is_vcs:
req_list, lockfile = get_vcs_deps(reqs=[req])
req = next(iter(req for req in req_list if req is not None), req_list)
entry = lockfile[pep423_name(req.normalized_name)]
else:
_, entry = req.pipfile_entry
parsed_line = req.req.parsed_line # type: Line
setup_info = None # type: Any
try:
name = req.normalized_name
except TypeError:
raise RequirementError(req=req)
setup_info = req.req.setup_info
setup_info.get_info()
locked_deps[pep423_name(name)] = entry
requirements = []
# Allow users to toggle resolution off for non-editable VCS packages
# but leave it on for local, installable folders on the filesystem
if resolve_vcs or (
req.editable or parsed_line.is_wheel or (
req.is_file_or_url and parsed_line.is_local
and is_installable_dir(parsed_line.path)
)
):
requirements = [v for v in getattr(setup_info, "requires", {}).values()]
for r in requirements:
if getattr(r, "url", None) and not getattr(r, "editable", False):
if r is not None:
if not r.url:
continue
line = _requirement_to_str_lowercase_name(r)
new_req, _, _ = cls.parse_line(line)
if r.marker and not r.marker.evaluate():
new_constraints = {}
_, new_entry = req.pipfile_entry
new_lock = {
pep423_name(new_req.normalized_name): new_entry
}
else:
new_constraints, new_lock = cls.get_deps_from_req(
new_req, resolver
)
locked_deps.update(new_lock)
constraints |= new_constraints
# if there is no marker or there is a valid marker, add the constraint line
elif r and (not r.marker or (r.marker and r.marker.evaluate())):
line = _requirement_to_str_lowercase_name(r)
constraints.add(line)
# ensure the top level entry remains as provided
# note that we shouldn't pin versions for editable vcs deps
if not req.is_vcs:
if req.specifiers:
locked_deps[name]["version"] = req.specifiers
elif parsed_line.setup_info and parsed_line.setup_info.version:
locked_deps[name]["version"] = "=={}".format(
parsed_line.setup_info.version
)
# if not req.is_vcs:
locked_deps.update({name: entry})
else:
# if the dependency isn't installable, don't add it to constraints
# and instead add it directly to the lock
if req and req.requirement and (
req.requirement.marker and not req.requirement.marker.evaluate()
):
pypi = resolver.finder if resolver else None
ireq = req.ireq
best_match = pypi.find_best_candidate(ireq.name, ireq.specifier).best_candidate if pypi else None
if best_match:
ireq.req.specifier = ireq.specifier.__class__(f"=={best_match.version}")
hashes = resolver.collect_hashes(ireq) if resolver else []
new_req = Requirement.from_ireq(ireq)
new_req = new_req.add_hashes(hashes)
name, entry = new_req.pipfile_entry
locked_deps[pep423_name(name)] = translate_markers(entry)
click_echo(
"{} doesn't match your environment, "
"its dependencies won't be resolved.".format(req.as_line()),
err=True
)
else:
click_echo(
"Could not find a version of {} that matches your environment, "
"it will be skipped.".format(req.as_line()),
err=True
)
return constraints, locked_deps
constraints.add(req.constraint_line)
return constraints, locked_deps
return constraints, locked_deps
@classmethod
def create(
cls,
deps, # type: List[str]
project, # type: Project
index_lookup=None, # type: Dict[str, str]
markers_lookup=None, # type: Dict[str, str]
sources=None, # type: List[str]
req_dir=None, # type: str
clear=False, # type: bool
pre=False # type: bool
):
# type: (...) -> "Resolver"
from pipenv.vendor.vistir.path import create_tracked_tempdir
if not req_dir:
req_dir = create_tracked_tempdir(suffix="-requirements", prefix="pipenv-")
if index_lookup is None:
index_lookup = {}
if markers_lookup is None:
markers_lookup = {}
if sources is None:
sources = project.sources
constraints, skipped, index_lookup, markers_lookup = cls.get_metadata(
deps, index_lookup, markers_lookup, project, sources, req_dir=req_dir,
pre=pre, clear=clear
)
return Resolver(
constraints, req_dir, project, sources, index_lookup=index_lookup,
markers_lookup=markers_lookup, skipped=skipped, clear=clear, pre=pre
)
@classmethod
def from_pipfile(cls, project, pipfile=None, dev=False, pre=False, clear=False):
# type: (Optional[Project], Optional[Pipfile], bool, bool, bool) -> "Resolver"
from pipenv.vendor.vistir.path import create_tracked_tempdir
if not pipfile:
pipfile = project._pipfile
req_dir = create_tracked_tempdir(suffix="-requirements", prefix="pipenv-")
index_lookup, markers_lookup = {}, {}
deps = set()
if dev:
deps.update({req.as_line() for req in pipfile.dev_packages})
deps.update({req.as_line() for req in pipfile.packages})
constraints, skipped, index_lookup, markers_lookup = cls.get_metadata(
list(deps), index_lookup, markers_lookup, project, project.sources,
req_dir=req_dir, pre=pre, clear=clear
)
return Resolver(
constraints, req_dir, project, project.sources, index_lookup=index_lookup,
markers_lookup=markers_lookup, skipped=skipped, clear=clear, pre=pre
)
@property
def pip_command(self):
if self._pip_command is None:
self._pip_command = self._get_pip_command()
return self._pip_command
def prepare_pip_args(self, use_pep517=None, build_isolation=True):
pip_args = []
if self.sources:
pip_args = prepare_pip_source_args(self.sources, pip_args)
if use_pep517 is False:
pip_args.append("--no-use-pep517")
if build_isolation is False:
pip_args.append("--no-build-isolation")
if self.pre:
pip_args.append("--pre")
pip_args.extend(["--cache-dir", self.project.s.PIPENV_CACHE_DIR])
return pip_args
@property
def pip_args(self):
use_pep517 = environments.get_from_env("USE_PEP517", prefix="PIP")
build_isolation = environments.get_from_env("BUILD_ISOLATION", prefix="PIP")
if self._pip_args is None:
self._pip_args = self.prepare_pip_args(
use_pep517=use_pep517, build_isolation=build_isolation
)
return self._pip_args
def prepare_constraint_file(self):
from pipenv.vendor.vistir.path import create_tracked_tempfile
constraints_file = create_tracked_tempfile(
mode="w",
prefix="pipenv-",
suffix="-constraints.txt",
dir=self.req_dir,
delete=False,
)
skip_args = ("build-isolation", "use-pep517", "cache-dir")
args_to_add = [
arg for arg in self.pip_args
if not any(bad_arg in arg for bad_arg in skip_args)
]
if self.sources:
requirementstxt_sources = " ".join(args_to_add) if args_to_add else ""
requirementstxt_sources = requirementstxt_sources.replace(" --", "\n--")
constraints_file.write(f"{requirementstxt_sources}\n")
constraints = self.initial_constraints
constraints_file.write("\n".join([c for c in constraints]))
constraints_file.close()
return constraints_file.name
@property
def constraint_file(self):
if self._constraint_file is None:
self._constraint_file = self.prepare_constraint_file()
return self._constraint_file
@property
def pip_options(self):
if self._pip_options is None:
pip_options, _ = self.pip_command.parser.parse_args(self.pip_args)
pip_options.cache_dir = self.project.s.PIPENV_CACHE_DIR
pip_options.no_python_version_warning = True
pip_options.no_input = True
pip_options.progress_bar = "off"
pip_options.ignore_requires_python = True
pip_options.pre = self.pre or self.project.settings.get("allow_prereleases", False)
self._pip_options = pip_options
return self._pip_options
@property
def session(self):
if self._session is None:
self._session = self.pip_command._build_session(self.pip_options)
return self._session
@property
def finder(self):
from pipenv.vendor.pip_shims import shims
if self._finder is None:
self._finder = shims.get_package_finder(
install_cmd=self.pip_command,
options=self.pip_options,
session=self.session
)
return self._finder
@property
def ignore_compatibility_finder(self):
from pipenv.vendor.pip_shims import shims
if self._ignore_compatibility_finder is None:
ignore_compatibility_finder = shims.get_package_finder(
install_cmd=self.pip_command,
options=self.pip_options,
session=self.session,
)
# It would be nice if `shims.get_package_finder` took an
            # `ignore_compatibility` parameter, but that's some vendored code
# we'd rather avoid touching.
ignore_compatibility_finder._ignore_compatibility = True
self._ignore_compatibility_finder = ignore_compatibility_finder
return self._ignore_compatibility_finder
@property
def parsed_constraints(self):
from pipenv.vendor.pip_shims import shims
if self._parsed_constraints is None:
self._parsed_constraints = shims.parse_requirements(
self.constraint_file, finder=self.finder, session=self.session,
options=self.pip_options
)
return self._parsed_constraints
@property
def constraints(self):
from pipenv.patched.notpip._internal.req.constructors import install_req_from_parsed_requirement
if self._constraints is None:
self._constraints = [
install_req_from_parsed_requirement(
c, isolated=self.pip_options.build_isolation,
use_pep517=self.pip_options.use_pep517, user_supplied=True
)
for c in self.parsed_constraints
]
return self._constraints
@contextlib.contextmanager
def get_resolver(self, clear=False):
from pipenv.vendor.pip_shims.shims import (
WheelCache, get_requirement_tracker, global_tempdir_manager
)
with global_tempdir_manager(), get_requirement_tracker() as req_tracker, TemporaryDirectory(suffix="-build", prefix="pipenv-") as directory:
pip_options = self.pip_options
finder = self.finder
wheel_cache = WheelCache(pip_options.cache_dir, pip_options.format_control)
directory.path = directory.name
preparer = self.pip_command.make_requirement_preparer(
temp_build_dir=directory,
options=pip_options,
req_tracker=req_tracker,
session=self.session,
finder=finder,
use_user_site=False,
)
resolver = self.pip_command.make_resolver(
preparer=preparer,
finder=finder,
options=pip_options,
wheel_cache=wheel_cache,
use_user_site=False,
ignore_installed=True,
ignore_requires_python=pip_options.ignore_requires_python,
force_reinstall=pip_options.force_reinstall,
upgrade_strategy="to-satisfy-only",
use_pep517=pip_options.use_pep517,
)
yield resolver
def resolve(self):
from pipenv.vendor.pip_shims.shims import InstallationError
from pipenv.exceptions import ResolutionFailure
with temp_environ(), self.get_resolver() as resolver:
try:
results = resolver.resolve(self.constraints, check_supported_wheels=False)
except InstallationError as e:
raise ResolutionFailure(message=str(e))
else:
self.results = set(results.all_requirements)
self.resolved_tree.update(self.results)
return self.resolved_tree
def resolve_constraints(self):
from .vendor.requirementslib.models.markers import marker_from_specifier
new_tree = set()
for result in self.resolved_tree:
if result.markers:
self.markers[result.name] = result.markers
else:
candidate = self.finder.find_best_candidate(result.name, result.specifier).best_candidate
if candidate:
requires_python = candidate.link.requires_python
if requires_python:
marker = marker_from_specifier(requires_python)
self.markers[result.name] = marker
result.markers = marker
if result.req:
result.req.marker = marker
new_tree.add(result)
self.resolved_tree = new_tree
@classmethod
def prepend_hash_types(cls, checksums, hash_type):
cleaned_checksums = set()
for checksum in checksums:
if not checksum:
continue
if not checksum.startswith(f"{hash_type}:"):
checksum = f"{hash_type}:{checksum}"
cleaned_checksums.add(checksum)
return cleaned_checksums
def _get_hashes_from_pypi(self, ireq):
from pipenv.vendor.pip_shims import shims
pkg_url = f"https://pypi.org/pypi/{ireq.name}/json"
session = _get_requests_session(self.project.s.PIPENV_MAX_RETRIES)
try:
collected_hashes = set()
# Grab the hashes from the new warehouse API.
r = session.get(pkg_url, timeout=10)
api_releases = r.json()["releases"]
cleaned_releases = {}
for api_version, api_info in api_releases.items():
api_version = clean_pkg_version(api_version)
cleaned_releases[api_version] = api_info
version = ""
if ireq.specifier:
spec = next(iter(s for s in ireq.specifier), None)
if spec:
version = spec.version
for release in cleaned_releases[version]:
collected_hashes.add(release["digests"][shims.FAVORITE_HASH])
return self.prepend_hash_types(collected_hashes, shims.FAVORITE_HASH)
except (ValueError, KeyError, ConnectionError):
if self.project.s.is_verbose():
click_echo(
"{}: Error generating hash for {}".format(
crayons.red("Warning", bold=True), ireq.name
), err=True
)
return None
def collect_hashes(self, ireq):
if ireq.link:
link = ireq.link
if link.is_vcs or (link.is_file and link.is_existing_dir()):
return set()
if ireq.original_link:
return {self._get_hash_from_link(ireq.original_link)}
if not is_pinned_requirement(ireq):
return set()
if any(
"python.org" in source["url"] or "pypi.org" in source["url"]
for source in self.sources
):
hashes = self._get_hashes_from_pypi(ireq)
if hashes:
return hashes
applicable_candidates = self.ignore_compatibility_finder.find_best_candidate(
ireq.name, ireq.specifier
).iter_applicable()
return {
self._get_hash_from_link(candidate.link)
for candidate in applicable_candidates
}
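# Summary comment (added for clarity, not original): hash collection short-circuits
# for VCS links and existing local directories (no hashes), hashes an explicit
# original link directly, consults the PyPI warehouse JSON API for pinned
# requirements when a pypi.org/python.org source is configured, and otherwise
# hashes every applicable candidate returned by the compatibility-ignoring finder.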
def resolve_hashes(self):
if self.results is not None:
for ireq in self.results:
self.hashes[ireq] = self.collect_hashes(ireq)
return self.hashes
def _get_hash_from_link(self, link):
from pipenv.vendor.pip_shims import shims
if link.hash and link.hash_name == shims.FAVORITE_HASH:
return f"{link.hash_name}:{link.hash}"
return self.hash_cache.get_hash(link)
def _clean_skipped_result(self, req, value):
ref = None
if req.is_vcs:
ref = req.commit_hash
ireq = req.as_ireq()
entry = value.copy()
entry["name"] = req.name
if entry.get("editable", False) and entry.get("version"):
del entry["version"]
ref = ref if ref is not None else entry.get("ref")
if ref:
entry["ref"] = ref
collected_hashes = self.collect_hashes(ireq)
if collected_hashes:
entry["hashes"] = sorted(set(collected_hashes))
return req.name, entry
def clean_results(self):
from pipenv.vendor.requirementslib.models.requirements import (
Requirement
)
reqs = [(Requirement.from_ireq(ireq), ireq) for ireq in self.resolved_tree]
results = {}
for req, ireq in reqs:
if (req.vcs and req.editable and not req.is_direct_url):
continue
elif req.normalized_name in self.skipped.keys():
continue
collected_hashes = self.hashes.get(ireq, set())
req = req.add_hashes(collected_hashes)
if collected_hashes:
collected_hashes = sorted(collected_hashes)
name, entry = format_requirement_for_lockfile(
req, self.markers_lookup, self.index_lookup, collected_hashes
)
entry = translate_markers(entry)
if name in results:
results[name].update(entry)
else:
results[name] = entry
for k in list(self.skipped.keys()):
req = Requirement.from_pipfile(k, self.skipped[k])
name, entry = self._clean_skipped_result(req, self.skipped[k])
entry = translate_markers(entry)
if name in results:
results[name].update(entry)
else:
results[name] = entry
results = list(results.values())
return results
def format_requirement_for_lockfile(req, markers_lookup, index_lookup, hashes=None):
if req.specifiers:
version = str(req.get_version())
else:
version = None
index = index_lookup.get(req.normalized_name)
markers = markers_lookup.get(req.normalized_name)
req.index = index
name, pf_entry = req.pipfile_entry
name = pep423_name(req.name)
entry = {}
if isinstance(pf_entry, str):
entry["version"] = pf_entry.lstrip("=")
else:
entry.update(pf_entry)
if version is not None and not req.is_vcs:
entry["version"] = version
if req.line_instance.is_direct_url and not req.is_vcs:
entry["file"] = req.req.uri
if hashes:
entry["hashes"] = sorted(set(hashes))
entry["name"] = name
if index:
entry.update({"index": index})
if markers:
entry.update({"markers": markers})
entry = translate_markers(entry)
if req.vcs or req.editable:
for key in ("index", "version", "file"):
try:
del entry[key]
except KeyError:
pass
return name, entry
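# Summary comment (added for clarity, not original): the returned pair is the
# PEP 423-normalized name plus a lockfile entry dict carrying the pinned version,
# any collected hashes, and the resolved index/markers; for VCS or editable
# requirements the "index", "version" and "file" keys are stripped so only the
# source reference remains.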
def _show_warning(message, category, filename, lineno, line):
warnings.showwarning(message=message, category=category, filename=filename,
lineno=lineno, file=sys.stderr, line=line)
sys.stderr.flush()
def actually_resolve_deps(
deps,
index_lookup,
markers_lookup,
project,
sources,
clear,
pre,
req_dir=None,
):
from pipenv.vendor.vistir.path import create_tracked_tempdir
if not req_dir:
req_dir = create_tracked_tempdir(suffix="-requirements", prefix="pipenv-")
warning_list = []
with warnings.catch_warnings(record=True) as warning_list:
resolver = Resolver.create(
deps, project, index_lookup, markers_lookup, sources, req_dir, clear, pre
)
resolver.resolve()
hashes = resolver.resolve_hashes()
resolver.resolve_constraints()
results = resolver.clean_results()
for warning in warning_list:
_show_warning(warning.message, warning.category, warning.filename, warning.lineno,
warning.line)
return (results, hashes, resolver.markers_lookup, resolver, resolver.skipped)
@contextlib.contextmanager
def create_spinner(text, setting, nospin=None, spinner_name=None):
from .vendor.vistir import spin
from .vendor.vistir.misc import fs_str
if not spinner_name:
spinner_name = setting.PIPENV_SPINNER
if nospin is None:
nospin = setting.PIPENV_NOSPIN
with spin.create_spinner(
spinner_name=spinner_name,
start_text=fs_str(text),
nospin=nospin, write_to_stdout=False
) as sp:
yield sp
def resolve(cmd, sp, project):
from ._compat import decode_output
from .cmdparse import Script
from .vendor.vistir.misc import echo
c = subprocess_run(Script.parse(cmd).cmd_args, block=False, env=os.environ.copy())
is_verbose = project.s.is_verbose()
err = ""
for line in iter(c.stderr.readline, ""):
line = decode_output(line)
if not line.rstrip():
continue
err += line
if is_verbose:
sp.hide_and_write(line.rstrip())
c.wait()
returncode = c.poll()
out = c.stdout.read()
if returncode != 0:
sp.red.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format(
"Locking Failed!"
))
echo(out.strip(), err=True)
if not is_verbose:
echo(err, err=True)
sys.exit(returncode)
if is_verbose:
echo(out.strip(), err=True)
return subprocess.CompletedProcess(c.args, returncode, out, err)
def get_locked_dep(dep, pipfile_section, prefer_pipfile=True):
# the prefer pipfile flag is not used yet, but we are introducing
# it now for development purposes
# TODO: Is this implementation clear? How can it be improved?
entry = None
cleaner_kwargs = {
"is_top_level": False,
"pipfile_entry": None
}
if isinstance(dep, Mapping) and dep.get("name", ""):
dep_name = pep423_name(dep["name"])
name = next(iter(
k for k in pipfile_section.keys()
if pep423_name(k) == dep_name
), None)
entry = pipfile_section[name] if name else None
if entry:
cleaner_kwargs.update({"is_top_level": True, "pipfile_entry": entry})
lockfile_entry = clean_resolved_dep(dep, **cleaner_kwargs)
if entry and isinstance(entry, Mapping):
version = entry.get("version", "") if entry else ""
else:
version = entry if entry else ""
lockfile_name, lockfile_dict = lockfile_entry.copy().popitem()
lockfile_version = lockfile_dict.get("version", "")
# Keep pins from the lockfile
if prefer_pipfile and lockfile_version != version and version.startswith("==") and "*" not in version:
lockfile_dict["version"] = version
lockfile_entry[lockfile_name] = lockfile_dict
return lockfile_entry
def prepare_lockfile(results, pipfile, lockfile):
# from .vendor.requirementslib.utils import is_vcs
for dep in results:
if not dep:
continue
# Merge in any relevant information from the pipfile entry, including
# markers, normalized names, URL info, etc that we may have dropped during lock
# if not is_vcs(dep):
lockfile_entry = get_locked_dep(dep, pipfile)
name = next(iter(k for k in lockfile_entry.keys()))
current_entry = lockfile.get(name)
if current_entry:
if not isinstance(current_entry, Mapping):
lockfile[name] = lockfile_entry[name]
else:
lockfile[name].update(lockfile_entry[name])
lockfile[name] = translate_markers(lockfile[name])
else:
lockfile[name] = lockfile_entry[name]
return lockfile
def venv_resolve_deps(
deps,
which,
project,
pre=False,
clear=False,
allow_global=False,
pypi_mirror=None,
dev=False,
pipfile=None,
lockfile=None,
keep_outdated=False
):
"""
Resolve dependencies for a pipenv project, acting as a portal to the target environment.
Regardless of whether a virtual environment is present or not, this will spawn
a subprocess which is isolated to the target environment and which will perform
dependency resolution. This function reads the output of that call and mutates
the provided lockfile accordingly, returning nothing.
:param List[:class:`~requirementslib.Requirement`] deps: A list of dependencies to resolve.
:param Callable which: A callable used to locate commands (e.g. the target ``python``) inside the environment
:param project: The pipenv Project instance to use during resolution
:param Optional[bool] pre: Whether to resolve pre-release candidates, defaults to False
:param Optional[bool] clear: Whether to clear the cache during resolution, defaults to False
:param Optional[bool] allow_global: Whether to use *sys.executable* as the python binary, defaults to False
:param Optional[str] pypi_mirror: A URL to substitute any time *pypi.org* is encountered, defaults to None
:param Optional[bool] dev: Whether to target *dev-packages* or not, defaults to False
:param pipfile: A Pipfile section to operate on, defaults to None
:type pipfile: Optional[Dict[str, Union[str, Dict[str, bool, List[str]]]]]
:param Dict[str, Any] lockfile: A project lockfile to mutate, defaults to None
:param bool keep_outdated: Whether to retain outdated dependencies and resolve with them in mind, defaults to False
:raises RuntimeError: Raised on resolution failure
:return: Nothing
:rtype: None
"""
import json
from . import resolver
from ._compat import decode_for_output
from .vendor.vistir.compat import JSONDecodeError, NamedTemporaryFile, Path
from .vendor.vistir.misc import fs_str
from .vendor.vistir.path import create_tracked_tempdir
results = []
pipfile_section = "dev-packages" if dev else "packages"
lockfile_section = "develop" if dev else "default"
if not deps:
if not project.pipfile_exists:
return None
deps = project.parsed_pipfile.get(pipfile_section, {})
if not deps:
return None
if not pipfile:
pipfile = getattr(project, pipfile_section, {})
if not lockfile:
lockfile = project._lockfile
req_dir = create_tracked_tempdir(prefix="pipenv", suffix="requirements")
cmd = [
which("python", allow_global=allow_global),
Path(resolver.__file__.rstrip("co")).as_posix()
]
if pre:
cmd.append("--pre")
if clear:
cmd.append("--clear")
if allow_global:
cmd.append("--system")
if dev:
cmd.append("--dev")
target_file = NamedTemporaryFile(prefix="resolver", suffix=".json", delete=False)
target_file.close()
cmd.extend(["--write", make_posix(target_file.name)])
with temp_environ():
os.environ.update({fs_str(k): fs_str(val) for k, val in os.environ.items()})
if pypi_mirror:
os.environ["PIPENV_PYPI_MIRROR"] = str(pypi_mirror)
os.environ["PIPENV_VERBOSITY"] = str(project.s.PIPENV_VERBOSITY)
os.environ["PIPENV_REQ_DIR"] = fs_str(req_dir)
os.environ["PIP_NO_INPUT"] = fs_str("1")
pipenv_site_dir = get_pipenv_sitedir()
if pipenv_site_dir is not None:
os.environ["PIPENV_SITE_DIR"] = pipenv_site_dir
else:
os.environ.pop("PIPENV_SITE_DIR", None)
if keep_outdated:
os.environ["PIPENV_KEEP_OUTDATED"] = fs_str("1")
with create_spinner(text=decode_for_output("Locking..."), setting=project.s) as sp:
# This conversion is somewhat slow on local and file-type requirements since
# we now download those requirements / make temporary folders to perform
# dependency resolution on them, so we are including this step inside the
# spinner context manager for the UX improvement
sp.write(decode_for_output("Building requirements..."))
deps = convert_deps_to_pip(
deps, project, r=False, include_index=True
)
constraints = set(deps)
os.environ["PIPENV_PACKAGES"] = str("\n".join(constraints))
sp.write(decode_for_output("Resolving dependencies..."))
c = resolve(cmd, sp, project=project)
results = c.stdout.strip()
if c.returncode == 0:
sp.green.ok(environments.PIPENV_SPINNER_OK_TEXT.format("Success!"))
if not project.s.is_verbose() and c.stderr.strip():
click_echo(crayons.yellow(f"Warning: {c.stderr.strip()}"), err=True)
else:
sp.red.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format("Locking Failed!"))
click_echo(f"Output: {c.stdout.strip()}", err=True)
click_echo(f"Error: {c.stderr.strip()}", err=True)
try:
with open(target_file.name) as fh:
results = json.load(fh)
except (IndexError, JSONDecodeError):
click_echo(c.stdout.strip(), err=True)
click_echo(c.stderr.strip(), err=True)
if os.path.exists(target_file.name):
os.unlink(target_file.name)
raise RuntimeError("There was a problem with locking.")
if os.path.exists(target_file.name):
os.unlink(target_file.name)
if lockfile_section not in lockfile:
lockfile[lockfile_section] = {}
prepare_lockfile(results, pipfile, lockfile[lockfile_section])
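# Minimal usage sketch (illustrative; assumes a configured Project instance and a
# `which` callable that resolves executables for the target environment):
#   lockfile = project._lockfile
#   venv_resolve_deps(deps, which=which, project=project, dev=False, lockfile=lockfile)
#   # lockfile["default"] (or "develop" when dev=True) now holds the resolved entries.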
def resolve_deps(
deps,
which,
project,
sources=None,
python=False,
clear=False,
pre=False,
allow_global=False,
req_dir=None
):
"""Given a list of dependencies, return a resolved list of dependencies,
using pip-tools -- and their hashes, using the warehouse API / pip.
"""
index_lookup = {}
markers_lookup = {}
python_path = which("python", allow_global=allow_global)
if not os.environ.get("PIP_SRC"):
os.environ["PIP_SRC"] = project.virtualenv_src_location
backup_python_path = sys.executable
results = []
resolver = None
if not deps:
return results, resolver
# First (proper) attempt:
req_dir = req_dir if req_dir else os.environ.get("req_dir", None)
if not req_dir:
from .vendor.vistir.path import create_tracked_tempdir
req_dir = create_tracked_tempdir(prefix="pipenv-", suffix="-requirements")
with HackedPythonVersion(python_version=python, python_path=python_path):
try:
results, hashes, markers_lookup, resolver, skipped = actually_resolve_deps(
deps,
index_lookup,
markers_lookup,
project,
sources,
clear,
pre,
req_dir=req_dir,
)
except RuntimeError:
# Don't exit here, like usual.
results = None
# Second (last-resort) attempt:
if results is None:
with HackedPythonVersion(
python_version=".".join([str(s) for s in sys.version_info[:3]]),
python_path=backup_python_path,
):
try:
# Attempt to resolve again, with different Python version information,
# particularly for particularly particular packages.
results, hashes, markers_lookup, resolver, skipped = actually_resolve_deps(
deps,
index_lookup,
markers_lookup,
project,
sources,
clear,
pre,
req_dir=req_dir,
)
except RuntimeError:
sys.exit(1)
return results, resolver
def is_star(val):
return isinstance(val, str) and val == "*"
def is_pinned(val):
if isinstance(val, Mapping):
val = val.get("version")
return isinstance(val, str) and val.startswith("==")
def is_pinned_requirement(ireq):
"""
Returns whether an InstallRequirement is a "pinned" requirement.
"""
if ireq.editable:
return False
if ireq.req is None or len(ireq.specifier) != 1:
return False
spec = next(iter(ireq.specifier))
return spec.operator in {"==", "==="} and not spec.version.endswith(".*")
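# Example (illustrative): "requests==2.25.1" counts as pinned, whereas editable
# requirements, range specifiers such as "requests>=2.0", and wildcard pins such as
# "requests==2.*" do not.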
def convert_deps_to_pip(deps, project=None, r=True, include_index=True):
""""Converts a Pipfile-formatted dependency to a pip-formatted one."""
from .vendor.requirementslib.models.requirements import Requirement
dependencies = []
for dep_name, dep in deps.items():
if project:
project.clear_pipfile_cache()
indexes = getattr(project, "pipfile_sources", []) if project is not None else []
new_dep = Requirement.from_pipfile(dep_name, dep)
if new_dep.index:
include_index = True
req = new_dep.as_line(sources=indexes if include_index else None).strip()
dependencies.append(req)
if not r:
return dependencies
# Write requirements.txt to tmp directory.
from .vendor.vistir.path import create_tracked_tempfile
f = create_tracked_tempfile(suffix="-requirements.txt", delete=False)
f.write("\n".join(dependencies).encode("utf-8"))
f.close()
return f.name
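# Hedged example of the conversion (mirrors the unit tests; r=False returns the
# requirement lines directly instead of writing a temporary requirements.txt):
#   convert_deps_to_pip({"requests": {"extras": ["socks"], "version": "*"}}, r=False)
#   -> ["requests[socks]"]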
def mkdir_p(newdir):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) do not exist, make them as well
From: http://code.activestate.com/recipes/82465-a-friendly-mkdir/
"""
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError(
"a file with the same name as the desired dir, '{}', already exists.".format(
newdir
)
)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
mkdir_p(head)
if tail:
# Even though we've checked that the directory doesn't exist above, it might exist
# now if some other process has created it between now and the time we checked it.
try:
os.mkdir(newdir)
except OSError as exn:
# If we failed because the directory does exist, that's not a problem -
# that's what we were trying to do anyway. Only re-raise the exception
# if we failed for some other reason.
if exn.errno != errno.EEXIST:
raise
def is_required_version(version, specified_version):
"""Check to see if there's a hard requirement for version
number provided in the Pipfile.
"""
# Certain packages may be defined with multiple values.
if isinstance(specified_version, dict):
specified_version = specified_version.get("version", "")
if specified_version.startswith("=="):
return version.strip() == specified_version.split("==")[1].strip()
return True
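# Example (illustrative): is_required_version("2.1.6", "==2.1.4") is False, while any
# non-pinned specifier such as ">=2.0" or "*" makes this return True.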
def is_editable(pipfile_entry):
if hasattr(pipfile_entry, "get"):
return pipfile_entry.get("editable", False) and any(
pipfile_entry.get(key) for key in ("file", "path") + VCS_LIST
)
return False
def is_installable_file(path):
"""Determine if a path can potentially be installed"""
from .patched.notpip._internal.utils.packaging import specifiers
from .vendor.pip_shims.shims import is_archive_file, is_installable_dir
if hasattr(path, "keys") and any(
key for key in path.keys() if key in ["file", "path"]
):
path = urlparse(path["file"]).path if "file" in path else path["path"]
if not isinstance(path, str) or path == "*":
return False
# If the string starts with a valid specifier operator, test if it is a valid
# specifier set before making a path object (to avoid breaking windows)
if any(path.startswith(spec) for spec in "!=<>~"):
try:
specifiers.SpecifierSet(path)
# If this is not a valid specifier, just move on and try it as a path
except specifiers.InvalidSpecifier:
pass
else:
return False
if not os.path.exists(os.path.abspath(path)):
return False
lookup_path = Path(path)
absolute_path = f"{lookup_path.absolute()}"
if lookup_path.is_dir() and is_installable_dir(absolute_path):
return True
elif lookup_path.is_file() and is_archive_file(absolute_path):
return True
return False
def is_file(package):
"""Determine if a package name is for a File dependency."""
if hasattr(package, "keys"):
return any(key for key in package.keys() if key in ["file", "path"])
if os.path.exists(str(package)):
return True
for start in SCHEME_LIST:
if str(package).startswith(start):
return True
return False
def pep440_version(version):
"""Normalize version to PEP 440 standards"""
# Use pip built-in version parser.
from pipenv.vendor.pip_shims import shims
return str(shims.parse_version(version))
def pep423_name(name):
"""Normalize package name to PEP 423 style standard."""
name = name.lower()
if any(i not in name for i in (VCS_LIST + SCHEME_LIST)):
return name.replace("_", "-")
else:
return name
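# Example (illustrative): pep423_name("Flask_SQLAlchemy") -> "flask-sqlalchemy"
# (lower-cased, with underscores normalized to hyphens).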
def proper_case(package_name):
"""Properly case project name from pypi.org."""
# Hit the simple API.
r = _get_requests_session().get(
f"https://pypi.org/pypi/{package_name}/json", timeout=0.3, stream=True
)
if not r.ok:
raise OSError(
f"Unable to find package {package_name} in PyPI repository."
)
r = parse.parse("https://pypi.org/pypi/{name}/json", r.url)
good_name = r["name"]
return good_name
def get_windows_path(*args):
"""Sanitize a path for windows environments
Accepts an arbitrary list of arguments and makes a clean windows path"""
return os.path.normpath(os.path.join(*args))
def find_windows_executable(bin_path, exe_name):
"""Given an executable name, search the given location for an executable"""
requested_path = get_windows_path(bin_path, exe_name)
if os.path.isfile(requested_path):
return requested_path
try:
pathext = os.environ["PATHEXT"]
except KeyError:
pass
else:
for ext in pathext.split(os.pathsep):
path = get_windows_path(bin_path, exe_name + ext.strip().lower())
if os.path.isfile(path):
return path
return find_executable(exe_name)
def path_to_url(path):
return Path(normalize_drive(os.path.abspath(path))).as_uri()
def normalize_path(path):
return os.path.expandvars(os.path.expanduser(
os.path.normcase(os.path.normpath(os.path.abspath(str(path))))
))
def get_url_name(url):
if not isinstance(url, str):
return
return urllib3_util.parse_url(url).host
def get_canonical_names(packages):
"""Canonicalize a list of packages and return a set of canonical names"""
from .vendor.packaging.utils import canonicalize_name
if not isinstance(packages, Sequence):
if not isinstance(packages, str):
return packages
packages = [packages]
return {canonicalize_name(pkg) for pkg in packages if pkg}
def walk_up(bottom):
"""Mimic os.walk, but walk 'up' instead of down the directory tree.
From: https://gist.github.com/zdavkeos/1098474
"""
bottom = os.path.realpath(bottom)
# Get files in current dir.
try:
names = os.listdir(bottom)
except Exception:
return
dirs, nondirs = [], []
for name in names:
if os.path.isdir(os.path.join(bottom, name)):
dirs.append(name)
else:
nondirs.append(name)
yield bottom, dirs, nondirs
new_path = os.path.realpath(os.path.join(bottom, ".."))
# See if we are at the top.
if new_path == bottom:
return
yield from walk_up(new_path)
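# Illustrative sketch: walking up from /home/user/project yields
#   ("/home/user/project", [dirs], [files]), ("/home/user", ...), ("/home", ...), ("/", ...)
# which find_requirements() below relies on to search parent directories.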
def find_requirements(max_depth=3):
"""Returns the path of a requirements.txt file in parent directories."""
i = 0
for c, d, f in walk_up(os.getcwd()):
i += 1
if i < max_depth:
r = os.path.join(c, "requirements.txt")
if os.path.isfile(r):
return r
raise RuntimeError("No requirements.txt found!")
# Borrowed from Pew.
# See https://github.com/berdario/pew/blob/master/pew/_utils.py#L82
@contextmanager
def temp_environ():
"""Allow the ability to set os.environ temporarily"""
environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(environ)
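# Usage sketch (illustrative): any changes made to os.environ inside the block are
# rolled back on exit.
#   with temp_environ():
#       os.environ["PIP_NO_INPUT"] = "1"
#       ...  # spawn subprocesses that should see the temporary environment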
@contextmanager
def temp_path():
"""Allow the ability to set os.environ temporarily"""
path = [p for p in sys.path]
try:
yield
finally:
sys.path = [p for p in path]
def load_path(python):
import json
from pathlib import Path
python = Path(python).as_posix()
json_dump_command = "import json, sys; print(json.dumps(sys.path))"
c = subprocess_run([python, "-c", json_dump_command])
if c.returncode == 0:
return json.loads(c.stdout.strip())
else:
return []
def is_valid_url(url):
"""Checks if a given string is an url"""
pieces = urlparse(url)
return all([pieces.scheme, pieces.netloc])
def is_pypi_url(url):
return bool(re.match(r"^http[s]?:\/\/pypi(?:\.python)?\.org\/simple[\/]?$", url))
def replace_pypi_sources(sources, pypi_replacement_source):
return [pypi_replacement_source] + [
source for source in sources if not is_pypi_url(source["url"])
]
def create_mirror_source(url):
return {
"url": url,
"verify_ssl": url.startswith("https://"),
"name": urlparse(url).hostname,
}
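# Example (illustrative; the URL is hypothetical):
#   create_mirror_source("https://mirror.example.com/simple")
#   -> {"url": "https://mirror.example.com/simple", "verify_ssl": True,
#       "name": "mirror.example.com"}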
def download_file(url, filename, max_retries=1):
"""Downloads file from url to a path with filename"""
r = _get_requests_session(max_retries).get(url, stream=True)
if not r.ok:
raise OSError("Unable to download file")
with open(filename, "wb") as f:
f.write(r.content)
def normalize_drive(path):
"""Normalize drive in path so they stay consistent.
This currently only affects local drives on Windows, which can be
identified with either upper or lower cased drive names. The case is
always converted to uppercase because it seems to be preferred.
See: <https://github.com/pypa/pipenv/issues/1218>
"""
if os.name != "nt" or not isinstance(path, str):
return path
drive, tail = os.path.splitdrive(path)
# Only match (lower cased) local drives (e.g. 'c:'), not UNC mounts.
if drive.islower() and len(drive) == 2 and drive[1] == ":":
return f"{drive.upper()}{tail}"
return path
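# Example (Windows only, illustrative): normalize_drive("c:\\Users\\dev") returns
# "C:\\Users\\dev"; UNC paths and non-Windows paths are returned unchanged.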
def is_readonly_path(fn):
"""Check if a provided path exists and is readonly.
Permissions check is `bool(path.stat & stat.S_IREAD)` or `not os.access(path, os.W_OK)`
"""
if os.path.exists(fn):
return (os.stat(fn).st_mode & stat.S_IREAD) or not os.access(fn, os.W_OK)
return False
def set_write_bit(fn):
if isinstance(fn, str) and not os.path.exists(fn):
return
os.chmod(fn, stat.S_IWRITE | stat.S_IWUSR | stat.S_IRUSR)
return
def rmtree(directory, ignore_errors=False):
shutil.rmtree(
directory, ignore_errors=ignore_errors, onerror=handle_remove_readonly
)
def handle_remove_readonly(func, path, exc):
"""Error handler for shutil.rmtree.
Windows source repo folders are read-only by default, so this error handler
attempts to set them as writeable and then proceed with deletion."""
# Check for read-only attribute
default_warning_message = (
"Unable to remove file due to permissions restriction: {!r}"
)
# split the initial exception out into its type, exception, and traceback
exc_type, exc_exception, exc_tb = exc
if is_readonly_path(path):
# Apply write permission and call original function
set_write_bit(path)
try:
func(path)
except OSError as e:
if e.errno in [errno.EACCES, errno.EPERM]:
warnings.warn(default_warning_message.format(path), ResourceWarning)
return
if exc_exception.errno in [errno.EACCES, errno.EPERM]:
warnings.warn(default_warning_message.format(path), ResourceWarning)
return
raise exc
def escape_cmd(cmd):
if any(special_char in cmd for special_char in ["<", ">", "&", ".", "^", "|", "?"]):
cmd = f'\"{cmd}\"'
return cmd
def safe_expandvars(value):
"""Call os.path.expandvars if value is a string, otherwise do nothing.
"""
if isinstance(value, str):
return os.path.expandvars(value)
return value
def get_vcs_deps(
project=None,
dev=False,
pypi_mirror=None,
packages=None,
reqs=None
):
from .vendor.requirementslib.models.requirements import Requirement
section = "vcs_dev_packages" if dev else "vcs_packages"
if reqs is None:
reqs = []
lockfile = {}
if not reqs:
if not project and not packages:
raise ValueError(
"Must supply either a project or a pipfile section to lock vcs dependencies."
)
if not packages:
try:
packages = getattr(project, section)
except AttributeError:
return [], []
reqs = [Requirement.from_pipfile(name, entry) for name, entry in packages.items()]
result = []
for requirement in reqs:
name = requirement.normalized_name
commit_hash = None
if requirement.is_vcs:
try:
with temp_path(), locked_repository(requirement) as repo:
from pipenv.vendor.requirementslib.models.requirements import (
Requirement
)
# from distutils.sysconfig import get_python_lib
# sys.path = [repo.checkout_directory, "", ".", get_python_lib(plat_specific=0)]
commit_hash = repo.get_commit_hash()
name = requirement.normalized_name
lockfile[name] = requirement.pipfile_entry[1]
lockfile[name]['ref'] = commit_hash
result.append(requirement)
except OSError:
continue
return result, lockfile
def translate_markers(pipfile_entry):
"""Take a pipfile entry and normalize its markers
Provide a pipfile entry which may have 'markers' as a key or it may have
any valid key from `packaging.markers.marker_context.keys()` and standardize
the format into {'markers': 'key == "some_value"'}.
:param pipfile_entry: A dictionary of keys and values representing a pipfile entry
:type pipfile_entry: dict
:returns: A normalized dictionary with cleaned marker entries
"""
if not isinstance(pipfile_entry, Mapping):
raise TypeError("Entry is not a pipfile formatted mapping.")
from .vendor.packaging.markers import default_environment
from .vendor.vistir.misc import dedup
allowed_marker_keys = ["markers"] + list(default_environment().keys())
provided_keys = list(pipfile_entry.keys()) if hasattr(pipfile_entry, "keys") else []
pipfile_markers = set(provided_keys) & set(allowed_marker_keys)
new_pipfile = dict(pipfile_entry).copy()
marker_set = set()
if "markers" in new_pipfile:
marker_str = new_pipfile.pop("markers")
if marker_str:
marker = str(Marker(marker_str))
if 'extra' not in marker:
marker_set.add(marker)
for m in pipfile_markers:
entry = f"{pipfile_entry[m]}"
if m != "markers":
marker_set.add(str(Marker(f"{m} {entry}")))
new_pipfile.pop(m)
if marker_set:
new_pipfile["markers"] = str(Marker(" or ".join(
f"{s}" if " and " in s else s
for s in sorted(dedup(marker_set))
))).replace('"', "'")
return new_pipfile
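# Hedged example: marker-style keys are folded into a single "markers" string, e.g.
#   translate_markers({"version": "*", "os_name": "== 'nt'",
#                      "markers": "python_version >= '3.6'"})
#   -> {"version": "*", "markers": "os_name == 'nt' or python_version >= '3.6'"}
# (clause order follows the sorted, de-duplicated marker set).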
def clean_resolved_dep(dep, is_top_level=False, pipfile_entry=None):
from .vendor.requirementslib.utils import is_vcs
name = pep423_name(dep["name"])
lockfile = {}
# We use this to determine if there are any markers on top level packages
# So we can make sure those win out during resolution if the packages reoccur
if "version" in dep and dep["version"] and not dep.get("editable", False):
version = "{}".format(dep["version"])
if not version.startswith("=="):
version = f"=={version}"
lockfile["version"] = version
if is_vcs(dep):
ref = dep.get("ref", None)
if ref is not None:
lockfile["ref"] = ref
vcs_type = next(iter(k for k in dep.keys() if k in VCS_LIST), None)
if vcs_type:
lockfile[vcs_type] = dep[vcs_type]
if "subdirectory" in dep:
lockfile["subdirectory"] = dep["subdirectory"]
for key in ["hashes", "index", "extras", "editable"]:
if key in dep:
lockfile[key] = dep[key]
# In case we lock a uri or a file when the user supplied a path
# remove the uri or file keys from the entry and keep the path
fs_key = next(iter(k for k in ["path", "file"] if k in dep), None)
pipfile_fs_key = None
if pipfile_entry:
pipfile_fs_key = next(iter(k for k in ["path", "file"] if k in pipfile_entry), None)
if fs_key and pipfile_fs_key and fs_key != pipfile_fs_key:
lockfile[pipfile_fs_key] = pipfile_entry[pipfile_fs_key]
elif fs_key is not None:
lockfile[fs_key] = dep[fs_key]
# If a package is **PRESENT** in the pipfile but has no markers, make sure we
# **NEVER** include markers in the lockfile
if "markers" in dep and dep.get("markers", "").strip():
# First, handle the case where there is no top level dependency in the pipfile
if not is_top_level:
translated = translate_markers(dep).get("markers", "").strip()
if translated:
try:
lockfile["markers"] = translated
except TypeError:
pass
# otherwise make sure we are prioritizing whatever the pipfile says about the markers
# If the pipfile says nothing, then we should put nothing in the lockfile
else:
try:
pipfile_entry = translate_markers(pipfile_entry)
lockfile["markers"] = pipfile_entry.get("markers")
except TypeError:
pass
return {name: lockfile}
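# Hedged example (hypothetical values): a resolved dependency dict such as
#   {"name": "requests", "version": "2.25.1", "hashes": ["sha256:..."]}
# is cleaned into {"requests": {"version": "==2.25.1", "hashes": ["sha256:..."]}},
# with markers only carried over according to the top-level Pipfile entry.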
def get_workon_home():
workon_home = os.environ.get("WORKON_HOME")
if not workon_home:
if os.name == "nt":
workon_home = "~/.virtualenvs"
else:
workon_home = os.path.join(
os.environ.get("XDG_DATA_HOME", "~/.local/share"), "virtualenvs"
)
# Create directory if it does not already exist
expanded_path = Path(os.path.expandvars(workon_home)).expanduser()
mkdir_p(str(expanded_path))
return expanded_path
def is_virtual_environment(path):
"""Check if a given path is a virtual environment's root.
This is done by checking if the directory contains a Python executable in
its bin/Scripts directory. Not technically correct, but good enough for
general usage.
"""
if not path.is_dir():
return False
for bindir_name in ('bin', 'Scripts'):
for python in path.joinpath(bindir_name).glob('python*'):
try:
exeness = python.is_file() and os.access(str(python), os.X_OK)
except OSError:
exeness = False
if exeness:
return True
return False
@contextmanager
def locked_repository(requirement):
from .vendor.vistir.path import create_tracked_tempdir
if not requirement.is_vcs:
return
original_base = os.environ.pop("PIP_SHIMS_BASE_MODULE", None)
os.environ["PIP_SHIMS_BASE_MODULE"] = fs_str("pipenv.patched.notpip")
src_dir = create_tracked_tempdir(prefix="pipenv-", suffix="-src")
try:
with requirement.req.locked_vcs_repo(src_dir=src_dir) as repo:
yield repo
finally:
if original_base:
os.environ["PIP_SHIMS_BASE_MODULE"] = original_base
@contextmanager
def chdir(path):
"""Context manager to change working directories."""
if not path:
return
prev_cwd = Path.cwd().as_posix()
if isinstance(path, Path):
path = path.as_posix()
os.chdir(str(path))
try:
yield
finally:
os.chdir(prev_cwd)
def looks_like_dir(path):
seps = (sep for sep in (os.path.sep, os.path.altsep) if sep is not None)
return any(sep in path for sep in seps)
def parse_indexes(line, strict=False):
from argparse import ArgumentParser
comment_re = re.compile(r"(?:^|\s+)#.*$")
line = comment_re.sub("", line)
parser = ArgumentParser("indexes", allow_abbrev=False)
parser.add_argument("-i", "--index-url", dest="index")
parser.add_argument("--extra-index-url", dest="extra_index")
parser.add_argument("--trusted-host", dest="trusted_host")
args, remainder = parser.parse_known_args(line.split())
index = args.index
extra_index = args.extra_index
trusted_host = args.trusted_host
if strict and sum(
bool(arg) for arg in (index, extra_index, trusted_host, remainder)
) > 1:
raise ValueError("Index arguments must be on their own lines.")
return index, extra_index, trusted_host, remainder
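# Example (illustrative): parse_indexes("-i https://example.com/simple/ requests")
#   -> ("https://example.com/simple/", None, None, ["requests"])
# With strict=True the same line raises ValueError, because an index option and a
# requirement appear together on one line.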
@contextmanager
def sys_version(version_tuple):
"""
Set a temporary sys.version_info tuple
:param version_tuple: a fake sys.version_info tuple
"""
old_version = sys.version_info
sys.version_info = version_tuple
yield
sys.version_info = old_version
def add_to_set(original_set, element):
"""Given a set and some arbitrary element, add the element(s) to the set"""
if not element:
return original_set
if isinstance(element, Set):
original_set |= element
elif isinstance(element, (list, tuple)):
original_set |= set(element)
else:
original_set.add(element)
return original_set
def is_url_equal(url, other_url):
# type: (str, str) -> bool
"""
Compare two urls by scheme, host, and path, ignoring auth
:param str url: The initial URL to compare
:param str other_url: Second url to compare to the first
:return: Whether the URLs are equal without **auth**, **query**, and **fragment**
:rtype: bool
>>> is_url_equal("https://user:[email protected]/some/path?some_query",
"https://user2:[email protected]/some/path")
True
>>> is_url_equal("https://user:[email protected]/some/path?some_query",
"https://mydomain.com/some?some_query")
False
"""
if not isinstance(url, str):
raise TypeError(f"Expected string for url, received {url!r}")
if not isinstance(other_url, str):
raise TypeError(f"Expected string for url, received {other_url!r}")
parsed_url = urllib3_util.parse_url(url)
parsed_other_url = urllib3_util.parse_url(other_url)
unparsed = parsed_url._replace(auth=None, query=None, fragment=None).url
unparsed_other = parsed_other_url._replace(auth=None, query=None, fragment=None).url
return unparsed == unparsed_other
@lru_cache()
def make_posix(path):
# type: (str) -> str
"""
Convert a path with possible windows-style separators to a posix-style path
(with **/** separators instead of **\\** separators).
:param Text path: A path to convert.
:return: A converted posix-style path
:rtype: Text
>>> make_posix("c:/users/user/venvs/some_venv\\Lib\\site-packages")
"c:/users/user/venvs/some_venv/Lib/site-packages"
>>> make_posix("c:\\users\\user\\venvs\\some_venv")
"c:/users/user/venvs/some_venv"
"""
if not isinstance(path, str):
raise TypeError(f"Expected a string for path, received {path!r}...")
starts_with_sep = path.startswith(os.path.sep)
separated = normalize_path(path).split(os.path.sep)
if isinstance(separated, (list, tuple)):
path = posixpath.join(*separated)
if starts_with_sep:
path = f"/{path}"
return path
def get_pipenv_dist(pkg="pipenv", pipenv_site=None):
from .resolver import find_site_path
pipenv_libdir = os.path.dirname(os.path.abspath(__file__))
if pipenv_site is None:
pipenv_site = os.path.dirname(pipenv_libdir)
pipenv_dist, _ = find_site_path(pkg, site_dir=pipenv_site)
return pipenv_dist
def find_python(finder, line=None):
"""
Given a `pythonfinder.Finder` instance and an optional line, find a corresponding python
:param finder: A :class:`pythonfinder.Finder` instance to use for searching
:type finder: :class:`pythonfinder.Finder`
:param str line: A version, path, name, or nothing, defaults to None
:return: A path to python
:rtype: str
"""
if line and not isinstance(line, str):
raise TypeError(
f"Invalid python search type: expected string, received {line!r}"
)
if line and os.path.isabs(line):
if os.name == "nt":
line = make_posix(line)
return line
if not finder:
from pipenv.vendor.pythonfinder import Finder
finder = Finder(global_search=True)
if not line:
result = next(iter(finder.find_all_python_versions()), None)
elif line and line[0].isdigit() or re.match(r'[\d\.]+', line):
result = finder.find_python_version(line)
else:
result = finder.find_python_version(name=line)
if not result:
result = finder.which(line)
if not result and not line.startswith("python"):
line = f"python{line}"
result = find_python(finder, line)
if result:
if not isinstance(result, str):
return result.path.as_posix()
return result
return
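# Usage sketch (illustrative): find_python(None, "3.9") searches the discovered
# interpreters for a matching 3.9 python and returns its path as a string, while an
# absolute path argument is returned as-is (posix-normalized on Windows).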
def is_python_command(line):
"""
Given an input, checks whether the input is a request for python or not.
This can be a version, a python runtime name, or a generic 'python' or 'pythonX.Y'
:param str line: A potential request to find python
:returns: Whether the line is a python lookup
:rtype: bool
"""
if not isinstance(line, str):
raise TypeError(f"Not a valid command to check: {line!r}")
from pipenv.vendor.pythonfinder.utils import PYTHON_IMPLEMENTATIONS
is_version = re.match(r'\d+(\.\d+)*', line)
if (line.startswith("python") or is_version
or any(line.startswith(v) for v in PYTHON_IMPLEMENTATIONS)):
return True
# we are less sure about this but we can guess
if line.startswith("py"):
return True
return False
@contextlib.contextmanager
def interrupt_handled_subprocess(
cmd, verbose=False, return_object=True, write_to_stdout=False, combine_stderr=True,
block=True, nospin=True, env=None
):
"""Given a :class:`subprocess.Popen` instance, wrap it in exception handlers.
Terminates the subprocess when and if a `SystemExit` or `KeyboardInterrupt` are
processed.
Arguments:
:param str cmd: A command to run
:param bool verbose: Whether to run with verbose mode enabled, default False
:param bool return_object: Whether to return a subprocess instance or a 2-tuple, default True
:param bool write_to_stdout: Whether to write directly to stdout, default False
:param bool combine_stderr: Whether to combine stdout and stderr, default True
:param bool block: Whether the subprocess should be a blocking subprocess, default True
:param bool nospin: Whether to suppress the spinner with the subprocess, default True
:param Optional[Dict[str, str]] env: A dictionary to merge into the subprocess environment
:return: A subprocess, wrapped in exception handlers, as a context manager
:rtype: :class:`subprocess.Popen` obj: An instance of a running subprocess
"""
obj = run(
cmd, verbose=verbose, return_object=True, write_to_stdout=False,
combine_stderr=False, block=True, nospin=True, env=env,
)
try:
yield obj
except (SystemExit, KeyboardInterrupt):
if os.name == "nt":
os.kill(obj.pid, signal.CTRL_BREAK_EVENT)
else:
os.kill(obj.pid, signal.SIGINT)
obj.wait()
raise
def subprocess_run(
args, *, block=True, text=True, capture_output=True,
encoding="utf-8", env=None, **other_kwargs
):
"""A backward compatible version of subprocess.run().
It outputs text with the default encoding and stores all output in the returned object instead of
printing onto stdout.
"""
_env = os.environ.copy()
_env["PYTHONIOENCODING"] = encoding
if env:
_env.update(env)
other_kwargs["env"] = _env
if capture_output:
other_kwargs['stdout'] = subprocess.PIPE
other_kwargs['stderr'] = subprocess.PIPE
if block:
return subprocess.run(
args, universal_newlines=text,
encoding=encoding, **other_kwargs
)
else:
return subprocess.Popen(
args, universal_newlines=text,
encoding=encoding, **other_kwargs
)
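# Usage sketch (illustrative): a blocking call returns a CompletedProcess with
# captured text output, mirroring subprocess.run():
#   c = subprocess_run(["python", "--version"])
#   if c.returncode == 0:
#       print(c.stdout.strip())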
def cmd_list_to_shell(args):
"""Convert a list of arguments to a quoted shell command."""
return " ".join(shlex.quote(str(token)) for token in args)
|
GHSA-qc9x-gjcv-465w
|
tests/unit/test_utils.py
|
@@ -142,7 +142,7 @@ def test_convert_deps_to_pip_unicode():
("--extra-index-url=https://example.com/simple/", (None, "https://example.com/simple/", None, [])),
("--trusted-host=example.com", (None, None, "example.com", [])),
("# -i https://example.com/simple/", (None, None, None, [])),
- ("requests", (None, None, None, ["requests"]))
+ ("requests # -i https://example.com/simple/", (None, None, None, ["requests"])),
])
@pytest.mark.utils
def test_parse_indexes(line, result):
|
import os
import pytest
import pipenv.utils
from pipenv.exceptions import PipenvUsageError
# Pipfile format <-> requirements.txt format.
DEP_PIP_PAIRS = [
({"requests": "*"}, "requests"),
({"requests": {"extras": ["socks"], "version": "*"}}, "requests[socks]"),
({"django": ">1.10"}, "django>1.10"),
({"Django": ">1.10"}, "Django>1.10"),
({"requests": {"extras": ["socks"], "version": ">1.10"}}, "requests[socks]>1.10"),
({"requests": {"extras": ["socks"], "version": "==1.10"}}, "requests[socks]==1.10"),
(
{
"pinax": {
"git": "git://github.com/pinax/pinax.git",
"ref": "1.4",
"editable": True,
}
},
"-e git+git://github.com/pinax/[email protected]#egg=pinax",
),
(
{"pinax": {"git": "git://github.com/pinax/pinax.git", "ref": "1.4"}},
"git+git://github.com/pinax/[email protected]#egg=pinax",
),
( # Mercurial.
{
"MyProject": {
"hg": "http://hg.myproject.org/MyProject",
"ref": "da39a3ee5e6b",
}
},
"hg+http://hg.myproject.org/MyProject@da39a3ee5e6b#egg=MyProject",
),
( # SVN.
{
"MyProject": {
"svn": "svn://svn.myproject.org/svn/MyProject",
"editable": True,
}
},
"-e svn+svn://svn.myproject.org/svn/MyProject#egg=MyProject",
),
(
# Extras in url
{
"discord.py": {
"file": "https://github.com/Rapptz/discord.py/archive/async.zip",
"extras": ["voice"],
}
},
"https://github.com/Rapptz/discord.py/archive/async.zip#egg=discord.py[voice]",
),
(
{
"requests": {
"git": "https://github.com/requests/requests.git",
"ref": "master",
"extras": ["security"],
"editable": False,
}
},
"git+https://github.com/requests/requests.git@master#egg=requests[security]",
),
]
def mock_unpack(link, source_dir, download_dir, only_download=False, session=None,
hashes=None, progress_bar="off"):
return
@pytest.mark.utils
@pytest.mark.parametrize("deps, expected", DEP_PIP_PAIRS)
@pytest.mark.needs_internet
def test_convert_deps_to_pip(monkeypatch, deps, expected):
with monkeypatch.context() as m:
import pip_shims
m.setattr(pip_shims.shims, "unpack_url", mock_unpack)
if expected.startswith("Django"):
expected = expected.lower()
assert pipenv.utils.convert_deps_to_pip(deps, r=False) == [expected]
@pytest.mark.utils
@pytest.mark.parametrize(
"deps, expected",
[
# This one should be collapsed and treated as {'requests': '*'}.
({"requests": {}}, "requests"),
# Hash value should be passed into the result.
(
{
"FooProject": {
"version": "==1.2",
"hash": "sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824",
}
},
"FooProject==1.2 --hash=sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824",
),
(
{
"FooProject": {
"version": "==1.2",
"extras": ["stuff"],
"hash": "sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824",
}
},
"FooProject[stuff]==1.2 --hash=sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824",
),
(
{
"requests": {
"git": "https://github.com/requests/requests.git",
"ref": "master",
"extras": ["security"],
}
},
"git+https://github.com/requests/requests.git@master#egg=requests[security]",
),
],
)
def test_convert_deps_to_pip_one_way(deps, expected):
assert pipenv.utils.convert_deps_to_pip(deps, r=False) == [expected.lower()]
@pytest.mark.skipif(isinstance("", str), reason="don't need to test if unicode is str")
@pytest.mark.utils
def test_convert_deps_to_pip_unicode():
deps = {"django": "==1.10"}
deps = pipenv.utils.convert_deps_to_pip(deps, r=False)
assert deps[0] == "django==1.10"
@pytest.mark.parametrize("line,result", [
("-i https://example.com/simple/", ("https://example.com/simple/", None, None, [])),
("--extra-index-url=https://example.com/simple/", (None, "https://example.com/simple/", None, [])),
("--trusted-host=example.com", (None, None, "example.com", [])),
("# -i https://example.com/simple/", (None, None, None, [])),
("requests", (None, None, None, ["requests"]))
])
@pytest.mark.utils
def test_parse_indexes(line, result):
assert pipenv.utils.parse_indexes(line) == result
@pytest.mark.parametrize("line", [
"-i https://example.com/simple/ --extra-index-url=https://extra.com/simple/",
"--extra-index-url https://example.com/simple/ --trusted-host=example.com",
"requests -i https://example.com/simple/",
])
@pytest.mark.utils
def test_parse_indexes_individual_lines(line):
with pytest.raises(ValueError):
pipenv.utils.parse_indexes(line, strict=True)
class TestUtils:
"""Test utility functions in pipenv"""
@pytest.mark.utils
@pytest.mark.parametrize(
"version, specified_ver, expected",
[
("*", "*", True),
("2.1.6", "==2.1.4", False),
("20160913", ">=20140815", True),
(
"1.4",
{"svn": "svn://svn.myproj.org/svn/MyProj", "version": "==1.4"},
True,
),
("2.13.0", {"extras": ["socks"], "version": "==2.12.4"}, False),
],
)
def test_is_required_version(self, version, specified_ver, expected):
assert pipenv.utils.is_required_version(version, specified_ver) is expected
@pytest.mark.utils
@pytest.mark.parametrize(
"entry, expected",
[
({"git": "package.git", "ref": "v0.0.1"}, True),
({"hg": "https://package.com/package", "ref": "v1.2.3"}, True),
("*", False),
({"some_value": 5, "other_value": object()}, False),
("package", False),
("git+https://github.com/requests/requests.git#egg=requests", True),
("[email protected]:requests/requests.git#egg=requests", True),
("gitdb2", False),
],
)
@pytest.mark.vcs
def test_is_vcs(self, entry, expected):
from pipenv.vendor.requirementslib.utils import is_vcs
assert is_vcs(entry) is expected
@pytest.mark.utils
def test_python_version_from_bad_path(self):
assert pipenv.utils.python_version("/fake/path") is None
@pytest.mark.utils
def test_python_version_from_non_python(self):
assert pipenv.utils.python_version("/dev/null") is None
@pytest.mark.utils
@pytest.mark.parametrize(
"version_output, version",
[
("Python 3.6.2", "3.6.2"),
("Python 3.6.2 :: Continuum Analytics, Inc.", "3.6.2"),
("Python 3.6.20 :: Continuum Analytics, Inc.", "3.6.20"),
(
"Python 3.5.3 (3f6eaa010fce78cc7973bdc1dfdb95970f08fed2, Jan 13 2018, 18:14:01)\n[PyPy 5.10.1 with GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)]",
"3.5.3",
),
],
)
# @patch(".vendor.pythonfinder.utils.get_python_version")
def test_python_version_output_variants(
self, monkeypatch, version_output, version
):
def mock_version(path):
return version_output.split()[1]
monkeypatch.setattr("pipenv.vendor.pythonfinder.utils.get_python_version", mock_version)
assert pipenv.utils.python_version("some/path") == version
@pytest.mark.utils
@pytest.mark.windows
@pytest.mark.skipif(os.name != "nt", reason="Windows test only")
def test_windows_shellquote(self):
test_path = r"C:\Program Files\Python36\python.exe"
expected_path = '"C:\\\\Program Files\\\\Python36\\\\python.exe"'
assert pipenv.utils.escape_grouped_arguments(test_path) == expected_path
@pytest.mark.utils
def test_is_valid_url(self):
url = "https://github.com/psf/requests.git"
not_url = "something_else"
assert pipenv.utils.is_valid_url(url)
assert pipenv.utils.is_valid_url(not_url) is False
@pytest.mark.utils
@pytest.mark.needs_internet
def test_download_file(self):
url = "https://github.com/pypa/pipenv/blob/master/README.md"
output = "test_download.md"
pipenv.utils.download_file(url, output)
assert os.path.exists(output)
os.remove(output)
@pytest.mark.utils
@pytest.mark.parametrize('line, expected', [
("python", True),
("python3.7", True),
("python2.7", True),
("python2", True),
("python3", True),
("pypy3", True),
("anaconda3-5.3.0", True),
("which", False),
("vim", False),
("miniconda", True),
("micropython", True),
("ironpython", True),
("jython3.5", True),
("2", True),
("2.7", True),
("3.7", True),
("3", True)
])
def test_is_python_command(self, line, expected):
assert pipenv.utils.is_python_command(line) == expected
@pytest.mark.utils
def test_new_line_end_of_toml_file(this):
# toml file that needs clean up
toml = """
[dev-packages]
"flake8" = ">=3.3.0,<4"
pytest = "*"
mock = "*"
sphinx = "<=1.5.5"
"-e ." = "*"
twine = "*"
"sphinx-click" = "*"
"pytest-xdist" = "*"
"""
new_toml = pipenv.utils.cleanup_toml(toml)
# testing if the end of the generated file contains a newline
assert new_toml[-1] == "\n"
@pytest.mark.utils
@pytest.mark.parametrize(
"input_path, expected",
[
(
"c:\\Program Files\\Python36\\python.exe",
"C:\\Program Files\\Python36\\python.exe",
),
(
"C:\\Program Files\\Python36\\python.exe",
"C:\\Program Files\\Python36\\python.exe",
),
("\\\\host\\share\\file.zip", "\\\\host\\share\\file.zip"),
("artifacts\\file.zip", "artifacts\\file.zip"),
(".\\artifacts\\file.zip", ".\\artifacts\\file.zip"),
("..\\otherproject\\file.zip", "..\\otherproject\\file.zip"),
],
)
@pytest.mark.skipif(os.name != "nt", reason="Windows file paths tested")
def test_win_normalize_drive(self, input_path, expected):
assert pipenv.utils.normalize_drive(input_path) == expected
@pytest.mark.utils
@pytest.mark.parametrize(
"input_path, expected",
[
("/usr/local/bin/python", "/usr/local/bin/python"),
("artifacts/file.zip", "artifacts/file.zip"),
("./artifacts/file.zip", "./artifacts/file.zip"),
("../otherproject/file.zip", "../otherproject/file.zip"),
],
)
@pytest.mark.skipif(os.name == "nt", reason="*nix file paths tested")
def test_nix_normalize_drive(self, input_path, expected):
assert pipenv.utils.normalize_drive(input_path) == expected
@pytest.mark.utils
@pytest.mark.parametrize(
"sources, expected_args",
[
(
[{"url": "https://test.example.com/simple", "verify_ssl": True}],
["-i", "https://test.example.com/simple"],
),
(
[{"url": "https://test.example.com/simple", "verify_ssl": False}],
[
"-i",
"https://test.example.com/simple",
"--trusted-host",
"test.example.com",
],
),
(
[{"url": "https://test.example.com:12345/simple", "verify_ssl": False}],
[
"-i",
"https://test.example.com:12345/simple",
"--trusted-host",
"test.example.com:12345",
],
),
(
[
{"url": "https://pypi.org/simple"},
{"url": "https://custom.example.com/simple"},
],
[
"-i",
"https://pypi.org/simple",
"--extra-index-url",
"https://custom.example.com/simple",
],
),
(
[
{"url": "https://pypi.org/simple"},
{"url": "https://custom.example.com/simple", "verify_ssl": False},
],
[
"-i",
"https://pypi.org/simple",
"--extra-index-url",
"https://custom.example.com/simple",
"--trusted-host",
"custom.example.com",
],
),
(
[
{"url": "https://pypi.org/simple"},
{"url": "https://custom.example.com:12345/simple", "verify_ssl": False},
],
[
"-i",
"https://pypi.org/simple",
"--extra-index-url",
"https://custom.example.com:12345/simple",
"--trusted-host",
"custom.example.com:12345",
],
),
(
[
{"url": "https://pypi.org/simple"},
{
"url": "https://user:[email protected]/simple",
"verify_ssl": False,
},
],
[
"-i",
"https://pypi.org/simple",
"--extra-index-url",
"https://user:[email protected]/simple",
"--trusted-host",
"custom.example.com",
],
),
(
[
{"url": "https://pypi.org/simple"},
{"url": "https://user:[email protected]/simple"},
],
[
"-i",
"https://pypi.org/simple",
"--extra-index-url",
"https://user:[email protected]/simple",
],
),
(
[
{
"url": "https://user:[email protected]/simple",
"verify_ssl": False,
},
],
[
"-i",
"https://user:[email protected]/simple",
"--trusted-host",
"custom.example.com",
],
),
],
)
def test_prepare_pip_source_args(self, sources, expected_args):
assert (
pipenv.utils.prepare_pip_source_args(sources, pip_args=None)
== expected_args
)
@pytest.mark.utils
def test_invalid_prepare_pip_source_args(self):
sources = [{}]
with pytest.raises(PipenvUsageError):
pipenv.utils.prepare_pip_source_args(sources, pip_args=None)
@pytest.mark.utils
def test_parse_python_version(self):
ver = pipenv.utils.parse_python_version("Python 3.6.5\n")
assert ver == {"major": "3", "minor": "6", "micro": "5"}
@pytest.mark.utils
def test_parse_python_version_suffix(self):
ver = pipenv.utils.parse_python_version("Python 3.6.5rc1\n")
assert ver == {"major": "3", "minor": "6", "micro": "5"}
@pytest.mark.utils
def test_parse_python_version_270(self):
ver = pipenv.utils.parse_python_version("Python 2.7\n")
assert ver == {"major": "2", "minor": "7", "micro": "0"}
@pytest.mark.utils
def test_parse_python_version_270_garbage(self):
ver = pipenv.utils.parse_python_version("Python 2.7+\n")
assert ver == {"major": "2", "minor": "7", "micro": "0"}
|
GHSA-qc9x-gjcv-465w
|
tensorflow/python/kernel_tests/nn_ops/pooling_ops_test.py
|
@@ -2510,6 +2510,21 @@ def testAvgPoolGradInvalidInputShapeRaiseError(self):
data_format="NHWC")
self.evaluate(t)
+ def testAvgPoolGradInvalidStrideRaiseErrorProperly(self):
+ with self.assertRaises(errors_impl.InvalidArgumentError):
+ with self.cached_session():
+ orig_input_shape = [11, 9, 78, 9]
+ grad = constant_op.constant(
+ 0.1, shape=[16, 16, 16, 16], dtype=dtypes.float64)
+ t = gen_nn_ops.AvgPoolGrad(
+ orig_input_shape=orig_input_shape,
+ grad=grad,
+ ksize=[1, 40, 128, 1],
+ strides=[1, 128, 128, 30],
+ padding="SAME",
+ data_format="NHWC")
+ self.evaluate(t)
+
def GetMaxPoolFwdTest(input_size, filter_size, strides, padding):
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for pooling operations."""
import collections
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
import tensorflow.python.framework.config as config_exec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def GetDeviceScope(self, use_gpu=False):
if context.executing_eagerly():
if use_gpu and test.is_gpu_available():
return ops.device("GPU:0")
return ops.device("CPU:0")
else:
return self.session(use_gpu=use_gpu)
# TODO(jlebar): Convert the rest of this file to parameters.parameterized().
# Then remove GetTestConfigs() and rename GetTestConfigsDicts().
def GetTestConfigsDicts(v1_fn,
v2_fn=None,
one_dimensional=False,
allow_gpu=True):
# (data_format, use_gpu) tuple
if one_dimensional:
configs0 = [
("NWC", False),
("NWC", True),
("NCW", True),
]
else:
configs0 = [
("NHWC", False),
("NHWC", True),
("NCHW", True),
]
# NCHW_VECT_C only supported for max_pool.
if (v1_fn == nn_ops.max_pool or v1_fn == nn_ops.max_pool1d or
v2_fn == nn_ops.max_pool_v2 or v2_fn == gen_nn_ops.max_pool_v2):
configs0.append(("NCHW_VECT_C", True))
# (data_format, use_gpu, data_type) tuple
configs1 = []
for data_format, use_gpu in configs0:
configs1.append((data_format, use_gpu, dtypes.float32))
# In our test, VECT_C always uses float32. (It gets converted to int8 in
# the test runner.)
if data_format == "NCHW_VECT_C":
continue
configs1 += [(data_format, use_gpu, dtypes.float16),
(data_format, use_gpu, dtypes.float64)]
# Convert from tuple to dict and add v1/v2 versions.
ret = []
for data_format, use_gpu, data_type in configs1:
ret.append({
"pool_func": v1_fn,
"data_format": data_format,
"data_type": data_type,
"use_gpu": use_gpu,
"v2": False
})
if v2_fn:
ret.append({
"pool_func": v2_fn,
"data_format": data_format,
"data_type": data_type,
"use_gpu": use_gpu,
"v2": False
})
ret.append({
"pool_func": v2_fn,
"data_format": data_format,
"data_type": data_type,
"use_gpu": use_gpu,
"v2": True
})
# Filter out GPU configs if necessary.
if not allow_gpu:
ret = [c for c in ret if not c["use_gpu"]]
return ret
def GetTestConfigs(include_nchw_vect_c=False, one_dimensional=False):
"""Get all the valid tests configs to run.
Args:
include_nchw_vect_c: Whether to include NCHW_VECT_C in the test configs.
one_dimensional: If it's a 1D test
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
if one_dimensional:
test_configs = [("NWC", False), ("NWC", True)]
if test.is_gpu_available(cuda_only=True):
test_configs += [("NCW", True)]
return test_configs
test_configs = [("NHWC", False), ("NHWC", True)]
if not test.is_gpu_available(cuda_only=True):
tf_logging.info("NCHW and NCHW_VECT_C tests skipped because not run with "
"--config=cuda or no GPUs available.")
return test_configs
# "NCHW" format is currently supported exclusively on CUDA GPUs.
test_configs += [("NCHW", True)]
if include_nchw_vect_c:
if test.is_gpu_available(
cuda_only=True, min_cuda_compute_capability=(6, 1)):
test_configs += [("NCHW_VECT_C", True)]
else:
tf_logging.info("NCHW_VECT_C test skipped because no GPUs with "
"compute capability >= 6.1 are available.")
return test_configs
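# For example, on a machine with a CUDA GPU, GetTestConfigs() returns
# [("NHWC", False), ("NHWC", True), ("NCHW", True)], plus ("NCHW_VECT_C", True)
# when include_nchw_vect_c=True and the GPU has compute capability >= 6.1.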
def GetShrunkInceptionMaxPoolShapes(shrink=30):
"""Iterator for some of the max pool ops in the Inception 2015 model.
Args:
shrink: Factor to shrink depth relative to Inception.
Yields:
Tuple (name, input_size, filter_size, out_size, strides, padding)
"""
names = ["maxpool2", "maxpool3", "maxpool4", "maxpool5"]
input_sizes = [[32, 71, 71, 192], [32, 35, 35, 288], [32, 17, 17, 1248],
[32, 8, 8, 2048]]
filter_sizes = [[1, 3, 3, 1], [1, 3, 3, 1], [1, 3, 3, 1], [1, 3, 3, 1]]
output_sizes = [[32, 35, 35, 192], [32, 17, 17, 288], [32, 8, 8, 1248],
[32, 8, 8, 2048]]
strides = [[1, 2, 2, 1], [1, 2, 2, 1], [1, 2, 2, 1], [1, 1, 1, 1]]
# Shrink each depth value
for i in input_sizes:
i[3] //= shrink
for o in output_sizes:
o[3] //= shrink
paddings = ["VALID", "VALID", "VALID", "SAME"]
for n, i, f, o, s, p in zip(names, input_sizes, filter_sizes, output_sizes,
strides, paddings):
yield n, i, f, o, s, p
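# For example, with the default shrink=30 the first yielded tuple is
# ("maxpool2", [32, 71, 71, 6], [1, 3, 3, 1], [32, 35, 35, 6],
#  [1, 2, 2, 1], "VALID"), since 192 // 30 == 6.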
@test_util.with_eager_op_as_function
class PoolingTest(test.TestCase, parameterized.TestCase):
def _isMaxPool(self, func):
return func in (nn_ops.max_pool, nn_ops.max_pool_v2)
def _VerifyOneType(self, pool_func, input_sizes, ksize, strides, padding,
data_format, data_type, expected, use_gpu, v2,
use_negative_input=False):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Function to be called, e.g. nn_ops.max_pool or
        nn_ops.avg_pool.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
data_format: The data format we use to run the pooling operation.
data_type: The data type to use to run the pooling operation.
expected: An array containing the expected operation outputs.
use_gpu: Whether we are running on GPU.
v2: Whether to use v2 version.
use_negative_input: If the input values should be negative.
"""
# Check that this test is compatible with the hardware we have. (Really
# this should be done in GetTestConfigsDicts(), but when that runs, we
# haven't initialized enough of TF to know what our hardware is!)
if use_gpu and not test.is_gpu_available():
self.skipTest("No GPU is available.")
if use_gpu and data_type == dtypes.float64 and test.is_built_with_rocm():
self.skipTest("ROCm pooling ops don't support float64.")
if use_gpu and data_format == "NCHW_VECT_C" and not test.is_gpu_available(
cuda_only=True, min_cuda_compute_capability=(6, 1)):
self.skipTest("NCHW_VECT_C requires sm61+.")
if v2 and data_format != "NHWC":
self.skipTest("v2 not supported for %s" % data_format)
if v2 and not isinstance(padding, str):
self.skipTest("non-constant ksize/strides requires nonexplicit padding")
if data_format == "NCHW_VECT_C":
if data_type != dtypes.float32:
self.skipTest("quantization to qint8 not implemented for %r" %
data_type)
if input_sizes[-1] % 4 != 0:
self.skipTest("Skipping test for depth %d" % input_sizes[-1])
total_size = 1
for s in input_sizes:
total_size *= s
tf_logging.info("Running %s test. %r %r %d %r %r %r %s", data_format, v2,
input_sizes, total_size, pool_func, ksize, strides,
data_type)
# Initializes the input tensor with array containing incrementing
# numbers from 1, wrapping round to -127 after 127 to support int8.
y = -1 if use_negative_input else 1
x = [(((f + 128) % 255) - 127)*y for f in range(total_size)]
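    # For example: f=0 -> 1, f=126 -> 127, f=127 -> -127, f=128 -> -126
    # (each multiplied by -1 when use_negative_input is set).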
with self.cached_session(use_gpu=use_gpu):
t = constant_op.constant(x, shape=input_sizes, dtype=data_type)
if data_format in ("NCHW", "NCHW_VECT_C", "NCW"):
if data_format == "NCHW_VECT_C":
t = test_util.NHWCToNCHW_VECT_C(t)
t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
else:
t = test_util.NHWCToNCHW(t)
ksize = test_util.NHWCToNCHW(ksize)
strides = test_util.NHWCToNCHW(strides)
if isinstance(padding, list):
padding = test_util.NHWCToNCHW(padding)
ksize_placeholder = array_ops.placeholder(dtypes.int32, shape=[4])
strides_placeholder = array_ops.placeholder(dtypes.int32, shape=[4])
if v2:
t = pool_func(
t,
ksize=ksize_placeholder,
strides=strides_placeholder,
padding=padding,
data_format=data_format)
else:
t = pool_func(
t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW_VECT_C":
t = gen_array_ops.dequantize(t, -128, 127)
t = test_util.NCHW_VECT_CToNHWC(t)
elif data_format == "NCHW":
t = test_util.NCHWToNHWC(t)
if v2:
actual = t.eval(feed_dict={
ksize_placeholder: ksize,
strides_placeholder: strides
})
else:
actual = self.evaluate(t)
self.assertShapeEqual(actual, t)
self.assertAllCloseAccordingToType(expected, actual.flatten())
def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,
data_format, expected, use_gpu, v2,
use_negative_input=False):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Function to be called, e.g. nn_ops.max_pool or
        nn_ops.avg_pool.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
data_format: The data format we use to run the pooling operation.
expected: An array containing the expected operation outputs.
use_gpu: Whether we are running on GPU.
v2: Whether to use v2 version.
use_negative_input: If the input values should be negative."
"""
if data_format == "NCHW_VECT_C":
avg_pool_func = nn_ops.avg_pool
tf_logging.info("pool_func=%s", pool_func)
if pool_func == avg_pool_func:
tf_logging.info("NCHW_VECT_C not yet implemented for avg_pool")
return
if (self._isMaxPool(pool_func) and isinstance(padding, list)):
tf_logging.info("NCHW_VECT_C not yet implemented for max pool" +
" with explicit padding")
return
self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
data_format, dtypes.float32, expected, use_gpu, v2,
use_negative_input)
if not test.is_built_with_rocm():
# double datatype is not supported for pooling ops on the ROCm platform
self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
data_format, dtypes.float64, expected, use_gpu, v2,
use_negative_input)
if not use_gpu or test_util.GpuSupportsHalfMatMulAndConv():
self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
data_format, dtypes.float16, expected, use_gpu, v2,
use_negative_input)
def _VerifyValues(self,
pool_func,
input_sizes,
ksize,
strides,
padding,
expected,
use_gpu,
v2=False,
one_dim=False,
use_negative_input=False):
"""Verifies the output values of the pooling function.
Args:
      pool_func: Function to be called, e.g. nn_ops.max_pool or
        nn_ops.avg_pool.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
expected: An array containing the expected operation outputs.
use_gpu: Whether we are running on GPU.
v2: Whether to use v2 version.
one_dim: If one dimensional pools should be done instead of two
dimensional pools.
use_negative_input: If the input values should be negative.
"""
for (data_format, use_gpu_2) in GetTestConfigs(
include_nchw_vect_c=True, one_dimensional=one_dim):
if use_gpu_2 == use_gpu:
self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding,
data_format, expected, use_gpu, v2,
use_negative_input)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolValidPadding(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=[7.0, 8.0, 9.0],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolEmpty(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 3, 0],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=[],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolSamePadding(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 2, 4, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[8.5, 9.5, 10.5, 14.5, 15.5, 16.5],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolSamePaddingNonSquareWindow(self, **kwargs):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# Window of [x, x] should do:
# [avg(1.0, 2.0), avg(2.0, padded0),
# avg(3.0, 4.0), avg(4.0, padded0)]
self._VerifyOneType(
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[1.5, 2.0, 3.5, 4.0],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolSamePaddingNonSquareWindow_2(self, **kwargs):
# Window of [x,
# x] should do:
# [avg(1.0, 3.0), avg(2.0, 4.0)
# avg(3.0, padded0), avg(4.0, padded0)]
self._VerifyOneType(
input_sizes=[1, 2, 2, 1],
ksize=[1, 2, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 3.0, 3.0, 4.0],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolSamePaddingNonSquareWindowMultiBatch(self, **kwargs):
self._VerifyOneType(
input_sizes=[2, 2, 2, 2],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[
2.0, 3.0, 3.0, 4.0, 6.0, 7.0, 7.0, 8.0, 10.0, 11.0, 11.0, 12.0,
14.0, 15.0, 15.0, 16.0
],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolSamePaddingNonSquareWindowMultiBatch_2(self, **kwargs):
self._VerifyOneType(
input_sizes=[2, 2, 2, 2],
ksize=[1, 2, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[
3.0, 4.0, 5.0, 6.0, 5.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0,
13.0, 14.0, 15.0, 16.0
],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolValidPaddingUnevenStride(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
expected=[7.0, 8.0, 9.0, 16.0, 17.0, 18.0],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolValidPaddingUnevenStride_2(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
expected=[7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolSamePadding_2(self, **kwargs):
expected_output = [
11.0, 12.0, 13.0, 14.0, 19.0, 20.0, 21.0, 22.0, 43.0, 44.0, 45.0, 46.0,
51.0, 52.0, 53.0, 54.0
]
self._VerifyOneType(
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolSamePaddingPacket_4(self, **kwargs):
expected_output = [
21.0, 22.0, 23.0, 24.0, 27.0, 28.0, 29.0, 30.0, 45.0, 46.0, 47.0, 48.0,
51.0, 52.0, 53.0, 54.0
]
self._VerifyOneType(
input_sizes=[1, 4, 4, 4],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolSamePaddingPacket_8(self, **kwargs):
expected_output = [
-12.0, -11.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, 4.0, 5.0, 6.0, 7.0,
8.0, 9.0, 10.0, 11.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0,
32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, -3.5, -54.0, -53.0, -52.0,
-51.0, -50.0, -49.0, -48.0, -47.0, -38.0, -37.0, -36.0, -35.0, -34.0,
-33.0, -32.0, -31.0, -22.0, -21.0, -20.0, -19.0, -18.0, -17.0, -16.0,
-15.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, -3.0, -11.0, -10.0,
-9.0, -8.0, -7.0, -6.0, -5.0, -4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0,
12.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 33.0, 34.0, 35.0,
36.0, 37.0, 38.0, -3.5, -2.5, -85.0, -84.0, -83.0, -82.0, -81.0, -80.0,
-79.0, -78.0, -69.0, -68.0, -67.0, -66.0, -65.0, -64.0, -63.0, -62.0,
-53.0, -52.0, -51.0, -50.0, -49.0, -48.0, -47.0, -46.0, -41.0, -40.0,
-39.0, -38.0, -37.0, -36.0, -35.0, -34.0
]
self._VerifyOneType(
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolEmptyInput(self, **kwargs):
self._VerifyOneType(
input_sizes=[0, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[],
**kwargs)
@test_util.run_in_graph_and_eager_modes
def testRawAvgPoolLargeKsizeRaiseError(self):
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
with self.cached_session():
t = gen_nn_ops.avg_pool(
value=np.ones([1, 1, 1, 1]),
ksize=[1, 1e20, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME",
data_format="NHWC")
self.evaluate(t)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolValidPadding(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=[13.0, 14.0, 15.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolSamePadding(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[13.0, 14.0, 15.0, 16.0, 17.0, 18.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))
@test_util.xla_allow_fallback("XLA doesn't support explicit padding")
@test_util.run_deprecated_v1
def testMaxPoolZeroExplicitPadding(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 3, 1],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding=[[0, 0], [0, 0], [0, 0], [0, 0]],
expected=[9.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))
@test_util.xla_allow_fallback("XLA doesn't support explicit padding")
@test_util.run_deprecated_v1
def testMaxPoolNegativeInputExpPadding(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 3, 1],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding=[[0, 0], [2, 1], [2, 1], [0, 0]],
expected=[-1, -1, -1, -1],
use_negative_input=True,
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))
@test_util.xla_allow_fallback("XLA doesn't support explicit padding")
@test_util.run_deprecated_v1
def testMaxPoolExplicitPadding(self, **kwargs):
expected_output = [9.0, 9.0]
self._VerifyOneType(
input_sizes=[1, 3, 3, 1],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding=[[0, 0], [0, 2], [0, 1], [0, 0]],
expected=expected_output,
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))
@test_util.xla_allow_fallback("XLA doesn't support explicit padding")
@test_util.run_deprecated_v1
def testMaxPoolExplicitPaddingAdvanced(self, **kwargs):
expected_output = [7, 9, 11, 12, 19, 21, 23, 24, 31, 33, 35, 36, 31, 33,
35, 36]
self._VerifyOneType(
input_sizes=[1, 6, 6, 1],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding=[[0, 0], [1, 2], [2, 1], [0, 0]],
expected=expected_output,
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))
@test_util.xla_allow_fallback("XLA doesn't support explicit padding")
@test_util.run_deprecated_v1
def testMaxPoolNegativeInputExpPaddingAdv(self, **kwargs):
expected_output = [-1, -1, -3, -5, -7, -7, -9, -11, -19, -19, -21, -23, -31,
-31, -33, -35]
self._VerifyOneType(
input_sizes=[1, 6, 6, 1],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding=[[0, 0], [1, 2], [2, 1], [0, 0]],
expected=expected_output,
use_negative_input=True,
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, nn_ops.max_pool_v2))
@test_util.xla_allow_fallback("XLA doesn't support explicit padding")
@test_util.run_deprecated_v1
def testMaxPoolExplicitPadding2_(self, **kwargs):
expected_output = [9.0, 9.0]
self._VerifyOneType(
input_sizes=[1, 3, 3, 1],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding=[[0, 0], [0, 2], [0, 1], [0, 0]],
expected=expected_output,
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(
nn_ops.max_pool1d, nn_ops.max_pool_v2, one_dimensional=True))
@test_util.xla_allow_fallback("XLA doesn't support explicit padding")
@test_util.run_deprecated_v1
def testMaxPoolExplicitPadding_1D(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 1],
ksize=[1, 2, 1],
strides=[1, 2, 1],
padding=[[0, 0], [0, 1], [0, 0]],
expected=[2.0, 3.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolSamePaddingNonSquareWindow(self, **kwargs):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# Window of [x, x] should do:
#
# [max(1.0, 2.0), max(2.0, padded0),
# max(3.0, 4.0), max(4.0, padded0)]
self._VerifyOneType(
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 2.0, 4.0, 4.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolValidPaddingUnevenStride(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolValidPaddingUnevenStride2_(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolSamePaddingPacket4_(self, **kwargs):
expected_output = [
21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0,
61.0, 62.0, 63.0, 64.0
]
self._VerifyOneType(
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolSamePaddingPacket8_(self, **kwargs):
expected_output = [
81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 97.0, 98.0, 99.0, 100.0,
101.0, 102.0, 103.0, 104.0, 113.0, 114.0, 115.0, 116.0, 117.0, 118.0,
119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 125.0, 126.0, 127.0, 120.0,
18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 34.0, 35.0, 36.0, 37.0,
38.0, 39.0, 40.0, 41.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0,
58.0, 59.0, 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 82.0, 83.0, 84.0, 85.0,
86.0, 87.0, 88.0, 89.0, 98.0, 99.0, 100.0, 101.0, 102.0, 103.0, 104.0,
105.0, 114.0, 115.0, 116.0, 117.0, 118.0, 119.0, 120.0, 121.0, 122.0,
123.0, 124.0, 125.0, 126.0, 127.0, 120.0, 121.0, -45.0, -44.0, -43.0,
-42.0, -41.0, -40.0, -39.0, -38.0, -29.0, -28.0, -27.0, -26.0, -25.0,
-24.0, -23.0, -22.0, -13.0, -12.0, -11.0, -10.0, -9.0, -8.0, -7.0, -6.0,
-5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0
]
self._VerifyOneType(
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolEmptyInput(self, **kwargs):
self._VerifyOneType(
input_sizes=[0, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2))
@test_util.run_deprecated_v1
def testMaxPoolInvalidFilterSize(self, **kwargs):
with self.cached_session(use_gpu=test.is_gpu_available()):
t = constant_op.constant(1.0, shape=[1, 1, 1, 1])
with self.assertRaisesRegex(
(errors_impl.InvalidArgumentError, ValueError),
"Negative dimension size"):
t = self.evaluate(
nn_ops.max_pool(t, ksize=[1, 1, 2, 1], strides=1, padding="VALID"))
# Tests for DepthwiseMaxPooling on CPU only.
@parameterized.parameters(
GetTestConfigsDicts(
nn_ops.max_pool, gen_nn_ops.max_pool_v2, allow_gpu=False))
@test_util.run_deprecated_v1
def testDepthwiseMaxPool1x1DepthWindow(self, **kwargs):
# input is:
# [1.0, ..., 10.0] along depth,
#
# We maxpool by depth in patches of 2.
self._VerifyOneType(
input_sizes=[1, 1, 1, 10],
ksize=[1, 1, 1, 2],
strides=[1, 1, 1, 2],
padding="SAME",
expected=[2.0, 4.0, 6.0, 8.0, 10.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(
nn_ops.max_pool, gen_nn_ops.max_pool_v2, allow_gpu=False))
@test_util.run_deprecated_v1
def testDepthwiseMaxPool2x2DepthWindow(self, **kwargs):
# input is:
#
# a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
# output. Each node has contiguous values, so the depthwise max
# should be multiples of 3.0.
self._VerifyOneType(
input_sizes=[1, 2, 2, 6],
ksize=[1, 1, 1, 3],
strides=[1, 1, 1, 3],
padding="SAME",
expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(
nn_ops.max_pool, gen_nn_ops.max_pool_v2, allow_gpu=False))
@test_util.run_deprecated_v1
def testMaxPoolKernelSmallerThanStrideValid(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID",
expected=[9, 12, 30, 33],
**kwargs)
@parameterized.parameters(GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testAvgPoolKernelSmallerThanStride(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID",
expected=[5, 8, 26, 29],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2) +
GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testKernelSmallerThanStrideSame1_(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 7, 9],
**kwargs)
@parameterized.parameters(
GetTestConfigsDicts(nn_ops.max_pool, gen_nn_ops.max_pool_v2) +
GetTestConfigsDicts(nn_ops.avg_pool))
@test_util.run_deprecated_v1
def testKernelSmallerThanStrideSame2_(self, **kwargs):
self._VerifyOneType(
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 9, 11],
**kwargs)
def _testDepthwiseMaxPoolInvalidConfig(self,
in_size,
ksize,
strides,
error_msg,
use_gpu=False):
with self.cached_session(use_gpu=use_gpu):
t = constant_op.constant(1.0, shape=in_size)
with self.assertRaisesRegex(errors_impl.UnimplementedError, error_msg):
t = nn_ops.max_pool(
t, ksize=ksize, strides=strides, padding="SAME").eval()
@test_util.disable_xla("b/123338077") # Passes with XLA
def testDepthwiseMaxPoolInvalidConfigs(self):
self._testDepthwiseMaxPoolInvalidConfig(
[1, 2, 2, 4], [1, 2, 2, 2], [1, 1, 1, 2],
"exactly one of pooling across depth")
self._testDepthwiseMaxPoolInvalidConfig(
[1, 2, 2, 4], [1, 1, 1, 2], [1, 1, 1, 1],
"depth window to equal the depth stride")
self._testDepthwiseMaxPoolInvalidConfig([1, 2, 2, 4], [1, 1, 1, 3],
[1, 1, 1, 3], "evenly divide")
if test.is_gpu_available():
with self.session():
t = variables.Variable(np.ones([1, 2, 2, 4]))
self.evaluate(variables.global_variables_initializer())
with self.assertRaisesOpError("for CPU devices"):
nn_ops.max_pool(
t, ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
padding="SAME").eval()
# The following are tests that verify that the CPU and GPU implementations
# produce the same results.
def _CompareMaxPoolingFwd(self, input_shape, ksize, strides, padding):
# double datatype is currently not supported for pooling ops
# on the ROCm platform
    for dtype in [np.float32, np.float16] + (
        [np.float64] if not test.is_built_with_rocm() else []):
tensor_input = np.random.rand(*input_shape).astype(dtype)
with self.cached_session():
t = constant_op.constant(tensor_input, shape=input_shape)
out_op, _ = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
t = constant_op.constant(tensor_input, shape=input_shape)
out_op = nn_ops.max_pool(t, ksize, strides, padding)
cpu_val = self.evaluate(out_op)
self.assertAllCloseAccordingToType(cpu_val, gpu_val)
def _CompareMaxPoolingBk(self, input_shape, output_shape, ksize, strides,
padding):
# double datatype is currently not supported for pooling ops
# on the ROCm platform
    for dtype in [np.float32, np.float16] + (
        [np.float64] if not test.is_built_with_rocm() else []):
# Generate numbers in a narrow range, so that there are many duplicates
# in the input.
tensor_input = np.random.random_integers(0, 3, input_shape).astype(dtype)
tensor_output = np.random.rand(*output_shape).astype(dtype)
with self.cached_session():
t = constant_op.constant(tensor_input, shape=input_shape)
_, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
argmax = self.evaluate(argmax_op)
grad_in = constant_op.constant(tensor_output, shape=output_shape)
out_op = gen_nn_ops.max_pool_grad_with_argmax(t, grad_in, argmax, ksize,
strides, padding)
gpu_val = self.evaluate(out_op)
self.assertShapeEqual(gpu_val, out_op)
with self.cached_session(use_gpu=False):
t = constant_op.constant(tensor_input, shape=input_shape)
out_op = nn_ops.max_pool(t, ksize, strides, padding)
orig_out = self.evaluate(out_op)
grad_in = constant_op.constant(tensor_output, shape=output_shape)
out_op = gen_nn_ops.max_pool_grad(t, orig_out, grad_in, ksize, strides,
padding)
cpu_val = self.evaluate(out_op)
self.assertShapeEqual(cpu_val, out_op)
# The CPU version accumulates its gradient on fp16, so it's less
# accurate than the GPU version that does the accumulation on fp32
self.assertAllCloseAccordingToType(
cpu_val, gpu_val, half_rtol=0.01, half_atol=0.01)
def _CompareMaxPoolingGradBk(self, input_shape, output_shape, ksize, strides,
padding):
# double datatype is currently not supported for pooling ops
# on the ROCm platform
    for dtype in [np.float32, np.float16] + (
        [np.float64] if not test.is_built_with_rocm() else []):
# Generate numbers in a narrow range, so that there are many duplicates
# in the input.
tensor_input = np.random.random_integers(0, 3, input_shape).astype(dtype)
      with self.cached_session():
t = constant_op.constant(tensor_input, shape=input_shape)
_, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
argmax = self.evaluate(argmax_op)
grad_in = constant_op.constant(tensor_input, shape=input_shape)
out_op = gen_nn_ops.max_pool_grad_grad_with_argmax(
t, grad_in, argmax, ksize, strides, padding)
gpu_val = self.evaluate(out_op)
self.assertShapeEqual(gpu_val, out_op)
with self.cached_session(use_gpu=False):
t = constant_op.constant(tensor_input, shape=input_shape)
out_op = nn_ops.max_pool(t, ksize, strides, padding)
orig_out = self.evaluate(out_op)
grad_in = constant_op.constant(tensor_input, shape=input_shape)
out_op = gen_nn_ops.max_pool_grad_grad(t, orig_out, grad_in, ksize,
strides, padding)
cpu_val = self.evaluate(out_op)
self.assertShapeEqual(cpu_val, out_op)
# The CPU version accumulates its gradient on fp16, so it's less
# accurate than the GPU version that does the accumulation on fp32
self.assertAllCloseAccordingToType(
cpu_val, gpu_val, half_rtol=0.01, half_atol=0.01)
def testMaxPoolingWithArgmax(self):
tensor_input = [
1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,
0.0, 1.0, 0.0, 1.0
]
Config = collections.namedtuple(
"Config", ["use_gpu", "include_batch_in_index", "argmax", "Targmax"])
configs = [
Config(False, False, [0, 1, 3, 5, 0, 2, 6, 8], dtypes.int64),
Config(False, True, [0, 1, 3, 5, 9, 11, 15, 17], dtypes.int64),
Config(False, False, [0, 1, 3, 5, 0, 2, 6, 8], dtypes.int32),
Config(False, True, [0, 1, 3, 5, 9, 11, 15, 17], dtypes.int32),
Config(True, False, [0, 1, 3, 5, 0, 2, 6, 8], dtypes.int64),
Config(True, True, [0, 1, 3, 5, 9, 11, 15, 17], dtypes.int64),
]
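    # The argmax values index into the flattened input: with
    # include_batch_in_index=False each 3x3 image is indexed from 0, while
    # with include_batch_in_index=True the second image's indices are offset
    # by 3 * 3 * 1 = 9 (e.g. 0 -> 9 and 8 -> 17 in the configs above).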
for config in configs:
with GetDeviceScope(self, use_gpu=config.use_gpu):
t = constant_op.constant(tensor_input, shape=[2, 3, 3, 1])
out_op, argmax_op = nn_ops.max_pool_with_argmax(
t,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
Targmax=config.Targmax,
padding="VALID",
include_batch_in_index=config.include_batch_in_index)
out, argmax = self.evaluate([out_op, argmax_op])
self.assertShapeEqual(out, out_op)
self.assertShapeEqual(argmax, argmax_op)
self.assertAllClose(out.ravel(),
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
self.assertAllEqual(argmax.ravel(), config.argmax)
def testMaxPoolingGradWithArgmax(self):
orig_input = [
1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,
0.0, 1.0, 0.0, 1.0
]
tensor_input = [11.0, 12.0, 13.0, 14.0, 21.0, 22.0, 23.0, 24.0]
Config = collections.namedtuple(
"Config", ["use_gpu", "include_batch_in_index", "argmax"])
configs = [
Config(False, False, [0, 1, 3, 5, 0, 2, 6, 8]),
Config(False, True, [0, 1, 3, 5, 9, 11, 15, 17]),
Config(True, False, [0, 1, 3, 5, 0, 2, 6, 8]),
Config(True, True, [0, 1, 3, 5, 9, 11, 15, 17])
]
for config in configs:
with GetDeviceScope(self, config.use_gpu):
orig_in = constant_op.constant(orig_input, shape=[2, 3, 3, 1])
t = constant_op.constant(tensor_input, shape=[2, 2, 2, 1])
argmax_t = constant_op.constant(
config.argmax, shape=[2, 2, 2, 1], dtype=dtypes.int64)
out_op = gen_nn_ops.max_pool_grad_with_argmax(
orig_in,
t,
argmax_t,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
padding="VALID",
include_batch_in_index=config.include_batch_in_index)
out = self.evaluate(out_op).flatten()
self.assertAllClose(out, [
11.0, 12.0, 0.0, 13.0, 0.0, 14.0, 0.0, 0.0, 0.0, 21.0, 0.0, 22.0,
0.0, 0.0, 0.0, 23.0, 0.0, 24.0
])
def testMaxPoolingGradThrowDeterminismError(self):
if test.is_gpu_available(cuda_only=True):
try:
config_exec.enable_op_determinism()
orig_input = [
1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,
0.0, 1.0, 0.0, 1.0
]
tensor_input = [11.0, 12.0, 13.0, 14.0, 21.0, 22.0, 23.0, 24.0]
with GetDeviceScope(self, True):
orig_in = constant_op.constant(orig_input, shape=[2, 3, 3, 1])
t = constant_op.constant(tensor_input, shape=[2, 2, 2, 1])
argmax_t = constant_op.constant(
[0, 1, 3, 5, 0, 2, 6, 8], shape=[2, 2, 2, 1], dtype=dtypes.int64)
with self.assertRaisesRegexp(
errors_impl.UnimplementedError, "Determinism is not yet supported "
"for MaxPoolGradWithArgmax."):
out_op = gen_nn_ops.max_pool_grad_with_argmax(
orig_in,
t,
argmax_t,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
padding="VALID",
include_batch_in_index=False)
self.evaluate(out_op)
finally:
config_exec.disable_op_determinism()
else:
try:
config_exec.enable_op_determinism()
orig_input = [
1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,
0.0, 1.0, 0.0, 1.0
]
tensor_input = [11.0, 12.0, 13.0, 14.0, 21.0, 22.0, 23.0, 24.0]
with GetDeviceScope(self, False):
orig_in = constant_op.constant(orig_input, shape=[2, 3, 3, 1])
t = constant_op.constant(tensor_input, shape=[2, 2, 2, 1])
argmax_t = constant_op.constant(
[0, 1, 3, 5, 0, 2, 6, 8], shape=[2, 2, 2, 1], dtype=dtypes.int64)
out_op = gen_nn_ops.max_pool_grad_with_argmax(
orig_in,
t,
argmax_t,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
padding="VALID",
include_batch_in_index=False)
self.evaluate(out_op)
finally:
config_exec.disable_op_determinism()
def testMaxPoolingGradGradWithArgmax(self):
# MaxPoolWithArgMax is implemented only on CUDA.
if not test.is_gpu_available(cuda_only=True):
return
orig_input = [
1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,
0.0, 1.0, 0.0, 1.0
]
tensor_input = [
11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 21.0, 22.0, 23.0,
24.0, 25.0, 26.0, 27.0, 28.0, 29.0
]
Config = collections.namedtuple(
"Config", ["use_gpu", "include_batch_in_index", "argmax"])
configs = [
Config(True, False, [0, 1, 3, 5, 0, 2, 6, 8]),
Config(True, True, [0, 1, 3, 5, 9, 11, 15, 17])
]
for config in configs:
with GetDeviceScope(self, config.use_gpu):
orig_in = constant_op.constant(orig_input, shape=[2, 3, 3, 1])
t = constant_op.constant(tensor_input, shape=[2, 3, 3, 1])
argmax_t = constant_op.constant(
config.argmax, shape=[2, 2, 2, 1], dtype=dtypes.int64)
out_op = gen_nn_ops.max_pool_grad_grad_with_argmax(
orig_in,
t,
argmax_t,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
padding="VALID",
include_batch_in_index=config.include_batch_in_index)
out = self.evaluate(out_op).flatten()
self.assertAllClose(out,
[11.0, 12.0, 14.0, 16.0, 21.0, 23.0, 27.0, 29.0])
def _ConstructAndTestGradient(self,
pool_func,
input_sizes,
output_sizes,
window_rows,
window_cols,
row_stride,
col_stride,
padding,
data_format,
use_gpu,
x_init_value=None):
"""Verifies the gradients of the max or avg pooling function.
Args:
      pool_func: Function to be called, e.g. nn_ops.max_pool or
        nn_ops.avg_pool.
input_sizes: Input tensor dimensions.
output_sizes: Output tensor dimensions.
window_rows: kernel size in row dim
window_cols: kernel size in col dim
row_stride: Row Stride.
col_stride: Col Stride.
padding: Padding type.
data_format: Data format.
use_gpu: whether we are running on GPU
x_init_value: Values to be passed to the gradient checker.
"""
assert input_sizes[0] == output_sizes[0]
assert input_sizes[3] == output_sizes[3]
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.cached_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(x, shape=input_sizes, name="input")
if pool_func == nn_ops.avg_pool:
func_name = "avg_pool"
err_tolerance = 1e-4
else:
if x_init_value is None:
x_init_value = np.asfarray(
np.arange(1, total_size + 1),
dtype=np.float32).reshape(input_sizes)
func_name = "max_pool"
err_tolerance = 1e-3
if data_format == "NCHW":
ksize = [1, 1, window_rows, window_cols]
strides = [1, 1, row_stride, col_stride]
if isinstance(padding, list):
padding = test_util.NHWCToNCHW(padding)
t = test_util.NHWCToNCHW(input_tensor)
else:
ksize = [1, window_rows, window_cols, 1]
strides = [1, row_stride, col_stride, 1]
t = input_tensor
t = pool_func(
t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=func_name)
if data_format == "NCHW":
t = test_util.NCHWToNHWC(t)
err = gradient_checker.compute_gradient_error(
input_tensor,
input_sizes,
t,
output_sizes,
x_init_value=x_init_value,
delta=1e-2)
tf_logging.info("%s gradient error = %.4f" % (func_name, err))
self.assertLess(err, err_tolerance)
def _ConstructAndTestSecondGradient(self,
pool_func,
input_sizes,
output_sizes,
window_rows,
window_cols,
row_stride,
col_stride,
padding,
data_format,
use_gpu,
x_init_value=None):
"""Verifies the second-order gradients of the pooling function.
Args:
      pool_func: Function to be called, e.g. nn_ops.max_pool or
        nn_ops.avg_pool.
input_sizes: Input tensor dimensions.
output_sizes: Output tensor dimensions.
window_rows: kernel size in row dim
window_cols: kernel size in col dim
row_stride: Row Stride.
col_stride: Col Stride.
padding: Padding type.
data_format: Data format.
use_gpu: whether we are running on GPU
x_init_value: Values to be passed to the gradient checker.
"""
assert input_sizes[0] == output_sizes[0]
assert input_sizes[3] == output_sizes[3]
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.cached_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(x, shape=input_sizes, name="input")
if pool_func == nn_ops.avg_pool:
func_name = "avg_pool"
err_tolerance = 1e-3
else:
if x_init_value is None:
x_init_value = np.asfarray(
np.arange(1, total_size + 1),
dtype=np.float32).reshape(input_sizes)
func_name = "max_pool"
err_tolerance = 1e-2
if data_format == "NCHW":
        ksize = [1, 1, window_rows, window_cols]
strides = [1, 1, row_stride, col_stride]
t = test_util.NHWCToNCHW(input_tensor)
else:
        ksize = [1, window_rows, window_cols, 1]
strides = [1, row_stride, col_stride, 1]
t = input_tensor
t = pool_func(
t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=func_name)
if data_format == "NCHW":
t = test_util.NHWCToNCHW(t)
t_g = gradients_impl.gradients(t**2, input_tensor)[0]
err = gradient_checker.compute_gradient_error(
input_tensor,
input_sizes,
t_g,
input_sizes,
x_init_value=x_init_value,
delta=1e-2)
tf_logging.info("%s second-order gradient error = %.4f" % (func_name, err))
self.assertLess(err, err_tolerance)
def _testMaxPoolGradValidPadding1_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[1, 3, 3, 1],
output_sizes=[1, 3, 3, 1],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_1_6(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 6, 6, 3],
output_sizes=[2, 5, 5, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_1_7(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 7, 7, 3],
output_sizes=[2, 6, 6, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradValidPadding1_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[1, 3, 3, 1],
output_sizes=[1, 2, 2, 1],
window_rows=1,
window_cols=1,
row_stride=2,
col_stride=2,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 2, 3],
output_sizes=[2, 1, 1, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding1_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding1_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3],
window_rows=1,
window_cols=1,
row_stride=2,
col_stride=2,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding2_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding2_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding3_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[1, 7, 7, 1],
output_sizes=[1, 7, 7, 1],
window_rows=3,
window_cols=3,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolExplicitPadding_1(self, data_format, use_gpu):
for pool_func in [nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[1, 7, 7, 1],
output_sizes=[1, 7, 7, 1],
window_rows=3,
window_cols=3,
row_stride=1,
col_stride=1,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolExplicitPadding_2(self, data_format, use_gpu):
for pool_func in [nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[1, 7, 7, 1],
output_sizes=[1, 6, 8, 1],
window_rows=3,
window_cols=5,
row_stride=1,
col_stride=1,
padding=[[0, 0], [0, 1], [2, 3], [0, 0]],
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolExplicitPaddingLeftGreater(self, data_format, use_gpu):
for pool_func in [nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[1, 7, 7, 1],
output_sizes=[1, 6, 8, 1],
window_rows=3,
window_cols=5,
row_stride=1,
col_stride=1,
padding=[[0, 0], [0, 1], [3, 2], [0, 0]],
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolExplicitPaddingBatchChannel(self, data_format, use_gpu):
for pool_func in [nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[4, 7, 7, 3],
output_sizes=[4, 6, 8, 3],
window_rows=3,
window_cols=5,
row_stride=1,
col_stride=1,
padding=[[0, 0], [0, 1], [3, 2], [0, 0]],
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolExplicitPaddingStrides(self, data_format, use_gpu):
for pool_func in [nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[1, 7, 7, 1],
output_sizes=[1, 4, 3, 1],
window_rows=3,
window_cols=3,
row_stride=2,
col_stride=3,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testMaxPoolGrad(self):
for (data_format, use_gpu) in GetTestConfigs():
self._testMaxPoolGradValidPadding1_1(data_format, use_gpu)
self._testMaxPoolGradValidPadding1_2(data_format, use_gpu)
self._testMaxPoolGradValidPadding2_1_6(data_format, use_gpu)
self._testMaxPoolGradValidPadding2_1_7(data_format, use_gpu)
self._testMaxPoolGradValidPadding2_2(data_format, use_gpu)
self._testMaxPoolGradSamePadding1_1(data_format, use_gpu)
self._testMaxPoolGradSamePadding1_2(data_format, use_gpu)
self._testMaxPoolGradSamePadding2_1(data_format, use_gpu)
self._testMaxPoolGradSamePadding2_2(data_format, use_gpu)
self._testMaxPoolGradSamePadding3_1(data_format, use_gpu)
self._testMaxPoolExplicitPadding_1(data_format, use_gpu)
self._testMaxPoolExplicitPadding_2(data_format, use_gpu)
self._testMaxPoolExplicitPaddingStrides(data_format, use_gpu)
self._testMaxPoolExplicitPaddingLeftGreater(data_format, use_gpu)
self._testMaxPoolExplicitPaddingBatchChannel(data_format, use_gpu)
def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows,
window_cols, row_stride, col_stride, padding, v2):
"""Max Pooling Gradient.
Args:
orig_input: A float Tensor. The original input tensor.
orig_output: A float Tensor. The original output tensor.
grad: A float Tensor.
The 4D (batch x rows x cols x depth) output backprop.
window_rows: integer. Kernel size along rows dimension.
window_cols: integer. Kernel size along cols dimension.
row_stride: integer. Stride along rows dimension
col_stride: integer. Stride along cols dimension
      padding: PoolingOpDef.Padding. Padding type.
      v2: Whether to use gen_nn_ops.max_pool_grad_v2 instead of
        gen_nn_ops.max_pool_grad.
Returns:
A Tensor.
"""
pool_func = gen_nn_ops.max_pool_grad_v2 if v2 else gen_nn_ops.max_pool_grad
if v2:
return pool_func(orig_input, orig_output, grad,
[1, window_rows, window_cols, 1],
[1, row_stride, col_stride, 1], padding)
else:
padding, explicit_paddings = nn_ops.convert_padding(padding)
return pool_func(orig_input, orig_output, grad,
[1, window_rows, window_cols, 1],
[1, row_stride, col_stride, 1], padding,
explicit_paddings)
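  # Note: nn_ops.convert_padding above is assumed to turn a string padding into
  # (padding, []) and a nested list such as [[0, 0], [1, 2], [2, 1], [0, 0]]
  # into ("EXPLICIT", [0, 0, 1, 2, 2, 1, 0, 0]); this sketch of its behavior is
  # an assumption for illustration, not taken from this file.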
def _testMaxPoolGradDirect(self, input_data, output_backprop,
expected_input_backprop, input_sizes, output_sizes,
window_rows, window_cols, row_stride, col_stride,
padding, use_gpu, v2):
pool_func = gen_nn_ops.max_pool_v2 if v2 else nn_ops.max_pool
with self.cached_session(use_gpu=use_gpu):
input_tensor = variables.Variable(
np.array(input_data, dtype=np.float32).reshape(input_sizes))
self.evaluate(variables.global_variables_initializer())
output_tensor = pool_func(input_tensor, [1, window_rows, window_cols, 1],
[1, row_stride, col_stride, 1], padding)
output_backprop_tensor = constant_op.constant(
output_backprop, shape=output_sizes)
input_backprop_tensor = self._MaxPoolGrad(
input_tensor, output_tensor, output_backprop_tensor, window_rows,
window_cols, row_stride, col_stride, padding, v2)
actual_input_backprop = self.evaluate(input_backprop_tensor)
self.assertShapeEqual(actual_input_backprop, input_backprop_tensor)
actual_input_backprop = actual_input_backprop.flatten()
actual_input_backprop = self._GetNdArray(actual_input_backprop)
actual_output = self.evaluate(output_tensor).flatten()
actual_output = self._GetNdArray(actual_output)
self.assertAllClose(
expected_input_backprop, actual_input_backprop, rtol=1e-6, atol=1e-6)
def _testMaxPoolGradDirect1_1(self):
input_data = [
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0
]
output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
expected_input_backprop = [
11.0, 12.0, 13.0, 0.0, 15.0, 16.0, 17.0, 0.0, 19.0, 20.0, 21.0, 0.0,
0.0, 0.0, 0.0, 0.0
]
for use_gpu in True, False:
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolGradDirect1_2(self):
input_data = [
1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0,
0.0, 1.0
]
output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
expected_input_backprop = [
11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 17.0, 19.0, 0.0, 41.0, 0.0, 0.0,
0.0, 0.0, 0.0
]
for use_gpu in True, False:
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolGradDirect1_3(self):
input_data = [
1.0,
0.0,
1.0,
0.0,
0.0,
1.0,
0.0,
1.0,
1.0,
0.0,
1.0,
0.0,
0.0,
1.0,
0.0,
1.0,
]
output_backprop = [
11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0,
23.0, 24.0, 25.0, 26.0
]
expected_input_backprop = [
54,
0.0,
62,
0.0,
0.0,
60,
0.0,
22.0,
47,
0.0,
51,
0.0,
0.0,
0.0,
0.0,
0.0,
]
for use_gpu in True, False:
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 4, 4, 1],
window_rows=3,
window_cols=3,
row_stride=1,
col_stride=1,
padding="SAME",
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolGradZeroExplicitPadding(self):
input_data = [
1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0,
0.0, 1.0
]
output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
expected_input_backprop = [
11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 17.0, 19.0, 0.0, 41.0, 0.0, 0.0,
0.0, 0.0, 0.0
]
for use_gpu in True, False:
for v2 in [False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding=[[0, 0], [0, 0], [0, 0], [0, 0]],
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolGradExplicitPadding_1(self):
input_data = [
1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0,
0.0, 1.0
]
output_backprop = [11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,
20.0, 21.0, 22.0]
expected_input_backprop = [
11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 49.0, 19.0, 0.0, 41.0, 0.0, 0.0,
0.0, 0.0, 22.0
]
for use_gpu in True, False:
for v2 in [False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 4, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding=[[0, 0], [0, 0], [0, 1], [0, 0]],
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolGradExplicitPadding_2(self):
input_data = [
1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0,
0.0, 1.0
]
output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
expected_input_backprop = [
54.0, 0.0, 30.0, 0.0, 0.0, 0.0, 0.0, 0.0, 39.0, 0.0, 21.0, 0.0, 0.0,
0.0, 0.0, 0.0
]
for use_gpu in True, False:
for v2 in [False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=3,
window_cols=3,
row_stride=2,
col_stride=2,
padding=[[0, 0], [2, 1], [2, 1], [0, 0]],
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolGradExplicitPadding_3(self):
input_data = [
-1.0, -5.0, -1.0, -5.0, -5.0, -1.0, -5.0, -1.0, -1.0, -5.0, -1.0, -5.0,
-5.0, -1.0, -5.0, -1.0
]
output_backprop = [11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,
20.0, 21.0, 22.0]
expected_input_backprop = [
11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 49.0, 19.0, 0.0, 41.0, 0.0, 0.0,
0.0, 0.0, 22.0
]
for use_gpu in True, False:
for v2 in [False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 4, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding=[[0, 0], [0, 0], [0, 1], [0, 0]],
use_gpu=use_gpu,
v2=v2)
@test_util.no_xla_auto_jit("b/123923733") # NaNs handled differently
def _testMaxPoolGradDirectWithNans2_1(self):
input_data = [float("nan")] * 16
output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
# Test the CPU implementation, which propagates diffs in case of NaN
expected_input_backprop_tf_cpu = [
11.0, 12.0, 13.0, 0.0, 15.0, 16.0, 17.0, 0.0, 19.0, 20.0, 21.0, 0.0,
0.0, 0.0, 0.0, 0.0
]
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_tf_cpu,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=False,
v2=v2)
if not test.is_gpu_available():
return
    # The functionality associated with TF_ENABLE_MAXPOOL_NANPROP is currently
    # not supported on the ROCm platform, so skip that part of the test.
    # NaNs in the input also lead to non-deterministic results on ROCm, hence
    # the remaining tests are skipped altogether on that platform.
if test.is_built_with_rocm():
return
# Test the GPU implementation that uses cudnn for now.
saved_nanprop = os.environ.get("TF_ENABLE_MAXPOOL_NANPROP")
# Do not propagate the diff in cases of NaNs
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "0"
expected_input_backprop_cudnn = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0
]
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=True,
v2=v2)
# Propagate the diff in cases of NaNs
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "1"
expected_input_backprop_cudnn = expected_input_backprop_tf_cpu
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=True,
v2=v2)
if saved_nanprop:
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = saved_nanprop
else:
del os.environ["TF_ENABLE_MAXPOOL_NANPROP"]
@test_util.no_xla_auto_jit("b/123923733") # NaNs handled differently
def _testMaxPoolGradDirectWithNans2_2(self):
input_data = [float("nan")] * 16
output_backprop = [
float("nan"), 12.0, 13.0, 15.0,
float("nan"), 17.0, 19.0, 20.0,
float("nan")
]
# Test the CPU implementation, which propagates diffs in case of NaN
expected_input_backprop_tf_cpu = [
float("nan"), 12.0, 13.0, 0.0, 15.0,
float("nan"), 17.0, 0.0, 19.0, 20.0,
float("nan"), 0.0, 0.0, 0.0, 0.0, 0.0
]
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_tf_cpu,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=False,
v2=v2)
if not test.is_gpu_available():
return
    # The functionality associated with TF_ENABLE_MAXPOOL_NANPROP is currently
    # not supported on the ROCm platform, so skip that part of the test.
    # NaNs in the input also lead to non-deterministic results on ROCm, hence
    # the remaining tests are skipped altogether on that platform.
if test.is_built_with_rocm():
return
# Test the GPU implementation that uses cudnn for now.
saved_nanprop = os.environ.get("TF_ENABLE_MAXPOOL_NANPROP")
# Do not propagate the diff in cases of NaNs
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "0"
expected_input_backprop_cudnn = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0
]
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=True,
v2=v2)
# Propagate the diff in cases of NaNs
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "1"
expected_input_backprop_cudnn = expected_input_backprop_tf_cpu
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=True,
v2=v2)
if saved_nanprop:
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = saved_nanprop
else:
del os.environ["TF_ENABLE_MAXPOOL_NANPROP"]
@test_util.run_deprecated_v1
def testMaxPoolGradDirect(self):
self._testMaxPoolGradDirect1_1()
self._testMaxPoolGradDirect1_2()
self._testMaxPoolGradDirect1_3()
self._testMaxPoolGradDirectWithNans2_1()
self._testMaxPoolGradDirectWithNans2_2()
self._testMaxPoolGradZeroExplicitPadding()
self._testMaxPoolGradExplicitPadding_1()
self._testMaxPoolGradExplicitPadding_2()
self._testMaxPoolGradExplicitPadding_3()
def _testMaxPoolGradGradValidPadding1_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[1, 3, 3, 1],
output_sizes=[1, 3, 3, 1],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradValidPadding2_1_6(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 6, 6, 3],
output_sizes=[2, 5, 5, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradValidPadding2_1_7(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 7, 7, 3],
output_sizes=[2, 6, 6, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradValidPadding2_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 2, 2, 3],
output_sizes=[2, 1, 1, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradSamePadding1_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradSamePadding2_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradSamePadding2_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradSamePadding3_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[1, 7, 7, 1],
output_sizes=[1, 7, 7, 1],
window_rows=3,
window_cols=3,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testMaxPoolGradGrad(self):
for (data_format, use_gpu) in GetTestConfigs():
self._testMaxPoolGradGradValidPadding1_1(data_format, use_gpu)
self._testMaxPoolGradGradValidPadding2_1_6(data_format, use_gpu)
self._testMaxPoolGradGradValidPadding2_1_7(data_format, use_gpu)
self._testMaxPoolGradGradValidPadding2_2(data_format, use_gpu)
self._testMaxPoolGradGradSamePadding1_1(data_format, use_gpu)
self._testMaxPoolGradGradSamePadding2_1(data_format, use_gpu)
self._testMaxPoolGradGradSamePadding2_2(data_format, use_gpu)
self._testMaxPoolGradGradSamePadding3_1(data_format, use_gpu)
def _MaxPoolGradGrad(self, orig_input, orig_output, grad, window_rows,
window_cols, row_stride, col_stride, padding):
"""Max Pooling Second-Order Gradient.
Args:
orig_input: A float Tensor. The original input tensor.
orig_output: A float Tensor. The original output tensor.
      grad: A float Tensor.
        The 4D (batch x rows x cols x depth) gradients of gradients w.r.t.
        the input of max_pool.
      window_rows: integer. Kernel size along rows dimension.
      window_cols: integer. Kernel size along cols dimension.
      row_stride: integer. Stride along rows dimension.
      col_stride: integer. Stride along cols dimension.
padding: PoolingOpDef.Padding. Padding type.
Returns:
A Tensor.
"""
return gen_nn_ops.max_pool_grad_grad(
orig_input, orig_output, grad, [1, window_rows, window_cols, 1],
[1, row_stride, col_stride, 1], padding)
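  # Illustrative sketch (not part of the original test file): a minimal,
  # self-contained use of the helper above. The shapes and values are
  # assumptions chosen only for illustration; the real coverage lives in
  # testMaxPoolGradGrad below.
  def _ExampleMaxPoolGradGradUsage(self):
    """Runs max_pool, then feeds matching tensors to _MaxPoolGradGrad."""
    x = constant_op.constant([1.0, 2.0, 3.0, 4.0], shape=[1, 2, 2, 1])
    y = nn_ops.max_pool(
        x, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1], padding="VALID")
    # For max_pool_grad_grad, 'grad' holds gradients of gradients w.r.t. the
    # input of max_pool, so it has the same shape as x.
    grad = array_ops.ones_like(x)
    return self._MaxPoolGradGrad(
        x, y, grad, window_rows=1, window_cols=1, row_stride=1,
        col_stride=1, padding="VALID")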
@test_util.run_deprecated_v1
def testAvgPoolGrad(self):
for (data_format, use_gpu) in GetTestConfigs():
self._testAvgPoolGradValidPadding1_1(data_format, use_gpu)
self._testAvgPoolGradValidPadding1_2(data_format, use_gpu)
self._testAvgPoolGradValidPadding2_1(data_format, use_gpu)
self._testAvgPoolGradValidPadding2_2(data_format, use_gpu)
self._testAvgPoolGradSamePadding1_1(data_format, use_gpu)
self._testAvgPoolGradSamePadding1_2(data_format, use_gpu)
self._testAvgPoolGradSamePadding2_1(data_format, use_gpu)
self._testAvgPoolGradSamePadding2_2(data_format, use_gpu)
self._testAvgPoolGradSamePadding3_1(data_format, use_gpu)
def _testAvgPoolGradValidPadding1_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 3, 3, 3],
output_sizes=[2, 3, 3, 3],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradValidPadding1_2(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 3, 3, 3],
output_sizes=[2, 2, 2, 3],
window_rows=1,
window_cols=1,
row_stride=2,
col_stride=2,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradValidPadding2_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 3, 3, 3],
output_sizes=[2, 2, 2, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradValidPadding2_2(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 2, 2, 3],
output_sizes=[2, 1, 1, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradSamePadding1_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradSamePadding1_2(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3],
window_rows=1,
window_cols=1,
row_stride=2,
col_stride=2,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradSamePadding2_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradSamePadding2_2(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradSamePadding3_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[1, 7, 7, 1],
output_sizes=[1, 7, 7, 1],
window_rows=3,
window_cols=3,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
for pool_func in [nn_ops.max_pool, nn_ops.avg_pool]:
p = pool_func(
array_ops.placeholder(dtypes.float32),
ksize=[1, 1, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], p.get_shape().as_list())
p, am = nn_ops.max_pool_with_argmax(
array_ops.placeholder(dtypes.float32),
ksize=[1, 1, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], p.get_shape().as_list())
self.assertEqual([None, None, None, None], am.get_shape().as_list())
# Incorrect input shape.
for pool_func in [
nn_ops.max_pool, nn_ops.avg_pool, nn_ops.max_pool_with_argmax
]:
with self.assertRaises(ValueError):
pool_func(
array_ops.placeholder(dtypes.float32, shape=[1, 3]),
ksize=[1, 1, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME")
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123337890") # Error messages differ
def testOpEdgeCases(self):
with self.session(use_gpu=test.is_gpu_available()) as sess:
pool_funcs = [nn_ops.max_pool, nn_ops.avg_pool]
if test.is_gpu_available():
pool_funcs.append(nn_ops.max_pool_with_argmax)
for pool_func in pool_funcs:
if pool_func != nn_ops.max_pool:
# Illegal strides.
with self.assertRaisesRegex(
errors_impl.UnimplementedError,
"Pooling is not yet supported on the batch"):
sess.run(
pool_func(
array_ops.placeholder(dtypes.float32),
ksize=[1, 1, 1, 1],
strides=[2, 1, 1, 1],
padding="SAME"))
# Filter too large.
with self.assertRaisesRegex(ValueError, "Negative dimension size"):
sess.run(
pool_func(
array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
ksize=[1, 20, 21, 1],
strides=[1, 1, 1, 1],
padding="VALID"))
with self.assertRaisesRegex(ValueError, "Negative dimension size"):
pool_func(
array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
ksize=[1, 21, 20, 1],
strides=[1, 1, 1, 1],
padding="VALID")
@test_util.run_deprecated_v1
def testEdgeCasesRaiseErrors(self):
with self.assertRaisesRegexp(
ValueError, "NCHW_VECT_C.*is not supported with "
"explicit padding|XLA does not support pooling ops with explicit "
"padding"):
nn_ops.max_pool(
array_ops.placeholder(dtypes.float32, shape=[1, 3, 3, 1]),
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding=[[0, 0], [0, 1], [0, 1], [0, 0]],
data_format="NCHW_VECT_C")
with self.assertRaisesRegexp(
ValueError, "Explicit padding is not supported with an input "
"tensor of rank 5"):
nn_ops.max_pool_v2(
array_ops.placeholder(dtypes.float32, shape=[1, 3, 3, 1, 1]),
ksize=[1, 2, 2, 1, 1],
strides=[1, 2, 2, 1, 1],
padding=[[0, 0], [0, 1], [0, 1], [0, 0]],
data_format="NCHW")
with self.assertRaisesRegexp(
ValueError, "Attr 'padding' of 'MaxPoolV2' Op passed "
"string 'EXPLICIT'"):
gen_nn_ops.max_pool_v2(
array_ops.placeholder(dtypes.float32, shape=[1, 3, 3, 1, 1]),
ksize=[1, 2, 2, 1, 1],
strides=[1, 2, 2, 1, 1],
padding="EXPLICIT",
data_format="NHWC")
@test_util.run_deprecated_v1
def testEdgeCasesExcessPadding(self):
with self.session(use_gpu=test.is_gpu_available()) as sess:
with self.assertRaisesRegexp(
(errors_impl.UnimplementedError, errors_impl.InvalidArgumentError),
"Right padding 2 needs to be smaller than the window size 2|"
"XLA does not support pooling ops with explicit padding"):
input_sizes = [1, 3, 3, 1]
x = [(((f + 128) % 255) - 127) for f in range(9)]
t = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32)
sess.run(gen_nn_ops.max_pool(
t,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="EXPLICIT",
explicit_paddings=[0, 0, 0, 1, 0, 2, 0, 0],
data_format="NHWC"))
@test_util.run_deprecated_v1
def testNegativePadding(self):
with self.session(use_gpu=test.is_gpu_available()) as sess:
with self.assertRaisesRegexp(
ValueError, "All elements of explicit_paddings must be "
"nonnegative for"):
input_sizes = [1, 3, 3, 1]
x = [(((f + 128) % 255) - 127) for f in range(9)]
t = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32)
sess.run(gen_nn_ops.max_pool(
t,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="EXPLICIT",
explicit_paddings=[0, 0, -1, -1, -1, -1, 0, 0],
data_format="NHWC"))
@test_util.run_deprecated_v1
def testExplicitPaddingBatch(self):
with self.session(use_gpu=test.is_gpu_available()) as sess:
with self.assertRaisesRegexp(
ValueError, "Nonzero explicit padding in the batch or depth "
"dimensions is not supported"):
input_sizes = [1, 3, 3, 1]
x = [(((f + 128) % 255) - 127) for f in range(9)]
t = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32)
sess.run(gen_nn_ops.max_pool(
t,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="EXPLICIT",
explicit_paddings=[1, 1, 1, 1, 1, 1, 0, 0],
data_format="NHWC"))
@test_util.disable_xla(
"b/205634417") # XLA is not throwing shape errors for multiple *Grad ops.
def testMaxPoolGradEagerShapeErrors(self):
with context.eager_mode():
orig_in = array_ops.ones((1, 1, 1, 1))
# Test invalid orig_out shape
orig_out = array_ops.ones((1, 1, 1, 2))
grad = array_ops.ones((1, 1, 1, 1))
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
r"Expected orig_output shape to be \[1,1,1,1\], but got \[1,1,1,2\]"):
gen_nn_ops.max_pool_grad(
orig_in, orig_out, grad, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
padding="VALID")
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
r"Expected orig_output shape to be \[1,1,1,1\], but got \[1,1,1,2\]"):
gen_nn_ops.max_pool_grad_grad(
orig_in, orig_out, grad, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
padding="VALID")
# Test invalid grad shape
orig_out = array_ops.ones((1, 1, 1, 1))
grad = array_ops.ones((1, 1, 1, 2))
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
r"Expected grad shape to be \[1,1,1,1\], but got \[1,1,1,2\]"):
gen_nn_ops.max_pool_grad(
orig_in, orig_out, grad, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
padding="VALID")
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
r"Expected grad shape to be \[1,1,1,1\], but got \[1,1,1,2\]"):
gen_nn_ops.max_pool_grad_grad(
orig_in, orig_out, grad, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
padding="VALID")
def testMaxPoolGradWithArgmaxEagerShapeErrors(self):
with context.eager_mode():
inp = array_ops.ones((1, 1, 1, 1))
# Test invalid grad shape
grad = array_ops.ones((1, 1, 1, 2))
argmax = array_ops.zeros((1, 1, 1, 1), dtype=dtypes.int64)
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
r"Expected grad shape to be \[1,1,1,1\], but got \[1,1,1,2\]"):
gen_nn_ops.max_pool_grad_with_argmax(
inp, grad, argmax, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
padding="VALID")
# max_pool_grad_grad_with_argmax is only implemented for GPUs
if test.is_gpu_available():
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
r"Expected grad shape to be \[1,1,1,1\], but got \[1,1,1,2\]"):
gen_nn_ops.max_pool_grad_grad_with_argmax(
inp, grad, argmax, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
padding="VALID")
# Test invalid argmax shape
grad = array_ops.ones((1, 1, 1, 1))
argmax = array_ops.ones((1, 1, 1, 2), dtype=dtypes.int64)
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
r"Expected argmax shape to be \[1,1,1,1\], but got \[1,1,1,2\]"):
gen_nn_ops.max_pool_grad_with_argmax(
inp, grad, argmax, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
padding="VALID")
# max_pool_grad_grad_with_argmax is only implemented for GPUs
if test.is_gpu_available():
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
r"Expected argmax shape to be \[1,1,1,1\], but got \[1,1,1,2\]"):
gen_nn_ops.max_pool_grad_grad_with_argmax(
inp, grad, argmax, ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
padding="VALID")
def testAvgPoolGradInvalidInputShapeRaiseError(self):
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
with self.cached_session():
orig_input_shape = constant_op.constant(
-536870912, shape=[4], dtype=dtypes.int32)
grad = constant_op.constant(
.0890338004362538, shape=[1, 5, 7, 1], dtype=dtypes.float64)
t = gen_nn_ops.AvgPoolGrad(
orig_input_shape=orig_input_shape,
grad=grad,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
data_format="NHWC")
self.evaluate(t)
def GetMaxPoolFwdTest(input_size, filter_size, strides, padding):
def Test(self):
# MaxPoolWithArgMax is implemented only on CUDA.
if not test.is_gpu_available(cuda_only=True):
return
self._CompareMaxPoolingFwd(input_size, filter_size, strides, padding)
return Test
def GetMaxPoolGradTest(input_size, filter_size, output_size, strides, padding):
def Test(self):
# MaxPoolWithArgMax is implemented only on CUDA.
if not test.is_gpu_available(cuda_only=True):
return
self._CompareMaxPoolingBk(input_size, output_size, filter_size, strides,
padding)
return Test
def GetMaxPoolGradGradTest(input_size, filter_size, output_size, strides,
padding):
def Test(self):
# MaxPoolWithArgMax is implemented only on CUDA.
if not test.is_gpu_available(cuda_only=True):
return
self._CompareMaxPoolingGradBk(input_size, output_size, filter_size, strides,
padding)
return Test
if __name__ == "__main__":
for (name_, input_size_, filter_size_, output_size_, stride_,
padding_) in GetShrunkInceptionMaxPoolShapes():
setattr(PoolingTest, "testMaxPoolFwd_" + name_,
GetMaxPoolFwdTest(input_size_, filter_size_, stride_, padding_))
setattr(PoolingTest, "testMaxPoolGrad_" + name_,
GetMaxPoolGradTest(input_size_, filter_size_, output_size_, stride_,
padding_))
setattr(PoolingTest, "testMaxPoolGradGrad_" + name_,
GetMaxPoolGradGradTest(input_size_, filter_size_, output_size_,
stride_, padding_))
test.main()
|
GHSA-6hg6-5c2q-7rcr
|
tests/unit/zhmcclient/test_activation_profile.py
|
@@ -19,9 +19,10 @@
import copy
import re
+import logging
import pytest
-from zhmcclient import Client, ActivationProfile
+from zhmcclient import Client, ActivationProfile, BLANKED_OUT_STRING
from zhmcclient_mock import FakedSession
from tests.common.utils import assert_resources
@@ -338,11 +339,15 @@ def test_profile_repr(self):
}},
{'group-profile-uri': None},
{'zaware-gateway-info': None},
+ {'ssc-master-pw': 'bla', 'zaware-master-pw': 'bla'},
]
)
- def test_profile_update_properties(self, input_props, profile_type):
+ def test_profile_update_properties(self, caplog, input_props, profile_type):
"""Test ActivationProfile.update_properties()."""
+ logger_name = "zhmcclient.api"
+ caplog.set_level(logging.DEBUG, logger=logger_name)
+
mgr_attr = profile_type + '_activation_profiles'
profile_mgr = getattr(self.cpc, mgr_attr)
@@ -354,6 +359,9 @@ def test_profile_update_properties(self, input_props, profile_type):
# Execute the code to be tested
profile.update_properties(properties=input_props)
+ # Get its API call log record
+ call_record = caplog.records[-2]
+
# Verify that the resource object already reflects the property
# updates.
for prop_name in saved_properties:
@@ -376,3 +384,11 @@ def test_profile_update_properties(self, input_props, profile_type):
assert prop_name in profile.properties
prop_value = profile.properties[prop_name]
assert prop_value == exp_prop_value
+
+ # Verify the API call log record for blanked-out properties.
+ if 'ssc-master-pw' in input_props:
+ exp_str = f"'ssc-master-pw': '{BLANKED_OUT_STRING}'"
+ assert call_record.message.find(exp_str) > 0
+ if 'zaware-master-pw' in input_props:
+ exp_str = f"'zaware-master-pw': '{BLANKED_OUT_STRING}'"
+ assert call_record.message.find(exp_str) > 0
|
# Copyright 2016,2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for _activation_profile module.
"""
import copy
import re
import pytest
from zhmcclient import Client, ActivationProfile
from zhmcclient_mock import FakedSession
from tests.common.utils import assert_resources
class TestActivationProfile:
"""
All tests for the ActivationProfile and ActivationProfileManager classes.
"""
def setup_method(self):
"""
Setup that is called by pytest before each test method.
Set up a faked session, and add a faked CPC in classic mode,
and add two faked activation profiles of each type.
"""
# pylint: disable=attribute-defined-outside-init
self.session = FakedSession('fake-host', 'fake-hmc', '2.13.1', '1.8')
self.client = Client(self.session)
self.faked_cpc = self.session.hmc.cpcs.add({
'object-id': 'fake-cpc1-oid',
# object-uri is set up automatically
'parent': None,
'class': 'cpc',
'name': 'fake-cpc1-name',
'description': 'CPC #1 (classic mode)',
'status': 'active',
'dpm-enabled': False,
'is-ensemble-member': False,
'iml-mode': 'lpar',
})
self.cpc = self.client.cpcs.find(name='fake-cpc1-name')
self.faked_reset_ap_1 = self.faked_cpc.reset_activation_profiles.add({
# element-uri is set up automatically
'name': 'rap_1',
'parent': self.faked_cpc.uri,
'class': 'reset-activation-profile',
'description': 'RAP #1',
})
self.faked_reset_ap_2 = self.faked_cpc.reset_activation_profiles.add({
# element-uri is set up automatically
'name': 'rap_2',
'parent': self.faked_cpc.uri,
'class': 'reset-activation-profile',
'description': 'RAP #2',
})
self.faked_image_ap_1 = self.faked_cpc.image_activation_profiles.add({
# element-uri is set up automatically
'name': 'iap_1',
'parent': self.faked_cpc.uri,
'class': 'image-activation-profile',
'description': 'IAP #1',
'ipl-address': '0010',
})
self.faked_image_ap_2 = self.faked_cpc.image_activation_profiles.add({
# element-uri is set up automatically
'name': 'iap_2',
'parent': self.faked_cpc.uri,
'class': 'image-activation-profile',
'description': 'IAP #2',
'ipl-address': '0010',
})
self.faked_load_ap_1 = self.faked_cpc.load_activation_profiles.add({
# element-uri is set up automatically
'name': 'lap_1',
'parent': self.faked_cpc.uri,
'class': 'load-activation-profile',
'description': 'LAP #1',
})
self.faked_load_ap_2 = self.faked_cpc.load_activation_profiles.add({
# element-uri is set up automatically
'name': 'lap_2',
'parent': self.faked_cpc.uri,
'class': 'load-activation-profile',
'description': 'LAP #2',
})
@pytest.mark.parametrize(
"profile_type", ['reset', 'image', 'load']
)
def test_profilemanager_initial_attrs(self, profile_type):
"""Test initial attributes of ActivationProfileManager."""
mgr_attr = profile_type + '_activation_profiles'
profile_mgr = getattr(self.cpc, mgr_attr)
# Verify all public properties of the manager object
assert profile_mgr.resource_class == ActivationProfile
assert profile_mgr.session == self.session
assert profile_mgr.parent == self.cpc
assert profile_mgr.cpc == self.cpc
assert profile_mgr.profile_type == profile_type
# TODO: Test for ActivationProfileManager.__repr__()
@pytest.mark.parametrize(
"profile_type", ['reset', 'image', 'load']
)
@pytest.mark.parametrize(
"full_properties_kwargs, prop_names", [
({},
['name', 'element-uri']),
(dict(full_properties=False),
['name', 'element-uri']),
(dict(full_properties=True),
None),
]
)
def test_profilemanager_list_full_properties(
self, full_properties_kwargs, prop_names, profile_type):
"""Test ActivationProfileManager.list() with full_properties."""
mgr_attr = profile_type + '_activation_profiles'
faked_profile_mgr = getattr(self.faked_cpc, mgr_attr)
exp_faked_profiles = faked_profile_mgr.list()
profile_mgr = getattr(self.cpc, mgr_attr)
# Execute the code to be tested
profiles = profile_mgr.list(**full_properties_kwargs)
assert_resources(profiles, exp_faked_profiles, prop_names)
@pytest.mark.parametrize(
"profile_type, filter_args, exp_names", [
('reset',
{'name': 'rap_2'},
['rap_2']),
('reset',
{'name': '.*rap_1'},
['rap_1']),
('reset',
{'name': 'rap_1.*'},
['rap_1']),
('reset',
{'name': 'rap_.'},
['rap_1', 'rap_2']),
('reset',
{'name': '.ap_1'},
['rap_1']),
('reset',
{'name': '.+'},
['rap_1', 'rap_2']),
('reset',
{'name': 'rap_1.+'},
[]),
('reset',
{'name': '.+rap_1'},
[]),
('image',
{'name': 'iap_1'},
['iap_1']),
('image',
{'name': '.*iap_1'},
['iap_1']),
('image',
{'name': 'iap_1.*'},
['iap_1']),
('image',
{'name': 'iap_.'},
['iap_1', 'iap_2']),
('image',
{'name': '.ap_1'},
['iap_1']),
('image',
{'name': '.+'},
['iap_1', 'iap_2']),
('image',
{'name': 'iap_1.+'},
[]),
('image',
{'name': '.+iap_1'},
[]),
('load',
{'name': 'lap_2'},
['lap_2']),
('load',
{'name': '.*lap_1'},
['lap_1']),
('load',
{'name': 'lap_1.*'},
['lap_1']),
('load',
{'name': 'lap_.'},
['lap_1', 'lap_2']),
('load',
{'name': '.ap_1'},
['lap_1']),
('load',
{'name': '.+'},
['lap_1', 'lap_2']),
('load',
{'name': 'lap_1.+'},
[]),
('load',
{'name': '.+lap_1'},
[]),
('reset',
{'class': 'reset-activation-profile'},
['rap_1', 'rap_2']),
('image',
{'class': 'image-activation-profile'},
['iap_1', 'iap_2']),
('load',
{'class': 'load-activation-profile'},
['lap_1', 'lap_2']),
('reset',
{'class': 'reset-activation-profile',
'description': 'RAP #2'},
['rap_2']),
('image',
{'class': 'image-activation-profile',
'description': 'IAP #1'},
['iap_1']),
('load',
{'class': 'load-activation-profile',
'description': 'LAP #2'},
['lap_2']),
('reset',
{'description': 'RAP #1'},
['rap_1']),
('image',
{'description': 'IAP #2'},
['iap_2']),
('load',
{'description': 'LAP #1'},
['lap_1']),
]
)
def test_profilemanager_list_filter_args(
self, profile_type, filter_args, exp_names):
"""Test ActivationProfileManager.list() with filter_args."""
mgr_attr = profile_type + '_activation_profiles'
profile_mgr = getattr(self.cpc, mgr_attr)
# Execute the code to be tested
profiles = profile_mgr.list(filter_args=filter_args)
assert len(profiles) == len(exp_names)
if exp_names:
names = [ap.properties['name'] for ap in profiles]
assert set(names) == set(exp_names)
@pytest.mark.parametrize(
"list_kwargs, prop_names", [
({},
['element-uri', 'name']),
(dict(additional_properties=[]),
['element-uri', 'name']),
(dict(additional_properties=['description']),
['element-uri', 'name', 'description']),
(dict(additional_properties=['description', 'ipl-address']),
['element-uri', 'name', 'description', 'ipl-address']),
(dict(additional_properties=['ssc-host-name']),
['element-uri', 'name', 'ssc-host-name']
# ssc-host-name is not on every image profile
),
]
)
def test_profilemanager_list_add_props(
self, list_kwargs, prop_names):
"""
Test ActivationProfileManager.list() for image profiles with
additional_properties.
"""
mgr_attr = 'image_activation_profiles'
profile_mgr = getattr(self.cpc, mgr_attr)
# Execute the code to be tested
profiles = profile_mgr.list(**list_kwargs)
exp_faked_profiles = [self.faked_image_ap_1, self.faked_image_ap_2]
assert_resources(profiles, exp_faked_profiles, prop_names)
# TODO: Test for initial ActivationProfile attributes
def test_profile_repr(self):
"""Test ActivationProfile.__repr__()."""
# We test __repr__() just for reset activation profiles, because the
# ActivationProfile class is the same for all profile types and we know
# that __repr__() does not depend on the profile type.
profile_mgr = self.cpc.reset_activation_profiles
reset_ap = profile_mgr.find(name='rap_1')
# Execute the code to be tested
repr_str = repr(reset_ap)
repr_str = repr_str.replace('\n', '\\n')
# We check just the begin of the string:
assert re.match(
rf'^{reset_ap.__class__.__name__}\s+at\s+'
rf'0x{id(reset_ap):08x}\s+\(\\n.*',
repr_str)
@pytest.mark.parametrize(
"profile_type", ['reset', 'image', 'load']
)
@pytest.mark.parametrize(
"input_props", [
{},
{'description': 'New profile description'},
{'description': ''},
{'ssc-network-info': {
'chpid': '1a',
'port': 0,
'ipaddr-type': 'dhcp',
'vlan-id': None,
'static-ip-info': None,
}},
{'group-profile-uri': None},
{'zaware-gateway-info': None},
]
)
def test_profile_update_properties(self, input_props, profile_type):
"""Test ActivationProfile.update_properties()."""
mgr_attr = profile_type + '_activation_profiles'
profile_mgr = getattr(self.cpc, mgr_attr)
profile = profile_mgr.list()[0]
profile.pull_full_properties()
saved_properties = copy.deepcopy(profile.properties)
# Execute the code to be tested
profile.update_properties(properties=input_props)
# Verify that the resource object already reflects the property
# updates.
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in profile.properties
prop_value = profile.properties[prop_name]
assert prop_value == exp_prop_value
# Refresh the resource object and verify that the resource object
# still reflects the property updates.
profile.pull_full_properties()
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in profile.properties
prop_value = profile.properties[prop_name]
assert prop_value == exp_prop_value
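# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): the general
# caplog pattern that the patch above uses to verify that a sensitive
# property appears blanked out in the zhmcclient API call log. The helper
# and its blanked-out marker are assumptions made for this document; the
# patch itself imports zhmcclient's BLANKED_OUT_STRING constant instead.
# ---------------------------------------------------------------------------

EXAMPLE_BLANKED_OUT_STRING = '********'


def example_assert_blanked_in_api_log(caplog, prop_name):
    """Assert that prop_name is blanked in the most recent 'Called' record."""
    # The API logger emits a 'Called: ...' record followed by a 'Return: ...'
    # record, so the enter record of the last call is at index -2.
    call_record = caplog.records[-2]
    exp_str = f"'{prop_name}': '{EXAMPLE_BLANKED_OUT_STRING}'"
    assert call_record.message.find(exp_str) > 0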
|
GHSA-p57h-3cmc-xpjq
|
tests/unit/zhmcclient/test_ldap_server_definition.py
|
@@ -19,9 +19,11 @@
import re
import copy
+import logging
import pytest
-from zhmcclient import Client, HTTPError, NotFound, LdapServerDefinition
+from zhmcclient import Client, HTTPError, NotFound, LdapServerDefinition, \
+ BLANKED_OUT_STRING
from zhmcclient_mock import FakedSession
from tests.common.utils import assert_resources
@@ -149,12 +151,21 @@ def test_ldap_srv_def_manager_list(
'search-distinguished-name': 'test{0}'},
['element-uri', 'name', 'description'],
None),
+ ({'name': 'a',
+ 'primary-hostname-ipaddr': '10.11.12.13',
+ 'search-distinguished-name': 'test{0}',
+ 'bind-password': 'bla'},
+ ['element-uri', 'name', 'bind-password'],
+ None),
]
)
def test_ldap_srv_def_manager_create(
- self, input_props, exp_prop_names, exp_exc):
+ self, caplog, input_props, exp_prop_names, exp_exc):
"""Test LdapServerDefinitionManager.create()."""
+ logger_name = "zhmcclient.api"
+ caplog.set_level(logging.DEBUG, logger=logger_name)
+
ldap_srv_def_mgr = self.console.ldap_server_definitions
if exp_exc is not None:
@@ -174,6 +185,9 @@ def test_ldap_srv_def_manager_create(
# Execute the code to be tested.
ldap_srv_def = ldap_srv_def_mgr.create(properties=input_props)
+ # Get its API call log record
+ call_record = caplog.records[-2]
+
# Check the resource for consistency within itself
assert isinstance(ldap_srv_def, LdapServerDefinition)
ldap_srv_def_name = ldap_srv_def.name
@@ -191,6 +205,11 @@ def test_ldap_srv_def_manager_create(
exp_value = input_props[prop_name]
assert value == exp_value
+ # Verify the API call log record for blanked-out properties.
+ if 'bind-password' in input_props:
+ exp_str = f"'bind-password': '{BLANKED_OUT_STRING}'"
+ assert call_record.message.find(exp_str) > 0
+
def test_ldap_srv_def_repr(self):
"""Test LdapServerDefinition.__repr__()."""
@@ -287,11 +306,15 @@ def test_ldap_delete_create_same(self):
"input_props", [
{},
{'description': 'New LDAP Server Definition description'},
+ {'bind-password': 'bla'},
]
)
- def test_ldap_srv_def_update_properties(self, input_props):
+ def test_ldap_srv_def_update_properties(self, caplog, input_props):
"""Test LdapServerDefinition.update_properties()."""
+ logger_name = "zhmcclient.api"
+ caplog.set_level(logging.DEBUG, logger=logger_name)
+
ldap_srv_def_name = 'faked_a'
# Add the LDAP Server Definition to be tested
@@ -306,6 +329,9 @@ def test_ldap_srv_def_update_properties(self, input_props):
# Execute the code to be tested
ldap_srv_def.update_properties(properties=input_props)
+ # Get its API call log record
+ call_record = caplog.records[-2]
+
# Verify that the resource object already reflects the property
# updates.
for prop_name in saved_properties:
@@ -329,3 +355,8 @@ def test_ldap_srv_def_update_properties(self, input_props):
assert prop_name in ldap_srv_def.properties
prop_value = ldap_srv_def.properties[prop_name]
assert prop_value == exp_prop_value
+
+ # Verify the API call log record for blanked-out properties.
+ if 'bind-password' in input_props:
+ exp_str = f"'bind-password': '{BLANKED_OUT_STRING}'"
+ assert call_record.message.find(exp_str) > 0
|
# Copyright 2017,2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for _ldap_srv_def module.
"""
import re
import copy
import pytest
from zhmcclient import Client, HTTPError, NotFound, LdapServerDefinition
from zhmcclient_mock import FakedSession
from tests.common.utils import assert_resources
class TestLdapServerDefinition:
"""All tests for the LdapServerDefinition and LdapServerDefinitionManager
classes."""
def setup_method(self):
"""
Setup that is called by pytest before each test method.
Set up a faked session, and add a faked Console without any
child resources.
"""
# pylint: disable=attribute-defined-outside-init
self.session = FakedSession('fake-host', 'fake-hmc', '2.13.1', '1.8')
self.client = Client(self.session)
self.faked_console = self.session.hmc.consoles.add({
'object-id': None,
# object-uri will be automatically set
'parent': None,
'class': 'console',
'name': 'fake-console1',
'description': 'Console #1',
})
self.console = self.client.consoles.find(name=self.faked_console.name)
def add_ldap_srv_def(self, name):
"""
Add a faked LDAPServerDefinition object to the faked Console
and return it.
"""
faked_ldap_srv_def = self.faked_console.ldap_server_definitions.add({
'element-id': f'oid-{name}',
# element-uri will be automatically set
'parent': '/api/console',
'class': 'ldap-server-definition',
'name': name,
'description': f'LDAP Server Definition {name}',
'primary-hostname-ipaddr': f'host-{name}',
})
return faked_ldap_srv_def
def test_ldap_srv_def_manager_repr(self):
"""Test LdapServerDefinitionManager.__repr__()."""
ldap_srv_def_mgr = self.console.ldap_server_definitions
# Execute the code to be tested
repr_str = repr(ldap_srv_def_mgr)
repr_str = repr_str.replace('\n', '\\n')
# We check just the begin of the string:
assert re.match(
rf'^{ldap_srv_def_mgr.__class__.__name__}\s+at\s+'
rf'0x{id(ldap_srv_def_mgr):08x}\s+\(\\n.*',
repr_str)
def test_ldap_srv_def_manager_initial_attrs(self):
"""Test initial attributes of LdapServerDefinitionManager."""
ldap_srv_def_mgr = self.console.ldap_server_definitions
# Verify all public properties of the manager object
assert ldap_srv_def_mgr.resource_class == LdapServerDefinition
assert ldap_srv_def_mgr.class_name == 'ldap-server-definition'
assert ldap_srv_def_mgr.session is self.session
assert ldap_srv_def_mgr.parent is self.console
assert ldap_srv_def_mgr.console is self.console
@pytest.mark.parametrize(
"full_properties_kwargs, prop_names", [
(dict(full_properties=False),
['element-uri', 'name']),
(dict(full_properties=True),
['element-uri', 'name', 'description']),
({}, # test default for full_properties (False)
['element-uri', 'name']),
]
)
@pytest.mark.parametrize(
"filter_args, exp_names", [
(None,
['a', 'b']),
({},
['a', 'b']),
({'name': 'a'},
['a']),
            ({'name': 'A'},  # LDAP server definitions have case-insensitive names
['a']),
]
)
def test_ldap_srv_def_manager_list(
self, filter_args, exp_names, full_properties_kwargs, prop_names):
"""Test LdapServerDefinitionManager.list()."""
faked_ldap_srv_def1 = self.add_ldap_srv_def(name='a')
faked_ldap_srv_def2 = self.add_ldap_srv_def(name='b')
faked_ldap_srv_defs = [faked_ldap_srv_def1, faked_ldap_srv_def2]
exp_faked_ldap_srv_defs = [u for u in faked_ldap_srv_defs
if u.name in exp_names]
ldap_srv_def_mgr = self.console.ldap_server_definitions
# Execute the code to be tested
ldap_srv_defs = ldap_srv_def_mgr.list(filter_args=filter_args,
**full_properties_kwargs)
assert_resources(ldap_srv_defs, exp_faked_ldap_srv_defs, prop_names)
@pytest.mark.parametrize(
"input_props, exp_prop_names, exp_exc", [
({}, # props missing
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'description': 'fake description X'}, # props missing
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'description': 'fake description X',
'name': 'a',
'primary-hostname-ipaddr': '10.11.12.13',
'search-distinguished-name': 'test{0}'},
['element-uri', 'name', 'description'],
None),
]
)
def test_ldap_srv_def_manager_create(
self, input_props, exp_prop_names, exp_exc):
"""Test LdapServerDefinitionManager.create()."""
ldap_srv_def_mgr = self.console.ldap_server_definitions
if exp_exc is not None:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
ldap_srv_def_mgr.create(properties=input_props)
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
else:
# Execute the code to be tested.
ldap_srv_def = ldap_srv_def_mgr.create(properties=input_props)
# Check the resource for consistency within itself
assert isinstance(ldap_srv_def, LdapServerDefinition)
ldap_srv_def_name = ldap_srv_def.name
exp_ldap_srv_def_name = ldap_srv_def.properties['name']
assert ldap_srv_def_name == exp_ldap_srv_def_name
ldap_srv_def_uri = ldap_srv_def.uri
exp_ldap_srv_def_uri = ldap_srv_def.properties['element-uri']
assert ldap_srv_def_uri == exp_ldap_srv_def_uri
# Check the properties against the expected names and values
for prop_name in exp_prop_names:
assert prop_name in ldap_srv_def.properties
if prop_name in input_props:
value = ldap_srv_def.properties[prop_name]
exp_value = input_props[prop_name]
assert value == exp_value
def test_ldap_srv_def_repr(self):
"""Test LdapServerDefinition.__repr__()."""
faked_ldap_srv_def1 = self.add_ldap_srv_def(name='a')
ldap_srv_def1 = self.console.ldap_server_definitions.find(
name=faked_ldap_srv_def1.name)
# Execute the code to be tested
repr_str = repr(ldap_srv_def1)
repr_str = repr_str.replace('\n', '\\n')
# We check just the begin of the string:
assert re.match(
rf'^{ldap_srv_def1.__class__.__name__}\s+at\s+'
rf'0x{id(ldap_srv_def1):08x}\s+\(\\n.*',
repr_str)
@pytest.mark.parametrize(
"input_props, exp_exc", [
({'name': 'a'},
None),
({'name': 'b'},
None),
]
)
def test_ldap_srv_def_delete(self, input_props, exp_exc):
"""Test LdapServerDefinition.delete()."""
faked_ldap_srv_def = self.add_ldap_srv_def(name=input_props['name'])
ldap_srv_def_mgr = self.console.ldap_server_definitions
ldap_srv_def = ldap_srv_def_mgr.find(name=faked_ldap_srv_def.name)
if exp_exc is not None:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
ldap_srv_def.delete()
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
# Check that the LDAP Server Definition still exists
ldap_srv_def_mgr.find(name=faked_ldap_srv_def.name)
else:
# Execute the code to be tested.
ldap_srv_def.delete()
# Check that the LDAP Server Definition no longer exists
with pytest.raises(NotFound) as exc_info:
ldap_srv_def_mgr.find(name=faked_ldap_srv_def.name)
def test_ldap_delete_create_same(self):
"""Test LdapServerDefinition.delete() followed by create() with same
name."""
ldap_srv_def_name = 'faked_a'
# Add the LDAP Server Definition to be tested
self.add_ldap_srv_def(name=ldap_srv_def_name)
# Input properties for a LDAP Server Definition with the same name
sn_ldap_srv_def_props = {
'name': ldap_srv_def_name,
'description': 'LDAP Server Definition with same name',
'primary-hostname-ipaddr': '10.11.12.13',
'search-distinguished-name': 'test{0}',
}
ldap_srv_def_mgr = self.console.ldap_server_definitions
ldap_srv_def = ldap_srv_def_mgr.find(name=ldap_srv_def_name)
# Execute the deletion code to be tested
ldap_srv_def.delete()
# Check that the LDAP Server Definition no longer exists
with pytest.raises(NotFound):
ldap_srv_def_mgr.find(name=ldap_srv_def_name)
# Execute the creation code to be tested.
ldap_srv_def_mgr.create(sn_ldap_srv_def_props)
# Check that the LDAP Server Definition exists again under that name
sn_ldap_srv_def = ldap_srv_def_mgr.find(name=ldap_srv_def_name)
description = sn_ldap_srv_def.get_property('description')
assert description == sn_ldap_srv_def_props['description']
@pytest.mark.parametrize(
"input_props", [
{},
{'description': 'New LDAP Server Definition description'},
]
)
def test_ldap_srv_def_update_properties(self, input_props):
"""Test LdapServerDefinition.update_properties()."""
ldap_srv_def_name = 'faked_a'
# Add the LDAP Server Definition to be tested
self.add_ldap_srv_def(name=ldap_srv_def_name)
ldap_srv_def_mgr = self.console.ldap_server_definitions
ldap_srv_def = ldap_srv_def_mgr.find(name=ldap_srv_def_name)
ldap_srv_def.pull_full_properties()
saved_properties = copy.deepcopy(ldap_srv_def.properties)
# Execute the code to be tested
ldap_srv_def.update_properties(properties=input_props)
# Verify that the resource object already reflects the property
# updates.
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in ldap_srv_def.properties
prop_value = ldap_srv_def.properties[prop_name]
assert prop_value == exp_prop_value, \
f"Unexpected value for property {prop_name!r}"
# Refresh the resource object and verify that the resource object
# still reflects the property updates.
ldap_srv_def.pull_full_properties()
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in ldap_srv_def.properties
prop_value = ldap_srv_def.properties[prop_name]
assert prop_value == exp_prop_value
|
GHSA-p57h-3cmc-xpjq
|
tests/unit/zhmcclient/test_logging.py
|
@@ -23,18 +23,26 @@
from testfixtures import LogCapture
from zhmcclient._logging import logged_api_call, get_logger
-
+from zhmcclient._constants import BLANKED_OUT_STRING
#
# Various uses of the @logged_api_call decorator
#
+
@logged_api_call
def decorated_global_function():
"""A decorated function at the global (module) level."""
pass
+@logged_api_call(blanked_properties=['hideme'], properties_pos=0)
+def decorated_global_props_function(properties):
+ """A decorated function with properties at the global (module) level,
+ where the 'hideme' property is blanked out in the API log."""
+ return properties
+
+
def global1_function():
"""An undecorated function at the global (module) level."""
@@ -136,8 +144,8 @@ def call_from_global(func, *args, **kwargs):
# Some expected values that are constant
_EXP_LOGGER_NAME = 'zhmcclient.api'
_EXP_LOG_LEVEL = 'DEBUG'
-_EXP_LOG_MSG_ENTER_PATTERN = "Called: .*, args: .*, kwargs: .*"
-_EXP_LOG_MSG_LEAVE_PATTERN = "Return: .*, result: .*"
+_EXP_LOG_MSG_ENTER_PATTERN = "Called: (.*), args: (.*), kwargs: (.*)"
+_EXP_LOG_MSG_LEAVE_PATTERN = "Return: (.*), result: (.*)"
@pytest.fixture()
@@ -155,9 +163,9 @@ def capture():
# Test cases
#
-def assert_log_capture(log_capture, exp_apifunc):
- # pylint: disable=unused-argument
- # Note: exp_apifunc is shown when pytest displays a traceback.
+def assert_log_capture(
+ log_capture, *, func_pattern=None, args_pattern=None,
+ kwargs_pattern=None, return_pattern=None):
"""
Assert that the log capture is as expected.
"""
@@ -167,13 +175,23 @@ def assert_log_capture(log_capture, exp_apifunc):
assert enter_record.name == _EXP_LOGGER_NAME
assert enter_record.levelname == _EXP_LOG_LEVEL
assert re.match(_EXP_LOG_MSG_ENTER_PATTERN, enter_record.msg)
- # We don't check the function name and its pos and kw args
+ func_str, args_str, kwargs_str = enter_record.args
+ if func_pattern:
+ assert re.search(func_pattern, func_str)
+ if args_pattern:
+ assert re.search(args_pattern, args_str)
+ if kwargs_pattern:
+ assert re.search(kwargs_pattern, kwargs_str)
leave_record = log_capture.records[1]
assert leave_record.name == _EXP_LOGGER_NAME
assert leave_record.levelname == _EXP_LOG_LEVEL
assert re.match(_EXP_LOG_MSG_LEAVE_PATTERN, leave_record.msg)
- # We don't check the function name and its pos and kw args
+ func_str, return_str = leave_record.args
+ if func_pattern:
+ assert re.search(func_pattern, func_str)
+ if return_pattern:
+ assert re.search(return_pattern, return_str)
def test_1a_global_from_global(capture):
@@ -183,7 +201,8 @@ def test_1a_global_from_global(capture):
call_from_global(decorated_global_function)
- assert_log_capture(capture, 'decorated_global_function()')
+ assert_log_capture(
+ capture, func_pattern='decorated_global_function')
def test_1b_global_from_method(capture):
@@ -192,7 +211,40 @@ def test_1b_global_from_method(capture):
CallerClass().call_from_method(decorated_global_function)
- assert_log_capture(capture, 'decorated_global_function()')
+ assert_log_capture(
+ capture, func_pattern='decorated_global_function')
+
+
+def test_1c_global_props_args_from_global(capture):
+ # pylint: disable=redefined-outer-name
+ """Simple test calling a decorated global function with properties as args,
+ from a global function."""
+ props = {
+ 'prop1': 'value1',
+ 'hideme': 'secret',
+ }
+ blanked_props = {
+ 'prop1': 'value1',
+ 'hideme': BLANKED_OUT_STRING,
+ }
+ call_from_global(decorated_global_props_function, props)
+
+ assert_log_capture(
+ capture, func_pattern='decorated_global_props_function',
+ args_pattern=re.escape(str(blanked_props)),
+ return_pattern=re.escape(str(props)))
+
+
+def test_1c_global_props_kwargs_from_global(capture):
+ # pylint: disable=redefined-outer-name
+ """Simple test calling a decorated global function with properties as
+ kwargs, from a global function."""
+
+ call_from_global(decorated_global_props_function,
+ properties={'prop1': 'value1'})
+
+ assert_log_capture(
+ capture, func_pattern='decorated_global_props_function')
def test_2a_global_inner1_from_global(capture):
@@ -204,7 +256,8 @@ def test_2a_global_inner1_from_global(capture):
call_from_global(decorated_inner1_function)
- assert_log_capture(capture, 'global1_function.decorated_inner1_function()')
+ assert_log_capture(
+ capture, func_pattern='global1_function.decorated_inner1_function')
def test_2b_global_inner1_from_method(capture):
@@ -216,7 +269,8 @@ def test_2b_global_inner1_from_method(capture):
CallerClass().call_from_method(decorated_inner1_function)
- assert_log_capture(capture, 'global1_function.decorated_inner1_function()')
+ assert_log_capture(
+ capture, func_pattern='global1_function.decorated_inner1_function')
def test_3a_global_inner2_from_global(capture):
@@ -228,7 +282,8 @@ def test_3a_global_inner2_from_global(capture):
call_from_global(decorated_inner2_function)
- assert_log_capture(capture, 'inner1_function.decorated_inner2_function()')
+ assert_log_capture(
+ capture, func_pattern='inner1_function.decorated_inner2_function')
def test_3b_global_inner1_from_method(capture):
@@ -240,7 +295,8 @@ def test_3b_global_inner1_from_method(capture):
CallerClass().call_from_method(decorated_inner2_function)
- assert_log_capture(capture, 'inner1_function.decorated_inner2_function()')
+ assert_log_capture(
+ capture, func_pattern='inner1_function.decorated_inner2_function')
def test_4a_method_from_global(capture):
@@ -252,7 +308,8 @@ def test_4a_method_from_global(capture):
call_from_global(decorated_method, d)
- assert_log_capture(capture, 'Decorator1Class.decorated_method()')
+ assert_log_capture(
+ capture, func_pattern='Decorator1Class.decorated_method')
def test_4b_method_from_method(capture):
@@ -264,7 +321,8 @@ def test_4b_method_from_method(capture):
CallerClass().call_from_method(decorated_method, d)
- assert_log_capture(capture, 'Decorator1Class.decorated_method()')
+ assert_log_capture(
+ capture, func_pattern='Decorator1Class.decorated_method')
def test_5a_method_from_global(capture):
@@ -277,7 +335,8 @@ def test_5a_method_from_global(capture):
call_from_global(decorated_inner_function)
- assert_log_capture(capture, 'method.decorated_inner_function()')
+ assert_log_capture(
+ capture, func_pattern='method.decorated_inner_function')
def test_5b_method_from_method(capture):
@@ -290,7 +349,8 @@ def test_5b_method_from_method(capture):
CallerClass().call_from_method(decorated_inner_function)
- assert_log_capture(capture, 'method.decorated_inner_function()')
+ assert_log_capture(
+ capture, func_pattern='method.decorated_inner_function')
def test_decorated_class():
|
# Copyright 2016,2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for _logging module.
"""
import re
import logging
import pytest
from testfixtures import LogCapture
from zhmcclient._logging import logged_api_call, get_logger
#
# Various uses of the @logged_api_call decorator
#
@logged_api_call
def decorated_global_function():
"""A decorated function at the global (module) level."""
pass
def global1_function():
"""An undecorated function at the global (module) level."""
@logged_api_call
def decorated_inner1_function():
"""A decorated inner function defined in a global function."""
pass
return decorated_inner1_function
def get_decorated_inner1_function():
"""Return the decorated inner function."""
return global1_function()
def global2_function():
"""An undecorated function at the global (module) level."""
def inner1_function():
"""An undecorated function at the inner 1 level."""
@logged_api_call
def decorated_inner2_function():
"""A decorated inner function defined in another inner function
that is defined in a global function."""
pass
return decorated_inner2_function
return inner1_function()
def get_decorated_inner2_function():
"""Return the decorated inner 2 function."""
return global2_function()
class Decorator1Class:
# pylint: disable=too-few-public-methods
"""A class that has a decorated method."""
@logged_api_call
def decorated_method(self):
"""A decorated method of a class."""
pass
class Decorator2Class:
"""A class that has a decorated method inside a method."""
@staticmethod
def method():
"""A method."""
@logged_api_call
def decorated_inner_function():
"""A decorated inner function defined in a method of a class."""
pass
return decorated_inner_function
@staticmethod
def get_decorated_inner_function():
"""Return the decorated inner function."""
return Decorator2Class.method()
#
# Supporting definitions
#
class CallerClass:
# pylint: disable=too-few-public-methods
"""
A supporting class.
"""
@staticmethod
def call_from_method(func, *args, **kwargs):
"""
A supporting method that calls the specified function with the
specified arguments and keyword arguments. This is used by the test
cases so that this function acts as a caller for the decorated API
function.
"""
return func(*args, **kwargs)
def call_from_global(func, *args, **kwargs):
"""
A supporting global function that calls the specified function with the
specified arguments and keyword arguments. This is used by the test cases
so that this function acts as a caller for the decorated API function.
"""
return func(*args, **kwargs)
# Some expected values that are constant
_EXP_LOGGER_NAME = 'zhmcclient.api'
_EXP_LOG_LEVEL = 'DEBUG'
_EXP_LOG_MSG_ENTER_PATTERN = "Called: .*, args: .*, kwargs: .*"
_EXP_LOG_MSG_LEAVE_PATTERN = "Return: .*, result: .*"
@pytest.fixture()
def capture():
"""
This way of defining a fixture works around the issue that when
using the decorator testfixtures.log_capture() instead, pytest
fails with "fixture 'capture' not found".
"""
with LogCapture(level=logging.DEBUG) as log:
yield log
#
# Test cases
#
def assert_log_capture(log_capture, exp_apifunc):
# pylint: disable=unused-argument
# Note: exp_apifunc is shown when pytest displays a traceback.
"""
Assert that the log capture is as expected.
"""
assert len(log_capture.records) == 2
enter_record = log_capture.records[0]
assert enter_record.name == _EXP_LOGGER_NAME
assert enter_record.levelname == _EXP_LOG_LEVEL
assert re.match(_EXP_LOG_MSG_ENTER_PATTERN, enter_record.msg)
# We don't check the function name and its pos and kw args
leave_record = log_capture.records[1]
assert leave_record.name == _EXP_LOGGER_NAME
assert leave_record.levelname == _EXP_LOG_LEVEL
assert re.match(_EXP_LOG_MSG_LEAVE_PATTERN, leave_record.msg)
# We don't check the function name and its pos and kw args
def test_1a_global_from_global(capture):
# pylint: disable=redefined-outer-name
"""Simple test calling a decorated global function from a global
function."""
call_from_global(decorated_global_function)
assert_log_capture(capture, 'decorated_global_function()')
def test_1b_global_from_method(capture):
# pylint: disable=redefined-outer-name
"""Simple test calling a decorated global function from a method."""
CallerClass().call_from_method(decorated_global_function)
assert_log_capture(capture, 'decorated_global_function()')
def test_2a_global_inner1_from_global(capture):
# pylint: disable=redefined-outer-name
"""Simple test calling a decorated inner function defined in a global
function from a global function."""
decorated_inner1_function = get_decorated_inner1_function()
call_from_global(decorated_inner1_function)
assert_log_capture(capture, 'global1_function.decorated_inner1_function()')
def test_2b_global_inner1_from_method(capture):
# pylint: disable=redefined-outer-name
"""Simple test calling a decorated inner function defined in a global
function from a method."""
decorated_inner1_function = get_decorated_inner1_function()
CallerClass().call_from_method(decorated_inner1_function)
assert_log_capture(capture, 'global1_function.decorated_inner1_function()')
def test_3a_global_inner2_from_global(capture):
# pylint: disable=redefined-outer-name
"""Simple test calling a decorated inner function defined in an inner
function defined in a global function from a global function."""
decorated_inner2_function = get_decorated_inner2_function()
call_from_global(decorated_inner2_function)
assert_log_capture(capture, 'inner1_function.decorated_inner2_function()')
def test_3b_global_inner1_from_method(capture):
# pylint: disable=redefined-outer-name
"""Simple test calling a decorated inner function defined in an inner
function defined in a global function from a method."""
decorated_inner2_function = get_decorated_inner2_function()
CallerClass().call_from_method(decorated_inner2_function)
assert_log_capture(capture, 'inner1_function.decorated_inner2_function()')
def test_4a_method_from_global(capture):
# pylint: disable=redefined-outer-name
"""Simple test calling a decorated method from a global function."""
decorated_method = Decorator1Class.decorated_method
d = Decorator1Class()
call_from_global(decorated_method, d)
assert_log_capture(capture, 'Decorator1Class.decorated_method()')
def test_4b_method_from_method(capture):
# pylint: disable=redefined-outer-name
"""Simple test calling a decorated method from a method."""
decorated_method = Decorator1Class.decorated_method
d = Decorator1Class()
CallerClass().call_from_method(decorated_method, d)
assert_log_capture(capture, 'Decorator1Class.decorated_method()')
def test_5a_method_from_global(capture):
# pylint: disable=redefined-outer-name
"""Simple test calling a decorated inner function defined in a method
from a global function."""
decorated_inner_function = \
Decorator2Class.get_decorated_inner_function()
call_from_global(decorated_inner_function)
assert_log_capture(capture, 'method.decorated_inner_function()')
def test_5b_method_from_method(capture):
# pylint: disable=redefined-outer-name
"""Simple test calling a decorated inner function defined in a method
from a method."""
decorated_inner_function = \
Decorator2Class.get_decorated_inner_function()
CallerClass().call_from_method(decorated_inner_function)
assert_log_capture(capture, 'method.decorated_inner_function()')
def test_decorated_class():
# pylint: disable=unused-variable
"""Test that using the decorator on a class raises TypeError."""
with pytest.raises(TypeError):
@logged_api_call
class DecoratedClass:
# pylint: disable=too-few-public-methods
"""A decorated class"""
pass
def test_decorated_property():
# pylint: disable=unused-variable
"""Test that using the decorator on a property raises TypeError."""
with pytest.raises(TypeError):
class Class:
# pylint: disable=too-few-public-methods
"""A class with a decorated property"""
@logged_api_call
@property
def decorated_property(self):
"""A decorated property"""
return self
def test_root_logger():
"""Test that get_logger('') returns the Python root logger."""
py_logger = logging.getLogger()
zhmc_logger = get_logger('')
assert zhmc_logger is py_logger
def test_foo_logger():
"""Test that get_logger('zhmcclient.foo') returns the same-named
Python logger and has at least one handler."""
py_logger = logging.getLogger('zhmcclient.foo')
zhmc_logger = get_logger('zhmcclient.foo')
assert zhmc_logger is py_logger
assert len(zhmc_logger.handlers) >= 1
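# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): one way that a
# decorator such as logged_api_call(blanked_properties=..., properties_pos=...)
# could blank sensitive values before they reach the API call log, as
# exercised by the patch above. This helper is a hypothetical stand-in
# written for this document; zhmcclient's actual implementation may differ.
# ---------------------------------------------------------------------------

EXAMPLE_BLANKED_OUT_STRING = '********'


def example_blank_out_properties(properties, blanked_names):
    """Return a copy of 'properties' with the named keys blanked out."""
    blanked = dict(properties)
    for name in blanked_names:
        if name in blanked:
            blanked[name] = EXAMPLE_BLANKED_OUT_STRING
    return blanked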
|
GHSA-p57h-3cmc-xpjq
|
tests/unit/zhmcclient/test_lpar.py
|
@@ -19,11 +19,13 @@
import re
import copy
+import logging
from unittest import mock
import pytest
import requests_mock
-from zhmcclient import Client, Lpar, HTTPError, StatusTimeout, Job
+from zhmcclient import Client, Lpar, HTTPError, StatusTimeout, Job, \
+ BLANKED_OUT_STRING
from zhmcclient_mock import FakedSession, LparActivateHandler, \
LparDeactivateHandler, LparLoadHandler
from tests.common.utils import assert_resources
@@ -319,13 +321,18 @@ def test_lpar_repr(self):
{'description': 'New lpar description'},
{'acceptable-status': ['operating', 'not-operating'],
'description': 'New lpar description'},
- {'ssc-master-userid': None,
- 'ssc-master-pw': None},
+ {'ssc-master-userid': 'user',
+ 'ssc-master-pw': 'bla'},
+ {'zaware-master-userid': 'user',
+ 'zaware-master-pw': 'bla'},
]
)
- def test_lpar_update_properties(self, input_props, lpar_name):
+ def test_lpar_update_properties(self, caplog, input_props, lpar_name):
"""Test Lpar.update_properties()."""
+ logger_name = "zhmcclient.api"
+ caplog.set_level(logging.DEBUG, logger=logger_name)
+
# Add faked lpars
self.add_lpar1()
self.add_lpar2()
@@ -339,6 +346,9 @@ def test_lpar_update_properties(self, input_props, lpar_name):
# Execute the code to be tested
lpar.update_properties(properties=input_props)
+ # Get its API call log record
+ call_record = caplog.records[-2]
+
# Verify that the resource object already reflects the property
# updates.
for prop_name in saved_properties:
@@ -362,6 +372,14 @@ def test_lpar_update_properties(self, input_props, lpar_name):
prop_value = lpar.properties[prop_name]
assert prop_value == exp_prop_value
+ # Verify the API call log record for blanked-out properties.
+ if 'ssc-master-pw' in input_props:
+ exp_str = f"'ssc-master-pw': '{BLANKED_OUT_STRING}'"
+ assert call_record.message.find(exp_str) > 0
+ if 'zaware-master-pw' in input_props:
+ exp_str = f"'zaware-master-pw': '{BLANKED_OUT_STRING}'"
+ assert call_record.message.find(exp_str) > 0
+
@pytest.mark.parametrize(
"initial_profile, profile_kwargs, exp_profile, exp_profile_exc", [
('', {},
|
# Copyright 2016,2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for _lpar module.
"""
import re
import copy
from unittest import mock
import pytest
import requests_mock
from zhmcclient import Client, Lpar, HTTPError, StatusTimeout, Job
from zhmcclient_mock import FakedSession, LparActivateHandler, \
LparDeactivateHandler, LparLoadHandler
from tests.common.utils import assert_resources
# pylint: disable=unused-import,line-too-long
from tests.common.http_mocked_fixtures import http_mocked_session # noqa: F401
from tests.common.http_mocked_fixtures import http_mocked_cpc_classic # noqa: F401,E501
from tests.common.http_mocked_fixtures import http_mocked_lpar # noqa: F401
# pylint: enable=unused-import,line-too-long
# Object IDs and names of our faked LPARs:
LPAR1_OID = 'lpar1-oid'
LPAR1_NAME = 'lpar 1'
LPAR2_OID = 'lpar2-oid'
LPAR2_NAME = 'lpar 2'
# Object IDs and names of our faked image activation profiles:
IMAGEPROFILE1_OID = 'imageprofile1-oid'
IMAGEPROFILE1_NAME = 'imageprofile 1'
IMAGEPROFILE2_OID = 'imageprofile2-oid'
IMAGEPROFILE2_NAME = 'imageprofile 2'
CPC_NAME = 'fake-cpc1-name'
# Properties returned by default from list_permitted_lpars()
LIST_PERMITTED_LPARS_PROPS = [
'name', 'object-uri', 'activation-mode', 'status',
'has-unacceptable-status', 'cpc-name', 'cpc-object-uri',
# The zhmcclient_mock support always returns 'se-version'
'se-version'
]
class TestLpar:
"""All tests for Lpar and LparManager classes."""
def setup_method(self):
"""
Setup that is called by pytest before each test method.
Set up a faked session, and add a faked CPC in classic mode without any
child resources.
"""
# pylint: disable=attribute-defined-outside-init
self.session = FakedSession('fake-host', 'fake-hmc', '2.16.0', '4.1')
self.session.retry_timeout_config.status_timeout = 1
self.client = Client(self.session)
self.faked_cpc = self.session.hmc.cpcs.add({
'object-id': 'fake-cpc1-oid',
# object-uri is set up automatically
'parent': None,
'class': 'cpc',
'name': CPC_NAME,
'description': 'CPC #1 (classic mode)',
'status': 'active',
'dpm-enabled': False,
'is-ensemble-member': False,
'iml-mode': 'lpar',
})
self.cpc = self.client.cpcs.find(name=CPC_NAME)
def add_lpar1(self):
"""Add lpar 1 (type linux)."""
faked_lpar = self.faked_cpc.lpars.add({
'object-id': LPAR1_OID,
# object-uri will be automatically set
'parent': self.faked_cpc.uri,
'class': 'logical-partition',
'name': LPAR1_NAME,
'description': 'LPAR #1 (Linux)',
'status': 'operating',
'activation-mode': 'linux',
'last-used-load-address': '',
'last-used-load-parameter': '',
'last-used-world-wide-port-name': '',
'last-used-logical-unit-number': '',
'last-used-disk-partition-id': 0,
'last-used-operating-system-specific-load-parameters': '',
'last-used-boot-record-logical-block-address': '0',
'last-used-load-type': 'ipltype-standard',
'last-used-secure-boot': False,
'last-used-clear-indicator': True,
})
return faked_lpar
def add_imageprofile1(self):
"""Add image profile for lpar 1 (no auto-load)."""
faked_imageprofile = self.faked_cpc.image_activation_profiles.add({
'object-id': IMAGEPROFILE1_OID,
# object-uri will be automatically set
'parent': self.faked_cpc.uri,
'class': 'image-activation-profile',
'name': LPAR1_NAME,
'description': 'Image profile for LPAR #1 (Linux)',
'ipl-address': '00000',
'ipl-parameter': '',
'ipl-type': 'ipltype-standard',
'load-at-activation': False,
'operating-mode': 'linux',
})
return faked_imageprofile
def add_lpar2(self):
"""Add lpar 2 (type ssc)."""
faked_lpar = self.faked_cpc.lpars.add({
'object-id': LPAR2_OID,
# object-uri will be automatically set
'parent': self.faked_cpc.uri,
'class': 'logical-partition',
'name': LPAR2_NAME,
'description': 'LPAR #2 (SSC)',
'status': 'operating',
'activation-mode': 'ssc',
'last-used-load-address': '',
'last-used-load-parameter': '',
'last-used-world-wide-port-name': '',
'last-used-logical-unit-number': '',
'last-used-disk-partition-id': 0,
'last-used-operating-system-specific-load-parameters': '',
'last-used-boot-record-logical-block-address': '0',
'last-used-load-type': 'ipltype-standard',
'last-used-secure-boot': False,
'last-used-clear-indicator': True,
})
return faked_lpar
def add_imageprofile2(self):
"""Add image profile for lpar 2 (auto-load due to SSC)."""
faked_imageprofile = self.faked_cpc.image_activation_profiles.add({
'object-id': IMAGEPROFILE2_OID,
# object-uri will be automatically set
'parent': self.faked_cpc.uri,
'class': 'image-activation-profile',
'name': LPAR2_NAME,
'description': 'Image profile for LPAR #2 (SSC)',
'ipl-address': '00000',
'ipl-parameter': '',
'ipl-type': 'ipltype-standard',
'load-at-activation': False,
'operating-mode': 'ssc',
})
return faked_imageprofile
def test_lparmanager_initial_attrs(self):
"""Test initial attributes of LparManager."""
lpar_mgr = self.cpc.lpars
# Verify all public properties of the manager object
assert lpar_mgr.resource_class == Lpar
assert lpar_mgr.session == self.session
assert lpar_mgr.parent == self.cpc
assert lpar_mgr.cpc == self.cpc
# TODO: Test for LparManager.__repr__()
@pytest.mark.parametrize(
"full_properties_kwargs, prop_names", [
({},
['object-uri', 'name', 'status']),
(dict(full_properties=False),
['object-uri', 'name', 'status']),
(dict(full_properties=True),
None),
]
)
def test_lparmanager_list_full_properties(
self, full_properties_kwargs, prop_names):
"""Test LparManager.list() with full_properties."""
# Add two faked LPARs
faked_lpar1 = self.add_lpar1()
faked_lpar2 = self.add_lpar2()
exp_faked_lpars = [faked_lpar1, faked_lpar2]
lpar_mgr = self.cpc.lpars
# Execute the code to be tested
lpars = lpar_mgr.list(**full_properties_kwargs)
assert_resources(lpars, exp_faked_lpars, prop_names)
@pytest.mark.parametrize(
"filter_args, exp_names", [
({'object-id': LPAR1_OID},
[LPAR1_NAME]),
({'object-id': LPAR2_OID},
[LPAR2_NAME]),
({'object-id': [LPAR1_OID, LPAR2_OID]},
[LPAR1_NAME, LPAR2_NAME]),
({'object-id': [LPAR1_OID, LPAR1_OID]},
[LPAR1_NAME]),
({'object-id': LPAR1_OID + 'foo'},
[]),
({'object-id': [LPAR1_OID, LPAR2_OID + 'foo']},
[LPAR1_NAME]),
({'object-id': [LPAR2_OID + 'foo', LPAR1_OID]},
[LPAR1_NAME]),
({'name': LPAR1_NAME},
[LPAR1_NAME]),
({'name': LPAR2_NAME},
[LPAR2_NAME]),
({'name': [LPAR1_NAME, LPAR2_NAME]},
[LPAR1_NAME, LPAR2_NAME]),
({'name': LPAR1_NAME + 'foo'},
[]),
({'name': [LPAR1_NAME, LPAR2_NAME + 'foo']},
[LPAR1_NAME]),
({'name': [LPAR2_NAME + 'foo', LPAR1_NAME]},
[LPAR1_NAME]),
({'name': [LPAR1_NAME, LPAR1_NAME]},
[LPAR1_NAME]),
({'name': '.*lpar 1'},
[LPAR1_NAME]),
({'name': 'lpar 1.*'},
[LPAR1_NAME]),
({'name': 'lpar .'},
[LPAR1_NAME, LPAR2_NAME]),
({'name': '.par 1'},
[LPAR1_NAME]),
({'name': '.+'},
[LPAR1_NAME, LPAR2_NAME]),
({'name': 'lpar 1.+'},
[]),
({'name': '.+lpar 1'},
[]),
({'name': LPAR1_NAME,
'object-id': LPAR1_OID},
[LPAR1_NAME]),
({'name': LPAR1_NAME,
'object-id': LPAR1_OID + 'foo'},
[]),
({'name': LPAR1_NAME + 'foo',
'object-id': LPAR1_OID},
[]),
({'name': LPAR1_NAME + 'foo',
'object-id': LPAR1_OID + 'foo'},
[]),
]
)
def test_lparmanager_list_filter_args(self, filter_args, exp_names):
"""Test LparManager.list() with filter_args."""
# Add two faked LPARs
self.add_lpar1()
self.add_lpar2()
lpar_mgr = self.cpc.lpars
# Execute the code to be tested
lpars = lpar_mgr.list(filter_args=filter_args)
assert len(lpars) == len(exp_names)
if exp_names:
names = [p.properties['name'] for p in lpars]
assert set(names) == set(exp_names)
def test_lpar_repr(self):
"""Test Lpar.__repr__()."""
# Add a faked LPAR
faked_lpar = self.add_lpar1()
lpar_mgr = self.cpc.lpars
lpar = lpar_mgr.find(name=faked_lpar.name)
# Execute the code to be tested
repr_str = repr(lpar)
repr_str = repr_str.replace('\n', '\\n')
        # We check just the beginning of the string:
assert re.match(
rf'^{lpar.__class__.__name__}\s+at\s+'
rf'0x{id(lpar):08x}\s+\(\\n.*',
repr_str)
@pytest.mark.parametrize(
"lpar_name", [
LPAR1_NAME,
LPAR2_NAME,
]
)
@pytest.mark.parametrize(
"input_props", [
{},
{'description': 'New lpar description'},
{'acceptable-status': ['operating', 'not-operating'],
'description': 'New lpar description'},
{'ssc-master-userid': None,
'ssc-master-pw': None},
]
)
def test_lpar_update_properties(self, input_props, lpar_name):
"""Test Lpar.update_properties()."""
# Add faked lpars
self.add_lpar1()
self.add_lpar2()
lpar_mgr = self.cpc.lpars
lpar = lpar_mgr.find(name=lpar_name)
lpar.pull_full_properties()
saved_properties = copy.deepcopy(lpar.properties)
# Execute the code to be tested
lpar.update_properties(properties=input_props)
# Verify that the resource object already reflects the property
# updates.
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in lpar.properties
prop_value = lpar.properties[prop_name]
assert prop_value == exp_prop_value
# Refresh the resource object and verify that the resource object
# still reflects the property updates.
lpar.pull_full_properties()
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in lpar.properties
prop_value = lpar.properties[prop_name]
assert prop_value == exp_prop_value
@pytest.mark.parametrize(
"initial_profile, profile_kwargs, exp_profile, exp_profile_exc", [
('', {},
None, HTTPError({'http-status': 500, 'reason': 263})),
(LPAR1_NAME, {},
LPAR1_NAME, None),
(LPAR2_NAME, {},
None, HTTPError({'http-status': 500, 'reason': 263})),
('', dict(activation_profile_name=LPAR1_NAME),
LPAR1_NAME, None),
(LPAR1_NAME, dict(activation_profile_name=LPAR1_NAME),
LPAR1_NAME, None),
(LPAR2_NAME, dict(activation_profile_name=LPAR1_NAME),
LPAR1_NAME, None),
('', dict(activation_profile_name=LPAR2_NAME),
None, HTTPError({'http-status': 500, 'reason': 263})),
(LPAR1_NAME, dict(activation_profile_name=LPAR2_NAME),
None, HTTPError({'http-status': 500, 'reason': 263})),
(LPAR2_NAME, dict(activation_profile_name=LPAR2_NAME),
None, HTTPError({'http-status': 500, 'reason': 263})),
]
)
@pytest.mark.parametrize(
"initial_status, status_kwargs, act_exp_status, exp_status_exc", [
('not-activated', {}, # Verify that force has a default
'not-operating', None),
('not-activated', dict(force=False),
'not-operating', None),
('not-activated', dict(force=True),
'not-operating', None),
('not-operating', dict(force=False),
'not-operating', None),
('not-operating', dict(force=True),
'not-operating', None),
('operating', {}, # Verify that force default is False
'not-operating', HTTPError({'http-status': 500, 'reason': 263})),
('operating', dict(force=False),
'not-operating', HTTPError({'http-status': 500, 'reason': 263})),
('operating', dict(force=True),
'not-operating', None),
('exceptions', dict(force=False),
'not-operating', None),
('exceptions', dict(force=True),
'not-operating', None),
('not-activated', {},
'exceptions', StatusTimeout(None, None, None, None)),
('not-activated', dict(allow_status_exceptions=False),
'exceptions', StatusTimeout(None, None, None, None)),
('not-activated', dict(allow_status_exceptions=True),
'exceptions', None),
]
)
@mock.patch.object(LparActivateHandler, 'get_status')
def test_lpar_activate(
self, get_status_mock,
initial_status, status_kwargs, act_exp_status, exp_status_exc,
initial_profile, profile_kwargs, exp_profile, exp_profile_exc):
"""Test Lpar.activate()."""
# Add a faked LPAR
faked_lpar = self.add_lpar1()
faked_lpar.properties['status'] = initial_status
faked_lpar.properties['next-activation-profile-name'] = initial_profile
# Add a faked image profile
self.add_imageprofile1()
lpar_mgr = self.cpc.lpars
lpar = lpar_mgr.find(name=faked_lpar.name)
input_kwargs = dict(status_kwargs, **profile_kwargs)
exp_excs = []
if exp_status_exc:
exp_excs.append(exp_status_exc)
if exp_profile_exc:
exp_excs.append(exp_profile_exc)
get_status_mock.return_value = act_exp_status
if exp_excs:
with pytest.raises(Exception) as exc_info:
# Execute the code to be tested
lpar.activate(**input_kwargs)
exc = exc_info.value
exp_exc_classes = [e.__class__ for e in exp_excs]
assert isinstance(exc, tuple(exp_exc_classes))
if isinstance(exc, HTTPError):
exp_httperror = [e for e in exp_excs
if isinstance(e, HTTPError)][0]
assert exc.http_status == exp_httperror.http_status
assert exc.reason == exp_httperror.reason
else:
# Execute the code to be tested.
ret = lpar.activate(**input_kwargs)
assert ret is None
lpar.pull_full_properties()
status = lpar.get_property('status')
assert status == act_exp_status
last_profile_name = lpar.get_property(
'last-used-activation-profile')
assert last_profile_name == exp_profile
@pytest.mark.parametrize(
"initial_status, input_kwargs, act_exp_status, exp_status_exc", [
('not-activated', {}, # Verify that force has a default
'not-activated', HTTPError({'http-status': 500, 'reason': 263})),
('not-activated', dict(force=False),
'not-activated', HTTPError({'http-status': 500, 'reason': 263})),
('not-activated', dict(force=True),
'not-activated', None),
('not-operating', dict(force=False),
'not-activated', None),
('not-operating', dict(force=True),
'not-activated', None),
('operating', {}, # Verify that force default is False
'not-activated', HTTPError({'http-status': 500, 'reason': 263})),
('operating', dict(force=False),
'not-activated', HTTPError({'http-status': 500, 'reason': 263})),
('operating', dict(force=True),
'not-activated', None),
('exceptions', dict(force=False),
'not-activated', None),
('exceptions', dict(force=True),
'not-activated', None),
('not-operating', {},
'exceptions', StatusTimeout(None, None, None, None)),
('not-operating', dict(allow_status_exceptions=False),
'exceptions', StatusTimeout(None, None, None, None)),
('not-operating', dict(allow_status_exceptions=True),
'exceptions', None),
]
)
@mock.patch.object(LparDeactivateHandler, 'get_status')
def test_lpar_deactivate(
self, get_status_mock,
initial_status, input_kwargs, act_exp_status, exp_status_exc):
"""Test Lpar.deactivate()."""
# Add a faked LPAR
faked_lpar = self.add_lpar1()
faked_lpar.properties['status'] = initial_status
lpar_mgr = self.cpc.lpars
lpar = lpar_mgr.find(name=faked_lpar.name)
get_status_mock.return_value = act_exp_status
exp_excs = []
if exp_status_exc:
exp_excs.append(exp_status_exc)
if exp_excs:
with pytest.raises(Exception) as exc_info:
# Execute the code to be tested
lpar.deactivate(**input_kwargs)
exc = exc_info.value
exp_exc_classes = [e.__class__ for e in exp_excs]
assert isinstance(exc, tuple(exp_exc_classes))
if isinstance(exc, HTTPError):
exp_httperror = [e for e in exp_excs
if isinstance(e, HTTPError)][0]
assert exc.http_status == exp_httperror.http_status
assert exc.reason == exp_httperror.reason
else:
# Execute the code to be tested.
ret = lpar.deactivate(**input_kwargs)
assert ret is None
lpar.pull_full_properties()
status = lpar.get_property('status')
assert status == act_exp_status
@pytest.mark.parametrize(
"initial_loadparm, loadparm_kwargs, exp_loadparm, exp_loadparm_exc", [
(None, {},
'', None),
(None, dict(load_parameter='abcd'),
'abcd', None),
('abcd', {},
'abcd', None),
('fooo', dict(load_parameter='abcd'),
'abcd', None),
]
)
@pytest.mark.parametrize(
"initial_loadaddr, loadaddr_kwargs, exp_loadaddr, exp_loadaddr_exc", [
(None, {},
None, HTTPError({'http-status': 400, 'reason': 5})),
(None, dict(load_address='5176'),
'5176', None),
('5176', {},
'5176', None),
('1234', dict(load_address='5176'),
'5176', None),
]
)
@pytest.mark.parametrize(
"initial_status, status_kwargs, act_exp_status, exp_status_exc"
", initial_stored_status, exp_stored_status, exp_store_status_exc", [
('not-activated', {},
'operating', HTTPError({'http-status': 409, 'reason': 0}),
None, None, None),
('not-activated', dict(force=False),
'operating', HTTPError({'http-status': 409, 'reason': 0}),
None, None, None),
('not-activated', dict(force=True),
'operating', HTTPError({'http-status': 409, 'reason': 0}),
None, None, None),
('not-operating', dict(force=False),
'operating', None,
None, None, None),
('not-operating', dict(force=True),
'operating', None,
None, None, None),
('operating', {},
'operating', HTTPError({'http-status': 500, 'reason': 263}),
None, None, None),
('operating', dict(force=False),
'operating', HTTPError({'http-status': 500, 'reason': 263}),
None, None, None),
('operating', dict(force=True),
'operating', None,
None, None, None),
('exceptions', dict(force=False),
'operating', None,
None, None, None),
('exceptions', dict(force=True),
'operating', None,
None, None, None),
('not-operating', {},
'exceptions', StatusTimeout(None, None, None, None),
None, None, None),
('not-operating', dict(allow_status_exceptions=False),
'exceptions', StatusTimeout(None, None, None, None),
None, None, None),
('not-operating', dict(allow_status_exceptions=True),
'exceptions', None,
None, None, None),
('not-operating', dict(store_status_indicator=False),
'operating', None,
None, None, None),
('not-operating', dict(store_status_indicator=True),
'operating', None,
None, 'not-operating', None),
]
)
@pytest.mark.parametrize(
"initial_memory, memory_kwargs, exp_memory, exp_memory_exc", [
('foobar', {},
'', None),
('foobar', dict(clear_indicator=False),
'foobar', None),
('foobar', dict(clear_indicator=True),
'', None),
]
)
@mock.patch.object(LparLoadHandler, 'get_status')
def test_lpar_load(
self, get_status_mock,
initial_status, status_kwargs, act_exp_status, exp_status_exc,
initial_loadaddr, loadaddr_kwargs, exp_loadaddr, exp_loadaddr_exc,
initial_loadparm, loadparm_kwargs, exp_loadparm, exp_loadparm_exc,
initial_memory, memory_kwargs, exp_memory, exp_memory_exc,
initial_stored_status, exp_stored_status, exp_store_status_exc):
# pylint: disable=unused-argument
"""Test Lpar.load()."""
# Add a faked LPAR
faked_lpar = self.add_lpar1()
faked_lpar.properties['status'] = initial_status
faked_lpar.properties['last-used-load-address'] = initial_loadaddr
faked_lpar.properties['last-used-load-parameter'] = initial_loadparm
faked_lpar.properties['memory'] = initial_memory
# Add a faked image profile
self.add_imageprofile1()
lpar_mgr = self.cpc.lpars
lpar = lpar_mgr.find(name=faked_lpar.name)
input_kwargs = dict(status_kwargs, **loadaddr_kwargs)
input_kwargs.update(**loadparm_kwargs)
input_kwargs.update(**memory_kwargs)
exp_excs = []
if exp_status_exc:
exp_excs.append(exp_status_exc)
if exp_loadaddr_exc:
exp_excs.append(exp_loadaddr_exc)
if exp_loadparm_exc:
exp_excs.append(exp_loadparm_exc)
if exp_memory_exc:
exp_excs.append(exp_memory_exc)
if exp_store_status_exc:
exp_excs.append(exp_store_status_exc)
get_status_mock.return_value = act_exp_status
if exp_excs:
with pytest.raises(Exception) as exc_info:
# Execute the code to be tested
lpar.load(**input_kwargs)
exc = exc_info.value
exp_exc_classes = [e.__class__ for e in exp_excs]
assert isinstance(exc, tuple(exp_exc_classes))
if isinstance(exc, HTTPError):
exp_httperror = [e for e in exp_excs
if isinstance(e, HTTPError)][0]
assert exc.http_status == exp_httperror.http_status
assert exc.reason == exp_httperror.reason
else:
# Execute the code to be tested.
ret = lpar.load(**input_kwargs)
assert ret is None
lpar.pull_full_properties()
status = lpar.get_property('status')
assert status == act_exp_status
last_loadaddr = lpar.get_property('last-used-load-address')
assert last_loadaddr == exp_loadaddr
last_loadparm = lpar.get_property('last-used-load-parameter')
assert last_loadparm == exp_loadparm
last_memory = lpar.get_property('memory')
assert last_memory == exp_memory
stored_status = lpar.get_property('stored-status')
assert stored_status == exp_stored_status
TESTCASES_SCSI_LOAD = [
# Testcases for test_lpar_scsi_load()
# Each testcase is a tuple of:
# * desc: description
# * initial_status: Status before scsi_load() is called
# * result_status: Status to be set by scsi_load()
# * input_kwargs: Keyword arguments to scsi_load()
# * exp_properties: Props to validate after a successful scsi_load()
# * exc_exp: Expected exception object, or None
(
"Missing input parameter 'load_address'",
'activated',
'operating',
{'wwpn': '1234',
'lun': '5678'},
{},
TypeError()
),
(
"Missing input parameter 'wwpn'",
'activated',
'operating',
{'load_address': '0010A',
'lun': '5678'},
{},
TypeError()
),
(
"Missing input parameter 'lun'",
'activated',
'operating',
{'load_address': '0010A',
'wwpn': '1234'},
{},
TypeError()
),
(
"Minimally required input parameters, test defaults for optional",
'activated',
'operating',
{'load_address': '0010A',
'wwpn': '1234',
'lun': '5678'},
{'status': 'operating',
'last-used-load-address': '0010A',
'last-used-load-parameter': '',
'last-used-world-wide-port-name': '1234',
'last-used-logical-unit-number': '5678',
'last-used-disk-partition-id': 0,
'last-used-operating-system-specific-load-parameters': '',
'last-used-boot-record-logical-block-address': '0',
'last-used-load-type': 'ipltype-scsi',
'last-used-secure-boot': False,
'last-used-clear-indicator': True},
None
),
(
"All input parameters for last-used props",
'activated',
'operating',
{'load_address': '0010A',
'wwpn': '1234',
'lun': '5678',
'load_parameter': 'foo_lp',
'disk_partition_id': 42,
'operating_system_specific_load_parameters': 'foo_oslp',
'boot_record_logical_block_address': '42',
'secure_boot': True,
'clear_indicator': False},
{'status': 'operating',
'last-used-load-address': '0010A',
'last-used-load-parameter': 'foo_lp',
'last-used-world-wide-port-name': '1234',
'last-used-logical-unit-number': '5678',
'last-used-disk-partition-id': 42,
'last-used-operating-system-specific-load-parameters': 'foo_oslp',
'last-used-boot-record-logical-block-address': '42',
'last-used-load-type': 'ipltype-scsi',
'last-used-secure-boot': True,
'last-used-clear-indicator': False},
None
),
(
"Incorrect initial status 'not-activated'",
'not-activated',
'operating',
{'load_address': '0010A',
'wwpn': '1234',
'lun': '5678'},
{},
HTTPError({'http-status': 409, 'reason': 0})
),
(
"Initial status 'operating', testing default for 'force'",
'operating',
'operating',
{'load_address': '0010A',
'wwpn': '1234',
'lun': '5678'},
{},
HTTPError({'http-status': 500, 'reason': 263}) # TODO: Check
),
(
"Initial status 'operating', 'force' is False",
'operating',
'operating',
{'load_address': '0010A',
'wwpn': '1234',
'lun': '5678',
'force': False},
{},
HTTPError({'http-status': 500, 'reason': 263}) # TODO: Check
),
(
"Initial status 'operating', 'force' is True",
'operating',
'operating',
{'load_address': '0010A',
'wwpn': '1234',
'lun': '5678',
'force': True},
{'status': 'operating'},
None
),
(
"Initial status 'exceptions'",
'exceptions',
'operating',
{'load_address': '0010A',
'wwpn': '1234',
'lun': '5678'},
{'status': 'operating'},
None
),
]
@pytest.mark.parametrize(
"desc, initial_status, result_status, input_kwargs, exp_properties, "
"exc_exp",
TESTCASES_SCSI_LOAD)
@mock.patch.object(LparLoadHandler, 'get_status')
def test_lpar_scsi_load(
self, get_status_mock,
desc, initial_status, result_status, input_kwargs, exp_properties,
exc_exp):
# pylint: disable=unused-argument
"""Test Lpar.scsi_load()."""
# Add a faked LPAR and set its properties
faked_lpar = self.add_lpar1()
faked_lpar.properties['status'] = initial_status
# Add a faked image profile
self.add_imageprofile1()
lpar_mgr = self.cpc.lpars
lpar = lpar_mgr.find(name=faked_lpar.name)
get_status_mock.return_value = result_status
if exc_exp:
with pytest.raises(Exception) as exc_info:
# Execute the code to be tested
lpar.scsi_load(**input_kwargs)
exc = exc_info.value
assert isinstance(exc, exc_exp.__class__)
if isinstance(exc, HTTPError):
assert exc.http_status == exc_exp.http_status
assert exc.reason == exc_exp.reason
else:
# Execute the code to be tested.
ret = lpar.scsi_load(**input_kwargs)
# TODO: Job result not implemented yet
assert ret is None
lpar.pull_full_properties()
for pname, exp_value in exp_properties.items():
act_value = lpar.get_property(pname)
assert act_value == exp_value, \
f"Unexpected value for property {pname!r}: " \
f"got {act_value!r}, expected {exp_value!r}"
TESTCASES_SCSI_DUMP = [
# Testcases for test_lpar_scsi_dump()
# Each testcase is a tuple of:
# * desc: description
# * initial_status: Status before scsi_dump() is called
# * result_status: Status to be set by scsi_dump()
# * input_kwargs: Keyword arguments to scsi_dump()
# * exp_properties: Props to validate after a successful scsi_dump()
# * exc_exp: Expected exception object, or None
(
"Missing input parameter 'load_address'",
'activated',
'operating',
{'wwpn': '1234',
'lun': '5678'},
{},
TypeError()
),
(
"Missing input parameter 'wwpn'",
'activated',
'operating',
{'load_address': '0010A',
'lun': '5678'},
{},
TypeError()
),
(
"Missing input parameter 'lun'",
'activated',
'operating',
{'load_address': '0010A',
'wwpn': '1234'},
{},
TypeError()
),
(
"Minimally required input parameters, test defaults for optional",
'activated',
'operating',
{'load_address': '0010A',
'wwpn': '1234',
'lun': '5678'},
{'status': 'operating',
'last-used-load-address': '0010A',
'last-used-load-parameter': '',
'last-used-world-wide-port-name': '1234',
'last-used-logical-unit-number': '5678',
'last-used-disk-partition-id': 0,
'last-used-operating-system-specific-load-parameters': '',
'last-used-boot-record-logical-block-address': '0',
'last-used-load-type': 'ipltype-scsidump',
'last-used-secure-boot': False,
'last-used-clear-indicator': True},
None
),
(
"All input parameters for last-used props",
'activated',
'operating',
{'load_address': '0010A',
'wwpn': '1234',
'lun': '5678',
'load_parameter': 'foo_lp',
'disk_partition_id': 42,
'operating_system_specific_load_parameters': 'foo_oslp',
'boot_record_logical_block_address': '42',
'secure_boot': True},
{'status': 'operating',
'last-used-load-address': '0010A',
'last-used-load-parameter': 'foo_lp',
'last-used-world-wide-port-name': '1234',
'last-used-logical-unit-number': '5678',
'last-used-disk-partition-id': 42,
'last-used-operating-system-specific-load-parameters': 'foo_oslp',
'last-used-boot-record-logical-block-address': '42',
'last-used-load-type': 'ipltype-scsidump',
'last-used-secure-boot': True},
None
),
(
"Incorrect initial status 'not-activated'",
'not-activated',
'operating',
{'load_address': '0010A',
'wwpn': '1234',
'lun': '5678'},
{},
HTTPError({'http-status': 409, 'reason': 0})
),
(
"Initial status 'operating', testing default for 'force'",
'operating',
'operating',
{'load_address': '0010A',
'wwpn': '1234',
'lun': '5678'},
{},
HTTPError({'http-status': 500, 'reason': 263}) # TODO: Check
),
(
"Initial status 'operating', 'force' is False",
'operating',
'operating',
{'load_address': '0010A',
'wwpn': '1234',
'lun': '5678',
'force': False},
{},
HTTPError({'http-status': 500, 'reason': 263}) # TODO: Check
),
(
"Initial status 'operating', 'force' is True",
'operating',
'operating',
{'load_address': '0010A',
'wwpn': '1234',
'lun': '5678',
'force': True},
{'status': 'operating'},
None
),
(
"Initial status 'exceptions'",
'exceptions',
'operating',
{'load_address': '0010A',
'wwpn': '1234',
'lun': '5678'},
{'status': 'operating'},
None
),
]
@pytest.mark.parametrize(
"desc, initial_status, result_status, input_kwargs, exp_properties, "
"exc_exp",
TESTCASES_SCSI_DUMP)
@mock.patch.object(LparLoadHandler, 'get_status')
def test_lpar_scsi_dump(
self, get_status_mock,
desc, initial_status, result_status, input_kwargs, exp_properties,
exc_exp):
# pylint: disable=unused-argument
"""Test Lpar.scsi_dump())."""
# Add a faked LPAR and set its properties
faked_lpar = self.add_lpar1()
faked_lpar.properties['status'] = initial_status
# Add a faked image profile
self.add_imageprofile1()
lpar_mgr = self.cpc.lpars
lpar = lpar_mgr.find(name=faked_lpar.name)
get_status_mock.return_value = result_status
if exc_exp:
with pytest.raises(Exception) as exc_info:
# Execute the code to be tested
lpar.scsi_dump(**input_kwargs)
exc = exc_info.value
assert isinstance(exc, exc_exp.__class__)
if isinstance(exc, HTTPError):
assert exc.http_status == exc_exp.http_status
assert exc.reason == exc_exp.reason
else:
# Execute the code to be tested.
ret = lpar.scsi_dump(**input_kwargs)
# TODO: Job result not implemented yet
assert ret is None
lpar.pull_full_properties()
for pname, exp_value in exp_properties.items():
act_value = lpar.get_property(pname)
assert act_value == exp_value, \
f"Unexpected value for property {pname!r}: " \
f"got {act_value!r}, expected {exp_value!r}"
TESTCASES_NVME_LOAD = [
# Testcases for test_lpar_nvme_load()
# Each testcase is a tuple of:
# * desc: description
# * initial_status: Status before nvme_load() is called
# * result_status: Status to be set by nvme_load()
# * input_kwargs: Keyword arguments to nvme_load()
# * exp_properties: Props to validate after a successful nvme_load()
# * exc_exp: Expected exception object, or None
(
"Missing input parameter 'load_address'",
'activated',
'operating',
{},
{},
TypeError()
),
(
"Minimally required input parameters, test defaults for optional",
'activated',
'operating',
{'load_address': '0010A'},
{'status': 'operating',
'last-used-load-address': '0010A',
'last-used-load-parameter': '',
'last-used-disk-partition-id': 0,
'last-used-operating-system-specific-load-parameters': '',
'last-used-boot-record-logical-block-address': '0',
'last-used-load-type': 'ipltype-nvme',
'last-used-secure-boot': False,
'last-used-clear-indicator': True},
None
),
(
"All input parameters for last-used props",
'activated',
'operating',
{'load_address': '0010A',
'load_parameter': 'foo_lp',
'disk_partition_id': 42,
'operating_system_specific_load_parameters': 'foo_oslp',
'boot_record_logical_block_address': '42',
'secure_boot': True,
'clear_indicator': False},
{'status': 'operating',
'last-used-load-address': '0010A',
'last-used-load-parameter': 'foo_lp',
'last-used-disk-partition-id': 42,
'last-used-operating-system-specific-load-parameters': 'foo_oslp',
'last-used-boot-record-logical-block-address': '42',
'last-used-load-type': 'ipltype-nvme',
'last-used-secure-boot': True,
'last-used-clear-indicator': False},
None
),
(
"Incorrect initial status 'not-activated'",
'not-activated',
'operating',
{'load_address': '0010A'},
{},
HTTPError({'http-status': 409, 'reason': 0})
),
(
"Initial status 'operating', testing default for 'force'",
'operating',
'operating',
{'load_address': '0010A'},
{},
HTTPError({'http-status': 500, 'reason': 263}) # TODO: Check
),
(
"Initial status 'operating', 'force' is False",
'operating',
'operating',
{'load_address': '0010A',
'force': False},
{},
HTTPError({'http-status': 500, 'reason': 263}) # TODO: Check
),
(
"Initial status 'operating', 'force' is True",
'operating',
'operating',
{'load_address': '0010A',
'force': True},
{'status': 'operating'},
None
),
(
"Initial status 'exceptions'",
'exceptions',
'operating',
{'load_address': '0010A'},
{'status': 'operating'},
None
),
]
@pytest.mark.parametrize(
"desc, initial_status, result_status, input_kwargs, exp_properties, "
"exc_exp",
TESTCASES_NVME_LOAD)
@mock.patch.object(LparLoadHandler, 'get_status')
def test_lpar_nvme_load(
self, get_status_mock,
desc, initial_status, result_status, input_kwargs, exp_properties,
exc_exp):
# pylint: disable=unused-argument
"""Test Lpar.nvme_load()."""
# Add a faked LPAR and set its properties
faked_lpar = self.add_lpar1()
faked_lpar.properties['status'] = initial_status
# Add a faked image profile
self.add_imageprofile1()
lpar_mgr = self.cpc.lpars
lpar = lpar_mgr.find(name=faked_lpar.name)
get_status_mock.return_value = result_status
if exc_exp:
with pytest.raises(Exception) as exc_info:
# Execute the code to be tested
lpar.nvme_load(**input_kwargs)
exc = exc_info.value
assert isinstance(exc, exc_exp.__class__)
if isinstance(exc, HTTPError):
assert exc.http_status == exc_exp.http_status
assert exc.reason == exc_exp.reason
else:
# Execute the code to be tested.
ret = lpar.nvme_load(**input_kwargs)
# TODO: Job result not implemented yet
assert ret is None
lpar.pull_full_properties()
for pname, exp_value in exp_properties.items():
act_value = lpar.get_property(pname)
assert act_value == exp_value, \
f"Unexpected value for property {pname!r}: " \
f"got {act_value!r}, expected {exp_value!r}"
TESTCASES_NVME_DUMP = [
# Testcases for test_lpar_nvme_dump()
# Each testcase is a tuple of:
# * desc: description
# * initial_status: Status before nvme_dump() is called
# * result_status: Status to be set by nvme_dump()
# * input_kwargs: Keyword arguments to nvme_dump()
# * exp_properties: Props to validate after a successful nvme_dump()
# * exc_exp: Expected exception object, or None
(
"Missing input parameter 'load_address'",
'activated',
'operating',
{},
{},
TypeError()
),
(
"Minimally required input parameters, test defaults for optional",
'activated',
'operating',
{'load_address': '0010A'},
{'status': 'operating',
'last-used-load-address': '0010A',
'last-used-load-parameter': '',
'last-used-disk-partition-id': 0,
'last-used-operating-system-specific-load-parameters': '',
'last-used-boot-record-logical-block-address': '0',
'last-used-load-type': 'ipltype-nvmedump',
'last-used-secure-boot': False,
'last-used-clear-indicator': True},
None
),
(
"All input parameters for last-used props",
'activated',
'operating',
{'load_address': '0010A',
'load_parameter': 'foo_lp',
'disk_partition_id': 42,
'operating_system_specific_load_parameters': 'foo_oslp',
'boot_record_logical_block_address': '42',
'secure_boot': True},
{'status': 'operating',
'last-used-load-address': '0010A',
'last-used-load-parameter': 'foo_lp',
'last-used-disk-partition-id': 42,
'last-used-operating-system-specific-load-parameters': 'foo_oslp',
'last-used-boot-record-logical-block-address': '42',
'last-used-load-type': 'ipltype-nvmedump',
'last-used-secure-boot': True},
None
),
(
"Incorrect initial status 'not-activated'",
'not-activated',
'operating',
{'load_address': '0010A'},
{},
HTTPError({'http-status': 409, 'reason': 0})
),
(
"Initial status 'operating', testing default for 'force'",
'operating',
'operating',
{'load_address': '0010A'},
{},
HTTPError({'http-status': 500, 'reason': 263}) # TODO: Check
),
(
"Initial status 'operating', 'force' is False",
'operating',
'operating',
{'load_address': '0010A',
'force': False},
{},
HTTPError({'http-status': 500, 'reason': 263}) # TODO: Check
),
(
"Initial status 'operating', 'force' is True",
'operating',
'operating',
{'load_address': '0010A',
'force': True},
{'status': 'operating'},
None
),
(
"Initial status 'exceptions'",
'exceptions',
'operating',
{'load_address': '0010A'},
{'status': 'operating'},
None
),
]
@pytest.mark.parametrize(
"desc, initial_status, result_status, input_kwargs, exp_properties, "
"exc_exp",
TESTCASES_NVME_DUMP)
@mock.patch.object(LparLoadHandler, 'get_status')
def test_lpar_nvme_dump(
self, get_status_mock,
desc, initial_status, result_status, input_kwargs, exp_properties,
exc_exp):
# pylint: disable=unused-argument
"""Test Lpar.nvme_dump())."""
# Add a faked LPAR and set its properties
faked_lpar = self.add_lpar1()
faked_lpar.properties['status'] = initial_status
# Add a faked image profile
self.add_imageprofile1()
lpar_mgr = self.cpc.lpars
lpar = lpar_mgr.find(name=faked_lpar.name)
get_status_mock.return_value = result_status
if exc_exp:
with pytest.raises(Exception) as exc_info:
# Execute the code to be tested
lpar.nvme_dump(**input_kwargs)
exc = exc_info.value
assert isinstance(exc, exc_exp.__class__)
if isinstance(exc, HTTPError):
assert exc.http_status == exc_exp.http_status
assert exc.reason == exc_exp.reason
else:
# Execute the code to be tested.
ret = lpar.nvme_dump(**input_kwargs)
# TODO: Job result not implemented yet
assert ret is None
lpar.pull_full_properties()
for pname, exp_value in exp_properties.items():
act_value = lpar.get_property(pname)
assert act_value == exp_value, \
f"Unexpected value for property {pname!r}: " \
f"got {act_value!r}, expected {exp_value!r}"
@pytest.mark.parametrize(
"filter_args, exp_names", [
({'cpc-name': 'bad'},
[]),
({'cpc-name': CPC_NAME},
[LPAR1_NAME, LPAR2_NAME]),
({},
[LPAR1_NAME, LPAR2_NAME]),
(None,
[LPAR1_NAME, LPAR2_NAME]),
({'name': LPAR1_NAME},
[LPAR1_NAME]),
]
)
def test_console_list_permitted_lpars(self, filter_args, exp_names):
"""Test Console.list_permitted_lpars() with filter_args."""
# Add two faked partitions
self.add_lpar1()
self.add_lpar2()
self.session.hmc.consoles.add({
'object-id': None,
# object-uri will be automatically set
'parent': None,
'class': 'console',
'name': 'fake-console1',
'description': 'Console #1',
})
console = self.client.consoles.console
# Execute the code to be tested
lpars = console.list_permitted_lpars(filter_args=filter_args)
assert len(lpars) == len(exp_names)
if exp_names:
names = [p.properties['name'] for p in lpars]
assert set(names) == set(exp_names)
for lpar in lpars:
lpar_props = dict(lpar.properties)
for pname in LIST_PERMITTED_LPARS_PROPS:
assert pname in lpar_props, (
f"Property {pname!r} missing from returned LPAR "
f"properties, got: {lpar_props!r}")
@pytest.mark.parametrize(
"list_kwargs, prop_names", [
({},
LIST_PERMITTED_LPARS_PROPS),
(dict(additional_properties=[]),
LIST_PERMITTED_LPARS_PROPS),
(dict(additional_properties=['description']),
LIST_PERMITTED_LPARS_PROPS + ['description']),
(dict(additional_properties=['description', 'activation-mode']),
LIST_PERMITTED_LPARS_PROPS + ['description', 'activation-mode']),
(dict(additional_properties=['ssc-host-name']),
LIST_PERMITTED_LPARS_PROPS + ['ssc-host-name']),
]
)
def test_console_list_permlpars_add_props(
self, list_kwargs, prop_names):
"""
Test Console.list_permitted_lpars() with additional_properties.
"""
# Add two faked partitions
faked_lpar1 = self.add_lpar1()
faked_lpar2 = self.add_lpar2()
self.session.hmc.consoles.add({
'object-id': None,
# object-uri will be automatically set
'parent': None,
'class': 'console',
'name': 'fake-console1',
'description': 'Console #1',
})
console = self.client.consoles.console
# Execute the code to be tested
lpars = console.list_permitted_lpars(**list_kwargs)
exp_faked_lpars = [faked_lpar1, faked_lpar2]
assert_resources(lpars, exp_faked_lpars, prop_names)
def test_lpar_start(http_mocked_lpar): # noqa: F811
# pylint: disable=redefined-outer-name,unused-argument
"""
Test function for Lpar.start()
"""
session = http_mocked_lpar.manager.session
uri = http_mocked_lpar.uri + '/operations/start'
job_uri = '/api/jobs/job-1'
exp_request_body = None
exp_status_code = 202
result_body = {
'job-uri': job_uri,
}
exp_result_job = Job(session, job_uri, 'POST', uri)
rm_adapter = requests_mock.Adapter(case_sensitive=True)
with requests_mock.mock(adapter=rm_adapter) as m:
m.post(uri, status_code=exp_status_code, json=result_body)
result_job = http_mocked_lpar.start(wait_for_completion=False)
assert rm_adapter.called
request_body = rm_adapter.last_request.body
assert request_body == exp_request_body
assert result_job.uri == exp_result_job.uri
assert result_job.op_method == exp_result_job.op_method
assert result_job.op_uri == exp_result_job.op_uri
def test_lpar_stop(http_mocked_lpar): # noqa: F811
# pylint: disable=redefined-outer-name,unused-argument
"""
Test function for Lpar.stop()
"""
session = http_mocked_lpar.manager.session
uri = http_mocked_lpar.uri + '/operations/stop'
job_uri = '/api/jobs/job-1'
exp_request_body = None
exp_status_code = 202
result_body = {
'job-uri': job_uri,
}
exp_result_job = Job(session, job_uri, 'POST', uri)
rm_adapter = requests_mock.Adapter(case_sensitive=True)
with requests_mock.mock(adapter=rm_adapter) as m:
m.post(uri, status_code=exp_status_code, json=result_body)
result_job = http_mocked_lpar.stop(wait_for_completion=False)
assert rm_adapter.called
request_body = rm_adapter.last_request.body
assert request_body == exp_request_body
assert result_job.uri == exp_result_job.uri
assert result_job.op_method == exp_result_job.op_method
assert result_job.op_uri == exp_result_job.op_uri
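# The two Job-returning tests above repeat the same mocking boilerplate; a
# hedged sketch of a helper that could factor it out (the helper name is
# illustrative and not part of the module, and it reuses only the requests_mock
# calls already shown above):
import contextlib

@contextlib.contextmanager
def _mock_async_post(uri, job_uri):
    """Mock a single POST that answers HTTP 202 with a job URI."""
    rm_adapter = requests_mock.Adapter(case_sensitive=True)
    with requests_mock.mock(adapter=rm_adapter) as m:
        m.post(uri, status_code=202, json={'job-uri': job_uri})
        yield rm_adapter

# Example use:
#     with _mock_async_post(uri, job_uri) as rm_adapter:
#         result_job = http_mocked_lpar.start(wait_for_completion=False)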
# TODO: Test for Lpar.psw_restart()
# TODO: Test for Lpar.reset_clear()
# TODO: Test for Lpar.reset_normal()
def test_lpar_load_from_ftp(http_mocked_lpar): # noqa: F811
# pylint: disable=redefined-outer-name,unused-argument
"""
Test function for Lpar.load_from_ftp()
"""
session = http_mocked_lpar.manager.session
uri = http_mocked_lpar.uri + '/operations/load-from-ftp'
# Define the input parameters for the test call
host = 'test-ftp-host-1'
username = 'test-user'
password = 'test-pwd'
load_file = '/images/load1.img'
protocol = 'sftp'
job_uri = '/api/jobs/job-1'
exp_request_body = {
'host-name': host,
'user-name': username,
'password': password,
'file-path': load_file,
'protocol': protocol,
}
exp_status_code = 202
result_body = {
'job-uri': job_uri,
}
exp_result_job = Job(session, job_uri, 'POST', uri)
rm_adapter = requests_mock.Adapter(case_sensitive=True)
with requests_mock.mock(adapter=rm_adapter) as m:
m.post(uri, status_code=exp_status_code, json=result_body)
result_job = http_mocked_lpar.load_from_ftp(
host, username, password, load_file, protocol,
wait_for_completion=False)
assert rm_adapter.called
request_body = rm_adapter.last_request.json()
assert request_body == exp_request_body
assert result_job.uri == exp_result_job.uri
assert result_job.op_method == exp_result_job.op_method
assert result_job.op_uri == exp_result_job.op_uri
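# A hedged note on the two request-body assertion styles used in these mocked
# tests: operations that send no body compare rm_adapter.last_request.body to
# None (as in test_lpar_start() and test_lpar_stop() above), while operations
# that send a JSON body decode it first before comparing, e.g.:
#
#     request_body = rm_adapter.last_request.json()
#     assert request_body == exp_request_body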
|
GHSA-p57h-3cmc-xpjq
|
tests/unit/zhmcclient/test_partition.py
|
@@ -19,9 +19,11 @@
import re
import copy
+import logging
import pytest
-from zhmcclient import Client, Partition, HTTPError, NotFound
+from zhmcclient import Client, Partition, HTTPError, NotFound, \
+ BLANKED_OUT_STRING
from zhmcclient_mock import FakedSession
from tests.common.utils import assert_resources
@@ -307,23 +309,43 @@ def test_pm_list_add_props(
({'name': 'fake-part-x',
'ifl-processors': 2,
'initial-memory': 4096,
- 'maximum-memory': 4096},
+ 'maximum-memory': 4096,
+ 'description': 'fake description X'},
['object-uri', 'name', 'initial-memory', 'maximum-memory',
- 'ifl-processors'],
+ 'ifl-processors', 'description'],
None),
({'name': 'fake-part-x',
'ifl-processors': 2,
'initial-memory': 4096,
'maximum-memory': 4096,
- 'description': 'fake description X'},
+ 'boot-device': 'ftp',
+ 'boot-ftp-host': 'host',
+ 'boot-ftp-username': 'user',
+ 'boot-ftp-password': 'bla',
+ 'boot-ftp-insfile': 'ins'},
['object-uri', 'name', 'initial-memory', 'maximum-memory',
- 'ifl-processors', 'description'],
+ 'ifl-processors', 'boot-device', 'boot-ftp-host',
+ 'boot-ftp-username', 'boot-ftp-insfile'],
+ None),
+ ({'name': 'fake-part-x',
+ 'ifl-processors': 2,
+ 'initial-memory': 4096,
+ 'maximum-memory': 4096,
+ 'type': 'ssc',
+ 'ssc-host-name': 'host',
+ 'ssc-master-userid': 'user',
+ 'ssc-master-pw': 'bla'},
+ ['object-uri', 'name', 'initial-memory', 'maximum-memory',
+ 'ifl-processors', 'type', 'ssc-host-name', 'ssc-master-userid'],
None),
]
)
- def test_pm_create(self, input_props, exp_prop_names, exp_exc):
+ def test_pm_create(self, caplog, input_props, exp_prop_names, exp_exc):
"""Test PartitionManager.create()."""
+ logger_name = "zhmcclient.api"
+ caplog.set_level(logging.DEBUG, logger=logger_name)
+
partition_mgr = self.cpc.partitions
if exp_exc is not None:
@@ -345,6 +367,9 @@ def test_pm_create(self, input_props, exp_prop_names, exp_exc):
# the input properties plus 'object-uri'.
partition = partition_mgr.create(properties=input_props)
+ # Get its API call log record
+ call_record = caplog.records[-2]
+
# Check the resource for consistency within itself
assert isinstance(partition, Partition)
partition_name = partition.name
@@ -362,6 +387,14 @@ def test_pm_create(self, input_props, exp_prop_names, exp_exc):
exp_value = input_props[prop_name]
assert value == exp_value
+ # Verify the API call log record for blanked-out properties.
+ if 'boot-ftp-password' in input_props:
+ exp_str = f"'boot-ftp-password': '{BLANKED_OUT_STRING}'"
+ assert call_record.message.find(exp_str) > 0
+ if 'ssc-master-pw' in input_props:
+ exp_str = f"'ssc-master-pw': '{BLANKED_OUT_STRING}'"
+ assert call_record.message.find(exp_str) > 0
+
def test_pm_resource_object(self):
"""
Test PartitionManager.resource_object().
@@ -673,9 +706,13 @@ def test_partition_feature_info(
'ssc-master-pw': None},
]
)
- def test_partition_update_properties(self, input_props, partition_name):
+ def test_partition_update_properties(
+ self, caplog, input_props, partition_name):
"""Test Partition.update_properties()."""
+ logger_name = "zhmcclient.api"
+ caplog.set_level(logging.DEBUG, logger=logger_name)
+
# Add faked partitions
self.add_partition1()
self.add_partition2()
@@ -689,6 +726,9 @@ def test_partition_update_properties(self, input_props, partition_name):
# Execute the code to be tested
partition.update_properties(properties=input_props)
+ # Get its API call log record
+ call_record = caplog.records[-2]
+
# Verify that the resource object already reflects the property
# updates.
for prop_name in saved_properties:
@@ -712,6 +752,14 @@ def test_partition_update_properties(self, input_props, partition_name):
prop_value = partition.properties[prop_name]
assert prop_value == exp_prop_value
+ # Verify the API call log record for blanked-out properties.
+ if 'boot-ftp-password' in input_props:
+ exp_str = f"'boot-ftp-password': '{BLANKED_OUT_STRING}'"
+ assert call_record.message.find(exp_str) > 0
+ if 'ssc-master-pw' in input_props:
+ exp_str = f"'ssc-master-pw': '{BLANKED_OUT_STRING}'"
+ assert call_record.message.find(exp_str) > 0
+
def test_partition_update_name(self):
"""
Test Partition.update_properties() with 'name' property.
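# A short, hedged note on the caplog usage added above (based on pytest's
# documented caplog fixture): set_level() with the logger= argument changes the
# level of that specific logger only for the duration of the test and restores
# it afterwards, so enabling DEBUG for 'zhmcclient.api' here does not leak into
# other tests. Minimal sketch:
#
#     def test_something(caplog):
#         caplog.set_level(logging.DEBUG, logger="zhmcclient.api")
#         ...  # records from that logger are then available via caplog.records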
|
# Copyright 2016,2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for _partition module.
"""
import re
import copy
import pytest
from zhmcclient import Client, Partition, HTTPError, NotFound
from zhmcclient_mock import FakedSession
from tests.common.utils import assert_resources
# Object IDs and names of our faked partitions:
PART1_OID = 'part1-oid'
PART1_NAME = 'part 1'
PART2_OID = 'part2-oid'
PART2_NAME = 'part 2'
PART3_OID = 'part3-oid'
PART3_NAME = 'part 3'
CPC_NAME = 'fake-cpc1-name'
# Properties returned by default from list_permitted_partitions()
LIST_PERMITTED_PARTITIONS_PROPS = [
'name', 'object-uri', 'type', 'status', 'has-unacceptable-status',
'cpc-name', 'cpc-object-uri',
# The zhmcclient_mock support always returns 'se-version'
'se-version'
]
class TestPartition:
"""All tests for the Partition and PartitionManager classes."""
def setup_method(self):
"""
Setup that is called by pytest before each test method.
Set up a faked session, and add a faked CPC in DPM mode without any
child resources.
"""
# pylint: disable=attribute-defined-outside-init
self.session = FakedSession('fake-host', 'fake-hmc', '2.13.1', '1.8')
self.client = Client(self.session)
self.faked_cpc = self.session.hmc.cpcs.add({
'object-id': 'fake-cpc1-oid',
# object-uri is set up automatically
'parent': None,
'class': 'cpc',
'name': CPC_NAME,
'description': 'CPC #1 (DPM mode)',
'status': 'active',
'dpm-enabled': True,
'is-ensemble-member': False,
'iml-mode': 'dpm',
})
self.cpc = self.client.cpcs.find(name=CPC_NAME)
def add_partition1(self):
"""Add partition 1 (type linux)."""
faked_partition = self.faked_cpc.partitions.add({
'object-id': PART1_OID,
# object-uri will be automatically set
'parent': self.faked_cpc.uri,
'class': 'partition',
'name': PART1_NAME,
'description': 'Partition #1',
'status': 'active',
'type': 'linux',
'ifl-processors': 2,
'initial-memory': 4096,
'maximum-memory': 8192,
})
return faked_partition
def add_partition2(self):
"""Add partition 2 (type ssc)."""
faked_partition = self.faked_cpc.partitions.add({
'object-id': PART2_OID,
# object-uri will be automatically set
'parent': self.faked_cpc.uri,
'class': 'partition',
'name': PART2_NAME,
'description': 'Partition #2',
'status': 'active',
'type': 'ssc',
'ifl-processors': 2,
'initial-memory': 4096,
'maximum-memory': 8192,
})
return faked_partition
def add_partition3(self):
"""Add partition 3 (support for firmware features)."""
faked_partition = self.faked_cpc.partitions.add({
'object-id': PART3_OID,
# object-uri will be automatically set
'parent': self.faked_cpc.uri,
'class': 'partition',
'name': PART3_NAME,
'description': 'Partition #3',
'status': 'active',
'type': 'linux',
'ifl-processors': 2,
'initial-memory': 4096,
'maximum-memory': 8192,
'available-features-list': [],
})
return faked_partition
def add_partition(self, part_name):
"""Add a partition (using one of the known names)."""
if part_name == PART1_NAME:
faked_partition = self.add_partition1()
elif part_name == PART2_NAME:
faked_partition = self.add_partition2()
else:
assert part_name == PART3_NAME
faked_partition = self.add_partition3()
return faked_partition
def test_pm_initial_attrs(self):
"""Test initial attributes of PartitionManager."""
partition_mgr = self.cpc.partitions
# Verify all public properties of the manager object
assert partition_mgr.resource_class == Partition
assert partition_mgr.session == self.session
assert partition_mgr.parent == self.cpc
assert partition_mgr.cpc == self.cpc
# TODO: Test for PartitionManager.__repr__()
@pytest.mark.parametrize(
"full_properties_kwargs, prop_names", [
({},
['object-uri', 'name', 'status']),
(dict(full_properties=False),
['object-uri', 'name', 'status']),
(dict(full_properties=True),
None),
]
)
def test_pm_list_full_properties(
self, full_properties_kwargs, prop_names):
"""Test PartitionManager.list() with full_properties."""
# Add two faked partitions
faked_partition1 = self.add_partition1()
faked_partition2 = self.add_partition2()
exp_faked_partitions = [faked_partition1, faked_partition2]
partition_mgr = self.cpc.partitions
# Execute the code to be tested
partitions = partition_mgr.list(**full_properties_kwargs)
assert_resources(partitions, exp_faked_partitions, prop_names)
@pytest.mark.parametrize(
"filter_args, exp_names", [
({'object-id': PART1_OID},
[PART1_NAME]),
({'object-id': PART2_OID},
[PART2_NAME]),
({'object-id': [PART1_OID, PART2_OID]},
[PART1_NAME, PART2_NAME]),
({'object-id': [PART1_OID, PART1_OID]},
[PART1_NAME]),
({'object-id': PART1_OID + 'foo'},
[]),
({'object-id': [PART1_OID, PART2_OID + 'foo']},
[PART1_NAME]),
({'object-id': [PART2_OID + 'foo', PART1_OID]},
[PART1_NAME]),
({'name': PART1_NAME},
[PART1_NAME]),
({'name': PART2_NAME},
[PART2_NAME]),
({'name': [PART1_NAME, PART2_NAME]},
[PART1_NAME, PART2_NAME]),
({'name': PART1_NAME + 'foo'},
[]),
({'name': [PART1_NAME, PART2_NAME + 'foo']},
[PART1_NAME]),
({'name': [PART2_NAME + 'foo', PART1_NAME]},
[PART1_NAME]),
({'name': [PART1_NAME, PART1_NAME]},
[PART1_NAME]),
({'name': '.*part 1'},
[PART1_NAME]),
({'name': 'part 1.*'},
[PART1_NAME]),
({'name': 'part .'},
[PART1_NAME, PART2_NAME]),
({'name': '.art 1'},
[PART1_NAME]),
({'name': '.+'},
[PART1_NAME, PART2_NAME]),
({'name': 'part 1.+'},
[]),
({'name': '.+part 1'},
[]),
({'name': PART1_NAME,
'object-id': PART1_OID},
[PART1_NAME]),
({'name': PART1_NAME,
'object-id': PART1_OID + 'foo'},
[]),
({'name': PART1_NAME + 'foo',
'object-id': PART1_OID},
[]),
({'name': PART1_NAME + 'foo',
'object-id': PART1_OID + 'foo'},
[]),
]
)
def test_pm_list_filter_args(self, filter_args, exp_names):
"""Test PartitionManager.list() with filter_args."""
# Add two faked partitions
self.add_partition1()
self.add_partition2()
partition_mgr = self.cpc.partitions
# Execute the code to be tested
partitions = partition_mgr.list(filter_args=filter_args)
assert len(partitions) == len(exp_names)
if exp_names:
names = [p.properties['name'] for p in partitions]
assert set(names) == set(exp_names)
@pytest.mark.parametrize(
"list_kwargs, prop_names", [
({},
['object-uri', 'name', 'status']),
(dict(additional_properties=[]),
['object-uri', 'name', 'status']),
(dict(additional_properties=['description']),
['object-uri', 'name', 'status', 'description']),
(dict(additional_properties=['description', 'se-version']),
['object-uri', 'name', 'status', 'description', 'se-version']),
(dict(additional_properties=['ssc-host-name']),
['object-uri', 'name', 'status', 'ssc-host-name']
# ssc-host-name is not on every partition
),
]
)
def test_pm_list_add_props(
self, list_kwargs, prop_names):
"""
Test PartitionManager.list() with additional_properties.
"""
# Add two faked partitions
faked_part1 = self.add_partition1()
faked_part2 = self.add_partition2()
partition_mgr = self.cpc.partitions
# Execute the code to be tested
parts = partition_mgr.list(**list_kwargs)
exp_faked_parts = [faked_part1, faked_part2]
assert_resources(parts, exp_faked_parts, prop_names)
@pytest.mark.parametrize(
"input_props, exp_prop_names, exp_exc", [
({},
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'description': 'fake description X'},
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'name': 'fake-part-x'},
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'name': 'fake-part-x',
'initial-memory': 1024},
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'name': 'fake-part-x',
'ifl-processors': 2,
'initial-memory': 4096,
'maximum-memory': 4096},
['object-uri', 'name', 'initial-memory', 'maximum-memory',
'ifl-processors'],
None),
({'name': 'fake-part-x',
'ifl-processors': 2,
'initial-memory': 4096,
'maximum-memory': 4096,
'description': 'fake description X'},
['object-uri', 'name', 'initial-memory', 'maximum-memory',
'ifl-processors', 'description'],
None),
]
)
def test_pm_create(self, input_props, exp_prop_names, exp_exc):
"""Test PartitionManager.create()."""
partition_mgr = self.cpc.partitions
if exp_exc is not None:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
partition = partition_mgr.create(properties=input_props)
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
else:
# Execute the code to be tested.
# Note: the Partition object returned by Partition.create() has
# the input properties plus 'object-uri'.
partition = partition_mgr.create(properties=input_props)
# Check the resource for consistency within itself
assert isinstance(partition, Partition)
partition_name = partition.name
exp_partition_name = partition.properties['name']
assert partition_name == exp_partition_name
partition_uri = partition.uri
exp_partition_uri = partition.properties['object-uri']
assert partition_uri == exp_partition_uri
# Check the properties against the expected names and values
for prop_name in exp_prop_names:
assert prop_name in partition.properties
if prop_name in input_props:
value = partition.properties[prop_name]
exp_value = input_props[prop_name]
assert value == exp_value
def test_pm_resource_object(self):
"""
Test PartitionManager.resource_object().
        This test exists for historical reasons; by now it is covered by the
        test for BaseManager.resource_object().
"""
partition_mgr = self.cpc.partitions
partition_oid = 'fake-partition-id42'
# Execute the code to be tested
partition = partition_mgr.resource_object(partition_oid)
partition_uri = "/api/partitions/" + partition_oid
assert isinstance(partition, Partition)
assert partition.uri == partition_uri
assert partition.properties['object-uri'] == partition_uri
assert partition.properties['object-id'] == partition_oid
assert partition.properties['class'] == 'partition'
assert partition.properties['parent'] == self.cpc.uri
# TODO: Test for initial Partition attributes (nics, hbas,
# virtual_functions)
def test_partition_repr(self):
"""Test Partition.__repr__()."""
# Add a faked partition
faked_partition = self.add_partition1()
partition_mgr = self.cpc.partitions
partition = partition_mgr.find(name=faked_partition.name)
# Execute the code to be tested
repr_str = repr(partition)
repr_str = repr_str.replace('\n', '\\n')
        # We check just the beginning of the string:
assert re.match(
rf'^{partition.__class__.__name__}\s+at\s+'
rf'0x{id(partition):08x}\s+\(\\n.*',
repr_str)
@pytest.mark.parametrize(
"initial_status, exp_exc", [
('stopped', None),
('terminated', HTTPError({'http-status': 409, 'reason': 1})),
('starting', HTTPError({'http-status': 409, 'reason': 1})),
('active', HTTPError({'http-status': 409, 'reason': 1})),
('stopping', HTTPError({'http-status': 409, 'reason': 1})),
('degraded', HTTPError({'http-status': 409, 'reason': 1})),
('reservation-error',
HTTPError({'http-status': 409, 'reason': 1})),
('paused', HTTPError({'http-status': 409, 'reason': 1})),
]
)
def test_partition_delete(self, initial_status, exp_exc):
"""Test Partition.delete()."""
# Add a faked partition to be tested and another one
faked_partition = self.add_partition1()
self.add_partition2()
# Set the initial status of the faked partition
faked_partition.properties['status'] = initial_status
partition_mgr = self.cpc.partitions
partition = partition_mgr.find(name=faked_partition.name)
if exp_exc is not None:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
partition.delete()
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
# Check that the partition still exists
partition_mgr.find(name=faked_partition.name)
else:
# Execute the code to be tested.
partition.delete()
# Check that the partition no longer exists
with pytest.raises(NotFound) as exc_info:
partition_mgr.find(name=faked_partition.name)
def test_partition_delete_create_same_name(self):
"""Test Partition.delete() followed by create() with same name."""
# Add a faked partition to be tested and another one
faked_partition = self.add_partition1()
partition_name = faked_partition.name
self.add_partition2()
# Construct the input properties for a third partition
part3_props = {
'name': partition_name,
'description': 'Third partition',
'ifl-processors': 4,
'initial-memory': 4096,
'maximum-memory': 8192,
}
# Set the initial status of the faked partition
faked_partition.properties['status'] = 'stopped' # deletable
partition_mgr = self.cpc.partitions
partition = partition_mgr.find(name=partition_name)
# Execute the deletion code to be tested.
partition.delete()
# Check that the partition no longer exists
with pytest.raises(NotFound):
partition_mgr.find(name=partition_name)
# Execute the creation code to be tested.
partition_mgr.create(part3_props)
# Check that the partition exists again under that name
partition3 = partition_mgr.find(name=partition_name)
description = partition3.get_property('description')
assert description == 'Third partition'
@pytest.mark.parametrize(
"desc, partition_name, available_features, feature_name, "
"exp_feature_enabled, exp_exc", [
(
"No feature support on the CPC",
PART1_NAME,
None,
'fake-feature1', None, ValueError()
),
(
"Feature not available on the partition (empty feature list)",
PART3_NAME,
[],
'fake-feature1', None, ValueError()
),
(
"Feature not available on the part (one other feature avail)",
PART3_NAME,
[
dict(name='fake-feature-foo', state=True),
],
'fake-feature1', None, ValueError()
),
(
"Feature disabled (the only feature available)",
PART3_NAME,
[
dict(name='fake-feature1', state=False),
],
'fake-feature1', False, None
),
(
"Feature enabled (the only feature available)",
PART3_NAME,
[
dict(name='fake-feature1', state=True),
],
'fake-feature1', True, None
),
]
)
def test_partition_feature_enabled(
self, desc, partition_name, available_features, feature_name,
exp_feature_enabled, exp_exc):
# pylint: disable=unused-argument
"""Test Partition.feature_enabled()."""
# Add a faked Partition
faked_partition = self.add_partition(partition_name)
# Set up the firmware feature list
if available_features is not None:
faked_partition.properties['available-features-list'] = \
available_features
partition_mgr = self.cpc.partitions
partition = partition_mgr.find(name=partition_name)
if exp_exc:
with pytest.raises(exp_exc.__class__):
# Execute the code to be tested
partition.feature_enabled(feature_name)
else:
# Execute the code to be tested
act_feature_enabled = partition.feature_enabled(feature_name)
assert act_feature_enabled == exp_feature_enabled
@pytest.mark.parametrize(
"desc, partition_name, available_features, exp_exc", [
(
"No feature support on the CPC",
PART1_NAME,
None,
ValueError()
),
(
"Feature not available on the partition (empty feature list)",
PART3_NAME,
[],
None
),
(
"Feature not available on the part (one other feature avail)",
PART3_NAME,
[
dict(name='fake-feature-foo', state=True),
],
None
),
(
"Feature disabled (the only feature available)",
PART3_NAME,
[
dict(name='fake-feature1', state=False),
],
None
),
(
"Feature enabled (the only feature available)",
PART3_NAME,
[
dict(name='fake-feature1', state=True),
],
None
),
]
)
def test_partition_feature_info(
self, desc, partition_name, available_features, exp_exc):
# pylint: disable=unused-argument
"""Test Partition.feature_info()."""
# Add a faked Partition
faked_partition = self.add_partition(partition_name)
# Set up the firmware feature list
if available_features is not None:
faked_partition.properties['available-features-list'] = \
available_features
partition_mgr = self.cpc.partitions
partition = partition_mgr.find(name=partition_name)
if exp_exc:
with pytest.raises(exp_exc.__class__):
# Execute the code to be tested
partition.feature_info()
else:
# Execute the code to be tested
act_features = partition.feature_info()
assert act_features == available_features
@pytest.mark.parametrize(
"partition_name", [
PART1_NAME,
PART2_NAME,
]
)
@pytest.mark.parametrize(
"input_props", [
{},
{'description': 'New partition description'},
{'initial-memory': 512,
'description': 'New partition description'},
{'autogenerate-partition-id': True,
'partition-id': None},
{'boot-device': 'none',
'boot-ftp-host': None,
'boot-ftp-username': None,
'boot-ftp-password': None,
'boot-ftp-insfile': None},
{'boot-device': 'none',
'boot-network-device': None},
{'boot-device': 'none',
'boot-removable-media': None,
'boot-removable-media-type': None},
{'boot-device': 'none',
'boot-storage-device': None,
'boot-logical-unit-number': None,
'boot-world-wide-port-name': None},
{'boot-device': 'none',
'boot-iso-image-name': None,
'boot-iso-insfile': None},
{'ssc-ipv4-gateway': None,
'ssc-ipv6-gateway': None,
'ssc-master-userid': None,
'ssc-master-pw': None},
]
)
def test_partition_update_properties(self, input_props, partition_name):
"""Test Partition.update_properties()."""
# Add faked partitions
self.add_partition1()
self.add_partition2()
partition_mgr = self.cpc.partitions
partition = partition_mgr.find(name=partition_name)
partition.pull_full_properties()
saved_properties = copy.deepcopy(partition.properties)
# Execute the code to be tested
partition.update_properties(properties=input_props)
# Verify that the resource object already reflects the property
# updates.
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in partition.properties
prop_value = partition.properties[prop_name]
assert prop_value == exp_prop_value
# Refresh the resource object and verify that the resource object
# still reflects the property updates.
partition.pull_full_properties()
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in partition.properties
prop_value = partition.properties[prop_name]
assert prop_value == exp_prop_value
def test_partition_update_name(self):
"""
Test Partition.update_properties() with 'name' property.
"""
# Add a faked partition
faked_partition = self.add_partition1()
partition_name = faked_partition.name
partition_mgr = self.cpc.partitions
partition = partition_mgr.find(name=partition_name)
new_partition_name = "new-" + partition_name
# Execute the code to be tested
partition.update_properties(properties={'name': new_partition_name})
# Verify that the resource is no longer found by its old name, using
# list() (this does not use the name-to-URI cache).
partitions_list = partition_mgr.list(
filter_args=dict(name=partition_name))
assert len(partitions_list) == 0
# Verify that the resource is no longer found by its old name, using
# find() (this uses the name-to-URI cache).
with pytest.raises(NotFound):
partition_mgr.find(name=partition_name)
# Verify that the resource object already reflects the update, even
# though it has not been refreshed yet.
assert partition.properties['name'] == new_partition_name
# Refresh the resource object and verify that it still reflects the
# update.
partition.pull_full_properties()
assert partition.properties['name'] == new_partition_name
# Verify that the resource can be found by its new name, using find()
new_partition_find = partition_mgr.find(name=new_partition_name)
assert new_partition_find.properties['name'] == new_partition_name
# Verify that the resource can be found by its new name, using list()
new_partitions_list = partition_mgr.list(
filter_args=dict(name=new_partition_name))
assert len(new_partitions_list) == 1
new_partition_list = new_partitions_list[0]
assert new_partition_list.properties['name'] == new_partition_name
@pytest.mark.parametrize(
"initial_status, exp_exc", [
('stopped', None),
('terminated', HTTPError({'http-status': 409, 'reason': 1})),
('starting', HTTPError({'http-status': 409, 'reason': 1})),
('active', HTTPError({'http-status': 409, 'reason': 1})),
('stopping', HTTPError({'http-status': 409, 'reason': 1})),
('degraded', HTTPError({'http-status': 409, 'reason': 1})),
('reservation-error',
HTTPError({'http-status': 409, 'reason': 1})),
('paused', HTTPError({'http-status': 409, 'reason': 1})),
]
)
def test_partition_start(self, initial_status, exp_exc):
"""Test Partition.start()."""
# Add a faked partition
faked_partition = self.add_partition1()
# Set the initial status of the faked partition
faked_partition.properties['status'] = initial_status
partition_mgr = self.cpc.partitions
partition = partition_mgr.find(name=faked_partition.name)
if exp_exc is not None:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
partition.start()
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
else:
# Execute the code to be tested.
ret = partition.start()
assert ret == {}
partition.pull_full_properties()
status = partition.get_property('status')
assert status == 'active'
@pytest.mark.parametrize(
"initial_status, exp_exc", [
('stopped', HTTPError({'http-status': 409, 'reason': 1})),
('terminated', None),
('starting', HTTPError({'http-status': 409, 'reason': 1})),
('active', None),
('stopping', HTTPError({'http-status': 409, 'reason': 1})),
('degraded', HTTPError({'http-status': 409, 'reason': 1})),
('reservation-error',
HTTPError({'http-status': 409, 'reason': 1})),
('paused', None),
]
)
def test_partition_stop(self, initial_status, exp_exc):
"""Test Partition.stop()."""
# Add a faked partition
faked_partition = self.add_partition1()
# Set the initial status of the faked partition
faked_partition.properties['status'] = initial_status
partition_mgr = self.cpc.partitions
partition = partition_mgr.find(name=faked_partition.name)
if exp_exc is not None:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
partition.stop()
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
else:
# Execute the code to be tested.
ret = partition.stop()
assert ret == {}
partition.pull_full_properties()
status = partition.get_property('status')
assert status == 'stopped'
# TODO: Re-enable test_partition_dump_partition() once supported in hdlr
def xtest_partition_dump_partition(self):
"""Test Partition.dump_partition()."""
# Add a faked partition
faked_partition = self.add_partition1()
partition_mgr = self.cpc.partitions
partition = partition_mgr.find(name=faked_partition.name)
parameters = {
'dump-load-hba-uri': 'fake-hba-uri',
'dump-world-wide-port-name': 'fake-wwpn',
'dump-logical-unit-number': 'fake-lun',
}
# Execute the code to be tested.
ret = partition.dump_partition(parameters=parameters)
assert ret == {}
def test_partition_start_dump_program(self):
"""Test Partition.start_dump_program()."""
# Add a faked partition
faked_partition = self.add_partition1()
partition_mgr = self.cpc.partitions
partition = partition_mgr.find(name=faked_partition.name)
parameters = {
'dump-program-type': 'storage',
'dump-program-info': {
'storage-volume-uri': 'sv1' # dummy
},
}
# Execute the code to be tested.
ret = partition.start_dump_program(parameters=parameters)
assert ret == {}
# TODO: Re-enable test_partition_psw_restart() once supported in hdlr
def xtest_partition_psw_restart(self):
"""Test Partition.psw_restart()."""
# Add a faked partition
faked_partition = self.add_partition1()
partition_mgr = self.cpc.partitions
partition = partition_mgr.find(name=faked_partition.name)
# Execute the code to be tested.
ret = partition.psw_restart()
assert ret == {}
# TODO: Re-enable test_partition_mount_iso_image() once supported in hdlr
def xtest_partition_mount_iso_image(self):
"""Test Partition.mount_iso_image()."""
# Add a faked partition
faked_partition = self.add_partition1()
partition_mgr = self.cpc.partitions
partition = partition_mgr.find(name=faked_partition.name)
# TODO: Add test case where a file-like object is passed as image
image = b'fake-image-data'
image_name = 'fake-image-name'
ins_file_name = 'fake-ins-file-name'
# Execute the code to be tested.
ret = partition.mount_iso_image(image=image, image_name=image_name,
ins_file_name=ins_file_name)
assert ret is None
# TODO: Re-enable test_partition_unmount_iso_image() once supported in hdlr
def xtest_partition_unmount_iso_image(self):
"""Test Partition.unmount_iso_image()."""
# Add a faked partition
faked_partition = self.add_partition1()
partition_mgr = self.cpc.partitions
partition = partition_mgr.find(name=faked_partition.name)
# Execute the code to be tested.
ret = partition.unmount_iso_image()
assert ret is None
@pytest.mark.parametrize(
"filter_args, additional_props, exp_names", [
({'cpc-name': 'bad'},
None,
[]),
({'cpc-name': CPC_NAME},
['ifl-processors', 'maximum-memory'],
[PART1_NAME, PART2_NAME]),
({},
None,
[PART1_NAME, PART2_NAME]),
(None,
None,
[PART1_NAME, PART2_NAME]),
({'name': PART1_NAME},
['maximum-memory'],
[PART1_NAME]),
({'name': PART1_NAME, 'cpc-name': CPC_NAME},
None,
[PART1_NAME]),
({'name': PART1_NAME, 'cpc-name': 'bad'},
None,
[])
]
)
def test_console_list_permitted_partitions(self, filter_args,
additional_props, exp_names):
"""Test Console.list_permitted_partitions() with filter_args."""
# Add two faked partitions
self.add_partition1()
self.add_partition2()
self.session.hmc.consoles.add({
'object-id': None,
# object-uri will be automatically set
'parent': None,
'class': 'console',
'name': 'fake-console1',
'description': 'Console #1',
})
console = self.client.consoles.console
# Execute the code to be tested
partitions = console.list_permitted_partitions(
filter_args=filter_args,
additional_properties=additional_props)
assert len(partitions) == len(exp_names)
if exp_names:
names = [p.properties['name'] for p in partitions]
assert set(names) == set(exp_names)
for partition in partitions:
partition_props = dict(partition.properties)
for pname in LIST_PERMITTED_PARTITIONS_PROPS:
assert pname in partition_props, (
f"Property {pname!r} missing from returned partition "
f"properties, got: {partition_props!r}")
if additional_props:
for pname in additional_props:
assert pname in partition_props, (
f"Property {pname!r} missing from returned partition "
f"properties, got: {partition_props!r}")
# TODO: Test for Partition.send_os_command()
# TODO: Test for Partition.create_os_websocket()
# TODO: Test for Partition.wait_for_status()
# TODO: Test for Partition.increase_crypto_config()
# TODO: Test for Partition.decrease_crypto_config()
# TODO: Test for Partition.change_crypto_domain_config()
# TODO: Test for Partition.zeroize_crypto_domain()
# TODO: Test for Partition.attach_storage_group()
# TODO: Test for Partition.detach_storage_group()
# TODO: Test for Partition.list_attached_storage_groups()
|
GHSA-p57h-3cmc-xpjq
|
tests/unit/zhmcclient/test_user.py
|
@@ -19,9 +19,10 @@
import re
import copy
+import logging
import pytest
-from zhmcclient import Client, HTTPError, NotFound, User
+from zhmcclient import Client, HTTPError, NotFound, User, BLANKED_OUT_STRING
from zhmcclient_mock import FakedSession
from tests.common.utils import assert_resources
@@ -180,9 +181,13 @@ def test_user_manager_list(
None),
]
)
- def test_user_manager_create(self, input_props, exp_prop_names, exp_exc):
+ def test_user_manager_create(
+ self, caplog, input_props, exp_prop_names, exp_exc):
"""Test UserManager.create()."""
+ logger_name = "zhmcclient.api"
+ caplog.set_level(logging.DEBUG, logger=logger_name)
+
user_mgr = self.console.users
if exp_exc is not None:
@@ -202,6 +207,9 @@ def test_user_manager_create(self, input_props, exp_prop_names, exp_exc):
# Execute the code to be tested.
user = user_mgr.create(properties=input_props)
+ # Get its API call log record
+ call_record = caplog.records[-2]
+
# Check the resource for consistency within itself
assert isinstance(user, User)
user_name = user.name
@@ -219,6 +227,11 @@ def test_user_manager_create(self, input_props, exp_prop_names, exp_exc):
exp_value = input_props[prop_name]
assert value == exp_value
+ # Verify the API call log record for blanked-out properties.
+ if 'password' in input_props:
+ exp_str = f"'password': '{BLANKED_OUT_STRING}'"
+ assert call_record.message.find(exp_str) > 0
+
def test_user_repr(self):
"""Test User.__repr__()."""
|
# Copyright 2017,2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for _user module.
"""
import re
import copy
import pytest
from zhmcclient import Client, HTTPError, NotFound, User
from zhmcclient_mock import FakedSession
from tests.common.utils import assert_resources
class TestUser:
"""All tests for the User and UserManager classes."""
def setup_method(self):
"""
Setup that is called by pytest before each test method.
Set up a faked session, and add a faked Console without any
child resources.
"""
# pylint: disable=attribute-defined-outside-init
self.session = FakedSession('fake-host', 'fake-hmc', '2.13.1', '1.8')
self.client = Client(self.session)
self.faked_console = self.session.hmc.consoles.add({
'object-id': None,
# object-uri will be automatically set
'parent': None,
'class': 'console',
'name': 'fake-console1',
'description': 'Console #1',
})
self.console = self.client.consoles.find(name=self.faked_console.name)
def add_user(self, name, type_):
"""
Add a faked user object to the faked Console and return it.
"""
faked_user = self.faked_console.users.add({
'object-id': f'oid-{name}',
# object-uri will be automatically set
'parent': '/api/console',
'class': 'user',
'name': name,
'description': f'User {name}',
'type': type_,
'authentication-type': 'local',
})
return faked_user
def add_user_role(self, name, type_):
"""
Add a faked user role object to the faked Console and return it.
"""
faked_user_role = self.faked_console.user_roles.add({
'object-id': f'oid-{name}',
# object-uri will be automatically set
'parent': '/api/console',
'class': 'user-role',
'name': name,
'description': f'User Role {name}',
'type': type_,
})
return faked_user_role
def test_user_manager_repr(self):
"""Test UserManager.__repr__()."""
user_mgr = self.console.users
# Execute the code to be tested
repr_str = repr(user_mgr)
repr_str = repr_str.replace('\n', '\\n')
# We check just the begin of the string:
assert re.match(
rf'^{user_mgr.__class__.__name__}\s+at\s+'
rf'0x{id(user_mgr):08x}\s+\(\\n.*',
repr_str)
def test_user_manager_initial_attrs(self):
"""Test initial attributes of UserManager."""
user_mgr = self.console.users
# Verify all public properties of the manager object
assert user_mgr.resource_class == User
assert user_mgr.class_name == 'user'
assert user_mgr.session is self.session
assert user_mgr.parent is self.console
assert user_mgr.console is self.console
@pytest.mark.parametrize(
"full_properties_kwargs, prop_names", [
(dict(full_properties=False),
['object-uri', 'name', 'type']),
(dict(full_properties=True),
['object-uri', 'name', 'type', 'description']),
({}, # test default for full_properties (False)
['object-uri', 'name', 'type']),
]
)
@pytest.mark.parametrize(
"filter_args, exp_names", [
(None,
['a', 'b']),
({},
['a', 'b']),
({'name': 'a'},
['a']),
({'name': 'A'}, # users have case-insensitive names
['a']),
]
)
def test_user_manager_list(
self, filter_args, exp_names, full_properties_kwargs, prop_names):
"""Test UserManager.list()."""
faked_user1 = self.add_user(name='a', type_='standard')
faked_user2 = self.add_user(name='b', type_='standard')
faked_users = [faked_user1, faked_user2]
exp_faked_users = [u for u in faked_users if u.name in exp_names]
user_mgr = self.console.users
# Execute the code to be tested
users = user_mgr.list(filter_args=filter_args,
**full_properties_kwargs)
assert_resources(users, exp_faked_users, prop_names)
@pytest.mark.parametrize(
"input_props, exp_prop_names, exp_exc", [
({},
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'description': 'fake description X'},
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'name': 'fake-name-x'},
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'name': 'fake-name-x',
'type': 'standard'},
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'name': 'fake-name-x',
'type': 'standard',
'authentication-type': 'local',
'password': 'bla',
'password-rule-uri': '/api/console/password-rules/dummy'},
['object-uri', 'name', 'type', 'authentication-type'],
None),
({'name': 'fake-name-x',
'type': 'standard',
'authentication-type': 'local',
'password': 'bla',
'password-rule-uri': '/api/console/password-rules/dummy',
'description': 'fake description X'},
['object-uri', 'name', 'type', 'authentication-type',
'description'],
None),
]
)
def test_user_manager_create(self, input_props, exp_prop_names, exp_exc):
"""Test UserManager.create()."""
user_mgr = self.console.users
if exp_exc is not None:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
user_mgr.create(properties=input_props)
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
else:
# Execute the code to be tested.
user = user_mgr.create(properties=input_props)
# Check the resource for consistency within itself
assert isinstance(user, User)
user_name = user.name
exp_user_name = user.properties['name']
assert user_name == exp_user_name
user_uri = user.uri
exp_user_uri = user.properties['object-uri']
assert user_uri == exp_user_uri
# Check the properties against the expected names and values
for prop_name in exp_prop_names:
assert prop_name in user.properties
if prop_name in input_props:
value = user.properties[prop_name]
exp_value = input_props[prop_name]
assert value == exp_value
def test_user_repr(self):
"""Test User.__repr__()."""
faked_user1 = self.add_user(name='a', type_='standard')
user1 = self.console.users.find(name=faked_user1.name)
# Execute the code to be tested
repr_str = repr(user1)
repr_str = repr_str.replace('\n', '\\n')
# We check just the begin of the string:
assert re.match(
rf'^{user1.__class__.__name__}\s+at\s+'
rf'0x{id(user1):08x}\s+\(\\n.*',
repr_str)
@pytest.mark.parametrize(
"input_name, input_type, exp_exc", [
('a', 'standard', None),
('b', 'template', None),
('c', 'pattern-based',
HTTPError({'http-status': 400, 'reason': 312})),
('d', 'system-defined', None),
]
)
def test_user_delete(self, input_name, input_type, exp_exc):
"""Test User.delete()."""
faked_user = self.add_user(name=input_name, type_=input_type)
user_mgr = self.console.users
user = user_mgr.find(name=faked_user.name)
if exp_exc is not None:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
user.delete()
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
# Check that the user still exists
user_mgr.find(name=faked_user.name)
else:
# Execute the code to be tested.
user.delete()
# Check that the user no longer exists
with pytest.raises(NotFound) as exc_info:
user_mgr.find(name=faked_user.name)
def test_user_delete_create_same_name(self):
"""Test User.delete() followed by create() with same name."""
user_name = 'faked_a'
# Add the user to be tested
self.add_user(name=user_name, type_='standard')
# Input properties for a user with the same name
sn_user_props = {
'name': user_name,
'description': 'User with same name',
'type': 'standard',
'authentication-type': 'local',
'password': 'bla',
'password-rule-uri': '/api/console/password-rules/dummy',
}
user_mgr = self.console.users
user = user_mgr.find(name=user_name)
# Execute the deletion code to be tested
user.delete()
# Check that the user no longer exists
with pytest.raises(NotFound):
user_mgr.find(name=user_name)
# Execute the creation code to be tested.
user_mgr.create(sn_user_props)
# Check that the user exists again under that name
sn_user = user_mgr.find(name=user_name)
description = sn_user.get_property('description')
assert description == sn_user_props['description']
@pytest.mark.parametrize(
"input_props", [
{},
{'description': 'New user description'},
{'authentication-type': 'ldap',
'description': 'New user description'},
]
)
def test_user_update_properties(self, input_props):
"""Test User.update_properties()."""
# Add the user to be tested
faked_user = self.add_user(name='a', type_='standard')
user_mgr = self.console.users
user = user_mgr.find(name=faked_user.name)
user.pull_full_properties()
saved_props = copy.deepcopy(user.properties)
# Execute the code to be tested
user.update_properties(properties=input_props)
# Verify that the resource object already reflects the property
# updates.
for prop_name in saved_props:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_props[prop_name]
assert prop_name in user.properties
prop_value = user.properties[prop_name]
assert prop_value == exp_prop_value, \
f"Unexpected value for property {prop_name!r}"
# Refresh the resource object and verify that the resource object
# still reflects the property updates.
user.pull_full_properties()
for prop_name in saved_props:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_props[prop_name]
assert prop_name in user.properties
prop_value = user.properties[prop_name]
assert prop_value == exp_prop_value
@pytest.mark.parametrize(
"user_name, user_type, exp_exc", [
('a', 'standard', None),
('b', 'template', None),
('c', 'pattern-based',
HTTPError({'http-status': 400, 'reason': 314})),
('d', 'system-defined',
HTTPError({'http-status': 400, 'reason': 314})),
]
)
@pytest.mark.parametrize(
"role_name, role_type", [
('ra', 'user-defined'),
('rb', 'system-defined'),
]
)
def test_user_add_user_role(
self, role_name, role_type, user_name, user_type, exp_exc):
"""Test User.add_user_role()."""
faked_user = self.add_user(name=user_name, type_=user_type)
user_mgr = self.console.users
user = user_mgr.find(name=faked_user.name)
faked_user_role = self.add_user_role(name=role_name, type_=role_type)
user_role_mgr = self.console.user_roles
user_role = user_role_mgr.find(name=faked_user_role.name)
if exp_exc is not None:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
user.add_user_role(user_role)
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
# Check that the user does not have that user role
user.pull_full_properties()
if 'user-roles' in user.properties:
user_role_uris = user.properties['user-roles']
user_role_uri = user_role.uri
assert user_role_uri not in user_role_uris
else:
# Execute the code to be tested.
ret = user.add_user_role(user_role)
assert ret is None
# Check that the user has that user role
user.pull_full_properties()
assert 'user-roles' in user.properties
user_role_uris = user.properties['user-roles']
user_role_uri = user_role.uri
assert user_role_uri in user_role_uris
@pytest.mark.parametrize(
"user_name, user_type, exp_exc", [
('a', 'standard', None),
('b', 'template', None),
('c', 'pattern-based',
HTTPError({'http-status': 400, 'reason': 314})),
('d', 'system-defined',
HTTPError({'http-status': 400, 'reason': 314})),
]
)
@pytest.mark.parametrize(
"role_name, role_type", [
('ra', 'user-defined'),
('rb', 'system-defined'),
]
)
def test_user_remove_user_role(
self, role_name, role_type, user_name, user_type, exp_exc):
"""Test User.remove_user_role()."""
faked_user = self.add_user(name=user_name, type_=user_type)
user_mgr = self.console.users
user = user_mgr.find(name=faked_user.name)
faked_user_role = self.add_user_role(name=role_name, type_=role_type)
user_role_mgr = self.console.user_roles
user_role = user_role_mgr.find(name=faked_user_role.name)
# Prepare the user with the initial user role
if 'user-roles' not in faked_user.properties:
faked_user.properties['user-roles'] = []
faked_user.properties['user-roles'].append(faked_user_role.uri)
if exp_exc is not None:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
user.remove_user_role(user_role)
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
# Check that the user still has that user role
user.pull_full_properties()
if 'user-roles' in user.properties:
user_role_uris = user.properties['user-roles']
user_role_uri = user_role.uri
assert user_role_uri in user_role_uris
else:
# Execute the code to be tested.
ret = user.remove_user_role(user_role)
assert ret is None
# Check that the user no longer has that user role
user.pull_full_properties()
assert 'user-roles' in user.properties
user_role_uris = user.properties['user-roles']
user_role_uri = user_role.uri
assert user_role_uri not in user_role_uris
|
GHSA-p57h-3cmc-xpjq
|
zhmcclient/_activation_profile.py
|
@@ -224,7 +224,8 @@ def list(self, full_properties=False, filter_args=None,
list_uri, result_prop, full_properties, filter_args,
additional_properties)
- @logged_api_call
+ @logged_api_call(blanked_properties=['ssc-master-pw', 'zaware-master-pw'],
+ properties_pos=1)
def create(self, properties):
"""
Create and configure an Activation Profile on this CPC, of the profile
@@ -344,7 +345,8 @@ def delete(self):
self.get_properties_local(self.manager._name_prop, None))
self.cease_existence_local()
- @logged_api_call
+ @logged_api_call(blanked_properties=['ssc-master-pw', 'zaware-master-pw'],
+ properties_pos=1)
def update_properties(self, properties):
"""
Update writeable properties of this Activation Profile.
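
The decorator arguments added in this patch tell the API-call logger which positional argument carries the properties dict (properties_pos=1, i.e. the argument after self) and which keys in it must be masked before the call is written to the zhmcclient.api log. A rough, hypothetical sketch of that idea (the real implementation lives in zhmcclient._logging and may differ):

import functools
import logging

BLANKED_OUT_STRING = '********'

def logged_api_call_sketch(blanked_properties=(), properties_pos=None):
    # Decorator factory: log the call with the named properties blanked.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            log_args = list(args)
            if properties_pos is not None and len(log_args) > properties_pos:
                props = dict(log_args[properties_pos])
                for key in blanked_properties:
                    if key in props:
                        props[key] = BLANKED_OUT_STRING
                log_args[properties_pos] = props
            logging.getLogger('zhmcclient.api').debug(
                "Called: %s%r", func.__name__, tuple(log_args[1:]))
            return func(*args, **kwargs)
        return wrapper
    return decorator

# Usage mirroring the patch above (sketch only):
#   @logged_api_call_sketch(
#       blanked_properties=['ssc-master-pw', 'zaware-master-pw'],
#       properties_pos=1)
#   def update_properties(self, properties): ...
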
|
# Copyright 2016,2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An :term:`Activation Profile` controls the activation of a :term:`CPC`
or :term:`LPAR`. They are used to tailor the operation of a CPC and are
stored in the Support Element associated with the CPC.
Activation Profile resources are contained in CPC resources.
Activation Profile resources only exist in CPCs that are not in DPM mode.
TODO: If Reset Activation Profiles are used to determine the CPC mode,
should they not exist in all CPC modes?
There are three types of Activation Profiles:
1. Reset:
The Reset Activation Profile defines for a CPC the mode in which the CPC
licensed internal code will be loaded (e.g. DPM mode or classic mode) and
how much central storage and expanded storage will be used.
2. Image:
For CPCs in classic mode, each LPAR can have an Image Activation Profile.
The Image Activation Profile determines the number of CPs that the LPAR will
use and whether these CPs will be dedicated to the LPAR or shared. It also
allows assigning the amount of central storage and expanded storage that
will be used by each LPAR.
3. Load:
For CPCs in classic mode, each LPAR can have a Load Activation Profile.
The Load Activation Profile defines the channel address of the device that
the operating system for that LPAR will be loaded (booted) from.
"""
import copy
import warnings
from ._manager import BaseManager
from ._resource import BaseResource
from ._logging import logged_api_call
from ._utils import RC_RESET_ACTIVATION_PROFILE, RC_IMAGE_ACTIVATION_PROFILE, \
RC_LOAD_ACTIVATION_PROFILE
__all__ = ['ActivationProfileManager', 'ActivationProfile']
# Resource class names, by profile type:
ACTIVATION_PROFILE_CLASSES = {
'reset': RC_RESET_ACTIVATION_PROFILE,
'image': RC_IMAGE_ACTIVATION_PROFILE,
'load': RC_LOAD_ACTIVATION_PROFILE,
}
class ActivationProfileManager(BaseManager):
"""
Manager providing access to the
:term:`Activation Profiles <Activation Profile>` of a particular type in
a particular :term:`CPC` (the scoping CPC).
Possible types of activation profiles are:
* Reset Activation Profile
* Image Activation Profile
* Load Activation Profile
Derived from :class:`~zhmcclient.BaseManager`; see there for common methods
and attributes.
Objects of this class are not directly created by the user; they are
accessible via the following instance variables of a
:class:`~zhmcclient.Cpc` object (in classic mode or ensemble mode):
* :attr:`~zhmcclient.Cpc.reset_activation_profiles`
* :attr:`~zhmcclient.Cpc.image_activation_profiles`
* :attr:`~zhmcclient.Cpc.load_activation_profiles`
HMC/SE version requirements: None
"""
def __init__(self, cpc, profile_type):
# This function should not go into the docs.
# Parameters:
# cpc (:class:`~zhmcclient.Cpc`):
# CPC defining the scope for this manager.
# profile_type (string):
# Type of Activation Profiles:
# * `reset`: Reset Activation Profiles
# * `image`: Image Activation Profiles
# * `load`: Load Activation Profiles
# Resource properties that are supported as filter query parameters.
# If the support for a resource property changes within the set of HMC
# versions that support this type of resource, this list must be set up
# for the version of the HMC this session is connected to.
query_props = [
'name',
]
try:
activation_profile_class = ACTIVATION_PROFILE_CLASSES[profile_type]
except KeyError:
raise ValueError(f"Unknown activation profile type: {profile_type}")
super().__init__(
resource_class=ActivationProfile,
class_name=activation_profile_class,
session=cpc.manager.session,
parent=cpc,
base_uri=f'{cpc.uri}/{profile_type}-activation-profiles',
oid_prop='name', # This is an exception!
uri_prop='element-uri',
name_prop='name',
query_props=query_props,
supports_properties=True)
self._profile_type = profile_type
@property
def cpc(self):
"""
:class:`~zhmcclient.Cpc`: :term:`CPC` defining the scope for this
manager.
"""
return self._parent
@property
def profile_type(self):
"""
:term:`string`: Type of the Activation Profiles managed by this object:
* ``'reset'`` - Reset Activation Profiles
* ``'image'`` - Image Activation Profiles
* ``'load'`` - Load Activation Profiles
"""
return self._profile_type
@logged_api_call
# pylint: disable=arguments-differ
def list(self, full_properties=False, filter_args=None,
additional_properties=None):
"""
List the Activation Profiles of this CPC, of the profile type
managed by this object.
Any resource property may be specified in a filter argument. For
details about filter arguments, see :ref:`Filtering`.
The listing of resources is handled in an optimized way:
* If this manager is enabled for :ref:`auto-updating`, a locally
maintained resource list is used (which is automatically updated via
inventory notifications from the HMC) and the provided filter
arguments are applied.
* Otherwise, if the filter arguments specify the resource name as a
single filter argument with a straight match string (i.e. without
regular expressions), an optimized lookup is performed based on a
locally maintained name-URI cache.
* Otherwise, the HMC List operation is performed with the subset of the
provided filter arguments that can be handled on the HMC side and the
remaining filter arguments are applied on the client side on the list
result.
HMC/SE version requirements: None
Authorization requirements:
* Object-access permission to this CPC.
Parameters:
full_properties (bool):
Controls whether the full set of resource properties should be
retrieved, vs. only the short set as returned by the list
operation.
filter_args (dict):
Filter arguments that narrow the list of returned resources to
those that match the specified filter arguments. For details, see
:ref:`Filtering`.
`None` causes no filtering to happen, i.e. all resources are
returned.
additional_properties (list of string):
List of property names that are to be returned in addition to the
default properties.
This parameter requires HMC 2.16.0 or higher, and is supported
only for image profiles.
Returns:
: A list of :class:`~zhmcclient.ActivationProfile` objects.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
result_prop = self._profile_type + '-activation-profiles'
list_uri = f'{self.cpc.uri}/{result_prop}'
if self._profile_type != 'image' and additional_properties is not None:
raise TypeError(
f"list() for {self._profile_type} profiles does not support "
"'additional_properties' parameter")
return self._list_with_operation(
list_uri, result_prop, full_properties, filter_args,
additional_properties)
@logged_api_call
def create(self, properties):
"""
Create and configure an Activation Profile on this CPC, of the profile
type managed by this object.
HMC/SE version requirements:
* :ref:`API feature <API features>` "create-delete-activation-profiles"
Authorization requirements:
* Object-access permission to this CPC.
* Task permission to the "Customize/Delete Activation Profiles" task.
Parameters:
properties (dict): Initial property values.
Allowable properties are defined in section 'Request body contents'
in section 'Create Reset/Image/Load Activation Profile' in the
:term:`HMC API` book.
Note that the input profile name for creation must be provided in
property 'profile-name', even though it shows up on the created
resource in property 'name'. This applies to all three types of
activation profiles.
Returns:
ActivationProfile:
The resource object for the new Activation Profile.
The object will have its 'element-uri' property set, and will also
have the input properties set.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
ap_selector = self._profile_type + '-activation-profiles'
uri = f'{self.cpc.uri}/{ap_selector}'
result = self.session.post(uri, body=properties)
# The "Create ... Activation Profile" operations do not return the
# resource URI, so we construct it ourselves. Also, these operations
# specify the profile name in input property 'profile-name'.
if result is not None:
warnings.warn(
f"The Create {self._profile_type} Activation Profile operation "
f"now has response data with properties: {result.keys()!r}",
UserWarning)
name = properties['profile-name']
uri = f'{uri}/{name}'
props = copy.deepcopy(properties)
props[self._uri_prop] = uri
profile = ActivationProfile(self, uri, name, props)
self._name_uri_cache.update(name, uri)
return profile
class ActivationProfile(BaseResource):
"""
Representation of an :term:`Activation Profile` of a particular type.
Derived from :class:`~zhmcclient.BaseResource`; see there for common
methods and attributes.
Objects of this class are not directly created by the user; they are
returned from creation or list functions on their manager object
(in this case, :class:`~zhmcclient.ActivationProfileManager`).
HMC/SE version requirements: None
"""
def __init__(self, manager, uri, name=None, properties=None):
# This function should not go into the docs.
# manager (:class:`~zhmcclient.ActivationProfileManager`):
# Manager object for this resource object.
# uri (string):
# Canonical URI path of the resource.
# name (string):
# Name of the resource.
# properties (dict):
# Properties to be set for this resource object. May be `None` or
# empty.
assert isinstance(manager, ActivationProfileManager), (
"ActivationProfile init: Expected manager type "
f"{ActivationProfileManager}, got {type(manager)}")
super().__init__(manager, uri, name, properties)
@logged_api_call
def delete(self):
"""
Delete this Activation Profile.
HMC/SE version requirements:
* :ref:`API feature <API features>` "create-delete-activation-profiles"
Authorization requirements:
* Task permission to the "Customize/Delete Activation Profiles" task.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
# pylint: disable=protected-access
self.manager.session.delete(self.uri, resource=self)
self.manager._name_uri_cache.delete(
self.get_properties_local(self.manager._name_prop, None))
self.cease_existence_local()
@logged_api_call
def update_properties(self, properties):
"""
Update writeable properties of this Activation Profile.
This method serializes with other methods that access or change
properties on the same Python object.
HMC/SE version requirements: None
Authorization requirements:
* Object-access permission to the CPC of this Activation Profile.
* Task permission for the "Customize/Delete Activation Profiles" task.
Parameters:
properties (dict): New values for the properties to be updated.
Properties not to be updated are omitted.
Allowable properties are the properties with qualifier (w) in
section 'Data model' in section
'<profile_type> activation profile' in the :term:`HMC API` book,
where <profile_type> is the profile type of this object
(e.g. Reset, Load, Image).
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
# pylint: disable=protected-access
self.manager.session.post(self.uri, resource=self, body=properties)
# Attempts to change the 'name' property will be rejected by the HMC,
# so we don't need to update the name-to-URI cache.
assert self.manager._name_prop not in properties
self.update_properties_local(copy.deepcopy(properties))
@logged_api_call
def assign_certificate(self, certificate):
"""
Assigns a :term:`Certificate` to this Image Activation Profile.
HMC/SE version requirements:
* :ref:`API feature <API features>` "secure-boot-with-certificates".
Authorization requirements:
* Object-access permission to this Activation Profile.
* Object-access permission to the specified certificate.
* Task permission to the "Assign Secure Boot Certificates" task.
Parameters:
certificate (:class:`~zhmcclient.Certificate`):
Certificate to be assigned. The certificate must not currently
be assigned to this LPAR.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'certificate-uri': certificate.uri}
self.manager.session.post(
self.uri + '/operations/assign-certificate', resource=self,
body=body)
@logged_api_call
def unassign_certificate(self, certificate):
"""
Unassign a :term:`Certificate` from this Image Activation Profile.
HMC/SE version requirements:
* :ref:`API feature <API features>` "secure-boot-with-certificates".
Authorization requirements:
* Object-access permission to this Image Activation Profile.
* Object-access permission to the specified certificate.
* Task permission to the "Assign Secure Boot Certificates" task.
Parameters:
certificate (:class:`~zhmcclient.Certificate`):
Certificate to be unassigned. The certificate must currently be
assigned to this LPAR.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'certificate-uri': certificate.uri}
self.manager.session.post(
self.uri + '/operations/unassign-certificate', resource=self,
body=body)
|
GHSA-p57h-3cmc-xpjq
|
zhmcclient/_constants.py
|
@@ -50,7 +50,8 @@
'HTML_REASON_WEB_SERVICES_DISABLED',
'HTML_REASON_OTHER',
'STOMP_MIN_CONNECTION_CHECK_TIME',
- 'DEFAULT_WS_TIMEOUT']
+ 'DEFAULT_WS_TIMEOUT',
+ 'BLANKED_OUT_STRING']
#: Default HTTP connect timeout in seconds,
@@ -187,3 +188,7 @@
#: Default WebSocket connect and receive timeout in seconds, for interacting
#: with the :class:`zhmcclient.OSConsole` class.
DEFAULT_WS_TIMEOUT = 5
+
+#: Replacement string for blanked out sensitive values in log entries, such as
+#: passwords or session tokens.
+BLANKED_OUT_STRING = '********'
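
Exporting BLANKED_OUT_STRING lets tests and callers refer to the replacement value instead of hard-coding '********'. A minimal usage sketch (the import matches the test_user.py patch above; the helper itself is illustrative):

from zhmcclient import BLANKED_OUT_STRING

def expected_blanked_fragment(prop_name):
    # Substring expected in an API-call log entry for a blanked-out
    # property, e.g. "'password': '********'".
    return f"'{prop_name}': '{BLANKED_OUT_STRING}'"

# e.g.: assert expected_blanked_fragment('password') in record.getMessage()
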
|
# Copyright 2017,2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Public constants.
These constants are not meant to be changed by the user; they are made
available for inspection and documentation purposes only.
For technical reasons, the online documentation shows these constants in the
``zhmcclient._constants`` namespace, but they are also available in the
``zhmcclient`` namespace and should be used from there.
"""
__all__ = ['DEFAULT_CONNECT_TIMEOUT',
'DEFAULT_CONNECT_RETRIES',
'DEFAULT_HMC_PORT',
'DEFAULT_READ_TIMEOUT',
'DEFAULT_READ_RETRIES',
'DEFAULT_STOMP_PORT',
'DEFAULT_MAX_REDIRECTS',
'DEFAULT_OPERATION_TIMEOUT',
'DEFAULT_STATUS_TIMEOUT',
'DEFAULT_NAME_URI_CACHE_TIMETOLIVE',
'DEFAULT_STOMP_CONNECT_TIMEOUT',
'DEFAULT_STOMP_CONNECT_RETRIES',
'DEFAULT_STOMP_RECONNECT_SLEEP_INITIAL',
'DEFAULT_STOMP_RECONNECT_SLEEP_INCREASE',
'DEFAULT_STOMP_RECONNECT_SLEEP_MAX',
'DEFAULT_STOMP_RECONNECT_SLEEP_JITTER',
'DEFAULT_STOMP_KEEPALIVE',
'DEFAULT_STOMP_HEARTBEAT_SEND_CYCLE',
'DEFAULT_STOMP_HEARTBEAT_RECEIVE_CYCLE',
'DEFAULT_STOMP_HEARTBEAT_RECEIVE_CHECK',
'HMC_LOGGER_NAME',
'JMS_LOGGER_NAME',
'API_LOGGER_NAME',
'OS_LOGGER_NAME',
'HTML_REASON_WEB_SERVICES_DISABLED',
'HTML_REASON_OTHER',
'STOMP_MIN_CONNECTION_CHECK_TIME',
'DEFAULT_WS_TIMEOUT']
#: Default HTTP connect timeout in seconds,
#: if not specified in the ``retry_timeout_config`` init argument to
#: :class:`~zhmcclient.Session`.
DEFAULT_CONNECT_TIMEOUT = 30
#: Default number of HTTP connect retries,
#: if not specified in the ``retry_timeout_config`` init argument to
#: :class:`~zhmcclient.Session`.
DEFAULT_CONNECT_RETRIES = 3
#: Default HMC port number
DEFAULT_HMC_PORT = 6794
#: Default HTTP read timeout in seconds,
#: if not specified in the ``retry_timeout_config`` init argument to
#: :class:`~zhmcclient.Session`.
#:
#: Note: The default value for this parameter has been increased to a large
#: value in order to mitigate the behavior of the 'requests' module to
#: retry HTTP methods even if they are not idempotent (e.g. DELETE).
#: See zhmcclient `issue #249
#: <https://github.com/zhmcclient/python-zhmcclient/issues/249>`_.
DEFAULT_READ_TIMEOUT = 3600
#: Default port on which the HMC issues JMS over STOMP messages.
DEFAULT_STOMP_PORT = 61612
#: Default number of HTTP read retries,
#: if not specified in the ``retry_timeout_config`` init argument to
#: :class:`~zhmcclient.Session`.
#:
#: Note: The default value for this parameter has been set to 0 in order to
#: mitigate the behavior of the 'requests' module to retry HTTP methods even if
#: they are not idempotent (e.g. DELETE).
#: See zhmcclient `issue #249
#: <https://github.com/zhmcclient/python-zhmcclient/issues/249>`_.
DEFAULT_READ_RETRIES = 0
#: Default max. number of HTTP redirects,
#: if not specified in the ``retry_timeout_config`` init argument to
#: :class:`~zhmcclient.Session`.
DEFAULT_MAX_REDIRECTS = 30
#: Default timeout in seconds for waiting for completion of an asynchronous
#: HMC operation,
#: if not specified in the ``retry_timeout_config`` init argument to
#: :class:`~zhmcclient.Session`.
#:
#: This is used as a default value in asynchronous methods on
#: resource objects (e.g. :meth:`zhmcclient.Partition.start`), in the
#: :meth:`zhmcclient.Job.wait_for_completion` method, and in the
#: low level method :meth:`zhmcclient.Session.post`.
DEFAULT_OPERATION_TIMEOUT = 3600
#: Default timeout in seconds for waiting for completion of deferred status
#: changes for LPARs and Partitions,
#: if not specified in the ``retry_timeout_config`` init argument to
#: :class:`~zhmcclient.Session`.
DEFAULT_STATUS_TIMEOUT = 60
#: Default time to the next automatic invalidation of the Name-URI cache of
#: manager objects, in seconds since the last invalidation,
#: if not specified in the ``retry_timeout_config`` init argument to
#: :class:`~zhmcclient.Session`.
#:
#: The special value 0 means that no Name-URI cache is maintained (i.e. the
#: caching is disabled).
DEFAULT_NAME_URI_CACHE_TIMETOLIVE = 300
#: Default value for the `connect_timeout` property of the
#: :class:`~zhmcclient.StompRetryTimeoutConfig` configuration.
DEFAULT_STOMP_CONNECT_TIMEOUT = 30
#: Default value for the `connect_retries` property of the
#: :class:`~zhmcclient.StompRetryTimeoutConfig` configuration.
DEFAULT_STOMP_CONNECT_RETRIES = 3
#: Default value for the `reconnect_sleep_initial` property of the
#: :class:`~zhmcclient.StompRetryTimeoutConfig` configuration.
DEFAULT_STOMP_RECONNECT_SLEEP_INITIAL = 0.1
#: Default value for the `reconnect_sleep_increase` property of the
#: :class:`~zhmcclient.StompRetryTimeoutConfig` configuration.
DEFAULT_STOMP_RECONNECT_SLEEP_INCREASE = 0.5
#: Default value for the `reconnect_sleep_max` property of the
#: :class:`~zhmcclient.StompRetryTimeoutConfig` configuration.
DEFAULT_STOMP_RECONNECT_SLEEP_MAX = 60
#: Default value for the `reconnect_sleep_jitter` property of the
#: :class:`~zhmcclient.StompRetryTimeoutConfig` configuration.
DEFAULT_STOMP_RECONNECT_SLEEP_JITTER = 0.1
#: Default value for the `keepalive` property of the
#: :class:`~zhmcclient.StompRetryTimeoutConfig` configuration.
DEFAULT_STOMP_KEEPALIVE = True
#: Default value for the `heartbeat_send_cycle` property of the
#: :class:`~zhmcclient.StompRetryTimeoutConfig` configuration.
DEFAULT_STOMP_HEARTBEAT_SEND_CYCLE = 5.0
#: Default value for the `heartbeat_receive_cycle` property of the
#: :class:`~zhmcclient.StompRetryTimeoutConfig` configuration.
DEFAULT_STOMP_HEARTBEAT_RECEIVE_CYCLE = 5.0
#: Default value for the `heartbeat_receive_check` property of the
#: :class:`~zhmcclient.StompRetryTimeoutConfig` configuration.
DEFAULT_STOMP_HEARTBEAT_RECEIVE_CHECK = 1.0
#: Name of the Python logger that logs HMC operations.
HMC_LOGGER_NAME = 'zhmcclient.hmc'
#: Name of the Python logger that logs zhmcclient API calls made by the user.
API_LOGGER_NAME = 'zhmcclient.api'
#: Name of the Python logger that logs JMS notifications.
JMS_LOGGER_NAME = 'zhmcclient.jms'
#: Name of the Python logger that logs interactions with OS consoles.
OS_LOGGER_NAME = 'zhmcclient.os'
#: HTTP reason code: Web Services API is not enabled on the HMC.
HTML_REASON_WEB_SERVICES_DISABLED = 900
#: HTTP reason code: Other HTML-formatted error response. Note that over time,
#: there may be more specific reason codes introduced for such situations.
HTML_REASON_OTHER = 999
#: Minimum time between checks for STOMP connection loss.
STOMP_MIN_CONNECTION_CHECK_TIME = 5.0
#: Default WebSocket connect and receive timeout in seconds, for interacting
#: with the :class:`zhmcclient.OSConsole` class.
DEFAULT_WS_TIMEOUT = 5
|
GHSA-p57h-3cmc-xpjq
|
zhmcclient/_ldap_server_definition.py
|
@@ -151,7 +151,7 @@ def list(self, full_properties=False, filter_args=None):
return self._list_with_operation(
list_uri, result_prop, full_properties, filter_args, None)
- @logged_api_call
+ @logged_api_call(blanked_properties=['bind-password'], properties_pos=1)
def create(self, properties):
"""
Create a new LDAP Server Definition in this HMC.
@@ -257,7 +257,7 @@ def delete(self):
self.get_properties_local(self.manager._name_prop, None))
self.cease_existence_local()
- @logged_api_call
+ @logged_api_call(blanked_properties=['bind-password'], properties_pos=1)
def update_properties(self, properties):
"""
Update writeable properties of this LDAP Server Definition.
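
With this change, a DEBUG record on the zhmcclient.api logger for LdapServerDefinitionManager.create() or LdapServerDefinition.update_properties() should show 'bind-password': '********' rather than the real value. A hedged verification sketch, assuming the zhmcclient_mock FakedSession supports creating LDAP Server Definitions (adjust to the actual test environment):

import logging
from zhmcclient import Client, BLANKED_OUT_STRING
from zhmcclient_mock import FakedSession

def test_bind_password_blanked_in_api_log(caplog):
    caplog.set_level(logging.DEBUG, logger='zhmcclient.api')
    session = FakedSession('fake-host', 'fake-hmc', '2.13.1', '1.8')
    session.hmc.consoles.add({
        'object-id': None, 'parent': None, 'class': 'console',
        'name': 'fake-console1', 'description': 'Console #1',
    })
    console = Client(session).consoles.console
    console.ldap_server_definitions.create({
        'name': 'lsd1',
        'description': 'LSD #1',
        'bind-password': 'secret',
    })
    messages = [r.getMessage() for r in caplog.records]
    assert not any('secret' in m for m in messages)
    assert any(f"'bind-password': '{BLANKED_OUT_STRING}'" in m
               for m in messages)
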
|
# Copyright 2017,2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An :term:`LDAP Server Definition` resource represents a definition that contains
information about an LDAP server that may be used for HMC user authentication
purposes.
"""
import copy
from ._manager import BaseManager
from ._resource import BaseResource
from ._logging import logged_api_call
from ._utils import RC_LDAP_SERVER_DEFINITION
__all__ = ['LdapServerDefinitionManager', 'LdapServerDefinition']
class LdapServerDefinitionManager(BaseManager):
"""
Manager providing access to the :term:`LDAP Server Definition` resources of
an HMC.
Derived from :class:`~zhmcclient.BaseManager`; see there for common methods
and attributes.
Objects of this class are not directly created by the user; they are
accessible via the following instance variable of a
:class:`~zhmcclient.Console` object:
* :attr:`zhmcclient.Console.ldap_server_definitions`
HMC/SE version requirements:
* HMC version == 2.13.0
"""
def __init__(self, console):
# This function should not go into the docs.
# Parameters:
# console (:class:`~zhmcclient.Console`):
# Console object representing the HMC.
# Resource properties that are supported as filter query parameters.
# If the support for a resource property changes within the set of HMC
# versions that support this type of resource, this list must be set up
# for the version of the HMC this session is connected to.
# Because this resource has case-insensitive names, this list must
# contain the name property.
query_props = [
'name',
]
super().__init__(
resource_class=LdapServerDefinition,
class_name=RC_LDAP_SERVER_DEFINITION,
session=console.manager.session,
parent=console,
base_uri='/api/console/ldap-server-definitions',
oid_prop='element-id',
uri_prop='element-uri',
name_prop='name',
query_props=query_props,
case_insensitive_names=True)
@property
def console(self):
"""
:class:`~zhmcclient.Console`: :term:`Console` defining the scope for
this manager.
"""
return self._parent
@logged_api_call
def list(self, full_properties=False, filter_args=None):
"""
List the :term:`LDAP Server Definition` resources representing the
definitions of LDAP servers in this HMC.
Any resource property may be specified in a filter argument. For
details about filter arguments, see :ref:`Filtering`.
The listing of resources is handled in an optimized way:
* If this manager is enabled for :ref:`auto-updating`, a locally
maintained resource list is used (which is automatically updated via
inventory notifications from the HMC) and the provided filter
arguments are applied.
* Otherwise, if the filter arguments specify the resource name as a
single filter argument with a straight match string (i.e. without
regular expressions), an optimized lookup is performed based on a
locally maintained name-URI cache.
* Otherwise, the HMC List operation is performed with the subset of the
provided filter arguments that can be handled on the HMC side and the
remaining filter arguments are applied on the client side on the list
result.
HMC/SE version requirements:
* HMC version == 2.13.0
Authorization requirements:
* User-related-access permission to the LDAP Server Definition objects
included in the result, or task permission to the "Manage LDAP Server
Definitions" task.
Parameters:
full_properties (bool):
Controls whether the full set of resource properties should be
retrieved, vs. only the short set as returned by the list
operation.
filter_args (dict):
Filter arguments that narrow the list of returned resources to
those that match the specified filter arguments. For details, see
:ref:`Filtering`.
`None` causes no filtering to happen, i.e. all resources are
returned.
Returns:
: A list of :class:`~zhmcclient.LdapServerDefinition` objects.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
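Example:
  A minimal usage sketch; the HMC host, the credentials, and the
  definition name 'corp-ldap' are illustrative assumptions::
    import zhmcclient
    session = zhmcclient.Session('hmc.example.com', 'user', 'password')
    client = zhmcclient.Client(session)
    console = client.consoles.console
    # A single name filter uses the optimized name-URI cache lookup
    # described above:
    lsds = console.ldap_server_definitions.list(
        filter_args={'name': 'corp-ldap'})
    for lsd in lsds:
        print(lsd.name, lsd.uri)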
"""
result_prop = 'ldap-server-definitions'
list_uri = f'{self.console.uri}/ldap-server-definitions'
return self._list_with_operation(
list_uri, result_prop, full_properties, filter_args, None)
@logged_api_call
def create(self, properties):
"""
Create a new LDAP Server Definition in this HMC.
HMC/SE version requirements:
* HMC version >= 2.13.0
Authorization requirements:
* Task permission to the "Manage LDAP Server Definitions" task.
Parameters:
properties (dict): Initial property values.
Allowable properties are defined in section 'Request body contents'
in section 'Create LDAP Server Definition' in the :term:`HMC API`
book.
Returns:
LdapServerDefinition:
The resource object for the new LDAP Server Definition.
The object will have its 'element-uri' property set as returned by
the HMC, and will also have the input properties set.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
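Example:
  A minimal usage sketch, assuming a :class:`~zhmcclient.Console` object
  'console' as set up in the list() example above; the property values are
  illustrative assumptions, and any further properties required by the HMC
  for creation (see the HMC API book) are omitted::
    lsd = console.ldap_server_definitions.create({
        'name': 'corp-ldap',
        'description': 'Corporate LDAP server',
    })
    print(lsd.uri)   # element URI assigned by the HMC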
"""
result = self.session.post(
self.console.uri + '/ldap-server-definitions', body=properties)
# There should not be overlaps, but just in case there are, the
# returned props should overwrite the input props:
props = copy.deepcopy(properties)
props.update(result)
name = props.get(self._name_prop, None)
uri = props[self._uri_prop]
ldap_server_definition = LdapServerDefinition(self, uri, name, props)
self._name_uri_cache.update(name, uri)
return ldap_server_definition
class LdapServerDefinition(BaseResource):
"""
Representation of a :term:`LDAP Server Definition`.
Derived from :class:`~zhmcclient.BaseResource`; see there for common
methods and attributes.
Objects of this class are not directly created by the user; they are
returned from creation or list functions on their manager object
(in this case, :class:`~zhmcclient.LdapServerDefinitionManager`).
HMC/SE version requirements:
* HMC version >= 2.13.0
"""
def __init__(self, manager, uri, name=None, properties=None):
# This function should not go into the docs.
# manager (:class:`~zhmcclient.LdapServerDefinitionManager`):
# Manager object for this resource object.
# uri (string):
# Canonical URI path of the resource.
# name (string):
# Name of the resource.
# properties (dict):
# Properties to be set for this resource object. May be `None` or
# empty.
assert isinstance(manager, LdapServerDefinitionManager), (
"Console init: Expected manager type "
f"{LdapServerDefinitionManager}, got {type(manager)}")
super().__init__(
manager, uri, name, properties)
@logged_api_call
def delete(self):
"""
Delete this LDAP Server Definition.
HMC/SE version requirements:
* HMC version >= 2.13.0
Authorization requirements:
* Task permission to the "Manage LDAP Server Definitions" task.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
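Example:
  A minimal usage sketch, assuming a :class:`~zhmcclient.Console` object
  'console'; the definition name is an illustrative assumption::
    lsd = console.ldap_server_definitions.find(name='corp-ldap')
    lsd.delete()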
"""
# pylint: disable=protected-access
self.manager.session.delete(self.uri, resource=self)
self.manager._name_uri_cache.delete(
self.get_properties_local(self.manager._name_prop, None))
self.cease_existence_local()
@logged_api_call
def update_properties(self, properties):
"""
Update writeable properties of this LDAP Server Definition.
This method serializes with other methods that access or change
properties on the same Python object.
HMC/SE version requirements:
* HMC version >= 2.13.0
Authorization requirements:
* Task permission to the "Manage LDAP Server Definitions" task.
Parameters:
properties (dict): New values for the properties to be updated.
Properties not to be updated are omitted.
Allowable properties are the properties with qualifier (w) in
section 'Data model' in section 'LDAP Server Definition object' in
the :term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
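Example:
  A minimal usage sketch, assuming a :class:`~zhmcclient.Console` object
  'console'; 'description' is used here as an assumed example of a
  writeable property (see the HMC API book for the authoritative list)::
    lsd = console.ldap_server_definitions.find(name='corp-ldap')
    lsd.update_properties({'description': 'Corporate LDAP (primary)'})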
"""
# pylint: disable=protected-access
self.manager.session.post(self.uri, resource=self, body=properties)
# The name of LDAP Server Definitions cannot be updated. An attempt to
# do so should cause HTTPError to be raised in the POST above, so we
# assert that here, because we omit the extra code for handling name
# updates:
assert self.manager._name_prop not in properties
self.update_properties_local(copy.deepcopy(properties))
|
GHSA-p57h-3cmc-xpjq
|
zhmcclient/_logging.py
|
@@ -75,9 +75,10 @@
import logging
import inspect
-from decorator import decorate # requires decorator>=4.0
+import functools
+from collections.abc import Mapping, Sequence
-from ._constants import API_LOGGER_NAME
+from ._constants import API_LOGGER_NAME, BLANKED_OUT_STRING
__all__ = []
@@ -105,7 +106,8 @@ def get_logger(name):
return logger
-def logged_api_call(func):
+def logged_api_call(
+ org_func=None, *, blanked_properties=None, properties_pos=None):
"""
Function decorator that causes the decorated API function or method to log
calls to itself to a logger.
@@ -115,7 +117,24 @@ def logged_api_call(func):
Parameters:
- func (function object): The original function being decorated.
+ org_func (function object): The original function being decorated.
+ Will be `None` if the decorator is specified with its optional
+ argument 'blanked_properties'.
+
+ blanked_properties (list of str): Optional: List of properties in the
+ 'properties' argument of the decorated API function that should be
+ blanked out before being logged. Can be used to hide password
+ properties.
+ This parameter is required when 'properties_pos' is used.
+ This parameter must be specified as a keyword argument.
+
+ properties_pos (int): Optional: 0-based index of the 'properties'
+ parameter in the argument list of the decorated API function.
+ For methods, the 'self' or 'cls' parameter is included in the position.
+ This parameter is needed in case the properties are passed as a
+ positional argument by the caller of the API function.
+ This parameter is required when 'blanked_properties' is used.
+ This parameter must be specified as a keyword argument.
Returns:
@@ -128,109 +147,196 @@ def logged_api_call(func):
method (and not on top of the @property decorator).
"""
- # Note that in this decorator function, we are in a module loading context,
- # where the decorated functions are being defined. When this decorator
- # function is called, its call stack represents the definition of the
- # decorated functions. Not all global definitions in the module have been
- # defined yet, and methods of classes that are decorated with this
- # decorator are still functions at this point (and not yet methods).
+ if blanked_properties is not None and properties_pos is None:
+ raise TypeError(
+ "If the @logged_api_call decorator is specified with "
+ "'blanked_properties', 'properties_pos' must also be specified.")
- module = inspect.getmodule(func)
- if not inspect.isfunction(func) or not hasattr(module, '__name__'):
- raise TypeError("The @logged_api_call decorator must be used on a "
- "function or method (and not on top of the @property "
- "decorator)")
+ if properties_pos is not None and blanked_properties is None:
+ raise TypeError(
+ "If the @logged_api_call decorator is specified with "
+ "'properties_pos', 'blanked_properties' must also be specified.")
- try:
- # We avoid the use of inspect.getouterframes() because it is slow,
- # and use the pointers up the stack frame, instead.
+ if blanked_properties is not None and (
+ not isinstance(blanked_properties, Sequence) or # noqa: W504
+ isinstance(blanked_properties, str)):
+ raise TypeError(
+ "The 'blanked_properties' parameter of the @logged_api_call "
+ "decorator must be a list of strings.")
- this_frame = inspect.currentframe() # this decorator function here
- apifunc_frame = this_frame.f_back # the decorated API function
-
- apifunc_owner = inspect.getframeinfo(apifunc_frame)[2]
+ def _decorate(func):
+ """
+ The actual decorator function that always gets the original decorated
+ function, independent of whether the 'logged_api_call' decorator was
+ specified with or without its optional arguments.
- finally:
- # Recommended way to deal with frame objects to avoid ref cycles
- del this_frame
- del apifunc_frame
+ Parameters:
- # TODO: For inner functions, show all outer levels instead of just one.
+ func (function object): The original function being decorated.
+ """
- if apifunc_owner == '<module>':
- # The decorated API function is defined globally (at module level)
- apifunc_str = f'{func.__name__}()'
- else:
- # The decorated API function is defined in a class or in a function
- apifunc_str = f'{apifunc_owner}.{func.__name__}()'
+ # Note that in this decorator function, we are in a module loading
+ # context, where the decorated functions are being defined. When this
+ # decorator function is called, its call stack represents the
+ # definition of the decorated functions. Not all global definitions in
+ # the module have been defined yet, and methods of classes that are
+ # decorated with this decorator are still functions at this point (and
+ # not yet methods).
- logger = get_logger(API_LOGGER_NAME)
+ if not inspect.isfunction(func):
+ raise TypeError("The @logged_api_call decorator must be used on a "
+ "function or method (and not on top of the "
+ "@property decorator)")
- def is_external_call():
- """
- Return a boolean indicating whether the call to the decorated API
-        function is an external call (vs. being an internal call).
- """
try:
# We avoid the use of inspect.getouterframes() because it is slow,
# and use the pointers up the stack frame, instead.
- log_it_frame = inspect.currentframe() # this log_it() function
- log_api_call_frame = log_it_frame.f_back # the log_api_call() func
- apifunc_frame = log_api_call_frame.f_back # the decorated API func
- apicaller_frame = apifunc_frame.f_back # caller of API function
- apicaller_module = inspect.getmodule(apicaller_frame)
- if apicaller_module is None:
- apicaller_module_name = "<unknown>"
- else:
- apicaller_module_name = apicaller_module.__name__
+ this_frame = inspect.currentframe() # this function
+ apifunc_frame = this_frame.f_back # the decorated API function
+ if org_func:
+ # In this case, there is one more decorator function nesting
+ apifunc_frame = apifunc_frame.f_back
+ apifunc_owner = inspect.getframeinfo(apifunc_frame)[2]
+
finally:
# Recommended way to deal with frame objects to avoid ref cycles
- del log_it_frame
- del log_api_call_frame
+ del this_frame
del apifunc_frame
- del apicaller_frame
- del apicaller_module
- # Log only if the caller is not from the zhmcclient package
- return apicaller_module_name.split('.')[0] != 'zhmcclient'
+ # TODO: For inner functions, show all outer levels instead of just one.
+
+ func_name = getattr(func, '__name__', '<unknown>')
+ if apifunc_owner == '<module>':
+ # The decorated API function is defined globally (at module level)
+ apifunc_str = f'{func_name}()'
+ else:
+ # The decorated API function is defined in a class or in a function
+ apifunc_str = f'{apifunc_owner}.{func_name}()'
+
+ logger = get_logger(API_LOGGER_NAME)
+
+ def is_external_call():
+ """
+ Return a boolean indicating whether the call to the decorated API
+ function is made from outside of the zhmcclient package.
+ """
+ try:
+ # We avoid the use of inspect.getouterframes() because it is
+ # slow, and use the pointers up the stack frame, instead.
+
+ this_frame = inspect.currentframe() # this function
+ log_api_call_frame = this_frame.f_back # log_api_call()
+ apifunc_frame = log_api_call_frame.f_back # the decorated func
+ apicaller_frame = apifunc_frame.f_back # caller of API func
+ apicaller_module = inspect.getmodule(apicaller_frame)
+ if apicaller_module is None:
+ apicaller_module_name = "<unknown>"
+ else:
+ apicaller_module_name = apicaller_module.__name__
+ finally:
+ # Recommended way to deal with frame objects to avoid ref
+ # cycles
+ del this_frame
+ del log_api_call_frame
+ del apifunc_frame
+ del apicaller_frame
+ del apicaller_module
+
+ # Log only if the caller is not from the zhmcclient package
+ return apicaller_module_name.split('.')[0] != 'zhmcclient'
+
+ def blanked_dict(properties):
+ """
+ Return a copy of the properties dict, with blanked out values
+ according to the 'blanked_properties' and 'properties_pos'
+ arguments of the 'logged_api_call' decorator.
+ """
+ # properties may also be a DictView (subclass of Mapping)
+ assert isinstance(properties, Mapping)
+ copied_properties = dict(properties)
+ for pn in blanked_properties:
+                # Blank out the value only if the property is present;
+                # dict item assignment never raises KeyError
+                if pn in copied_properties:
+                    copied_properties[pn] = BLANKED_OUT_STRING
+ return copied_properties
+
+ def blanked_args(args, kwargs):
+ """
+ Return a copy of args and kwargs, whereby the 'properties' argument
+ has items blanked out according to the 'blanked_properties' and
+ 'properties_pos' arguments of the 'logged_api_call' decorator.
+ """
+ logged_kwargs = dict(kwargs)
+ logged_args = list(args)
+ if blanked_properties is not None:
+ if 'properties' in kwargs:
+ logged_kwargs['properties'] = \
+ blanked_dict(kwargs['properties'])
+ else:
+ logged_args[properties_pos] = \
+ blanked_dict(args[properties_pos])
+ return tuple(logged_args), logged_kwargs
+
+ def log_call(args, kwargs):
+ """
+ Log the call to the API function.
+ """
+ logged_args, logged_kwargs = blanked_args(args, kwargs)
+ logger.debug("Called: %s, args: %.500s, kwargs: %.500s",
+ apifunc_str,
+ log_escaped(repr(logged_args)),
+ log_escaped(repr(logged_kwargs)))
+
+ def log_return(result):
+ """
+ Log the return from the API function.
+ """
+ logger.debug("Return: %s, result: %.1000s",
+ apifunc_str, log_escaped(repr(result)))
- def log_api_call(func, *args, **kwargs):
- """
- Log entry to and exit from the decorated function, at the debug level.
+ @functools.wraps(func)
+ def log_api_call(*args, **kwargs):
+ """
+ Log entry to and exit from the decorated function, at the debug
+ level.
- Note that this wrapper function is called every time the decorated
- function/method is called, but that the log message only needs to be
- constructed when logging for this logger and for this log level is
- turned on. Therefore, we do as much as possible in the decorator
- function, plus we use %-formatting and lazy interpolation provided by
- the log functions, in order to save resources in this function here.
+ Note that this wrapper function is called every time the decorated
+ function/method is called, but that the log message only needs to
+ be constructed when logging for this logger and for this log level
+ is turned on. Therefore, we do as much as possible in the decorator
+ function, plus we use %-formatting and lazy interpolation provided
+ by the log functions, in order to save resources in this function
+ here.
- Parameters:
+ Parameters:
- func (function object): The decorated function.
+ func (function object): The decorated function.
- *args: Any positional arguments for the decorated function.
+ *args: Any positional arguments for the decorated function.
- **kwargs: Any keyword arguments for the decorated function.
- """
+ **kwargs: Any keyword arguments for the decorated function.
+ """
- # Note that in this function, we are in the context where the
- # decorated function is actually called.
+ # Note that in this function, we are in the context where the
+ # decorated function is actually called.
+ _log_it = is_external_call() and logger.isEnabledFor(logging.DEBUG)
- _log_it = is_external_call() and logger.isEnabledFor(logging.DEBUG)
+ if _log_it:
+ log_call(args, kwargs)
- if _log_it:
- logger.debug("Called: %s, args: %.500s, kwargs: %.500s",
- apifunc_str, log_escaped(repr(args)),
- log_escaped(repr(kwargs)))
+ result = func(*args, **kwargs) # The zhmcclient function
- result = func(*args, **kwargs)
+ if _log_it:
+ log_return(result)
- if _log_it:
- logger.debug("Return: %s, result: %.1000s",
- apifunc_str, log_escaped(repr(result)))
+ return result
- return result
+ return log_api_call
- return decorate(func, log_api_call)
+ # When the logged_api_call decorator is specified with its optional
+ # arguments, org_func is None
+ if org_func:
+ return _decorate(org_func)
+ return _decorate
|
# Copyright 2016,2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The zhmcclient supports logging using the standard Python :mod:`py:logging`
module, using standard Python :class:`~py:logging.Logger` objects with these
names:
* 'zhmcclient.api' for user-issued calls to zhmcclient API functions, at the
debug level. Internal calls to API functions are not logged.
* 'zhmcclient.hmc' for operations from zhmcclient to the HMC, at the
debug level.
* 'zhmcclient.jms' for notifications from the HMC to zhmcclient, at the
debug, info, warning and error level. At this point, this logger is used only
for the :ref:`auto-updating` support, but not for
the :class:`~zhmcclient.NotificationReceiver` class.
* 'zhmcclient.os' for interactions with OS consoles, at the debug level.
For HMC operations and API calls that contain the HMC password or HMC session
tokens, the password is hidden in the log message by replacing it with a few
'*' characters.
All these loggers have a null-handler (see :class:`~py:logging.NullHandler`)
and have no log formatter (see :class:`~py:logging.Formatter`).
As a result, the loggers are silent by default. If you want to turn on logging,
add a log handler (see :meth:`~py:logging.Logger.addHandler`, and
:mod:`py:logging.handlers` for the handlers included with Python) and set the
log level (see :meth:`~py:logging.Logger.setLevel`, and :ref:`py:levels` for
the defined levels).
If you want to change the default log message format, use
:meth:`~py:logging.Handler.setFormatter` with a :class:`~py:logging.Formatter`
object. The ``fmt`` parameter of the Formatter constructor is a format string
with %-style placeholders for the log record attributes (see Python section
:ref:`py:logrecord-attributes`).
Examples:
* To output the log records for all HMC operations to ``stdout`` in a
particular format, do this::
import logging
handler = logging.StreamHandler()
format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
handler.setFormatter(logging.Formatter(format_string))
logger = logging.getLogger('zhmcclient.hmc')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
* This example uses the :func:`~py:logging.basicConfig` convenience function
that sets the same format and level as in the previous example, but for the
root logger. Therefore, it will output all log records, not just from this
package::
import logging
format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=format_string, level=logging.DEBUG)
"""
import logging
import inspect
from decorator import decorate # requires decorator>=4.0
from ._constants import API_LOGGER_NAME
__all__ = []
def log_escaped(string):
"""
Return the escaped input string, for use in log messages.
"""
return string.replace('\n', ' ').replace('    ', ' ').replace('   ', ' ').\
    replace('  ', ' ')
def get_logger(name):
"""
Return a :class:`~py:logging.Logger` object with the specified name.
A :class:`~py:logging.NullHandler` handler is added to the logger if it
does not have any handlers yet and if it is not the Python root logger.
This prevents the propagation of log requests up the Python logger
hierarchy, and therefore causes this package to be silent by default.
"""
logger = logging.getLogger(name)
if name != '' and not logger.handlers:
logger.addHandler(logging.NullHandler())
return logger
def logged_api_call(func):
"""
Function decorator that causes the decorated API function or method to log
calls to itself to a logger.
The logger's name is the dotted module name of the module defining the
decorated function (e.g. 'zhmcclient._cpc').
Parameters:
func (function object): The original function being decorated.
Returns:
function object: The function wrapping the original function being
decorated.
Raises:
TypeError: The @logged_api_call decorator must be used on a function or
method (and not on top of the @property decorator).
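Example:
  A minimal sketch of how this decorator is applied within the zhmcclient
  package (other modules import it via ``from ._logging import
  logged_api_call``); the class and method names are illustrative::
    class ExampleResource:
        @logged_api_call
        def do_something(self, value):
            return value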
"""
# Note that in this decorator function, we are in a module loading context,
# where the decorated functions are being defined. When this decorator
# function is called, its call stack represents the definition of the
# decorated functions. Not all global definitions in the module have been
# defined yet, and methods of classes that are decorated with this
# decorator are still functions at this point (and not yet methods).
module = inspect.getmodule(func)
if not inspect.isfunction(func) or not hasattr(module, '__name__'):
raise TypeError("The @logged_api_call decorator must be used on a "
"function or method (and not on top of the @property "
"decorator)")
try:
# We avoid the use of inspect.getouterframes() because it is slow,
# and use the pointers up the stack frame, instead.
this_frame = inspect.currentframe() # this decorator function here
apifunc_frame = this_frame.f_back # the decorated API function
apifunc_owner = inspect.getframeinfo(apifunc_frame)[2]
finally:
# Recommended way to deal with frame objects to avoid ref cycles
del this_frame
del apifunc_frame
# TODO: For inner functions, show all outer levels instead of just one.
if apifunc_owner == '<module>':
# The decorated API function is defined globally (at module level)
apifunc_str = f'{func.__name__}()'
else:
# The decorated API function is defined in a class or in a function
apifunc_str = f'{apifunc_owner}.{func.__name__}()'
logger = get_logger(API_LOGGER_NAME)
def is_external_call():
"""
Return a boolean indicating whether the call to the decorated API
function is an external call (vs. being an internal call).
"""
try:
# We avoid the use of inspect.getouterframes() because it is slow,
# and use the pointers up the stack frame, instead.
log_it_frame = inspect.currentframe() # this log_it() function
log_api_call_frame = log_it_frame.f_back # the log_api_call() func
apifunc_frame = log_api_call_frame.f_back # the decorated API func
apicaller_frame = apifunc_frame.f_back # caller of API function
apicaller_module = inspect.getmodule(apicaller_frame)
if apicaller_module is None:
apicaller_module_name = "<unknown>"
else:
apicaller_module_name = apicaller_module.__name__
finally:
# Recommended way to deal with frame objects to avoid ref cycles
del log_it_frame
del log_api_call_frame
del apifunc_frame
del apicaller_frame
del apicaller_module
# Log only if the caller is not from the zhmcclient package
return apicaller_module_name.split('.')[0] != 'zhmcclient'
def log_api_call(func, *args, **kwargs):
"""
Log entry to and exit from the decorated function, at the debug level.
Note that this wrapper function is called every time the decorated
function/method is called, but that the log message only needs to be
constructed when logging for this logger and for this log level is
turned on. Therefore, we do as much as possible in the decorator
function, plus we use %-formatting and lazy interpolation provided by
the log functions, in order to save resources in this function here.
Parameters:
func (function object): The decorated function.
*args: Any positional arguments for the decorated function.
**kwargs: Any keyword arguments for the decorated function.
"""
# Note that in this function, we are in the context where the
# decorated function is actually called.
_log_it = is_external_call() and logger.isEnabledFor(logging.DEBUG)
if _log_it:
logger.debug("Called: %s, args: %.500s, kwargs: %.500s",
apifunc_str, log_escaped(repr(args)),
log_escaped(repr(kwargs)))
result = func(*args, **kwargs)
if _log_it:
logger.debug("Return: %s, result: %.1000s",
apifunc_str, log_escaped(repr(result)))
return result
return decorate(func, log_api_call)
|
GHSA-p57h-3cmc-xpjq
|
zhmcclient/_lpar.py
|
@@ -187,7 +187,8 @@ def __init__(self, manager, uri, name=None, properties=None):
f"got {type(manager)}")
super().__init__(manager, uri, name, properties)
- @logged_api_call
+ @logged_api_call(blanked_properties=['ssc-master-pw', 'zaware-master-pw'],
+ properties_pos=1)
def update_properties(self, properties):
"""
Update writeable properties of this LPAR.
|
# Copyright 2016,2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A :term:`LPAR` (Logical Partition) is a subset of the hardware resources of a
:term:`CPC` in classic mode (or ensemble mode), virtualized as a separate
computer.
LPARs cannot be created or deleted by the user; they can only be listed.
LPAR resources are contained in CPC resources.
LPAR resources only exist in CPCs that are in classic mode (or ensemble mode).
CPCs in DPM mode have :term:`Partition` resources, instead.
"""
import time
import copy
from ._manager import BaseManager
from ._resource import BaseResource
from ._exceptions import StatusTimeout
from ._constants import HMC_LOGGER_NAME
from ._logging import get_logger, logged_api_call
from ._utils import RC_LOGICAL_PARTITION, make_query_str, \
warn_deprecated_parameter, datetime_from_timestamp, timestamp_from_datetime
__all__ = ['LparManager', 'Lpar']
HMC_LOGGER = get_logger(HMC_LOGGER_NAME)
class LparManager(BaseManager):
"""
Manager providing access to the :term:`LPARs <LPAR>` in a particular
:term:`CPC`.
Derived from :class:`~zhmcclient.BaseManager`; see there for common methods
and attributes.
Objects of this class are not directly created by the user; they are
accessible via the following instance variable of a
:class:`~zhmcclient.Cpc` object (in classic mode):
* :attr:`~zhmcclient.Cpc.lpars`
HMC/SE version requirements: None
"""
def __init__(self, cpc):
# This function should not go into the docs.
# Parameters:
# cpc (:class:`~zhmcclient.Cpc`):
# CPC defining the scope for this manager.
# Resource properties that are supported as filter query parameters.
# If the support for a resource property changes within the set of HMC
# versions that support this type of resource, this list must be set up
# for the version of the HMC this session is connected to.
query_props = [
'name',
]
super().__init__(
resource_class=Lpar,
class_name=RC_LOGICAL_PARTITION,
session=cpc.manager.session,
parent=cpc,
base_uri='/api/logical-partitions',
oid_prop='object-id',
uri_prop='object-uri',
name_prop='name',
query_props=query_props,
supports_properties=True)
@property
def cpc(self):
"""
:class:`~zhmcclient.Cpc`: :term:`CPC` defining the scope for this
manager.
"""
return self._parent
@logged_api_call
def list(self, full_properties=False, filter_args=None):
"""
List the LPARs in this CPC.
Any resource property may be specified in a filter argument. For
details about filter arguments, see :ref:`Filtering`.
The listing of resources is handled in an optimized way:
* If this manager is enabled for :ref:`auto-updating`, a locally
maintained resource list is used (which is automatically updated via
inventory notifications from the HMC) and the provided filter
arguments are applied.
* Otherwise, if the filter arguments specify the resource name as a
single filter argument with a straight match string (i.e. without
regular expressions), an optimized lookup is performed based on a
locally maintained name-URI cache.
* Otherwise, the HMC List operation is performed with the subset of the
provided filter arguments that can be handled on the HMC side and the
remaining filter arguments are applied on the client side on the list
result.
HMC/SE version requirements: None
Authorization requirements:
* Object-access permission to this CPC.
* Object-access permission to any LPAR to be included in the result.
Parameters:
full_properties (bool):
Controls whether the full set of resource properties should be
retrieved, vs. only the short set as returned by the list
operation.
filter_args (dict):
Filter arguments that narrow the list of returned resources to
those that match the specified filter arguments. For details, see
:ref:`Filtering`.
`None` causes no filtering to happen, i.e. all resources are
returned.
Returns:
: A list of :class:`~zhmcclient.Lpar` objects.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
result_prop = 'logical-partitions'
list_uri = f'{self.cpc.uri}/logical-partitions'
return self._list_with_operation(
list_uri, result_prop, full_properties, filter_args, None)
class Lpar(BaseResource):
"""
Representation of an :term:`LPAR`.
Derived from :class:`~zhmcclient.BaseResource`; see there for common
methods and attributes.
Objects of this class are not directly created by the user; they are
returned from creation or list functions on their manager object
(in this case, :class:`~zhmcclient.LparManager`).
HMC/SE version requirements: None
"""
def __init__(self, manager, uri, name=None, properties=None):
# This function should not go into the docs.
# manager (:class:`~zhmcclient.LparManager`):
# Manager object for this resource object.
# uri (string):
# Canonical URI path of the resource.
# name (string):
# Name of the resource.
# properties (dict):
# Properties to be set for this resource object. May be `None` or
# empty.
assert isinstance(manager, LparManager), (
f"Lpar init: Expected manager type {LparManager}, "
f"got {type(manager)}")
super().__init__(manager, uri, name, properties)
@logged_api_call
def update_properties(self, properties):
"""
Update writeable properties of this LPAR.
This method serializes with other methods that access or change
properties on the same Python object.
HMC/SE version requirements: None
Authorization requirements:
* Object-access permission to this LPAR.
* Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access
permission to the CPC of this LPAR.
* Task permission for the "Change Object Definition" task.
* Since HMC 2.14.1: If the "next-activation-profile-name" property is to
be updated, task permission for the "Change Object Options" task or
the "Customize/Delete Activation Profiles" task.
* Before HMC 2.15.0: For an LPAR whose activation-mode is "zaware", task
permission for the "Firmware Details" task.
* Since HMC 2.15.0: If any of the "ssc-*" or "zaware-*" properties is to
be updated, task permission for the "Firmware Details" task.
* Since HMC 2.15.0: If any of the numbers of allocated or reserved cores
is to be updated, task permission for the "Logical Processor Add"
task.
Parameters:
properties (dict): New values for the properties to be updated.
Properties not to be updated are omitted.
Allowable properties are the properties with qualifier (w) in
section 'Data model' in section 'Logical Partition object' in the
:term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
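Example:
  A minimal usage sketch, assuming a :class:`~zhmcclient.Client` object
  'client'; the CPC and LPAR names are illustrative, and
  'next-activation-profile-name' is one of the writeable properties
  mentioned above::
    cpc = client.cpcs.find(name='CPC1')
    lpar = cpc.lpars.find(name='LPAR1')
    lpar.update_properties({'next-activation-profile-name': 'LPAR1'})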
"""
# pylint: disable=protected-access
self.manager.session.post(self.uri, resource=self, body=properties)
# Attempts to change the 'name' property will be rejected by the HMC,
# so we don't need to update the name-to-URI cache.
assert self.manager._name_prop not in properties
self.update_properties_local(copy.deepcopy(properties))
@logged_api_call
def activate(self, wait_for_completion=True,
operation_timeout=None, status_timeout=None,
allow_status_exceptions=False, activation_profile_name=None,
force=False):
"""
Activate (start) this LPAR, using the HMC operation "Activate Logical
Partition".
This HMC operation has deferred status behavior: If the asynchronous
job on the HMC is complete, it may take a few seconds until the LPAR
status has reached the desired value. If `wait_for_completion=True`,
this method repeatedly checks the status of the LPAR after the HMC
operation has completed, and waits until the status is in the desired
state "not-operating" (which indicates that the LPAR is active but
no operating system is running), or "operating", or if
`allow_status_exceptions` was set additionally in the state
"exceptions".
The following approach is used to determine the desired state to wait
for if `wait_for_completion=True`:
- if the 'operating-mode' property of the image profile is 'ssc' or
'zaware', the desired state is "operating".
- if the profile specified in `activation_profile_name` is not the
LPAR's image profile, it is assumed to be a load profile and
the desired state is "operating".
- if the 'load-at-activation' property of the image profile is True,
the desired state is "operating".
- else, the desired state is "not-operating".
HMC/SE version requirements: None
Authorization requirements:
* Object-access permission to this LPAR.
* Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access
permission to the CPC of this LPAR.
* Task permission for the "Activate" task.
Parameters:
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation, and for the status
becoming "not-operating" or "operating" (or in addition
"exceptions", if `allow_status_exceptions` was set.
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
Timeout in seconds, for waiting that the status of the LPAR has
reached the desired status, after the HMC operation has completed.
The special value 0 means that no timeout is set. `None` means that
the default status timeout of the session is used.
If the timeout expires when `wait_for_completion=True`, a
:exc:`~zhmcclient.StatusTimeout` is raised.
allow_status_exceptions (bool):
Boolean controlling whether LPAR status "exceptions" is considered
an additional acceptable end status when `wait_for_completion` is
set.
activation_profile_name (:term:`string`):
Name of the load or image activation profile to be used instead
of the one specified in the `next-activation-profile-name` property
of the LPAR, or `None`.
If this parameter specifies an image activation profile, its name
must match the LPAR name. For non-SSC partitions, the image
profile's `load-at-activation` property determines whether the
activation is followed by a load of the control program using the
load-related parameters from the image profile. SSC partitions are
always auto-loaded (regardless of the `load-at-activation`
property).
If this parameter specifies a load activation profile, the
activation uses the image profile with the same name as the LPAR.
The activation is always followed by a load of the control program
(regardless of the image profile's `load-at-activation` property)
using the parameters from the load profile.
If this parameter is `None`, the `next-activation-profile-name`
property of the LPAR will be used. That property can again specify
an image profile or a load profile which are treated as described
above. If that property is `None`, the image profile with the same
name as the LPAR is used and is treated as described above.
force (bool):
Boolean controlling whether this operation is permitted when the
LPAR is in the "operating" status.
TBD: What will happen with the LPAR in that case (deactivated then
activated? nothing?)
Returns:
`None` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns `None`.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job does not support cancellation.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
:exc:`~zhmcclient.StatusTimeout`: The timeout expired while
waiting for the desired LPAR status.
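Example:
  A minimal usage sketch, assuming a :class:`~zhmcclient.Cpc` object
  'cpc'; the LPAR name is illustrative. The image profile with the same
  name as the LPAR is used, and the method waits for the resulting status
  as described above::
    lpar = cpc.lpars.find(name='LPAR1')
    lpar.activate(activation_profile_name=lpar.name,
                  wait_for_completion=True)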
"""
body = {}
if activation_profile_name:
body['activation-profile-name'] = activation_profile_name
if force:
body['force'] = force
result = self.manager.session.post(
self.uri + '/operations/activate', resource=self, body=body,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
if wait_for_completion:
# If an automatic load is performed, the LPAR status will first go
# to 'not-operating' and then later to 'operating'. So we cannot
# just wait for any of those two, but need to have an understanding
# whether we expect auto-load.
image_profile_mgr = self.manager.parent.image_activation_profiles
image_profile = image_profile_mgr.find(name=self.name)
auto_load = image_profile.get_property('load-at-activation')
# Note that the LPAR 'activation-mode' property is 'not-set' while
# the LPAR is inactive, so we need to look at the image profile
# to determine the mode.
op_mode = image_profile.get_property('operating-mode')
load_profile_specified = activation_profile_name is not None and \
activation_profile_name != self.name
mode_load = op_mode in ('ssc', 'zaware')
if auto_load or load_profile_specified or mode_load:
statuses = ["operating"]
else:
statuses = ["not-operating"]
if allow_status_exceptions:
statuses.append("exceptions")
self.wait_for_status(statuses, status_timeout)
return result
@logged_api_call
def deactivate(self, wait_for_completion=True,
operation_timeout=None, status_timeout=None,
allow_status_exceptions=False, force=False):
"""
De-activate (stop) this LPAR, using the HMC operation "Deactivate
Logical Partition".
This HMC operation has deferred status behavior: If the asynchronous
job on the HMC is complete, it may take a few seconds until the LPAR
status has reached the desired value. If `wait_for_completion=True`,
this method repeatedly checks the status of the LPAR after the HMC
operation has completed, and waits until the status is in the desired
state "not-activated", or if `allow_status_exceptions` was
set additionally in the state "exceptions".
HMC/SE version requirements: None
Authorization requirements:
* Object-access permission to this LPAR.
* Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access
permission to the CPC of this LPAR.
* Task permission for the "Deactivate" task.
Parameters:
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation, and for the status
becoming "not-activated" (or in addition "exceptions", if
`allow_status_exceptions` was set).
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
Timeout in seconds, for waiting that the status of the LPAR has
reached the desired status, after the HMC operation has completed.
The special value 0 means that no timeout is set. `None` means that
the default status timeout of the session is used.
If the timeout expires when `wait_for_completion=True`, a
:exc:`~zhmcclient.StatusTimeout` is raised.
allow_status_exceptions (bool):
Boolean controlling whether LPAR status "exceptions" is considered
an additional acceptable end status when `wait_for_completion` is
set.
force (bool):
Boolean controlling whether this operation is permitted when the
LPAR is in the "operating" status.
TBD: What will happen with the LPAR in that case (deactivated then
activated? nothing?)
Returns:
`None` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns `None`.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job does not support cancellation.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
:exc:`~zhmcclient.StatusTimeout`: The timeout expired while
waiting for the desired LPAR status.
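Example:
  A minimal sketch of the asynchronous variant, assuming an
  :class:`~zhmcclient.Lpar` object 'lpar'. With
  `wait_for_completion=False`, the method returns a
  :class:`~zhmcclient.Job` object that can be waited upon separately::
    job = lpar.deactivate(force=True, wait_for_completion=False)
    job.wait_for_completion()   # wait for the HMC job to finish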
"""
body = {}
if force:
body['force'] = force
result = self.manager.session.post(
self.uri + '/operations/deactivate', resource=self, body=body,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
if wait_for_completion:
statuses = ["not-activated"]
if allow_status_exceptions:
statuses.append("exceptions")
self.wait_for_status(statuses, status_timeout)
return result
@logged_api_call
def scsi_load(self, load_address, wwpn, lun, load_parameter=None,
disk_partition_id=None,
operating_system_specific_load_parameters=None,
boot_record_logical_block_address=None, force=False,
wait_for_completion=True, operation_timeout=None,
status_timeout=None, allow_status_exceptions=False,
secure_boot=False, os_ipl_token=None, clear_indicator=True):
# pylint: disable=invalid-name
"""
Load (boot) this LPAR from a designated SCSI device, using the
HMC operation "SCSI Load".
This HMC operation has deferred status behavior: If the asynchronous
job on the HMC is complete, it takes a few seconds until the LPAR
status has reached the desired value. If `wait_for_completion=True`,
this method repeatedly checks the status of the LPAR after the HMC
operation has completed, and waits until the status is in the desired
state "operating", or if `allow_status_exceptions` was
set additionally in the state "exceptions".
HMC/SE version requirements: None
Authorization requirements:
* Object-access permission to this LPAR.
* Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access
permission to the CPC of this LPAR.
* Task permission for the "SCSI Load" task.
Parameters:
load_address (:term:`string`):
Device number of the boot device.
wwpn (:term:`string`):
Worldwide port name (WWPN) of the target SCSI device to be
used for this operation, in hexadecimal.
lun (:term:`string`):
Hexadecimal logical unit number (LUN) to be used for the
SCSI load.
load_parameter (:term:`string`):
Optional load control string.
If empty string or `None`, it is not passed to the HMC, and the
HMC default of an empty string will be used.
disk_partition_id (:term:`integer`):
Optional disk-partition-id (also called the boot program
selector) to be used for the SCSI load.
If `None`, it is not passed to the HMC, and the HMC default
of 0 will be used.
operating_system_specific_load_parameters (:term:`string`):
Optional operating system specific load parameters to be
used for the SCSI load.
If empty string or `None`, it is not passed to the HMC, and the
HMC default of an empty string will be used.
boot_record_logical_block_address (:term:`string`):
Optional hexadecimal boot record logical block address to
be used for the SCSI load.
If `None`, it is not passed to the HMC, and the HMC default
of "0" will be used.
force (bool):
Boolean controlling whether this operation is permitted when the
LPAR is in the "operating" status.
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation, and for the status
becoming "operating" (or in addition "exceptions", if
`allow_status_exceptions` was set).
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
Timeout in seconds, for waiting that the status of the LPAR has
reached the desired status, after the HMC operation has completed.
The special value 0 means that no timeout is set. `None` means that
the default status timeout of the session is used.
If the timeout expires when `wait_for_completion=True`, a
:exc:`~zhmcclient.StatusTimeout` is raised.
allow_status_exceptions (bool):
Boolean controlling whether LPAR status "exceptions" is considered
an additional acceptable end status when `wait_for_completion` is
set.
secure_boot (bool):
Boolean controlling whether the system checks the software
signature of what is loaded against what the distributor signed it
with.
If `False` or `None`, it is not passed to the HMC, and the
HMC default of `False` will be used.
Requires the LPAR to be on a z15 or later.
os_ipl_token (:term:`string`):
Optional hexadecimal value to be used for the SCSI load.
If `None`, it is not passed to the HMC.
clear_indicator (bool):
Optional boolean controlling whether the memory should be
cleared before performing the load or not cleared.
If `True` or `None`, it is not passed to the HMC, and the HMC
default of `True` will be used if the LPAR is on a z14 with
SE version 2.14.1 or higher.
Requires the LPAR to be on a z14 with SE version 2.14.1 or higher.
Returns:
`None` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns `None`.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job does not support cancellation.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
:exc:`~zhmcclient.StatusTimeout`: The timeout expired while
waiting for the desired LPAR status.
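Example:
  A minimal usage sketch, assuming an :class:`~zhmcclient.Lpar` object
  'lpar'; the load address, WWPN and LUN values are illustrative
  assumptions for a SCSI boot device::
    lpar.scsi_load(
        load_address='1000',
        wwpn='500507680B214AC1',
        lun='4022400000000000')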
"""
body = {}
body['load-address'] = load_address
body['world-wide-port-name'] = wwpn
body['logical-unit-number'] = lun
if load_parameter:
body['load-parameter'] = load_parameter
if disk_partition_id is not None:
body['disk-partition-id'] = disk_partition_id
if operating_system_specific_load_parameters:
body['operating-system-specific-load-parameters'] = \
operating_system_specific_load_parameters
if boot_record_logical_block_address is not None:
body['boot-record-logical-block-address'] = \
boot_record_logical_block_address
if os_ipl_token is not None:
body['os-ipl-token'] = os_ipl_token
if clear_indicator not in (True, None):
# Note: Requires SE >= 2.14.1, but caller needs to control this
body['clear-indicator'] = clear_indicator
if force:
body['force'] = force
if secure_boot:
# Note: Requires SE >= 2.15, but caller needs to control this
body['secure-boot'] = secure_boot
result = self.manager.session.post(
self.uri + '/operations/scsi-load', resource=self, body=body,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
if wait_for_completion:
statuses = ["operating"]
if allow_status_exceptions:
statuses.append("exceptions")
self.wait_for_status(statuses, status_timeout)
return result
@logged_api_call
def scsi_dump(self, load_address, wwpn, lun, load_parameter=None,
disk_partition_id=None,
operating_system_specific_load_parameters=None,
boot_record_logical_block_address=None, os_ipl_token=None,
wait_for_completion=True, operation_timeout=None,
status_timeout=None, allow_status_exceptions=False,
force=False, secure_boot=False):
# pylint: disable=invalid-name
"""
Load a standalone dump program from a designated SCSI device
in this LPAR, using the HMC operation "SCSI Dump".
This HMC operation has deferred status behavior: If the asynchronous
job on the HMC is complete, it takes a few seconds until the LPAR
status has reached the desired value. If `wait_for_completion=True`,
this method repeatedly checks the status of the LPAR after the HMC
operation has completed, and waits until the status is in the desired
state "operating", or if `allow_status_exceptions` was
set additionally in the state "exceptions".
HMC/SE version requirements: None
Authorization requirements:
* Object-access permission to this LPAR.
* Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access
permission to the CPC of this LPAR.
* Task permission for the "SCSI Dump" task.
Parameters:
load_address (:term:`string`):
Device number of the boot device.
wwpn (:term:`string`):
Worldwide port name (WWPN) of the target SCSI device to be
used for this operation, in hexadecimal.
lun (:term:`string`):
Hexadecimal logical unit number (LUN) to be used for the
SCSI dump.
load_parameter (:term:`string`):
Optional load control string.
If empty string or `None`, it is not passed to the HMC, and the
HMC default of an empty string will be used.
disk_partition_id (:term:`integer`):
Optional disk-partition-id (also called the boot program
selector) to be used for the SCSI dump.
If `None`, it is not passed to the HMC, and the HMC default
of 0 will be used.
operating_system_specific_load_parameters (:term:`string`):
Optional operating system specific load parameters to be
used for the SCSI dump.
If empty string or `None`, it is not passed to the HMC, and the
HMC default of an empty string will be used.
boot_record_logical_block_address (:term:`string`):
Optional hexadecimal boot record logical block address to
be used for the SCSI dump.
If `None`, it is not passed to the HMC, and the HMC default
of "0" will be used.
os_ipl_token (:term:`string`):
Optional hexadecimal value to be used for the SCSI dump.
If `None`, it is not passed to the HMC.
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation, and for the status
becoming "operating" (or in addition "exceptions", if
`allow_status_exceptions` was set).
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
Timeout in seconds, for waiting that the status of the LPAR has
reached the desired status, after the HMC operation has completed.
The special value 0 means that no timeout is set. `None` means that
the default status timeout of the session is used.
If the timeout expires when `wait_for_completion=True`, a
:exc:`~zhmcclient.StatusTimeout` is raised.
allow_status_exceptions (bool):
Boolean controlling whether LPAR status "exceptions" is considered
an additional acceptable end status when `wait_for_completion` is
set.
force (bool):
Boolean controlling whether this operation is permitted when the
LPAR is in the "operating" status.
secure_boot (bool):
Boolean controlling whether the system checks the software
signature of what is loaded against what the distributor signed it
with.
If `False` or `None`, it is not passed to the HMC, and the
HMC default of `False` will be used.
Requires the LPAR to be on a z15 or later.
Returns:
`None` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns `None`.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job does not support cancellation.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
:exc:`~zhmcclient.StatusTimeout`: The timeout expired while
waiting for the desired LPAR status.
"""
body = {}
body['load-address'] = load_address
body['world-wide-port-name'] = wwpn
body['logical-unit-number'] = lun
if load_parameter:
body['load-parameter'] = load_parameter
if disk_partition_id is not None:
body['disk-partition-id'] = disk_partition_id
if operating_system_specific_load_parameters:
body['operating-system-specific-load-parameters'] = \
operating_system_specific_load_parameters
if boot_record_logical_block_address is not None:
body['boot-record-logical-block-address'] = \
boot_record_logical_block_address
if os_ipl_token is not None:
body['os-ipl-token'] = os_ipl_token
if force:
body['force'] = force
if secure_boot:
# Note: Requires SE >= 2.15, but caller needs to control this
body['secure-boot'] = secure_boot
result = self.manager.session.post(
self.uri + '/operations/scsi-dump', resource=self, body=body,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
if wait_for_completion:
statuses = ["operating"]
if allow_status_exceptions:
statuses.append("exceptions")
self.wait_for_status(statuses, status_timeout)
return result
@logged_api_call
def nvme_load(self, load_address, load_parameter=None, secure_boot=False,
clear_indicator=True, disk_partition_id=None,
operating_system_specific_load_parameters=None,
boot_record_logical_block_address=None, force=False,
wait_for_completion=True, operation_timeout=None,
status_timeout=None, allow_status_exceptions=False):
# pylint: disable=invalid-name
"""
Load (boot) this LPAR from a designated NVMe device, using the
HMC operation "NVMe Load".
This operation requires z15 or later.
This HMC operation has deferred status behavior: If the asynchronous
job on the HMC is complete, it takes a few seconds until the LPAR
status has reached the desired value. If `wait_for_completion=True`,
this method repeatedly checks the status of the LPAR after the HMC
operation has completed, and waits until the status is in the desired
state "operating", or if `allow_status_exceptions` was
set additionally in the state "exceptions".
HMC/SE version requirements:
* SE version >= 2.15.0
Authorization requirements:
* Object-access permission to this LPAR.
* Task permission for the "Load" task.
Parameters:
load_address (:term:`string`):
Device number of the boot device.
load_parameter (:term:`string`):
Optional load control string.
If empty string or `None`, it is not passed to the HMC, and the
HMC default of an empty string will be used.
secure_boot (bool):
Boolean controlling whether the system checks the software
signature of what is loaded against what the distributor signed it
with.
If `False` or `None`, it is not passed to the HMC, and the
HMC default of `False` will be used.
Requires the LPAR to be on a z15 or later.
clear_indicator (bool):
Optional boolean controlling whether the memory should be
cleared before performing the load or not cleared.
If `True` or `None`, it is not passed to the HMC, and the HMC
default of `True` will be used if the LPAR is on a z14 with
SE version 2.14.1 or higher.
Requires the LPAR to be on a z14 with SE version 2.14.1 or higher.
disk_partition_id (:term:`integer`):
Optional disk-partition-id (also called the boot program
selector) to be used for the NVMe Load.
If `None`, it is not passed to the HMC, and the HMC default
of 0 will be used.
operating_system_specific_load_parameters (:term:`string`):
Optional operating system specific load parameters to be
used for the NVMe Load.
If empty string or `None`, it is not passed to the HMC, and the
HMC default of an empty string will be used.
boot_record_logical_block_address (:term:`string`):
Optional hexadecimal boot record logical block address to
be used for the NVMe Load.
If `None`, it is not passed to the HMC, and the HMC default
of "0" will be used.
force (bool):
Boolean controlling whether this operation is permitted when the
LPAR is in the "operating" status.
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation, and for the status
becoming "operating" (or in addition "exceptions", if
`allow_status_exceptions` was set).
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
Timeout in seconds, for waiting that the status of the LPAR has
reached the desired status, after the HMC operation has completed.
The special value 0 means that no timeout is set. `None` means that
the default status timeout of the session is used.
If the timeout expires when `wait_for_completion=True`, a
:exc:`~zhmcclient.StatusTimeout` is raised.
allow_status_exceptions (bool):
Boolean controlling whether LPAR status "exceptions" is considered
an additional acceptable end status when `wait_for_completion` is
set.
Returns:
`None` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns `None`.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job does not support cancellation.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
:exc:`~zhmcclient.StatusTimeout`: The timeout expired while
waiting for the desired LPAR status.
"""
body = {}
body['load-address'] = load_address
if load_parameter:
body['load-parameter'] = load_parameter
if disk_partition_id is not None:
body['disk-partition-id'] = disk_partition_id
if operating_system_specific_load_parameters:
body['operating-system-specific-load-parameters'] = \
operating_system_specific_load_parameters
if boot_record_logical_block_address is not None:
body['boot-record-logical-block-address'] = \
boot_record_logical_block_address
if clear_indicator not in (True, None):
# Note: Requires SE >= 2.14.1, but caller needs to control this
body['clear-indicator'] = clear_indicator
if force:
body['force'] = force
if secure_boot:
# Note: Requires SE >= 2.15, but caller needs to control this
body['secure-boot'] = secure_boot
result = self.manager.session.post(
self.uri + '/operations/nvme-load', resource=self, body=body,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
if wait_for_completion:
statuses = ["operating"]
if allow_status_exceptions:
statuses.append("exceptions")
self.wait_for_status(statuses, status_timeout)
return result
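# Illustrative usage sketch (comment only): booting an LPAR from an NVMe
# device with secure boot and without clearing memory. `lpar` is assumed to
# be a zhmcclient.Lpar object; the device number is an example value.
#
#     lpar.nvme_load(load_address='0100', secure_boot=True,
#                    clear_indicator=False)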
@logged_api_call
def nvme_dump(self, load_address, load_parameter=None,
secure_boot=False, disk_partition_id=None,
operating_system_specific_load_parameters=None,
boot_record_logical_block_address=None, force=False,
wait_for_completion=True, operation_timeout=None,
status_timeout=None, allow_status_exceptions=False):
# pylint: disable=invalid-name
"""
Load a standalone dump program from a designated NVMe device
in this LPAR, using the HMC operation "NVMe Dump".
This operation requires z15 or later.
This HMC operation has deferred status behavior: If the asynchronous
job on the HMC is complete, it takes a few seconds until the LPAR
status has reached the desired value. If `wait_for_completion=True`,
this method repeatedly checks the status of the LPAR after the HMC
operation has completed, and waits until the status is in the desired
state "operating", or if `allow_status_exceptions` was
set additionally in the state "exceptions".
HMC/SE version requirements:
* SE version >= 2.15.0
Authorization requirements:
* Object-access permission to this LPAR.
* Task permission for the "NVMe Dump" task.
Parameters:
load_address (:term:`string`):
Device number of the boot device.
load_parameter (:term:`string`):
Optional load control string.
If empty string or `None`, it is not passed to the HMC, and the
HMC default of an empty string will be used.
secure_boot (bool):
Boolean controlling whether the system checks the software
signature of what is loaded against what the distributor signed it
with.
If `False` or `None`, it is not passed to the HMC, and the
HMC default of `False` will be used.
Requires the LPAR to be on a z15 or later.
disk_partition_id (:term:`integer`):
Optional disk-partition-id (also called the boot program
selector) to be used for the NVMe dump.
If `None`, it is not passed to the HMC, and the HMC default
of 0 will be used.
operating_system_specific_load_parameters (:term:`string`):
Optional operating system specific load parameters to be
used for the NVMe dump.
If empty string or `None`, it is not passed to the HMC, and the
HMC default of an empty string will be used.
boot_record_logical_block_address (:term:`string`):
Optional hexadecimal boot record logical block address to
be used for the NVMe dump.
If `None`, it is not passed to the HMC, and the HMC default
of "0" will be used.
force (bool):
Boolean controlling whether this operation is permitted when the
LPAR is in the "operating" status.
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation, and for the status
becoming "operating" (or in addition "exceptions", if
`allow_status_exceptions` was set).
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
Timeout in seconds, for waiting that the status of the LPAR has
reached the desired status, after the HMC operation has completed.
The special value 0 means that no timeout is set. `None` means that
the default status timeout of the session is used.
If the timeout expires when `wait_for_completion=True`, a
:exc:`~zhmcclient.StatusTimeout` is raised.
allow_status_exceptions (bool):
Boolean controlling whether LPAR status "exceptions" is considered
an additional acceptable end status when `wait_for_completion` is
set.
Returns:
`None` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns `None`.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job does not support cancellation.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
:exc:`~zhmcclient.StatusTimeout`: The timeout expired while
waiting for the desired LPAR status.
"""
body = {}
body['load-address'] = load_address
if load_parameter:
body['load-parameter'] = load_parameter
if disk_partition_id is not None:
body['disk-partition-id'] = disk_partition_id
if operating_system_specific_load_parameters:
body['operating-system-specific-load-parameters'] = \
operating_system_specific_load_parameters
if boot_record_logical_block_address is not None:
body['boot-record-logical-block-address'] = \
boot_record_logical_block_address
if force:
body['force'] = force
if secure_boot:
# Note: Requires SE >= 2.15, but caller needs to control this
body['secure-boot'] = secure_boot
result = self.manager.session.post(
self.uri + '/operations/nvme-dump', resource=self, body=body,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
if wait_for_completion:
statuses = ["operating"]
if allow_status_exceptions:
statuses.append("exceptions")
self.wait_for_status(statuses, status_timeout)
return result
@logged_api_call
def load(self, load_address=None, load_parameter=None,
clear_indicator=True, store_status_indicator=False,
wait_for_completion=True, operation_timeout=None,
status_timeout=None, allow_status_exceptions=False,
force=False):
"""
Load (boot) this LPAR from a load address (boot device), using the HMC
operation "Load Logical Partition".
This HMC operation has deferred status behavior: If the asynchronous
job on the HMC is complete, it takes a few seconds until the LPAR
status has reached the desired value. If `wait_for_completion=True`,
this method repeatedly checks the status of the LPAR after the HMC
operation has completed, and waits until the status is in the desired
state "operating", or if `allow_status_exceptions` was
set additionally in the state "exceptions".
HMC/SE version requirements: None
Authorization requirements:
* Object-access permission to this LPAR.
* Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access
permission to the CPC of this LPAR.
* Task permission for the "Load" task.
Parameters:
load_address (:term:`string`): Device number of the boot device.
Up to z13, this parameter is required.
Starting with z14, this parameter is optional and defaults to the
load address specified in the 'last-used-load-address' property of
the Lpar.
load_parameter (:term:`string`): Optional load control string.
If empty string or `None`, it is not passed to the HMC, and the
HMC default of an empty string will be used.
clear_indicator (bool):
Optional boolean controlling whether the memory should be
cleared before performing the load or not cleared. The
default value is `True`.
store_status_indicator (bool):
Optional boolean controlling whether the status should be
stored before performing the Load. The default value is `False`.
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation, and for the status
becoming "operating" (or in addition "exceptions", if
`allow_status_exceptions` was set).
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
Timeout in seconds, for waiting that the status of the LPAR has
reached the desired status, after the HMC operation has completed.
The special value 0 means that no timeout is set. `None` means that
the default status timeout of the session is used.
If the timeout expires when `wait_for_completion=True`, a
:exc:`~zhmcclient.StatusTimeout` is raised.
allow_status_exceptions (bool):
Boolean controlling whether LPAR status "exceptions" is considered
an additional acceptable end status when `wait_for_completion` is
set.
force (bool):
Boolean controlling whether this operation is permitted when the
LPAR is in the "operating" status.
TBD: What will happen with the LPAR in that case (deactivated then
activated? nothing?)
Returns:
`None` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns `None`.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job does not support cancellation.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
:exc:`~zhmcclient.StatusTimeout`: The timeout expired while
waiting for the desired LPAR status.
"""
body = {}
if load_address:
body['load-address'] = load_address
if load_parameter:
body['load-parameter'] = load_parameter
if force:
body['force'] = force
if not clear_indicator:
body['clear-indicator'] = clear_indicator
if store_status_indicator:
body['store-status-indicator'] = store_status_indicator
result = self.manager.session.post(
self.uri + '/operations/load', resource=self, body=body,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
if wait_for_completion:
statuses = ["operating"]
if allow_status_exceptions:
statuses.append("exceptions")
self.wait_for_status(statuses, status_timeout)
return result
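# Illustrative usage sketch (comment only): a plain Load without clearing
# memory, waiting up to 10 minutes for the LPAR to reach "operating" status.
# `lpar` and the load address are assumptions for the example.
#
#     lpar.load(load_address='0100', clear_indicator=False,
#               status_timeout=600)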
@logged_api_call
def load_from_ftp(
self, host, username, password, load_file, protocol='ftp',
wait_for_completion=True, operation_timeout=None,
status_timeout=None, allow_status_exceptions=False):
"""
Load (boot) this LPAR from an FTP server, using the HMC operation
"Load Logical Partition from FTP".
This operation is not permitted for an LPAR whose 'activation-mode'
property is "zaware" or "ssc".
This HMC operation has deferred status behavior: If the asynchronous
job on the HMC is complete, it takes a few seconds until the LPAR
status has reached the desired value. If `wait_for_completion=True`,
this method repeatedly checks the status of the LPAR after the HMC
operation has completed, and waits until the status is in the desired
state "operating", or if `allow_status_exceptions` was
set additionally in the state "exceptions".
HMC/SE version requirements:
* SE version >= 2.16.0
Authorization requirements:
* Object-access permission to this LPAR.
* Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access
permission to the CPC of this LPAR.
* Task permission for the "Load from Removable Media or Server" task.
Parameters:
host (string): Host name or IP address of the FTP server.
username (string): User name for the account on the FTP server.
password (string): Password that is associated with the user name on
the FTP server.
load_file (string): Path name of the file to be read from the FTP
server and loaded into the LPAR.
protocol (string): Network protocol for transferring files. Must be
one of:
* "ftp" - File Transfer Protocol
* "ftps" - FTP Secure
* "sftp" - SSH File Transfer Protocol
Default: "ftp"
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation, and for the status
becoming "operating" (or in addition "exceptions", if
`allow_status_exceptions` was set).
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
Timeout in seconds, for waiting that the status of the LPAR has
reached the desired status, after the HMC operation has completed.
The special value 0 means that no timeout is set. `None` means that
the default status timeout of the session is used.
If the timeout expires when `wait_for_completion=True`, a
:exc:`~zhmcclient.StatusTimeout` is raised.
allow_status_exceptions (bool):
Boolean controlling whether LPAR status "exceptions" is considered
an additional acceptable end status when `wait_for_completion` is
set.
Returns:
`None` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns `None`.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job does not support cancellation.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
:exc:`~zhmcclient.StatusTimeout`: The timeout expired while
waiting for the desired LPAR status.
"""
body = {
'host-name': host,
'user-name': username,
'password': password,
'file-path': load_file,
'protocol': protocol,
}
result = self.manager.session.post(
self.uri + '/operations/load-from-ftp', body=body,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
if wait_for_completion:
statuses = ["operating"]
if allow_status_exceptions:
statuses.append("exceptions")
self.wait_for_status(statuses, status_timeout)
return result
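# Illustrative usage sketch (comment only): loading an LPAR from an SFTP
# server. Host, credentials and file path are assumptions for the example.
#
#     lpar.load_from_ftp(host='ftp.example.com', username='ftpuser',
#                        password='secret', load_file='/images/ins.file',
#                        protocol='sftp')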
@logged_api_call
def stop(self, wait_for_completion=True, operation_timeout=None,
status_timeout=None, allow_status_exceptions=False):
"""
Stop this LPAR, using the HMC operation "Stop Logical
Partition". The stop operation stops the processors from
processing instructions.
This operation is not permitted for an LPAR whose 'activation-mode'
property is "zaware" or "ssc".
In order to succeed, the 'status' property of the LPAR must have one of
the following values:
* "not-operating"
* "operating"
* "exceptions"
HMC/SE version requirements: None
Authorization requirements:
* Object-access permission to this LPAR.
* Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access
permission to the CPC of this LPAR.
* Task permission for the "Stop" task.
Parameters:
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation.
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
**Deprecated:** This parameter was used for handling deferred status
behavior, which is not actually needed. Setting it to a non-default
value will cause a :exc:`~py:exceptions.DeprecationWarning` to be
issued.
allow_status_exceptions (bool):
**Deprecated:** This parameter was used for handling deferred status
behavior, which is not actually needed. Setting it to a non-default
value will cause a :exc:`~py:exceptions.DeprecationWarning` to be
issued.
Returns:
`None` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns `None`.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job does not support cancellation.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
"""
warn_deprecated_parameter(
Lpar, Lpar.stop, 'status_timeout', status_timeout, None)
warn_deprecated_parameter(
Lpar, Lpar.stop, 'allow_status_exceptions',
allow_status_exceptions, False)
body = None
result = self.manager.session.post(
self.uri + '/operations/stop', resource=self, body=body,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
return result
@logged_api_call
def start(self, wait_for_completion=True, operation_timeout=None,
status_timeout=None, allow_status_exceptions=False):
"""
Start this LPAR, using the HMC operation "Start Logical
Partition". The start operation starts the processors to process
instructions.
This operation is not permitted for an LPAR whose 'activation-mode'
property is "zaware" or "ssc".
In order to succeed, the 'status' property of the LPAR must have one of
the following values:
* "not-operating"
* "operating"
* "exceptions"
HMC/SE version requirements: None
Authorization requirements:
* Object-access permission to this LPAR.
* Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access
permission to the CPC of this LPAR.
* Task permission for the "Start" task.
Parameters:
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation.
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
**Deprecated:** This parameter was used for handling deferred status
behavior, which is not actually needed. Setting it to a non-default
value will cause a :exc:`~py:exceptions.DeprecationWarning` to be
issued.
allow_status_exceptions (bool):
**Deprecated:** This parameter was used for handling deferred status
behavior, which is not actually needed. Setting it to a non-default
value will cause a :exc:`~py:exceptions.DeprecationWarning` to be
issued.
Returns:
`None` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns `None`.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
"""
warn_deprecated_parameter(
Lpar, Lpar.start, 'status_timeout', status_timeout, None)
warn_deprecated_parameter(
Lpar, Lpar.start, 'allow_status_exceptions',
allow_status_exceptions, False)
body = None
result = self.manager.session.post(
self.uri + '/operations/start', resource=self, body=body,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
return result
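# Illustrative usage sketch (comment only): stopping the processors of an
# LPAR without waiting, waiting on the returned Job, then starting them
# again. `lpar` is assumed to be a zhmcclient.Lpar object.
#
#     job = lpar.stop(wait_for_completion=False)
#     job.wait_for_completion(operation_timeout=300)
#     lpar.start()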
@logged_api_call
def reset_clear(self, force=False, wait_for_completion=True,
operation_timeout=None, status_timeout=None,
allow_status_exceptions=False, os_ipl_token=None):
"""
Reset this LPAR and clears its memory.
This includes clearing its pending interruptions, resetting its channel
subsystem and resetting its processors, and clearing its memory, using
the HMC operation "Reset Clear".
In order to succeed, the 'status' property of the LPAR must have one of
the following values:
* "not-operating"
* "operating" - this requires setting the "force" flag
* "exceptions" - this requires setting the "force" flag
HMC/SE version requirements: None
Authorization requirements:
* Object-access permission to this LPAR.
* Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access
permission to the CPC of this LPAR.
* Task permission for the "Reset Clear" task.
Parameters:
force (bool):
Boolean controlling whether this operation is permitted when the
LPAR is in the "operating" status. The default is `False`.
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation.
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
**Deprecated:** This parameter was used for handling deferred status
behavior, which is not actually needed. Setting it to a non-default
value will cause a :exc:`~py:exceptions.DeprecationWarning` to be
issued.
allow_status_exceptions (bool):
**Deprecated:** This parameter was used for handling deferred status
behavior, which is not actually needed. Setting it to a non-default
value will cause a :exc:`~py:exceptions.DeprecationWarning` to be
issued.
os_ipl_token (:term:`string`):
Applicable only to z/OS, this parameter requests that this
operation only be performed if the provided value matches the
current value of the 'os-ipl-token' property of the LPAR.
Returns:
`None` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns `None`.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job does not support cancellation.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
"""
warn_deprecated_parameter(
Lpar, Lpar.reset_clear, 'status_timeout', status_timeout, None)
warn_deprecated_parameter(
Lpar, Lpar.reset_clear, 'allow_status_exceptions',
allow_status_exceptions, False)
body = {}
if force:
body['force'] = force
if os_ipl_token:
body['os-ipl-token'] = os_ipl_token
result = self.manager.session.post(
self.uri + '/operations/reset-clear', resource=self, body=body,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
return result
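# Illustrative usage sketch (comment only): clearing reset of an LPAR that is
# currently in "operating" status, which requires the force flag.
#
#     lpar.reset_clear(force=True)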
@logged_api_call
def reset_normal(self, force=False, wait_for_completion=True,
operation_timeout=None, status_timeout=None,
allow_status_exceptions=False, os_ipl_token=None):
"""
Reset this LPAR without clearing its memory.
This includes clearing its pending interruptions, resetting its channel
subsystem and resetting its processors, using the HMC operation
"Reset Normal".
In order to succeed, the 'status' property of the LPAR must have one of
the following values:
* "not-operating"
* "operating" - this requires setting the "force" flag
* "exceptions" - this requires setting the "force" flag
HMC/SE version requirements: None
Authorization requirements:
* Object-access permission to this LPAR.
* Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access
permission to the CPC of this LPAR.
* Task permission for the "Reset Clear" task.
Parameters:
force (bool):
Boolean controlling whether this operation is permitted when the
LPAR is in the "operating" status. The default is `False`.
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation.
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
**Deprecated:** This parameter was used for handling deferred status
behavior, which is not actually needed. Setting it to a non-default
value will cause a :exc:`~py:exceptions.DeprecationWarning` to be
issued.
allow_status_exceptions (bool):
**Deprecated:** This parameter was used for handling deferred status
behavior, which is not actually needed. Setting it to a non-default
value will cause a :exc:`~py:exceptions.DeprecationWarning` to be
issued.
os_ipl_token (:term:`string`):
Applicable only to z/OS, this parameter requests that this
operation only be performed if the provided value matches the
current value of the 'os-ipl-token' property of the LPAR.
Returns:
`None` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns `None`.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job does not support cancellation.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
"""
warn_deprecated_parameter(
Lpar, Lpar.reset_normal, 'status_timeout', status_timeout, None)
warn_deprecated_parameter(
Lpar, Lpar.reset_normal, 'allow_status_exceptions',
allow_status_exceptions, False)
body = {}
if force:
body['force'] = force
if os_ipl_token:
body['os-ipl-token'] = os_ipl_token
result = self.manager.session.post(
self.uri + '/operations/reset-normal', resource=self, body=body,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
return result
@logged_api_call
def open_os_message_channel(self, include_refresh_messages=True):
"""
Open a JMS message channel to this LPAR's operating system, returning
the string "topic" representing the message channel.
HMC/SE version requirements:
* SE version >= 2.13.1
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Operating System Messages" task at least
in view-only mode.
Parameters:
include_refresh_messages (bool):
Boolean controlling whether refresh operating systems messages
should be sent, as follows:
* If `True`, refresh messages will be received when the user
connects to the topic. The default.
* If `False`, refresh messages will not be received when the user
connects to the topic.
Returns:
:term:`string`:
Returns a string representing the os-message-notification JMS
topic. The user can connect to this topic to start the flow of
operating system messages.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'include-refresh-messages': include_refresh_messages}
result = self.manager.session.post(
self.uri + '/operations/open-os-message-channel',
resource=self, body=body)
return result['topic-name']
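# Illustrative usage sketch (comment only): opening the OS message channel.
# The returned JMS topic name can then be used with a notification receiver
# (for example zhmcclient.NotificationReceiver) to consume the messages;
# the receiver setup is not shown here.
#
#     topic = lpar.open_os_message_channel(include_refresh_messages=True)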
@logged_api_call
def send_os_command(self, os_command_text, is_priority=False):
"""
Send a command to the operating system running in this LPAR.
HMC/SE version requirements:
* SE version >= 2.13.1
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Operating System Messages" task in
modification mode.
Parameters:
os_command_text (string): The text of the operating system command.
is_priority (bool):
Boolean controlling whether this is a priority operating system
command, as follows:
* If `True`, this message is treated as a priority operating
system command.
* If `False`, this message is not treated as a priority
operating system command. The default.
Returns:
None
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'is-priority': is_priority,
'operating-system-command-text': os_command_text}
self.manager.session.post(
self.uri + '/operations/send-os-cmd', resource=self, body=body)
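# Illustrative usage sketch (comment only): sending a display command to the
# operating system in the LPAR. The command text is an example value.
#
#     lpar.send_os_command('D IPLINFO', is_priority=False)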
@logged_api_call
def list_os_messages(
self, begin=None, end=None, is_held=None, is_priority=None,
max_messages=0):
"""
List all currently available operating system messages for this
LPAR.
Only a certain amount of OS message data from each LPAR is
preserved by the HMC for retrieval by this operation. If the OS
produces more than that amount, the oldest non-held, non-priority
OS messages are no longer available. A gap in the sequence numbers
indicates a loss of messages. A loss may be due to that space
limitation, or it may be due to the deletion of messages by a console
user or the OS.
HMC/SE version requirements:
* SE version >= 2.14.0
Authorization requirements:
* Object-access permission to this LPAR.
* Task permission to the "Operating System Messages" task (optionally
in view-only mode).
Parameters:
begin (integer): A message sequence number to limit returned
messages. OS messages with a sequence number less than this are
omitted from the results. If `None`, no such filtering is
performed.
end (integer): A message sequence number to limit returned
messages. OS messages with a sequence number greater than this are
omitted from the results. If `None`, no such filtering is
performed.
is_held(bool): Limit the returned messages to only held (if `True`)
or only non-held (if `False`) messages. If `None`, no such filtering
is performed.
is_priority(bool): Limit the returned messages to only priority (if
`True`) or non-priority (if `False`) messages. If `None`, no such
filtering is performed.
max_messages(int): Limits the returned messages to the specified
maximum number, starting from the beginning of the sequence numbers
in the result that would otherwise be returned.
If 0, no such filtering is performed.
Returns:
list of dict: List of OS messages, where each OS message is a dict
with the items defined for the "os-message-info" data structure
in the :term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
query_parms = []
if begin is not None:
query_parms.append(f'begin-sequence-number={begin}')
if end is not None:
query_parms.append(f'end-sequence-number={end}')
if is_held is not None:
query_parms.append(f'is-held={str(is_held).lower()}')
if is_priority is not None:
query_parms.append(
f'is-priority={str(is_priority).lower()}')
if max_messages > 0:
query_parms.append(f'max-messages={max_messages}')
query_str = make_query_str(query_parms)
result = self.manager.session.get(
f'{self.uri}/operations/list-os-messages{query_str}',
resource=self)
return result
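# Illustrative usage sketch (comment only): retrieving the 10 most recent
# priority OS messages. The items of the result follow the "os-message-info"
# data structure described in the HMC API book.
#
#     messages = lpar.list_os_messages(is_priority=True, max_messages=10)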
@logged_api_call
def psw_restart(self, wait_for_completion=True, operation_timeout=None,
status_timeout=None, allow_status_exceptions=False):
"""
Restart this LPAR, using the HMC operation "PSW Restart".
In order to succeed, the 'status' property of the LPAR must have one of
the following values:
* "not-operating"
* "operating"
* "exceptions"
HMC/SE version requirements: None
Authorization requirements:
* Object-access permission to this LPAR.
* Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access
permission to the CPC of this LPAR.
* Task permission for the "PSW Restart" task.
Parameters:
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation.
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
**Deprecated:** This parameter was used for handling deferred status
behavior, which is not actually needed. Setting it to a non-default
value will cause a :exc:`~py:exceptions.DeprecationWarning` to be
issued.
allow_status_exceptions (bool):
**Deprecated:** This parameter was used for handling deferred status
behavior, which is not actually needed. Setting it to a non-default
value will cause a :exc:`~py:exceptions.DeprecationWarning` to be
issued.
Returns:
`None` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns `None`.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job does not support cancellation.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
"""
warn_deprecated_parameter(
Lpar, Lpar.psw_restart, 'status_timeout', status_timeout, None)
warn_deprecated_parameter(
Lpar, Lpar.psw_restart, 'allow_status_exceptions',
allow_status_exceptions, False)
body = None
result = self.manager.session.post(
self.uri + '/operations/psw-restart', resource=self, body=body,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
return result
@logged_api_call
def wait_for_status(self, status, status_timeout=None):
"""
Wait until the status of this LPAR has a desired value.
HMC/SE version requirements: None
Parameters:
status (:term:`string` or iterable of :term:`string`):
Desired LPAR status or set of status values to reach; one or more
of the following values:
* ``"not-activated"`` - The LPAR is not active.
* ``"not-operating"`` - The LPAR is active but no operating system
is running in the LPAR.
* ``"operating"`` - The LPAR is active and an operating system is
running in the LPAR.
* ``"exceptions"`` - The LPAR or its CPC has one or more unusual
conditions.
Note that the description of LPAR status values in the
:term:`HMC API` book (as of its version 2.13.1) is partly
confusing.
status_timeout (:term:`number`):
Timeout in seconds, for waiting that the status of the LPAR has
reached one of the desired status values. The special value 0 means
that no timeout is set.
`None` means that the default status timeout will be used.
If the timeout expires, a :exc:`~zhmcclient.StatusTimeout` is
raised.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.StatusTimeout`: The timeout expired while
waiting for the desired LPAR status.
"""
if status_timeout is None:
status_timeout = \
self.manager.session.retry_timeout_config.status_timeout
if status_timeout > 0:
end_time = time.time() + status_timeout
if isinstance(status, (list, tuple)):
statuses = status
else:
statuses = [status]
HMC_LOGGER.debug("Waiting for LPAR %r to have status: %s "
"(timeout: %s sec)",
self.name, status, status_timeout)
while True:
# Fastest way to get actual status value:
lpars = self.manager.cpc.lpars.list(
filter_args={'name': self.name})
assert len(lpars) == 1
this_lpar = lpars[0]
actual_status = this_lpar.get_property('status')
if actual_status in statuses:
return
# pylint: disable=possibly-used-before-assignment
if status_timeout > 0 and time.time() > end_time:
raise StatusTimeout(
f"Waiting for LPAR {self.name} to reach status(es) "
f"'{statuses}' timed out after {status_timeout} s - "
f"current status is '{actual_status}'",
actual_status, statuses, status_timeout)
time.sleep(1) # Avoid hot spin loop
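# Illustrative usage sketch (comment only): waiting up to 10 minutes for an
# LPAR to reach one of two acceptable states after an activation.
#
#     lpar.wait_for_status(["operating", "exceptions"], status_timeout=600)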
@logged_api_call
def assign_certificate(self, certificate):
"""
Assigns a :term:`Certificate` to this LPAR.
HMC/SE version requirements:
* :ref:`API feature` "secure-boot-with-certificates"
Authorization requirements:
* Object-access permission to this LPAR.
* Object-access permission to the specified certificate.
* Task permission to the "Assign Secure Boot Certificates" task.
Parameters:
certificate (:class:`~zhmcclient.Certificate`):
Certificate to be assigned. The certificate must not currently
be assigned to this LPAR.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'certificate-uri': certificate.uri}
self.manager.session.post(
self.uri + '/operations/assign-certificate', resource=self,
body=body)
@logged_api_call
def unassign_certificate(self, certificate):
"""
Unassign a :term:`Certificate` from this LPAR.
HMC/SE version requirements:
* :ref:`API feature` "secure-boot-with-certificates"
Authorization requirements:
* Object-access permission to this LPAR.
* Object-access permission to the specified certificate.
* Task permission to the "Assign Secure Boot Certificates" task.
Parameters:
certificate (:class:`~zhmcclient.Certificate`):
Certificate to be unassigned. The certificate must currently be
assigned to this LPAR.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'certificate-uri': certificate.uri}
self.manager.session.post(
self.uri + '/operations/unassign-certificate', resource=self,
body=body)
@logged_api_call
def get_sustainability_data(
self, range="last-week", resolution="one-hour",
custom_range_start=None, custom_range_end=None):
# pylint: disable=redefined-builtin
"""
Get energy management related metrics for the LPAR on a specific
historical time range. The metrics are returned as multiple data points
covering the requested time range with the requested resolution.
This method performs the "Get LPAR Historical Sustainability Data"
HMC operation.
HMC/SE version requirements:
* :ref:`API feature` "environmental-metrics"
Authorization requirements:
* Object-access permission to this LPAR
* Task permission to the "Environmental Dashboard" task
Parameters:
range (:term:`string`):
Time range for the requested data points, as follows:
* "last-day" - Last 24 hours.
* "last-week" - Last 7 days (default).
* "last-month" - Last 30 days.
* "last-three-months" - Last 90 days.
* "last-six-months" - Last 180 days.
* "last-year" - Last 365 days.
* "custom" - From `custom_range_start` to `custom_range_end`.
resolution (:term:`string`):
Resolution for the requested data points. This is the time interval
in between the data points. For systems where the
"environmental-metrics" API feature is not available, the minimum
resolution is "one-hour".
The possible values are as follows:
* "fifteen-minutes" - 15 minutes.
* "one-hour" - 60 minutes (default).
* "one-day" - 24 hours.
* "one-week" - 7 days.
* "one-month" - 30 days.
custom_range_start (:class:`~py:datetime.datetime`):
Start of custom time range. Timezone-naive values are interpreted
using the local system time. Required if `range` is "custom".
custom_range_end (:class:`~py:datetime.datetime`):
End of custom time range. Timezone-naive values are interpreted
using the local system time. Required if `range` is "custom".
Returns:
dict: A dictionary with items as described for the response body
of the "Get LPAR Historical Sustainability Data" HMC operation.
Timestamp fields are represented as timezone-aware
:class:`~py:datetime.datetime` objects.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {
'range': range,
'resolution': resolution,
}
if range == "custom":
body['custom-range-start'] = \
timestamp_from_datetime(custom_range_start)
body['custom-range-end'] = \
timestamp_from_datetime(custom_range_end)
result = self.manager.session.post(
self.uri + '/operations/get-historical-sustainability-data',
body=body)
for field_array in result.values():
for item in field_array:
if 'timestamp' in item:
item['timestamp'] = \
datetime_from_timestamp(item['timestamp'])
return result
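# Illustrative usage sketch (comment only): retrieving sustainability metrics
# for a custom two-day range with one-hour resolution. The dates are example
# values; timezone-naive datetimes are interpreted in local system time.
#
#     from datetime import datetime
#     data = lpar.get_sustainability_data(
#         range="custom", resolution="one-hour",
#         custom_range_start=datetime(2024, 1, 1),
#         custom_range_end=datetime(2024, 1, 3))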
|
GHSA-p57h-3cmc-xpjq
|
zhmcclient/_partition.py
|
@@ -176,7 +176,8 @@ def list(self, full_properties=False, filter_args=None,
list_uri, result_prop, full_properties, filter_args,
additional_properties)
- @logged_api_call
+ @logged_api_call(blanked_properties=['boot-ftp-password', 'ssc-master-pw'],
+ properties_pos=1)
def create(self, properties):
"""
Create and configure a Partition in this CPC.
@@ -591,7 +592,8 @@ def delete(self):
self.get_properties_local(self.manager._name_prop, None))
self.cease_existence_local()
- @logged_api_call
+ @logged_api_call(blanked_properties=['boot-ftp-password', 'ssc-master-pw'],
+ properties_pos=1)
def update_properties(self, properties):
"""
Update writeable properties of this Partition.
|
# Copyright 2016,2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A :term:`Partition` is a subset of the hardware resources of a :term:`CPC`
in DPM mode, virtualized as a separate computer.
Partitions can be created and deleted dynamically, and their resources such
as CPU, memory or I/O devices can be configured dynamically.
You can create as many partition definitions as you want, but only a specific
number of partitions can be active at any given time.
TODO: How can a user find out what the maximum is, before it is reached?
Partition resources are contained in CPC resources.
Partition resources only exist in CPCs that are in DPM mode. CPCs in classic
mode (or ensemble mode) have :term:`LPAR` resources, instead.
"""
import time
import copy
from requests.utils import quote
from ._manager import BaseManager
from ._resource import BaseResource
from ._exceptions import StatusTimeout
from ._nic import NicManager
from ._hba import HbaManager
from ._virtual_function import VirtualFunctionManager
from ._logging import logged_api_call
from ._utils import RC_PARTITION, make_query_str, datetime_from_timestamp, \
timestamp_from_datetime
__all__ = ['PartitionManager', 'Partition']
class PartitionManager(BaseManager):
"""
Manager providing access to the :term:`Partitions <Partition>` in a
particular :term:`CPC`.
Derived from :class:`~zhmcclient.BaseManager`; see there for common methods
and attributes.
Objects of this class are not directly created by the user; they are
accessible via the following instance variable of a
:class:`~zhmcclient.Cpc` object (in DPM mode):
* :attr:`~zhmcclient.Cpc.partitions`
HMC/SE version requirements:
* SE version >= 2.13.1
"""
def __init__(self, cpc):
# This function should not go into the docs.
# Parameters:
# cpc (:class:`~zhmcclient.Cpc`):
# CPC defining the scope for this manager.
# Resource properties that are supported as filter query parameters.
# If the support for a resource property changes within the set of HMC
# versions that support this type of resource, this list must be set up
# for the version of the HMC this session is connected to.
query_props = [
'name',
'status',
]
super().__init__(
resource_class=Partition,
class_name=RC_PARTITION,
session=cpc.manager.session,
parent=cpc,
base_uri='/api/partitions',
oid_prop='object-id',
uri_prop='object-uri',
name_prop='name',
query_props=query_props,
supports_properties=True)
@property
def cpc(self):
"""
:class:`~zhmcclient.Cpc`: :term:`CPC` defining the scope for this
manager.
"""
return self._parent
@logged_api_call
# pylint: disable=arguments-differ
def list(self, full_properties=False, filter_args=None,
additional_properties=None):
"""
List the Partitions in this CPC.
Any resource property may be specified in a filter argument. For
details about filter arguments, see :ref:`Filtering`.
The listing of resources is handled in an optimized way:
* If this manager is enabled for :ref:`auto-updating`, a locally
maintained resource list is used (which is automatically updated via
inventory notifications from the HMC) and the provided filter
arguments are applied.
* Otherwise, if the filter arguments specify the resource name as a
single filter argument with a straight match string (i.e. without
regular expressions), an optimized lookup is performed based on a
locally maintained name-URI cache.
* Otherwise, the HMC List operation is performed with the subset of the
provided filter arguments that can be handled on the HMC side and the
remaining filter arguments are applied on the client side on the list
result.
HMC/SE version requirements:
* SE version >= 2.13.1
Authorization requirements:
* Object-access permission to this CPC.
* Object-access permission to any Partition to be included in the
result.
Parameters:
full_properties (bool):
Controls whether the full set of resource properties should be
retrieved, vs. only the short set as returned by the list
operation.
filter_args (dict):
Filter arguments that narrow the list of returned resources to
those that match the specified filter arguments. For details, see
:ref:`Filtering`.
`None` causes no filtering to happen, i.e. all resources are
returned.
additional_properties (list of string):
List of property names that are to be returned in addition to the
default properties.
This parameter requires HMC 2.16.0 or higher.
Returns:
: A list of :class:`~zhmcclient.Partition` objects.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
result_prop = 'partitions'
list_uri = f'{self.cpc.uri}/partitions'
return self._list_with_operation(
list_uri, result_prop, full_properties, filter_args,
additional_properties)
@logged_api_call
def create(self, properties):
"""
Create and configure a Partition in this CPC.
HMC/SE version requirements:
* SE version >= 2.13.1
Authorization requirements:
* Object-access permission to this CPC.
* Task permission to the "New Partition" task.
Parameters:
properties (dict): Initial property values.
Allowable properties are defined in section 'Request body contents'
in section 'Create Partition' in the :term:`HMC API` book.
Returns:
Partition:
The resource object for the new Partition.
The object will have its 'object-uri' property set as returned by
the HMC, and will also have the input properties set.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
result = self.session.post(self.cpc.uri + '/partitions',
body=properties)
# There should not be overlaps, but just in case there are, the
# returned props should overwrite the input props:
props = copy.deepcopy(properties)
props.update(result)
name = props.get(self._name_prop, None)
uri = props[self._uri_prop]
part = Partition(self, uri, name, props)
self._name_uri_cache.update(name, uri)
return part
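# Illustrative usage sketch (comment only): creating a small partition. The
# property names are taken from the "Create Partition" section of the HMC API
# book; the CPC name and the values are assumptions for the example.
#
#     cpc = client.cpcs.find(name='CPC1')
#     partition = cpc.partitions.create({
#         'name': 'PART1',
#         'ifl-processors': 2,
#         'initial-memory': 4096,
#         'maximum-memory': 4096,
#     })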
class Partition(BaseResource):
"""
Representation of a :term:`Partition`.
Derived from :class:`~zhmcclient.BaseResource`; see there for common
methods and attributes.
Objects of this class are not directly created by the user; they are
returned from creation or list functions on their manager object
(in this case, :class:`~zhmcclient.PartitionManager`).
HMC/SE version requirements:
* SE version >= 2.13.1
"""
def __init__(self, manager, uri, name=None, properties=None):
# This function should not go into the docs.
# manager (:class:`~zhmcclient.PartitionManager`):
# Manager object for this resource object.
# uri (string):
# Canonical URI path of the resource.
# name (string):
# Name of the resource.
# properties (dict):
# Properties to be set for this resource object. May be `None` or
# empty.
assert isinstance(manager, PartitionManager), (
f"Partition init: Expected manager type {PartitionManager}, "
f"got {type(manager)}")
super().__init__(manager, uri, name, properties)
# The manager objects for child resources (with lazy initialization):
self._nics = None
self._hbas = None
self._virtual_functions = None
@property
def nics(self):
"""
:class:`~zhmcclient.NicManager`: Access to the :term:`NICs <NIC>` in
this Partition.
"""
# We do here some lazy loading.
if not self._nics:
self._nics = NicManager(self)
return self._nics
@property
def hbas(self):
"""
:class:`~zhmcclient.HbaManager`: Access to the :term:`HBAs <HBA>` in
this Partition.
On systems with the "dpm-storage-management"
:ref:`firmware feature <firmware features>` (i.e. z14), the CPC will
not have any HBA objects anymore (they are now Virtual Storage Resource
objects), but this property still provides a manager object for
consistency.
"""
# We do here some lazy loading.
if not self._hbas:
self._hbas = HbaManager(self)
return self._hbas
@property
def virtual_functions(self):
"""
:class:`~zhmcclient.VirtualFunctionManager`: Access to the
:term:`Virtual Functions <Virtual Function>` in this Partition.
"""
# We do here some lazy loading.
if not self._virtual_functions:
self._virtual_functions = VirtualFunctionManager(self)
return self._virtual_functions
@logged_api_call
def feature_enabled(self, feature_name):
"""
Indicates whether the specified
:ref:`firmware feature <firmware features>` is enabled for the CPC of
this partition.
The specified firmware feature must be available for the CPC.
For a list of available firmware features, see section
"Firmware Features" in the :term:`HMC API` book, or use the
:meth:`feature_info` method.
HMC/SE version requirements:
* HMC version >= 2.14.0 with HMC API version >= 2.23
Authorization requirements:
* Object-access permission to this partition.
Parameters:
feature_name (:term:`string`): The name of the firmware feature.
Returns:
bool: `True` if the firmware feature is enabled, or `False` if the
firmware feature is disabled.
Raises:
:exc:`ValueError`: Firmware features are not supported on the HMC.
:exc:`ValueError`: The specified firmware feature is not available
for the CPC.
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
feature_list = self.prop('available-features-list', None)
if feature_list is None:
raise ValueError("Firmware features are not supported on the HMC")
for feature in feature_list:
if feature['name'] == feature_name:
break
else:
raise ValueError(
f"Firmware feature {feature_name} is not available for CPC "
f"{self.manager.cpc.name}")
return feature['state'] # pylint: disable=undefined-loop-variable
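# Illustrative usage sketch (comment only): checking a firmware feature
# before relying on it. The feature name shown is one documented firmware
# feature, used here only as an example.
#
#     if partition.feature_enabled('dpm-storage-management'):
#         pass  # use storage groups / virtual storage resources, not HBAs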
@logged_api_call
def feature_info(self):
"""
Returns information about the :ref:`firmware features` available for
the CPC of this partition.
HMC/SE version requirements:
* HMC version >= 2.14.0 with HMC API version >= 2.23
Authorization requirements:
* Object-access permission to this partition.
Returns:
:term:`iterable`:
An iterable where each item represents one firmware feature that is
available for the CPC of this partition.
Each item is a dictionary with the following items:
* `name` (:term:`unicode string`): Name of the feature.
* `description` (:term:`unicode string`): Short description of
the feature.
* `state` (bool): Enablement state of the feature (`True` if
enabled, `False` if disabled).
Raises:
:exc:`ValueError`: Firmware features are not supported on the HMC.
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
feature_list = self.prop('available-features-list', None)
if feature_list is None:
raise ValueError("Firmware features are not supported on the HMC")
return feature_list
@logged_api_call
def start(self, wait_for_completion=True, operation_timeout=None,
status_timeout=None):
"""
Start (activate) this Partition, using the HMC operation "Start
Partition".
This HMC operation has deferred status behavior: If the asynchronous
job on the HMC is complete, it takes a few seconds until the partition
status has reached the desired value (it still may show status
"paused"). If `wait_for_completion=True`, this method repeatedly checks
the status of the partition after the HMC operation has completed, and
waits until the status is in one of the desired states "active" or
"degraded".
TODO: Describe what happens if the maximum number of active partitions
is exceeded.
HMC/SE version requirements:
* SE version >= 2.13.1
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Start Partition" task.
Parameters:
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation.
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
            Timeout in seconds, for waiting until the status of the partition
            has reached the desired status, after the HMC operation has
            completed.
The special value 0 means that no timeout is set. `None` means that
the default status timeout of the session is used.
If the timeout expires when `wait_for_completion=True`, a
:exc:`~zhmcclient.StatusTimeout` is raised.
Returns:
:class:`py:dict` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns an empty
:class:`py:dict` object.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job supports cancellation. Note that it may no longer be
possible to cancel the job after some point. The job status and
reason codes will indicate whether the job was canceled or ran to
completion.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
:exc:`~zhmcclient.StatusTimeout`: The timeout expired while
waiting for the desired partition status.
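        Example:

        Illustrative sketch; it assumes that ``partition`` is an existing
        :class:`~zhmcclient.Partition` object in a stopped state:

        .. code-block:: python

            # Start and wait until the partition is "active" or "degraded":
            partition.start(wait_for_completion=True)

            # Start asynchronously; the returned Job object can be used to
            # track completion:
            job = partition.start(wait_for_completion=False)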
"""
result = self.manager.session.post(
self.uri + '/operations/start', resource=self,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
if wait_for_completion:
statuses = ["active", "degraded"]
self.wait_for_status(statuses, status_timeout)
return result
@logged_api_call
def stop(self, wait_for_completion=True, operation_timeout=None,
status_timeout=None):
"""
Stop (deactivate) this Partition, using the HMC operation "Stop
Partition".
HMC/SE version requirements:
* SE version >= 2.13.1
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Stop Partition" task.
Parameters:
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation.
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
            Timeout in seconds, for waiting until the status of the partition
            has reached the desired status, after the HMC operation has
            completed.
The special value 0 means that no timeout is set. `None` means that
the default status timeout of the session is used.
If the timeout expires when `wait_for_completion=True`, a
:exc:`~zhmcclient.StatusTimeout` is raised.
Returns:
:class:`py:dict` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns an empty
:class:`py:dict` object.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job supports cancellation. Note that it may no longer be
possible to cancel the job after some point. The job status and
reason codes will indicate whether the job was canceled or ran to
completion.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
:exc:`~zhmcclient.StatusTimeout`: The timeout expired while
waiting for the desired partition status.
"""
result = self.manager.session.post(
self.uri + '/operations/stop', resource=self,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
if wait_for_completion:
statuses = ["stopped"]
self.wait_for_status(statuses, status_timeout)
return result
@logged_api_call
def delete(self):
"""
Delete this Partition.
HMC/SE version requirements:
* SE version >= 2.13.1
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Delete Partition" task.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
# pylint: disable=protected-access
self.manager.session.delete(self.uri, resource=self)
self.manager._name_uri_cache.delete(
self.get_properties_local(self.manager._name_prop, None))
self.cease_existence_local()
@logged_api_call
def update_properties(self, properties):
"""
Update writeable properties of this Partition.
This method serializes with other methods that access or change
properties on the same Python object.
HMC/SE version requirements:
* SE version >= 2.13.1
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Partition Details" task.
Parameters:
properties (dict): New values for the properties to be updated.
Properties not to be updated are omitted.
Allowable properties are the properties with qualifier (w) in
section 'Data model' in section 'Partition object' in the
:term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
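        Example:

        Illustrative sketch; it assumes that ``partition`` is an existing
        :class:`~zhmcclient.Partition` object and that 'description' is one of
        its writeable properties:

        .. code-block:: python

            partition.update_properties({'description': 'Updated by script'})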
"""
# pylint: disable=protected-access
self.manager.session.post(self.uri, resource=self, body=properties)
is_rename = self.manager._name_prop in properties
if is_rename:
# Delete the old name from the cache
self.manager._name_uri_cache.delete(self.name)
self.update_properties_local(copy.deepcopy(properties))
if is_rename:
# Add the new name to the cache
self.manager._name_uri_cache.update(self.name, self.uri)
@logged_api_call
def dump_partition(self, parameters, wait_for_completion=True,
operation_timeout=None):
"""
Dump this Partition, by loading a standalone dump program from a SCSI
device and starting its execution, using the HMC operation
'Dump Partition'.
HMC/SE version requirements:
* SE version >= 2.13.1 without
:ref:`firmware feature <firmware features>` "dpm-storage-management"
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Dump Partition" task.
Parameters:
parameters (dict): Input parameters for the operation.
Allowable input parameters are defined in section
'Request body contents' in section 'Dump Partition' in the
:term:`HMC API` book.
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation.
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
Returns:
:class:`py:dict` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns an empty
:class:`py:dict` object.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job does not support cancellation.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
"""
result = self.manager.session.post(
self.uri + '/operations/scsi-dump', resource=self,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout,
body=parameters)
return result
@logged_api_call
def start_dump_program(self, parameters, wait_for_completion=True,
operation_timeout=None):
"""
Dump this Partition, by loading a standalone dump program from a storage
volume and starting its execution, using the HMC operation
'Start Dump Program'.
HMC/SE version requirements:
* :ref:`firmware feature <firmware features>` "dpm-storage-management"
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Dump Partition" task.
Parameters:
parameters (dict): Input parameters for the operation.
Allowable input parameters are defined in section
'Request body contents' in section 'Start Dump Program' in the
:term:`HMC API` book.
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation.
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
Returns:
:class:`py:dict` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns an empty
:class:`py:dict` object.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job does not support cancellation.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
"""
result = self.manager.session.post(
self.uri + '/operations/start-dump-program', resource=self,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout,
body=parameters)
return result
@logged_api_call
def psw_restart(self, wait_for_completion=True, operation_timeout=None):
"""
Initiates a PSW restart for this Partition, using the HMC operation
'Perform PSW Restart'.
HMC/SE version requirements:
* SE version >= 2.13.1
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "PSW Restart" task.
Parameters:
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation.
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
Returns:
:class:`py:dict` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns an empty
:class:`py:dict` object.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
This job does not support cancellation.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
"""
result = self.manager.session.post(
self.uri + '/operations/psw-restart', resource=self,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
return result
@logged_api_call
def mount_iso_image(self, image, image_name, ins_file_name):
"""
Upload an ISO image and associate it to this Partition
using the HMC operation 'Mount ISO Image'.
When the partition already has an ISO image associated,
the newly uploaded image replaces the current one.
HMC/SE version requirements:
* SE version >= 2.13.1
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Partition Details" task.
Parameters:
image (:term:`byte string` or file-like object):
The content of the ISO image.
            Images larger than 2 GB cannot be specified as a byte string; they
            must be specified as a file-like object.
File-like objects must have opened the file in binary mode.
image_name (:term:`string`): The displayable name of the image.
This value must be a valid Linux file name without directories,
must not contain blanks, and must end with '.iso' in lower case.
This value will be shown in the 'boot-iso-image-name' property of
this partition.
ins_file_name (:term:`string`): The path name of the INS file within
the file system of the ISO image.
This value will be shown in the 'boot-iso-ins-file' property of
this partition.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
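        Example:

        Illustrative sketch; it assumes that ``partition`` is an existing
        :class:`~zhmcclient.Partition` object, and the file path and INS file
        name are placeholders:

        .. code-block:: python

            with open('myimage.iso', 'rb') as image_fp:
                partition.mount_iso_image(
                    image_fp, image_name='myimage.iso',
                    ins_file_name='generic.ins')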
"""
query_parms_str = (
f"?image-name={quote(image_name, safe='')}"
f"&ins-file-name={quote(ins_file_name, safe='')}")
self.manager.session.post(
self.uri + '/operations/mount-iso-image' + query_parms_str,
resource=self, body=image)
@logged_api_call
def unmount_iso_image(self):
"""
Unmount the currently mounted ISO from this Partition using the HMC
operation 'Unmount ISO Image'. This operation sets the partition's
'boot-iso-image-name' and 'boot-iso-ins-file' properties to null.
HMC/SE version requirements:
* SE version >= 2.13.1
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Partition Details" task.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
self.manager.session.post(
self.uri + '/operations/unmount-iso-image', resource=self)
@logged_api_call
def open_os_message_channel(self, include_refresh_messages=True):
"""
Open a JMS message channel to this partition's operating system,
returning the string "topic" representing the message channel.
HMC/SE version requirements:
* SE version >= 2.13.1
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Operating System Messages" task at least
in view-only mode.
Parameters:
include_refresh_messages (bool):
            Boolean controlling whether refresh operating system messages
            should be sent, as follows:
            * If `True`, refresh messages will be received when the user
              connects to the topic. The default.
            * If `False`, refresh messages will not be received when the user
              connects to the topic.
Returns:
:term:`string`:
Returns a string representing the os-message-notification JMS
topic. The user can connect to this topic to start the flow of
operating system messages.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'include-refresh-messages': include_refresh_messages}
result = self.manager.session.post(
self.uri + '/operations/open-os-message-channel', resource=self,
body=body)
return result['topic-name']
@logged_api_call
def send_os_command(self, os_command_text, is_priority=False):
"""
Send a command to the operating system running in this partition.
HMC/SE version requirements:
* SE version >= 2.13.1
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Operating System Messages" task in
modification mode.
Parameters:
os_command_text (string): The text of the operating system command.
is_priority (bool):
Boolean controlling whether this is a priority operating system
command, as follows:
* If `True`, this message is treated as a priority operating
system command.
* If `False`, this message is not treated as a priority
operating system command. The default.
Returns:
None
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'is-priority': is_priority,
'operating-system-command-text': os_command_text}
self.manager.session.post(
self.uri + '/operations/send-os-cmd', resource=self, body=body)
@logged_api_call
def list_os_messages(self, begin=None, end=None):
"""
List all currently available operating system messages for this
partition.
Only a certain amount of OS message data from each partition is
preserved by the HMC for retrieval by this operation. If the OS
produces more than that amount, the oldest non-held, non-priority
OS messages are no longer available. A gap in the sequence numbers
indicates a loss of messages. A loss may be due to that space
limitation, or it may be due to the deletion of messages by a console
user or the OS.
HMC/SE version requirements:
* SE version >= 2.14.0
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Operating System Messages" task (optionally
in view-only mode).
Parameters:
begin (integer): A message sequence number to limit returned
messages. OS messages with a sequence number less than this are
omitted from the results. If `None`, no such filtering is
performed.
end (integer): A message sequence number to limit returned
messages. OS messages with a sequence number greater than this are
omitted from the results. If `None`, no such filtering is
performed.
Returns:
list of dict: List of OS messages, where each OS message is a dict
with the items defined for the "os-message-info" data structure
in the :term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
query_parms = []
if begin is not None:
query_parms.append(f'begin-sequence-number={begin}')
if end is not None:
query_parms.append(f'end-sequence-number={end}')
query_str = make_query_str(query_parms)
result = self.manager.session.get(
f'{self.uri}/operations/list-os-messages{query_str}',
resource=self)
return result
@logged_api_call
def create_os_websocket(self, force_takeover=False):
"""
Create a WebSocket on the HMC, which allows accessing the console of
the operating system running in the partition using the integrated
ASCII console of the HMC, and return the WebSocket URI for use by a
WebSocket client.
This is done by performing the "Get ASCII Console WebSocket URI"
HMC operation.
For more details on how to use a WebSocket client to interact with the
integrated ASCII console, see
:ref:`Using WebSocket to access OS console`.
HMC/SE version requirements:
* HMC version >= 2.14.0 with HMC API version >= 2.22
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Integrated ASCII Console" task.
Parameters:
force_takeover (bool):
Boolean controlling whether to break any possibly existing
WebSockets on other HMCs to the same partition, as follows:
* If `True`, existing WebSockets are broken up and the operation
proceeds.
* If `False`, existing WebSockets are not broken up and the
operation fails.
Note that only existing WebSockets on *other* HMCs can be taken
over, but not existing WebSockets on the current HMC.
Returns:
:term:`string`:
Returns a string representing the canonical URI of the new
WebSocket, e.g.
``/api/websock/4a4f1hj12hldmm26brcpfnydk663gt6gtyxq4iwto26g2r6wq1/1``.
Depending on which WebSocket client is used, a full URI may need to
be constructed from the returned string by prepending the secure
WebSocket URI scheme ``wss`` and the HMC's IP address and port, e.g.
``wss://9.10.11.12:6794/api/websock/4a4f1hj12hldmm26brcpfnydk663gt6gtyxq4iwto26g2r6wq1/1``.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
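        Example:

        Illustrative sketch that constructs a full WebSocket URI from the
        returned path; it assumes that ``partition`` is an existing
        :class:`~zhmcclient.Partition` object and that ``session`` is its
        logged-on :class:`~zhmcclient.Session`:

        .. code-block:: python

            ws_path = partition.create_os_websocket()
            ws_uri = f"wss://{session.actual_host}:{session.port}{ws_path}"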
""" # pylint: disable=line-too-long
body = {'force-takeover': force_takeover}
result = self.manager.session.post(
self.uri + '/operations/get-ascii-console-websocket-uri',
resource=self, body=body)
return result['websocket-uri']
@logged_api_call
def wait_for_status(self, status, status_timeout=None):
"""
Wait until the status of this partition has a desired value.
HMC/SE version requirements:
* SE version >= 2.13.1
Parameters:
status (:term:`string` or iterable of :term:`string`):
Desired partition status or set of status values to reach; one or
more of the values defined for the 'status' property in the
data model for partitions in the :term:`HMC API` book.
status_timeout (:term:`number`):
            Timeout in seconds, for waiting until the status of the partition
            has reached one of the desired status values. The special value 0
            means that no timeout is set.
`None` means that the default status timeout will be used.
If the timeout expires, a :exc:`~zhmcclient.StatusTimeout` is
raised.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.StatusTimeout`: The status timeout expired while
waiting for the desired partition status.
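        Example:

        Illustrative sketch; it assumes that ``partition`` is an existing
        :class:`~zhmcclient.Partition` object:

        .. code-block:: python

            # Wait up to 10 minutes for the partition to become active or
            # degraded:
            partition.wait_for_status(['active', 'degraded'],
                                      status_timeout=600)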
"""
if status_timeout is None:
status_timeout = \
self.manager.session.retry_timeout_config.status_timeout
if status_timeout > 0:
end_time = time.time() + status_timeout
if isinstance(status, (list, tuple)):
statuses = status
else:
statuses = [status]
while True:
actual_status = self.get_properties_pulled('status')
if actual_status in statuses:
return
# pylint: disable=possibly-used-before-assignment
if status_timeout > 0 and time.time() > end_time:
raise StatusTimeout(
f"Waiting for partition {self.name} to reach status(es) "
f"'{statuses}' timed out after {status_timeout} s - "
f"current status is '{actual_status}'",
actual_status, statuses, status_timeout)
time.sleep(1) # Avoid hot spin loop
@logged_api_call
def increase_crypto_config(self, crypto_adapters,
crypto_domain_configurations):
"""
Add crypto adapters and/or crypto domains to the crypto configuration
of this partition.
The general principle for maintaining crypto configurations of
partitions is as follows: Each adapter included in the crypto
configuration of a partition has all crypto domains included in the
crypto configuration. Each crypto domain included in the crypto
configuration has the same access mode on all adapters included in the
crypto configuration.
Example: Assume that the current crypto configuration of a partition
includes crypto adapter A and crypto domains 0 and 1. When this method
is called to add adapter B and domain configurations for domains 1 and
2, the resulting crypto configuration of the partition will include
domains 0, 1, and 2 on each of the adapters A and B.
HMC/SE version requirements:
* SE version >= 2.13.1
Authorization requirements:
* Object-access permission to this Partition.
* Object-access permission to the specified Crypto Adapter.
* Task permission to the "Partition Details" task.
Parameters:
crypto_adapters (:term:`iterable` of :class:`~zhmcclient.Adapter`):
Crypto adapters that should be added to the crypto configuration of
this partition.
crypto_domain_configurations (:term:`iterable` of `domain_config`):
Crypto domain configurations that should be added to the crypto
configuration of this partition.
A crypto domain configuration (`domain_config`) is a dictionary
with the following keys:
* ``"domain-index"`` (:term:`integer`): Domain index of the crypto
domain.
The domain index is a number in the range of 0 to a maximum that
depends on the model of the crypto adapter and the CPC model. For
the Crypto Express 5S adapter in a z13, the maximum domain index
is 84.
* ``"access-mode"`` (:term:`string`): Access mode for the crypto
domain.
The access mode specifies the way the partition can use the
crypto domain on the crypto adapter(s), using one of the
following string values:
* ``"control"`` - The partition can load cryptographic keys into
the domain, but it may not use the domain to perform
cryptographic operations.
* ``"control-usage"`` - The partition can load cryptographic keys
into the domain, and it can use the domain to perform
cryptographic operations.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
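        Example:

        Illustrative sketch; it assumes that ``partition`` is an existing
        :class:`~zhmcclient.Partition` object and that ``crypto_adapter`` is an
        existing crypto :class:`~zhmcclient.Adapter` object:

        .. code-block:: python

            partition.increase_crypto_config(
                crypto_adapters=[crypto_adapter],
                crypto_domain_configurations=[
                    {'domain-index': 1, 'access-mode': 'control-usage'},
                ])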
"""
crypto_adapter_uris = [a.uri for a in crypto_adapters]
body = {'crypto-adapter-uris': crypto_adapter_uris,
'crypto-domain-configurations': crypto_domain_configurations}
self.manager.session.post(
self.uri + '/operations/increase-crypto-configuration',
resource=self, body=body)
@logged_api_call
def decrease_crypto_config(self, crypto_adapters,
crypto_domain_indexes):
"""
Remove crypto adapters and/or crypto domains from the crypto
configuration of this partition.
For the general principle for maintaining crypto configurations of
partitions, see :meth:`~zhmcclient.Partition.increase_crypto_config`.
Example: Assume that the current crypto configuration of a partition
includes crypto adapters A, B and C and crypto domains 0, 1, and 2 (on
each of the adapters). When this method is called to remove adapter C
and domain 2, the resulting crypto configuration of the partition will
include domains 0 and 1 on each of the adapters A and B.
HMC/SE version requirements:
* SE version >= 2.13.1
Authorization requirements:
* Object-access permission to this Partition.
* Object-access permission to the specified Crypto Adapters.
* Task permission to the "Partition Details" task.
Parameters:
crypto_adapters (:term:`iterable` of :class:`~zhmcclient.Adapter`):
Crypto adapters that should be removed from the crypto
configuration of this partition.
crypto_domain_indexes (:term:`iterable` of :term:`integer`):
Domain indexes of the crypto domains that should be removed from
the crypto configuration of this partition. For values, see
:meth:`~zhmcclient.Partition.increase_crypto_config`.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
crypto_adapter_uris = [a.uri for a in crypto_adapters]
body = {'crypto-adapter-uris': crypto_adapter_uris,
'crypto-domain-indexes': crypto_domain_indexes}
self.manager.session.post(
self.uri + '/operations/decrease-crypto-configuration',
resource=self, body=body)
@logged_api_call
def change_crypto_domain_config(self, crypto_domain_index, access_mode):
"""
Change the access mode for a crypto domain that is currently included
in the crypto configuration of this partition.
The access mode will be changed for the specified crypto domain on all
crypto adapters currently included in the crypto configuration of this
partition.
For the general principle for maintaining crypto configurations of
partitions, see :meth:`~zhmcclient.Partition.increase_crypto_config`.
HMC/SE version requirements:
* SE version >= 2.13.1
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Partition Details" task.
Parameters:
crypto_domain_index (:term:`integer`):
Domain index of the crypto domain to be changed. For values, see
:meth:`~zhmcclient.Partition.increase_crypto_config`.
access_mode (:term:`string`):
The new access mode for the crypto domain. For values, see
:meth:`~zhmcclient.Partition.increase_crypto_config`.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'domain-index': crypto_domain_index,
'access-mode': access_mode}
self.manager.session.post(
self.uri + '/operations/change-crypto-domain-configuration',
resource=self, body=body)
@logged_api_call
def zeroize_crypto_domain(self, crypto_adapter, crypto_domain_index):
"""
Zeroize a single crypto domain on a crypto adapter.
Zeroizing a crypto domain clears the cryptographic keys and
non-compliance mode settings in the crypto domain.
The crypto domain must be attached to this partition in "control-usage"
access mode.
Supported CPC versions: z14 GA2 and above, and the corresponding
LinuxOne systems.
HMC/SE version requirements:
* SE version >= 2.14.1
Authorization requirements:
* Object-access permission to this Partition.
* Object-access permission to the specified Crypto Adapter.
* Task permission to the "Zeroize Crypto Domain" task.
Parameters:
crypto_adapter (:class:`~zhmcclient.Adapter`):
Crypto adapter with the crypto domain to be zeroized.
crypto_domain_index (:term:`integer`):
Domain index of the crypto domain to be zeroized.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {
'crypto-adapter-uri': crypto_adapter.uri,
'domain-index': crypto_domain_index
}
self.manager.session.post(
self.uri + '/operations/zeroize-crypto-domain', resource=self,
body=body)
@logged_api_call
def attach_storage_group(self, storage_group):
"""
Attach a :term:`storage group` to this partition.
This will cause the :term:`storage volumes <storage volume>` of the
storage group to be attached to the partition, instantiating any
necessary :term:`virtual storage resource` objects.
A storage group can be attached to a partition regardless of its
fulfillment state. The fulfillment state of its storage volumes
and thus of the entire storage group changes as volumes are discovered
by DPM, and will eventually reach "complete".
HMC/SE version requirements:
* :ref:`firmware feature <firmware features>` "dpm-storage-management"
Authorization requirements:
* Object-access permission to this partition.
* Object-access permission to the specified storage group.
* Task permission to the "Partition Details" task.
Parameters:
storage_group (:class:`~zhmcclient.StorageGroup`):
Storage group to be attached. The storage group must not currently
be attached to this partition.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'storage-group-uri': storage_group.uri}
self.manager.session.post(
self.uri + '/operations/attach-storage-group', resource=self,
body=body)
@logged_api_call
def detach_storage_group(self, storage_group):
"""
Detach a :term:`storage group` from this partition.
This will cause the :term:`storage volumes <storage volume>` of the
storage group to be detached from the partition, removing any
:term:`virtual storage resource` objects that had been created upon
attachment.
A storage group can be detached from a partition regardless of its
fulfillment state. The fulfillment state of its storage volumes
changes as volumes are discovered by DPM.
HMC/SE version requirements:
* :ref:`firmware feature <firmware features>` "dpm-storage-management"
Authorization requirements:
* Object-access permission to this partition.
* Task permission to the "Partition Details" task.
Parameters:
storage_group (:class:`~zhmcclient.StorageGroup`):
Storage group to be detached. The storage group must currently
be attached to this partition.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'storage-group-uri': storage_group.uri}
self.manager.session.post(
self.uri + '/operations/detach-storage-group', resource=self,
body=body)
@logged_api_call
def list_attached_storage_groups(self, full_properties=False):
"""
Return the storage groups that are attached to this partition.
HMC/SE version requirements:
* :ref:`firmware feature <firmware features>` "dpm-storage-management"
Authorization requirements:
* Object-access permission to this partition.
Parameters:
full_properties (bool):
            Controls whether the full set of resource properties for each
            returned storage group is retrieved, vs. only the following short
            set: "object-uri", "object-id", "class", "parent".
TODO: Verify short list of properties.
Returns:
List of :class:`~zhmcclient.StorageGroup` objects representing the
storage groups that are attached to this partition.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
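        Example:

        Illustrative sketch; it assumes that ``partition`` is an existing
        :class:`~zhmcclient.Partition` object:

        .. code-block:: python

            for sg in partition.list_attached_storage_groups():
                print(sg.name)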
"""
sg_list = []
sg_uris = self.get_property('storage-group-uris')
if sg_uris:
console = self.manager.cpc.manager.console
for sg_uri in sg_uris:
sg = console.storage_groups.resource_object(sg_uri)
sg_list.append(sg)
if full_properties:
sg.pull_full_properties()
return sg_list
@logged_api_call
def assign_certificate(self, certificate):
"""
Assigns a :term:`Certificate` to this partition.
HMC/SE version requirements:
* :ref:`API feature <API features>` "secure-boot-with-certificates"
Authorization requirements:
* Object-access permission to this partition.
* Object-access permission to the specified certificate.
* Task permission to the "Assign Secure Boot Certificates" task.
Parameters:
certificate (:class:`~zhmcclient.Certificate`):
Certificate to be assigned. The certificate must not currently
be assigned to this partition.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'certificate-uri': certificate.uri}
self.manager.session.post(
self.uri + '/operations/assign-certificate', resource=self,
body=body)
@logged_api_call
def unassign_certificate(self, certificate):
"""
Unassign a :term:`Certificate` from this partition.
HMC/SE version requirements:
* :ref:`API feature <API features>` "secure-boot-with-certificates"
Authorization requirements:
* Object-access permission to this partition.
* Object-access permission to the specified certificate.
* Task permission to the "Assign Secure Boot Certificates" task.
Parameters:
certificate (:class:`~zhmcclient.Certificate`):
Certificate to be unassigned. The certificate must currently be
assigned to this partition.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'certificate-uri': certificate.uri}
self.manager.session.post(
self.uri + '/operations/unassign-certificate', resource=self,
body=body)
def dump(self):
"""
Dump this Partition resource with its properties and child resources
(recursively) as a resource definition.
The returned resource definition has the following format::
{
# Resource properties:
"properties": {...},
# Child resources:
"nics": [...],
"hbas": [...],
"virtual_functions": [...],
}
Returns:
dict: Resource definition of this resource.
"""
# Dump the resource properties
resource_dict = super().dump()
# Dump the child resources
nics = self.nics.dump()
if nics:
resource_dict['nics'] = nics
if self.hbas is not None:
# Note: z14 and later do not have HBAs
hbas = self.hbas.dump()
if hbas:
resource_dict['hbas'] = hbas
virtual_functions = self.virtual_functions.dump()
if virtual_functions:
resource_dict['virtual_functions'] = virtual_functions
return resource_dict
@logged_api_call
def get_sustainability_data(
self, range="last-week", resolution="one-hour",
custom_range_start=None, custom_range_end=None):
# pylint: disable=redefined-builtin
"""
Get energy management related metrics for the partition on a specific
historical time range. The metrics are returned as multiple data points
covering the requested time range with the requested resolution.
This method performs the "Get Partition Historical Sustainability Data"
HMC operation.
HMC/SE version requirements:
* :ref:`API feature <API features>` "environmental-metrics"
Authorization requirements:
* Object-access permission to this partition
* Task permission to the "Environmental Dashboard" task
Parameters:
range (:term:`string`):
Time range for the requested data points, as follows:
* "last-day" - Last 24 hours.
* "last-week" - Last 7 days (default).
* "last-month" - Last 30 days.
* "last-three-months" - Last 90 days.
* "last-six-months" - Last 180 days.
* "last-year" - Last 365 days.
* "custom" - From `custom_range_start` to `custom_range_end`.
resolution (:term:`string`):
Resolution for the requested data points. This is the time interval
in between the data points. For systems where the
"environmental-metrics" API feature is not available, the minimum
resolution is "one-hour".
The possible values are as follows:
* "fifteen-minutes" - 15 minutes.
* "one-hour" - 60 minutes (default).
* "one-day" - 24 hours.
* "one-week" - 7 days.
* "one-month" - 30 days.
custom_range_start (:class:`~py:datetime.datetime`):
Start of custom time range. Timezone-naive values are interpreted
using the local system time. Required if `range` is "custom".
custom_range_end (:class:`~py:datetime.datetime`):
End of custom time range. Timezone-naive values are interpreted
using the local system time. Required if `range` is "custom".
Returns:
dict: A dictionary with items as described for the response body
of the "Get Partition Historical Sustainability Data" HMC operation.
Timestamp fields are represented as timezone-aware
:class:`~py:datetime.datetime` objects.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
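        Example:

        Illustrative sketch; it assumes that ``partition`` is an existing
        :class:`~zhmcclient.Partition` object:

        .. code-block:: python

            data = partition.get_sustainability_data(
                range="last-day", resolution="fifteen-minutes")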
"""
body = {
'range': range,
'resolution': resolution,
}
if range == "custom":
body['custom-range-start'] = \
timestamp_from_datetime(custom_range_start)
body['custom-range-end'] = \
timestamp_from_datetime(custom_range_end)
result = self.manager.session.post(
self.uri + '/operations/get-historical-sustainability-data',
body=body)
for field_array in result.values():
for item in field_array:
if 'timestamp' in item:
item['timestamp'] = \
datetime_from_timestamp(item['timestamp'])
return result
|
GHSA-p57h-3cmc-xpjq
|
zhmcclient/_session.py
|
@@ -39,7 +39,7 @@
DEFAULT_OPERATION_TIMEOUT, DEFAULT_STATUS_TIMEOUT, \
DEFAULT_NAME_URI_CACHE_TIMETOLIVE, HMC_LOGGER_NAME, \
HTML_REASON_WEB_SERVICES_DISABLED, HTML_REASON_OTHER, \
- DEFAULT_HMC_PORT
+ DEFAULT_HMC_PORT, BLANKED_OUT_STRING
from ._utils import repr_obj_id
from ._version import __version__
@@ -54,7 +54,14 @@
'Accept': '*/*'
}
-BLANKED_OUT = '********' # Replacement for blanked out sensitive values
+# Properties whose values are always blanked out in the HMC log entries
+BLANKED_OUT_PROPERTIES = [
+ 'boot-ftp-password', # partition create/update
+ 'bind-password', # LDAP server def. create/update
+ 'ssc-master-pw', # image profile cr/upd, part. cr/upd, LPAR upd
+ 'password', # user create/update
+ 'zaware-master-pw', # image profile create/update, LPAR update
+]
def _handle_request_exc(exc, retry_timeout_config):
@@ -263,7 +270,7 @@ def _headers_for_logging(headers):
"""
if headers and 'X-API-Session' in headers:
headers = headers.copy()
- headers['X-API-Session'] = BLANKED_OUT
+ headers['X-API-Session'] = BLANKED_OUT_STRING
return headers
@@ -465,7 +472,7 @@ def __repr__(self):
f" _actual_host={self._actual_host!r},\n"
f" _base_url={self._base_url!r},\n"
f" _headers={headers!r},\n"
- f" _session_id={BLANKED_OUT!r},\n"
+ f" _session_id={BLANKED_OUT_STRING!r},\n"
f" _session={self._session!r}\n"
f" _object_topic={self._object_topic!r}\n"
f" _job_topic={self._job_topic!r}\n"
@@ -960,8 +967,11 @@ def _log_http_request(
# structured data such as a password or session IDs.
pass
else:
- if 'password' in content_dict:
- content_dict['password'] = BLANKED_OUT
+ for prop in BLANKED_OUT_PROPERTIES:
+ try:
+ content_dict[prop] = BLANKED_OUT_STRING
+ except KeyError:
+ pass
content = dict2json(content_dict)
trunc = 30000
if content_len > trunc:
@@ -1029,11 +1039,11 @@ def _log_http_response(
if 'request-headers' in content_dict:
headers_dict = content_dict['request-headers']
if 'x-api-session' in headers_dict:
- headers_dict['x-api-session'] = BLANKED_OUT
+ headers_dict['x-api-session'] = BLANKED_OUT_STRING
if 'api-session' in content_dict:
- content_dict['api-session'] = BLANKED_OUT
+ content_dict['api-session'] = BLANKED_OUT_STRING
if 'session-credential' in content_dict:
- content_dict['session-credential'] = BLANKED_OUT
+ content_dict['session-credential'] = BLANKED_OUT_STRING
content = dict2json(content_dict)
if status >= 400:
content_label = 'content'
|
# Copyright 2016,2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Session class: A session to the HMC, optionally in context of an HMC user.
"""
import json
import time
import re
from copy import copy
from collections import OrderedDict
from collections.abc import Iterable
import requests
import urllib3
from ._exceptions import HTTPError, ServerAuthError, ClientAuthError, \
ParseError, ConnectTimeout, ReadTimeout, RetriesExceeded, \
OperationTimeout, Error
from ._exceptions import ConnectionError # pylint: disable=redefined-builtin
from ._timestats import TimeStatsKeeper
from ._auto_updater import AutoUpdater
from ._logging import get_logger, logged_api_call
from ._constants import DEFAULT_CONNECT_TIMEOUT, DEFAULT_CONNECT_RETRIES, \
DEFAULT_READ_TIMEOUT, DEFAULT_READ_RETRIES, DEFAULT_MAX_REDIRECTS, \
DEFAULT_OPERATION_TIMEOUT, DEFAULT_STATUS_TIMEOUT, \
DEFAULT_NAME_URI_CACHE_TIMETOLIVE, HMC_LOGGER_NAME, \
HTML_REASON_WEB_SERVICES_DISABLED, HTML_REASON_OTHER, \
DEFAULT_HMC_PORT
from ._utils import repr_obj_id
from ._version import __version__
__all__ = ['Session', 'Job', 'RetryTimeoutConfig', 'get_password_interface']
HMC_LOGGER = get_logger(HMC_LOGGER_NAME)
_HMC_SCHEME = "https"
_STD_HEADERS = {
'User-Agent': f'python-zhmcclient/{__version__}',
'Content-type': 'application/json',
'Accept': '*/*'
}
BLANKED_OUT = '********' # Replacement for blanked out sensitive values
def _handle_request_exc(exc, retry_timeout_config):
"""
Handle a :exc:`request.exceptions.RequestException` exception that was
raised.
"""
if isinstance(exc, requests.exceptions.ConnectTimeout):
new_exc = ConnectTimeout(_request_exc_message(exc), exc,
retry_timeout_config.connect_timeout,
retry_timeout_config.connect_retries)
new_exc.__cause__ = None
raise new_exc # ConnectTimeout
if isinstance(exc, requests.exceptions.ReadTimeout):
new_exc = ReadTimeout(_request_exc_message(exc), exc,
retry_timeout_config.read_timeout,
retry_timeout_config.read_retries)
new_exc.__cause__ = None
raise new_exc # ReadTimeout
if isinstance(exc, requests.exceptions.RetryError):
new_exc = RetriesExceeded(_request_exc_message(exc), exc,
retry_timeout_config.connect_retries)
new_exc.__cause__ = None
raise new_exc # RetriesExceeded
new_exc = ConnectionError(_request_exc_message(exc), exc)
new_exc.__cause__ = None
raise new_exc # ConnectionError
def _request_exc_message(exc):
"""
Return a reasonable exception message from a
:exc:`request.exceptions.RequestException` exception.
The approach is to dig deep to the original reason, if the original
exception is present, skipping irrelevant exceptions such as
`urllib3.exceptions.MaxRetryError`, and eliminating useless object
representations such as the connection pool object in
`urllib3.exceptions.NewConnectionError`.
Parameters:
exc (:exc:`~request.exceptions.RequestException`): Exception
Returns:
string: A reasonable exception message from the specified exception.
"""
messages = []
for arg in exc.args:
if isinstance(arg, Exception):
org_exc = arg
if isinstance(org_exc, urllib3.exceptions.MaxRetryError):
message = f"{org_exc}, reason: {org_exc.reason}"
else:
message = str(org_exc)
else:
message = str(arg)
        # Eliminate useless object repr at the beginning of the message
m = re.match(r'^(\(<[^>]+>, \'(.*)\'\)|<[^>]+>: (.*))$', message)
if m:
message = m.group(2) or m.group(3)
messages.append(message)
return ", ".join(messages)
class RetryTimeoutConfig:
# pylint: disable=too-few-public-methods
"""
    A configuration setting that specifies various retry counts and timeout
durations.
HMC/SE version requirements: None
"""
def __init__(self, connect_timeout=None, connect_retries=None,
read_timeout=None, read_retries=None, max_redirects=None,
operation_timeout=None, status_timeout=None,
name_uri_cache_timetolive=None):
"""
For all parameters, `None` means that this object does not specify a
value for the parameter, and that a default value should be used
(see :ref:`Constants`).
All parameters are available as instance attributes.
Parameters:
connect_timeout (:term:`number`): Connect timeout in seconds.
This timeout applies to making a connection at the socket level.
The same socket connection is used for sending an HTTP request to
the HMC and for receiving its HTTP response.
The special value 0 means that no timeout is set.
connect_retries (:term:`integer`): Number of retries (after the
initial attempt) for connection-related issues. These retries are
performed for failed DNS lookups, failed socket connections, and
socket connection timeouts.
read_timeout (:term:`number`): Read timeout in seconds.
This timeout applies to reading at the socket level, when receiving
an HTTP response.
The special value 0 means that no timeout is set.
read_retries (:term:`integer`): Number of retries (after the
initial attempt) for read-related issues. These retries are
performed for failed socket reads and socket read timeouts.
A retry consists of resending the original HTTP request. The
zhmcclient restricts these retries to just the HTTP GET method.
For other HTTP methods, no retry will be performed.
max_redirects (:term:`integer`): Maximum number of HTTP redirects.
operation_timeout (:term:`number`): Asynchronous operation timeout in
seconds. This timeout applies when waiting for the completion of
asynchronous HMC operations. The special value 0 means that no
timeout is set.
status_timeout (:term:`number`): Resource status timeout in seconds.
This timeout applies when waiting for the transition of the status
of a resource to a desired status. The special value 0 means that
no timeout is set.
name_uri_cache_timetolive (:term:`number`): Time to the next
automatic invalidation of the Name-URI cache of manager objects, in
seconds since the last invalidation. The special value 0 means
that no Name-URI cache is maintained (i.e. the caching is
disabled).
"""
self.connect_timeout = connect_timeout
self.connect_retries = connect_retries
self.read_timeout = read_timeout
self.read_retries = read_retries
self.max_redirects = max_redirects
self.operation_timeout = operation_timeout
self.status_timeout = status_timeout
self.name_uri_cache_timetolive = name_uri_cache_timetolive
# Read retries only for these HTTP methods:
self.allowed_methods = {'GET'}
_attrs = ('connect_timeout', 'connect_retries', 'read_timeout',
'read_retries', 'max_redirects', 'operation_timeout',
'status_timeout', 'name_uri_cache_timetolive',
'allowed_methods')
def override_with(self, override_config):
"""
Return a new configuration object that represents the configuration
from this configuration object acting as a default, and the specified
configuration object overriding that default for any of its
attributes that are not `None`.
Parameters:
override_config (:class:`~zhmcclient.RetryTimeoutConfig`):
The configuration object overriding the defaults defined in this
configuration object.
Returns:
:class:`~zhmcclient.RetryTimeoutConfig`:
A new configuration object representing this configuration object,
overridden by the specified configuration object.
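        Example:

        Illustrative sketch with placeholder values:

        .. code-block:: python

            defaults = RetryTimeoutConfig(connect_timeout=30,
                                          connect_retries=3)
            overrides = RetryTimeoutConfig(connect_retries=1)
            effective = defaults.override_with(overrides)
            # effective.connect_timeout == 30
            # effective.connect_retries == 1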
"""
ret = RetryTimeoutConfig()
for attr in RetryTimeoutConfig._attrs:
value = getattr(self, attr)
if override_config and getattr(override_config, attr) is not None:
value = getattr(override_config, attr)
setattr(ret, attr, value)
return ret
def get_password_interface(host, userid):
"""
Interface to the password retrieval function that is invoked by
:class:`~zhmcclient.Session` if no password is provided.
Parameters:
host (string): Hostname or IP address of the HMC
userid (string): Userid on the HMC
Returns:
string: Password of the userid on the HMC
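    Example:

    Illustrative sketch of a function that follows this interface; prompting
    via the standard ``getpass`` module is just one possible choice:

    .. code-block:: python

        import getpass

        def get_password(host, userid):
            return getpass.getpass(f"Password for {userid} on {host}: ")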
"""
raise NotImplementedError
def _headers_for_logging(headers):
"""
Return the input headers dict with blanked out values for any headers that
carry sensitive information, so that it can be logged or displayed.
    The headers argument is not modified; if changes are needed, they are
    applied to a copy.
"""
if headers and 'X-API-Session' in headers:
headers = headers.copy()
headers['X-API-Session'] = BLANKED_OUT
return headers
class Session:
"""
A session to the HMC, optionally in context of an HMC user.
The session supports operations that require to be authenticated, as well
as operations that don't (e.g. obtaining the API version).
The session can keep statistics about the elapsed time for issuing HTTP
requests against the HMC API. Instance variable
:attr:`~zhmcclient.Session.time_stats_keeper` is used to enable/disable the
measurements, and to print the statistics.
HMC/SE version requirements: None
"""
default_rt_config = RetryTimeoutConfig(
connect_timeout=DEFAULT_CONNECT_TIMEOUT,
connect_retries=DEFAULT_CONNECT_RETRIES,
read_timeout=DEFAULT_READ_TIMEOUT,
read_retries=DEFAULT_READ_RETRIES,
max_redirects=DEFAULT_MAX_REDIRECTS,
operation_timeout=DEFAULT_OPERATION_TIMEOUT,
status_timeout=DEFAULT_STATUS_TIMEOUT,
name_uri_cache_timetolive=DEFAULT_NAME_URI_CACHE_TIMETOLIVE,
)
def __init__(self, host, userid=None, password=None, session_id=None,
get_password=None, retry_timeout_config=None,
port=DEFAULT_HMC_PORT, verify_cert=True):
# pylint: disable=line-too-long
"""
Creating a session object will not immediately cause a logon to be
attempted; the logon is deferred until needed.
There are several alternatives for specifying the authentication
related parameters:
* `userid`/`password` only: The session is initially in a logged-off
state and subsequent operations that require logon will use the
specified userid and password to automatically log on. The returned
session-id will be stored in this session object. Subsequent
operations that require logon will use that session-id. Once the HMC
expires that session-id, subsequent operations that require logon
will cause a re-logon with the specified userid and password.
* `userid`/`password` and `session_id`: The specified session-id will
be stored in this session object, so that the session is initially in
a logged-on state. Subsequent operations that require logon will use
that session-id. Once the HMC expires that session-id, subsequent
operations that require logon will cause a re-logon with the
specified userid/password.
In this case, the `host` parameter must specify the single HMC that
has that session.
* `session_id` only: The specified session-id will be stored in this
session object, so that the session is initially in a logged-on
state. Subsequent operations that require logon will use the stored
session-id. Once the HMC expires the session-id, subsequent
operations that require logon will cause an
:exc:`~zhmcclient.ServerAuthError` to be raised (because
userid/password have not been specified, so an automatic re-logon is
not possible).
In this case, the `host` parameter must specify the single HMC that
has that session.
        * Neither `userid`/`password` nor `session_id`: Only operations that do
          not require logon are possible.
Parameters:
host (:term:`string` or iterable of :term:`string`):
HMC host or list of HMC hosts to try from.
For valid formats, see the :attr:`~zhmcclient.Session.host`
property.
If `session_id` is specified, this must be the single HMC that has
that session.
Must not be `None`.
userid (:term:`string`):
Userid of the HMC user to be used, or `None`.
password (:term:`string`):
Password of the HMC user to be used, if `userid` was specified.
session_id (:term:`string`):
Session-id to be used for this session, or `None`.
get_password (:term:`callable`):
A password retrieval function, or `None`.
If provided, this function will be called if a password is needed
but not provided. This mechanism can be used for example by command
line interfaces for prompting for the password.
The password retrieval function must follow the interface
defined in :func:`~zhmcclient.get_password_interface`.
retry_timeout_config (:class:`~zhmcclient.RetryTimeoutConfig`):
The retry/timeout configuration for this session for use by any of
its HMC operations, overriding any defaults.
`None` for an attribute in that configuration object means that the
default value will be used for that attribute.
`None` for the entire `retry_timeout_config` parameter means that a
default configuration will be used with the default values for all
of its attributes.
See :ref:`Constants` for the default values.
port (:term:`integer`):
HMC TCP port. Defaults to
:attr:`~zhmcclient._constants.DEFAULT_HMC_PORT`.
For details, see the :attr:`~zhmcclient.Session.port` property.
verify_cert (bool or :term:`string`):
Controls whether and how the client verifies the server certificate
presented by the HMC during SSL/TLS handshake:
* `False`: Do not verify the HMC certificate. Not verifying the HMC
certificate means the zhmcclient will not detect hostname
mismatches, expired certificates, revoked certificates, or
otherwise invalid certificates. Since this mode makes the
connection vulnerable to man-in-the-middle attacks, it is insecure
and should not be used in production environments.
* `True`: Verify the HMC certificate using the CA certificates from
the first of these locations:
- The file or directory in the REQUESTS_CA_BUNDLE env.var, if set
- The file or directory in the CURL_CA_BUNDLE env.var, if set
- The Python 'certifi' package (which contains the
`Mozilla Included CA Certificate List <https://wiki.mozilla.org/CA/Included_Certificates>`_).
* :term:`string`: Path name of a certificate file or directory.
Verify the HMC certificate using the CA certificates in that file
or directory.
For details, see the :ref:`HMC certificate` section.
*Added in version 0.31*
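        Example:

        Illustrative sketch; host name, userid, password and CA certificate
        path are placeholders:

        .. code-block:: python

            session = zhmcclient.Session(
                'myhmc.acme.com', userid='myuser', password='mypassword',
                verify_cert='/path/to/ca.pem')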
""" # noqa: E501
# pylint: enable=line-too-long
if isinstance(host, str):
self._hosts = [host]
else:
self._hosts = list(host)
assert len(self._hosts) >= 1
self._port = port
self._userid = userid
self._password = password
self._verify_cert = verify_cert
self._get_password = get_password
self._retry_timeout_config = self.default_rt_config.override_with(
retry_timeout_config)
self._headers = copy(_STD_HEADERS) # dict with standard HTTP headers
if session_id is not None:
# Create a logged-on state (nearly same state as in _do_logon())
self._session_id = session_id
self._session = self._new_session(self.retry_timeout_config)
self._headers['X-API-Session'] = session_id
assert len(self._hosts) == 1
self._actual_host = self._hosts[0]
self._base_url = \
self._create_base_url(self._actual_host, self._port)
# The following are set in _do_logon()) but not here:
self._session_credential = None
self._object_topic = None
self._job_topic = None
else:
# Create a logged-off state (same state as in _do_logoff())
self._session_id = None
self._session = None
self._actual_host = None
self._base_url = None
self._session_credential = None
self._object_topic = None
self._job_topic = None
self._time_stats_keeper = TimeStatsKeeper()
self._auto_updater = AutoUpdater(self)
def __repr__(self):
"""
Return a string with the state of this session, for debug purposes.
"""
headers = _headers_for_logging(self.headers)
ret = (
f"{repr_obj_id(self)} (\n"
f" _hosts={self._hosts!r},\n"
f" _userid={self._userid!r},\n"
f" _password='...',\n"
f" _verify_cert={self._verify_cert!r},\n"
f" _get_password={self._get_password!r},\n"
f" _retry_timeout_config={self._retry_timeout_config!r},\n"
f" _actual_host={self._actual_host!r},\n"
f" _base_url={self._base_url!r},\n"
f" _headers={headers!r},\n"
f" _session_id={BLANKED_OUT!r},\n"
f" _session={self._session!r}\n"
f" _object_topic={self._object_topic!r}\n"
f" _job_topic={self._job_topic!r}\n"
f" _auto_updater={self._auto_updater!r}\n"
")")
return ret
@property
def host(self):
"""
:term:`string` or list of :term:`string`: HMC host or redundant HMC
hosts to use. The first working HMC from this list will actually be
used. The working state of the HMC is determined using the
'Query API Version' operation for which no authentication is needed.
Each host will be in one of the following formats:
* a short or fully qualified DNS hostname
* a literal (= dotted) IPv4 address
* a literal IPv6 address, formatted as defined in :term:`RFC3986`
with the extensions for zone identifiers as defined in
:term:`RFC6874`, supporting ``-`` (minus) for the delimiter
before the zone ID string, as an additional choice to ``%25``
"""
if len(self._hosts) == 1:
host = self._hosts[0]
else:
host = self._hosts
return host
@property
def actual_host(self):
"""
:term:`string` or `None`: The HMC host that is actually used for this
session, if the session is in the logged-on state. `None`, if the
session is in the logged-off state.
The HMC host will be in one of the following formats:
* a short or fully qualified DNS hostname
* a literal (= dotted) IPv4 address
* a literal IPv6 address, formatted as defined in :term:`RFC3986`
with the extensions for zone identifiers as defined in
:term:`RFC6874`, supporting ``-`` (minus) for the delimiter
before the zone ID string, as an additional choice to ``%25``
"""
return self._actual_host
@property
def port(self):
"""
:term:`integer`: HMC TCP port that is used for this session.
"""
return self._port
@property
def userid(self):
"""
:term:`string`: HMC userid that is used for this session.
If `None`, only operations that do not require authentication can be
performed.
"""
return self._userid
@property
def verify_cert(self):
"""
bool or :term:`string`: Controls whether and how the client verifies
the server certificate presented by the HMC during the SSL/TLS handshake.
For details, see the same-named init parameter.
"""
return self._verify_cert
@property
def get_password(self):
"""
The password retrieval function, or `None`.
The password retrieval function must follow the interface defined in
:func:`~zhmcclient.get_password_interface`.
"""
return self._get_password
@property
def retry_timeout_config(self):
"""
:class:`~zhmcclient.RetryTimeoutConfig`: The effective retry/timeout
configuration for this session for use by any of its HMC operations,
taking into account the defaults and the session-specific overrides.
"""
return self._retry_timeout_config
@property
def base_url(self):
"""
:term:`string` or `None`: Base URL of the HMC that is actually used for
this session, if the session is in the logged-on state. `None`, if the
session is in the logged-off state.
Example:
.. code-block:: text
https://myhmc.acme.com:6794
"""
return self._base_url
@property
def headers(self):
"""
:term:`header dict`: HTTP headers that are used in requests sent
for this session.
Initially, this is the following set of headers:
.. code-block:: text
Content-type: application/json
Accept: */*
When the session is logged on to the HMC, the session token is added
to these headers:
.. code-block:: text
X-API-Session: ...
"""
return self._headers
@property
def time_stats_keeper(self):
"""
The time statistics keeper (for a usage example, see section
:ref:`Time Statistics`).
"""
return self._time_stats_keeper
@property
def session_id(self):
"""
:term:`string` or `None`: Session ID (= HMC session token) used for this
session, if the session is in the logged-on state. `None`, if the
session is in the logged-off state.
In the logged-off state, any request that requires logon will first
cause a session to be created on the HMC and will store the session ID
returned by the HMC in this property.
In the logged-on state, the session ID stored in this property will be
used for any requests to the HMC.
"""
return self._session_id
@property
def session_credential(self):
"""
:term:`string` or `None`: Session credential for this session returned
by the HMC, if the session is in the logged-on state. `None`, if the
session is in the logged-off state.
"""
return self._session_credential
@property
def session(self):
"""
:term:`string` or `None`: :class:`requests.Session` object for this
session, if the session is in the logged-on state. `None`, if the
session is in the logged-off state.
"""
return self._session
@property
def object_topic(self):
"""
:term:`string` or `None`: Name of the notification topic the HMC will
use to send object-related notification messages to this session, if
the session is in the logged-on state. `None`, if the session is in the
logged-off state.
The associated topic type is "object-notification".
"""
return self._object_topic
@property
def job_topic(self):
"""
:term:`string` or `None`: Name of the notification topic the HMC will
use to send job notification messages to this session, if the session
is in the logged-on state. `None`, if the session is in the logged-off
state.
The associated topic type is "job-notification".
"""
return self._job_topic
@property
def auto_updater(self):
"""
:class:`~zhmcclient.AutoUpdater`: Updater for
:ref:`auto-updating` of resource and manager objects.
"""
return self._auto_updater
@logged_api_call
def logon(self, verify=False):
"""
Make sure this session object is logged on to the HMC.
If `verify=False`, this method determines the logged-on state of this
session object based on whether there is a session ID set in this
session object. If a session ID is set, it is assumed to be valid and
no new session is created on the HMC. Otherwise, a new session will be
created on the HMC.
If `verify=True`, this method determines the logged-on state of this
session object in addition by performing a read operation on the HMC
that requires being logged on but no specific authorizations. If a
session ID is set and if that operation succeeds, no new session is
created on the HMC. Any failure of that read operation will be ignored.
Otherwise, a new session will be created on the HMC.
When a new session has been successfully created on the HMC, the
:attr:`session_id` attribute of this session object will be set to the
session ID returned by the HMC to put it into the logged-on state.
Any exceptions raised from this method are always related to the
creation of a new session on the HMC - any failures of the read
operation in the verification case are always ignored.
Parameters:
verify (bool): Verify the validity of an existing session ID.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ClientAuthError`
:exc:`~zhmcclient.ServerAuthError`
:exc:`~zhmcclient.ConnectionError`
"""
need_logon = False
if self._session_id is None:
need_logon = True
elif verify:
try:
self.get('/api/console', logon_required=False,
renew_session=False)
except Error:
need_logon = True
if need_logon:
self._do_logon()
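# --- Illustrative usage sketch (not part of the original module) ---------
# A sketch of the explicit logon lifecycle described above; 'session' is
# assumed to be a Session constructed with valid credentials. Explicit
# logon is optional, since any request with logon_required=True logs on
# automatically.
#
#   session.logon(verify=True)   # re-validates an existing session ID
#   assert session.is_logon()
#   # ... perform HMC operations ...
#   session.logoff()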
@logged_api_call
def logoff(self, verify=False):
# pylint: disable=unused-argument
"""
Make sure this session object is logged off from the HMC.
If a session ID is set in this session object, its session will be
deleted on the HMC. If that delete operation fails due to an invalid
session ID, that failure will be ignored. Any other failures of that
delete operation will be raised as exceptions.
When the session has been successfully deleted on the HMC, the
:attr:`session_id` attribute of this session object will be set to
`None` to put it into the logged-off state.
Parameters:
verify (bool): Deprecated: This parameter will be ignored.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ConnectionError`
"""
if self._session_id:
self._do_logoff()
@logged_api_call
def is_logon(self, verify=False):
"""
Return a boolean indicating whether this session object is logged on to
the HMC.
If `verify=False`, this method determines the logged-on state based
on whether there is a session ID set in this object. If a session ID is
set, it is assumed to be valid, and `True` is returned. Otherwise,
`False` is returned. In that case, no exception is ever raised.
If `verify=True`, this method determines the logged-on state in
addition by verifying a session ID that is set, by performing a read
operation on the HMC that requires being logged on but no specific
authorizations. If a session ID is set and if that read operation
succeeds, `True` is returned. If no session ID is set or if that read
operation fails due to an invalid session ID, `False` is returned.
Any other failures of that read operation are raised as exceptions,
because that indicates that a verification with the HMC could not be
performed.
Parameters:
verify (bool): Verify the validity of an existing session ID.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ConnectionError`
"""
if self._session_id is None:
return False
if verify:
try:
self.get('/api/console', logon_required=False,
renew_session=False)
except ServerAuthError:
return False
return True
def _do_logon(self):
"""
Log on, unconditionally. This can be used to re-logon.
This requires credentials to be provided.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ClientAuthError`
:exc:`~zhmcclient.ServerAuthError`
:exc:`~zhmcclient.ConnectionError`
"""
if self._userid is None:
raise ClientAuthError("Userid is not provided.")
# Determine working HMC for this session
self._actual_host = self._determine_actual_host()
self._base_url = self._create_base_url(self._actual_host, self._port)
if self._password is None:
if self._get_password:
self._password = \
self._get_password(self._actual_host, self._userid)
else:
raise ClientAuthError("Password is not provided.")
# Create an HMC session
logon_uri = '/api/sessions'
logon_body = {
'userid': self._userid,
'password': self._password
}
self._headers.pop('X-API-Session', None) # Just in case
self._session = self._new_session(self.retry_timeout_config)
logon_res = self.post(logon_uri, body=logon_body, logon_required=False)
self._session_id = logon_res['api-session']
self._session_credential = logon_res['session-credential']
self._headers['X-API-Session'] = self._session_id
self._object_topic = logon_res['notification-topic']
self._job_topic = logon_res['job-notification-topic']
@staticmethod
def _create_base_url(host, port):
"""
Encapsulates how the base URL of the HMC is constructed.
"""
return f"{_HMC_SCHEME}://{host}:{port}"
def _determine_actual_host(self):
"""
Determine the actual HMC host to be used.
If a single HMC host is specified, that host is used without further
verification as to whether it is available.
If more than one HMC host is specified, the first available host is
used. Availability of the HMC is determined using the
'Query API Version' operation, for which no logon is required.
If no available HMC can be found, raises the ConnectionError of the
last HMC that was tried.
"""
if len(self._hosts) == 1:
host = self._hosts[0]
HMC_LOGGER.debug("Using the only HMC specified without verifying "
"its availability: %s", host)
return host
last_exc = None
for host in self._hosts:
HMC_LOGGER.debug("Trying HMC for availability: %s", host)
self._base_url = self._create_base_url(host, self._port)
try:
self.get('/api/version', logon_required=False,
renew_session=False)
except ConnectionError as exc:
last_exc = exc
continue
HMC_LOGGER.debug("Using available HMC: %s", host)
return host
HMC_LOGGER.debug("Did not find an available HMC in: %s",
self._hosts)
raise last_exc
@staticmethod
def _new_session(retry_timeout_config):
"""
Return a new `requests.Session` object.
"""
retry = urllib3.Retry(
total=retry_timeout_config.connect_retries,
connect=retry_timeout_config.connect_retries,
read=retry_timeout_config.read_retries,
allowed_methods=retry_timeout_config.allowed_methods,
redirect=retry_timeout_config.max_redirects)
session = requests.Session()
session.mount('https://',
requests.adapters.HTTPAdapter(max_retries=retry))
session.mount('http://',
requests.adapters.HTTPAdapter(max_retries=retry))
return session
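# --- Illustrative usage sketch (not part of the original module) ---------
# A sketch of overriding the retry/timeout defaults that feed the urllib3
# Retry object above. It assumes that zhmcclient.RetryTimeoutConfig accepts
# these attribute names as keyword arguments; the names themselves are taken
# from their use in this module.
#
#   import zhmcclient
#
#   rt_config = zhmcclient.RetryTimeoutConfig(
#       connect_timeout=30, connect_retries=3,
#       read_timeout=3600, read_retries=3,
#       operation_timeout=1800)
#   session = zhmcclient.Session(
#       host='hmc1.example.com', userid='hmcuser', password='hmcpassword',
#       verify_cert=False, retry_timeout_config=rt_config)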
def _do_logoff(self):
"""
Log off, unconditionally.
This deletes the session on the HMC. If that deletion operation fails
due to an invalid session ID, that failure is ignored.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ConnectionError`
"""
session_uri = '/api/sessions/this-session'
try:
self.delete(session_uri, logon_required=False, renew_session=False)
except (ServerAuthError, ConnectionError):
# HMC shutdown or broken network causes ConnectionError.
# Invalid credentials cause ServerAuthError.
pass
self._actual_host = None
self._base_url = None
self._session_id = None
self._session = None
self._headers.pop('X-API-Session', None)
self._object_topic = None
self._job_topic = None
@staticmethod
def _log_http_request(
method, url, resource, headers=None, content=None,
content_len=None):
"""
Log the HTTP request of an HMC REST API call, at the debug level.
Parameters:
method (:term:`string`): HTTP method name in upper case, e.g. 'GET'
url (:term:`string`): HTTP URL (base URL and operation URI)
headers (iterable): HTTP headers used for the request
content (:term:`string`): HTTP body (aka content) used for the
request (byte string or unicode string)
content_len (int): Length of content in Bytes, or `None` for
determining the length from the content string
"""
content_msg = None
if content is not None:
if isinstance(content, bytes):
content = content.decode('utf-8', errors='ignore')
assert isinstance(content, str)
if content_len is None:
content_len = len(content) # may change after JSON conversion
try:
content_dict = json2dict(content)
except ValueError:
# If the content is not JSON, we assume it does not contain
# structured data such as a password or session IDs.
pass
else:
if 'password' in content_dict:
content_dict['password'] = BLANKED_OUT
content = dict2json(content_dict)
trunc = 30000
if content_len > trunc:
content_label = f"content(first {trunc} B of {content_len} B)"
content_msg = content[0:trunc] + '...(truncated)'
else:
content_label = f'content({content_len} B)'
content_msg = content
else:
content_label = 'content'
content_msg = content
if resource:
names = []
res_class = resource.manager.class_name
while resource:
# Using resource.name gets into an infinite recursion when
# the resource name is not present, due to pulling the
# properties in that case. We take the careful approach.
name_prop = resource.manager.name_prop
name = resource.properties.get(name_prop, '<unknown>')
names.insert(0, name)
resource = resource.manager.parent
res_str = f" ({res_class} {'.'.join(names)})"
else:
res_str = ""
HMC_LOGGER.debug("Request: %s %s%s, headers: %r, %s: %r",
method, url, res_str, _headers_for_logging(headers),
content_label, content_msg)
@staticmethod
def _log_http_response(
method, url, resource, status, headers=None, content=None):
"""
Log the HTTP response of an HMC REST API call, at the debug level.
Parameters:
method (:term:`string`): HTTP method name in upper case, e.g. 'GET'
url (:term:`string`): HTTP URL (base URL and operation URI)
status (integer): HTTP status code
headers (iterable): HTTP headers returned in the response
content (:term:`string`): HTTP body (aka content) returned in the
response (byte string or unicode string)
"""
if content is not None:
if isinstance(content, bytes):
content = content.decode('utf-8')
assert isinstance(content, str)
content_len = len(content) # may change after JSON conversion
try:
content_dict = json2dict(content)
except ValueError:
# If the content is not JSON (e.g. response from metrics
# context retrieval), we assume it does not contain structured
# data such as a password or session IDs.
pass
else:
if 'request-headers' in content_dict:
headers_dict = content_dict['request-headers']
if 'x-api-session' in headers_dict:
headers_dict['x-api-session'] = BLANKED_OUT
if 'api-session' in content_dict:
content_dict['api-session'] = BLANKED_OUT
if 'session-credential' in content_dict:
content_dict['session-credential'] = BLANKED_OUT
content = dict2json(content_dict)
if status >= 400:
content_label = 'content'
content_msg = content
else:
trunc = 30000
if content_len > trunc:
content_label = \
f"content(first {trunc} B of {content_len} B)"
content_msg = content[0:trunc] + '...(truncated)'
else:
content_label = f'content({len(content)} B)'
content_msg = content
else:
content_label = 'content'
content_msg = content
if resource:
names = []
res_class = resource.manager.class_name
while resource:
# Using resource.name gets into an infinite recursion when
# the resource name is not present, due to pulling the
# properties in that case. We take the careful approach.
name_prop = resource.manager.name_prop
name = resource.properties.get(name_prop, '<unknown>')
names.insert(0, name)
resource = resource.manager.parent
res_str = f" ({res_class} {'.'.join(names)})"
else:
res_str = ""
HMC_LOGGER.debug("Respons: %s %s%s, status: %s, "
"headers: %r, %s: %r",
method, url, res_str, status,
_headers_for_logging(headers),
content_label, content_msg)
@logged_api_call
def get(self, uri, resource=None, logon_required=True, renew_session=True):
"""
Perform the HTTP GET method against the resource identified by a URI.
A set of standard HTTP headers is automatically part of the request.
If the HMC session token is expired, this method re-logs on and retries
the operation.
Parameters:
uri (:term:`string`):
Relative URI path of the resource, e.g. "/api/session".
This URI is relative to the base URL of the session (see
the :attr:`~zhmcclient.Session.base_url` property).
Must not be `None`.
logon_required (bool):
Boolean indicating whether the operation requires that the session
is logged on to the HMC. For example, the API version retrieval
operation does not require that.
renew_session (bool):
Boolean indicating whether the session should be renewed in case
it is expired.
Returns:
:term:`json object` with the operation result.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ClientAuthError`
:exc:`~zhmcclient.ServerAuthError`
:exc:`~zhmcclient.ConnectionError`
"""
if logon_required:
self.logon()
elif self._base_url is None:
self._actual_host = self._determine_actual_host()
self._base_url = \
self._create_base_url(self._actual_host, self._port)
url = self._base_url + uri
self._log_http_request('GET', url, resource=resource,
headers=self.headers)
stats = self.time_stats_keeper.get_stats('get ' + uri)
stats.begin()
req = self._session or requests
req_timeout = (self.retry_timeout_config.connect_timeout,
self.retry_timeout_config.read_timeout)
try:
result = req.get(url, headers=self.headers, verify=self.verify_cert,
timeout=req_timeout)
# Note: The requests method may raise OSError/IOError in case of
# HMC certificate validation issues (e.g. incorrect cert path)
except (requests.exceptions.RequestException, OSError) as exc:
_handle_request_exc(exc, self.retry_timeout_config)
finally:
stats.end()
self._log_http_response('GET', url, resource=resource,
status=result.status_code,
headers=result.headers,
content=result.content)
if result.status_code == 200:
return _result_object(result)
if result.status_code == 403:
result_object = _result_object(result)
reason = result_object.get('reason', None)
message = result_object.get('message', None)
HMC_LOGGER.debug("Received HTTP status 403.%d on GET %s: %s",
reason, uri, message)
if reason in (4, 5):
# 403.4: No session ID was provided
# 403.5: Session ID was invalid
if renew_session:
self._do_logon()
return self.get(
uri, resource=resource, logon_required=False,
renew_session=False)
if reason == 1:
# Login user's authentication is fine; this is an authorization
# issue, so we don't raise ServerAuthError.
raise HTTPError(result_object)
msg = result_object.get('message', None)
raise ServerAuthError(
"HTTP authentication failed with "
f"{result.status_code},{reason}: {msg}",
HTTPError(result_object))
result_object = _result_object(result)
raise HTTPError(result_object)
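# --- Illustrative usage sketch (not part of the original module) ---------
# A sketch of a GET call at this level of the API, using a URI that this
# module itself uses and that does not require logon. The field names follow
# the 'Query API Version' operation in the HMC API book.
#
#   version_info = session.get('/api/version', logon_required=False)
#   print(version_info['api-major-version'], version_info['api-minor-version'])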
@logged_api_call
def post(self, uri, resource=None, body=None, logon_required=True,
wait_for_completion=False, operation_timeout=None,
renew_session=True):
"""
Perform the HTTP POST method against the resource identified by a URI,
using a provided request body.
A set of standard HTTP headers is automatically part of the request.
HMC operations using HTTP POST are either synchronous or asynchronous.
Asynchronous operations return the URI of an asynchronously executing
job that can be queried for status and result.
Examples for synchronous operations:
* With no result: "Logon", "Update CPC Properties"
* With a result: "Create Partition"
Examples for asynchronous operations:
* With no result: "Start Partition"
The `wait_for_completion` parameter of this method can be used to deal
with asynchronous HMC operations in a synchronous way.
If executing the operation reveals that the HMC session token is
expired, this method re-logs on and retries the operation.
The timeout and retry behavior of the underlying HTTP request is determined
by the retry/timeout configuration of this session (see the
:attr:`~zhmcclient.Session.retry_timeout_config` property).
Parameters:
uri (:term:`string`):
Relative URI path of the resource, e.g. "/api/session".
This URI is relative to the base URL of the session (see the
:attr:`~zhmcclient.Session.base_url` property).
Must not be `None`.
body (:term:`json object` or :term:`string` or file-like object):
The HTTP request body (payload).
If a JSON object (=dict) is provided, it will be serialized into
a UTF-8 encoded binary string.
If a Unicode string is provided, it will be encoded into a UTF-8
encoded binary string.
If a binary string is provided, it will be used unchanged.
If a file-like object is provided, it must return binary strings,
i.e. the file must have been opened in binary mode.
`None` means the same as an empty dictionary, namely that no HTTP
body is included in the request.
logon_required (bool):
Boolean indicating whether the operation requires that the session
is logged on to the HMC. For example, the "Logon" operation does
not require that.
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation.
A value of `True` will cause an additional entry in the time
statistics to be created that represents the entire asynchronous
operation including the waiting for its completion.
That time statistics entry will have a URI that is the targeted
URI, appended with "+completion".
For synchronous HMC operations, this parameter has no effect on
the operation execution or on the return value of this method, but
it should still be set (or defaulted) to `False` in order to avoid
the additional entry in the time statistics.
operation_timeout (:term:`number`):
Timeout in seconds, when waiting for completion of an asynchronous
operation. The special value 0 means that no timeout is set. `None`
means that the default async operation timeout of the session is
used.
For `wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised when the timeout
expires.
For `wait_for_completion=False`, this parameter has no effect.
renew_session (bool):
Boolean indicating whether the session should be renewed in case
it is expired.
Returns:
: A :term:`json object` or `None` or a :class:`~zhmcclient.Job`
object, as follows:
* For synchronous HMC operations, and for asynchronous HMC
operations with `wait_for_completion=True`:
If this method returns, the HMC operation has completed
successfully (otherwise, an exception is raised).
For asynchronous HMC operations, the associated job has been
deleted.
The return value is the result of the HMC operation as a
:term:`json object`, or `None` if the operation has no result.
See the section in the :term:`HMC API` book about the specific
HMC operation for a description of the members of the returned
JSON object.
* For asynchronous HMC operations with `wait_for_completion=False`:
If this method returns, the asynchronous execution of the HMC
operation has been started successfully as a job on the HMC (if
the operation could not be started, an exception is raised).
The return value is a :class:`~zhmcclient.Job` object
representing the job on the HMC.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ClientAuthError`
:exc:`~zhmcclient.ServerAuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the asynchronous operation.
:exc:`TypeError`: Body has invalid type.
"""
if logon_required:
self.logon()
elif self._base_url is None:
self._actual_host = self._determine_actual_host()
self._base_url = \
self._create_base_url(self._actual_host, self._port)
url = self._base_url + uri
headers = self.headers.copy() # Standard headers
log_len = None
if body is None:
data = None
log_data = None
elif isinstance(body, dict):
data = json.dumps(body)
# Produces unicode string on py3, and unicode or byte string on py2.
# Content-type is already set to 'application/json' in standard
# headers.
if isinstance(data, str):
log_data = data
data = data.encode('utf-8')
else:
log_data = data
elif isinstance(body, str):
data = body.encode('utf-8')
log_data = body
headers['Content-type'] = 'application/octet-stream'
elif isinstance(body, bytes):
data = body
log_data = body
headers['Content-type'] = 'application/octet-stream'
elif isinstance(body, Iterable):
# File-like objects, e.g. io.BufferedReader or io.TextIOWrapper
# returned from open() or io.open().
data = body
try:
mode = body.mode
except AttributeError:
mode = 'unknown'
log_data = f"<file-like object with mode {mode}>"
log_len = -1
headers['Content-type'] = 'application/octet-stream'
else:
raise TypeError(f"Body has invalid type: {type(body)}")
self._log_http_request('POST', url, resource=resource, headers=headers,
content=log_data, content_len=log_len)
req = self._session or requests
req_timeout = (self.retry_timeout_config.connect_timeout,
self.retry_timeout_config.read_timeout)
if wait_for_completion:
stats_total = self.time_stats_keeper.get_stats(
'post ' + uri + '+completion')
stats_total.begin()
try:
stats = self.time_stats_keeper.get_stats('post ' + uri)
stats.begin()
try:
if data is None:
result = req.post(url, headers=headers,
verify=self.verify_cert,
timeout=req_timeout)
else:
result = req.post(url, data=data, headers=headers,
verify=self.verify_cert,
timeout=req_timeout)
# Note: The requests method may raise OSError/IOError in case of
# HMC certificate validation issues (e.g. incorrect cert path)
except (requests.exceptions.RequestException, OSError) \
as exc:
_handle_request_exc(exc, self.retry_timeout_config)
finally:
stats.end()
self._log_http_response('POST', url, resource=resource,
status=result.status_code,
headers=result.headers,
content=result.content)
if result.status_code in (200, 201):
return _result_object(result)
if result.status_code == 204:
# No content
return None
if result.status_code == 202:
if result.content == b'':
# Some operations (e.g. "Restart Console",
# "Shutdown Console" or "Cancel Job") return 202
# with no response content.
return None
# This is the most common case to return 202: An
# asynchronous job has been started.
result_object = _result_object(result)
job_uri = result_object['job-uri']
job = Job(self, job_uri, 'POST', uri)
if wait_for_completion:
return job.wait_for_completion(operation_timeout)
return job
if result.status_code == 403:
result_object = _result_object(result)
reason = result_object.get('reason', None)
message = result_object.get('message', None)
HMC_LOGGER.debug("Received HTTP status 403.%d on POST %s: %s",
reason, uri, message)
if reason in (4, 5):
# 403.4: No session ID was provided
# 403.5: Session ID was invalid
if renew_session:
self._do_logon()
return self.post(
uri, resource=resource, body=body,
logon_required=False, renew_session=False,
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
if reason == 1:
# Login user's authentication is fine; this is an
# authorization issue, so we don't raise ServerAuthError.
raise HTTPError(result_object)
msg = result_object.get('message', None)
raise ServerAuthError(
"HTTP authentication failed with "
f"{result.status_code},{reason}: {msg}",
HTTPError(result_object))
result_object = _result_object(result)
raise HTTPError(result_object)
finally:
if wait_for_completion:
stats_total.end()
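# --- Illustrative usage sketch (not part of the original module) ---------
# A sketch of the two ways to drive an asynchronous HMC operation through
# this method, using the example operation URI from the Job class below.
#
#   # Wait synchronously for the asynchronous operation to complete:
#   session.post('/api/partitions/{partition-id}/stop',
#                wait_for_completion=True, operation_timeout=600)
#
#   # Or receive the Job object and wait (or poll) yourself:
#   job = session.post('/api/partitions/{partition-id}/stop',
#                      wait_for_completion=False)
#   result = job.wait_for_completion(operation_timeout=600)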
@logged_api_call
def delete(
self, uri, resource=None, logon_required=True, renew_session=True):
"""
Perform the HTTP DELETE method against the resource identified by a
URI.
A set of standard HTTP headers is automatically part of the request.
If the HMC session token is expired, this method re-logs on and retries
the operation.
Parameters:
uri (:term:`string`):
Relative URI path of the resource, e.g.
"/api/session/{session-id}".
This URI is relative to the base URL of the session (see
the :attr:`~zhmcclient.Session.base_url` property).
Must not be `None`.
logon_required (bool):
Boolean indicating whether the operation requires that the session
is logged on to the HMC. For example, for the logoff operation, it
does not make sense to first log on.
renew_session (bool):
Boolean indicating whether the session should be renewed in case
it is expired. For example, for the logoff operation, it does not
make sense to do that.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ClientAuthError`
:exc:`~zhmcclient.ServerAuthError`
:exc:`~zhmcclient.ConnectionError`
"""
if logon_required:
self.logon()
elif self._base_url is None:
self._actual_host = self._determine_actual_host()
self._base_url = \
self._create_base_url(self._actual_host, self._port)
url = self._base_url + uri
self._log_http_request('DELETE', url, resource=resource,
headers=self.headers)
stats = self.time_stats_keeper.get_stats('delete ' + uri)
stats.begin()
req = self._session or requests
req_timeout = (self.retry_timeout_config.connect_timeout,
self.retry_timeout_config.read_timeout)
try:
result = req.delete(url, headers=self.headers,
verify=self.verify_cert, timeout=req_timeout)
# Note: The requests method may raise OSError/IOError in case of
# HMC certificate validation issues (e.g. incorrect cert path)
except (requests.exceptions.RequestException, OSError) as exc:
_handle_request_exc(exc, self.retry_timeout_config)
finally:
stats.end()
self._log_http_response('DELETE', url, resource=resource,
status=result.status_code,
headers=result.headers,
content=result.content)
if result.status_code in (200, 204):
return
if result.status_code == 403:
result_object = _result_object(result)
reason = result_object.get('reason', None)
message = result_object.get('message', None)
HMC_LOGGER.debug("Received HTTP status 403.%d on DELETE %s: %s",
reason, uri, message)
if reason in (4, 5):
# 403.4: No session ID was provided
# 403.5: Session ID was invalid
if renew_session:
self._do_logon()
self.delete(uri, resource=resource, logon_required=False,
renew_session=False)
return
if reason == 1:
# Login user's authentication is fine; this is an authorization
# issue, so we don't raise ServerAuthError.
raise HTTPError(result_object)
msg = result_object.get('message', None)
raise ServerAuthError(
"HTTP authentication failed with "
f"{result.status_code},{reason}: {msg}",
HTTPError(result_object))
result_object = _result_object(result)
raise HTTPError(result_object)
@logged_api_call
def get_notification_topics(self):
"""
The 'Get Notification Topics' operation returns a structure that
describes the JMS notification topics associated with this session.
Returns:
: A list with one item for each notification topic. Each item is a
dictionary with the following keys:
* ``"topic-type"`` (string): Topic type, e.g. "job-notification".
* ``"topic-name"`` (string): Topic name; can be used for
subscriptions.
* ``"object-uri"`` (string): When topic-type is
"os-message-notification", this item is the canonical URI path
of the Partition for which this topic exists.
This field does not exist for the other topic types.
* ``"include-refresh-messages"`` (bool): When the topic-type is
"os-message-notification", this item indicates whether refresh
operating system messages will be sent on this topic.
"""
topics_uri = '/api/sessions/operations/get-notification-topics'
response = self.get(topics_uri)
return response['topics']
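# --- Illustrative usage sketch (not part of the original module) ---------
# A sketch of picking the job notification topic out of the returned list,
# e.g. for use with a JMS/STOMP notification receiver.
#
#   topics = session.get_notification_topics()
#   job_topics = [t['topic-name'] for t in topics
#                 if t['topic-type'] == 'job-notification']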
def auto_update_subscribed(self):
"""
Return whether this session is currently subscribed for
:ref:`auto-updating`.
Return:
bool: Indicates whether session is subscribed.
"""
return self._auto_updater.is_open()
@logged_api_call
def subscribe_auto_update(self):
"""
Subscribe this session for :ref:`auto-updating`, if not currently
subscribed.
When not yet subscribed, the session is also logged on.
When subscribed, object notifications will be sent by the HMC as
resource objects on the HMC change their properties or come or go.
These object notifications will be received by the client and will then
update the properties of any Python resource objects that are enabled
for auto-updating.
This method is automatically called by
:meth:`~zhmcclient.BaseResource.enable_auto_update` and thus does not
need to be called by the user.
"""
if not self._auto_updater.is_open():
self._auto_updater.open()
@logged_api_call
def unsubscribe_auto_update(self):
"""
Unsubscribe this session from :ref:`auto-updating`, if
currently subscribed.
When unsubscribed, object notifications are no longer sent by the HMC.
This method is automatically called by
:meth:`~zhmcclient.BaseResource.disable_auto_update` and thus does not
need to be called by the user.
"""
if self._auto_updater.is_open():
self._auto_updater.close()
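# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal sketch, assuming 'session' is a Session constructed with valid
# credentials, of pairing the auto-update subscription methods defined above.
# Normally these calls happen implicitly via
# BaseResource.enable_auto_update() / disable_auto_update().
def _example_auto_update_usage(session):
    """Subscribe the session for auto-updating, do some work, unsubscribe."""
    if not session.auto_update_subscribed():
        session.subscribe_auto_update()
    try:
        pass  # work with resource objects that have auto-update enabled
    finally:
        session.unsubscribe_auto_update()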
class Job:
"""
A job on the HMC that performs an asynchronous HMC operation.
This class supports checking the job for completion, and waiting for job
completion.
"""
def __init__(self, session, uri, op_method, op_uri):
"""
Parameters:
session (:class:`~zhmcclient.Session`):
Session with the HMC.
Must not be `None`.
uri (:term:`string`):
Canonical URI of the job on the HMC.
Must not be `None`.
Example: ``"/api/jobs/{job-id}"``
op_method (:term:`string`):
Name of the HTTP method of the operation that is executing
asynchronously on the HMC.
Must not be `None`.
Example: ``"POST"``
op_uri (:term:`string`):
Canonical URI of the operation that is executing asynchronously on
the HMC.
Must not be `None`.
Example: ``"/api/partitions/{partition-id}/stop"``
"""
self._session = session
self._uri = uri
self._op_method = op_method
self._op_uri = op_uri
@property
def session(self):
"""
:class:`~zhmcclient.Session`: Session with the HMC.
"""
return self._session
@property
def uri(self):
"""
:term:`string`: Canonical URI of the job on the HMC.
Example: ``"/api/jobs/{job-id}"``
"""
return self._uri
@property
def op_method(self):
"""
:term:`string`: Name of the HTTP method of the operation that is
executing asynchronously on the HMC.
Example: ``"POST"``
"""
return self._op_method
@property
def op_uri(self):
"""
:term:`string`: Canonical URI of the operation that is executing
asynchronously on the HMC.
Example: ``"/api/partitions/{partition-id}/stop"``
"""
return self._op_uri
@logged_api_call
def query_status(self):
"""
Get the current status of this job, and if completed also the
operation results.
This method performs the "Query Job Status" operation on the job.
This is a low level operation, consider using
:meth:`~zhmcclient.Job.check_for_completion` or
:meth:`~zhmcclient.Job.wait_for_completion` instead.
If the job no longer exists, :exc:`~zhmcclient.HTTPError` is raised
with status code 404 and reason code 1.
Returns:
tuple(job_status, op_status, op_reason, op_result): With the following
items:
* job_status(string): Job status, one of:
- "running" - indicates that the job was found and it has not ended
at the time of the query.
- "cancel-pending" - indicates that the job was found and it has
not ended but cancellation has been requested.
- "canceled" - indicates that the job's normal course of execution
was interrupted by a cancel request, and the job has now ended.
- "complete" - indicates that the job was found and has completed
the operation, and the job has now ended.
* op_status(int): HTTP status code of the operation performed by
the job. Will be `None` if the job has not ended.
* op_reason(int): HTTP reason code of the operation performed by
the job. Will be `None` if the job has not ended.
* op_result(dict): Result of the operation performed by
the job, as described in the zhmcclient method that performed
the operation. Will be `None` if the job has not ended.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ClientAuthError`
:exc:`~zhmcclient.ServerAuthError`
:exc:`~zhmcclient.ConnectionError`
"""
try:
result = self.session.get(self.uri)
except Error as exc:
HMC_LOGGER.debug("Request: GET %s failed with %s: %s",
self.uri, exc.__class__.__name__, exc)
raise
job_status = result['status']
op_status = result.get('job-status-code', None)
op_reason = result.get('job-reason-code', None)
op_result = result.get('job-results', None)
return job_status, op_status, op_reason, op_result
@logged_api_call
def delete(self):
"""
Delete this ended job on the HMC.
This method performs the "Delete Completed Job Status" operation on the
job.
This is a low level operation, consider using
:meth:`~zhmcclient.Job.check_for_completion` or
:meth:`~zhmcclient.Job.wait_for_completion` instead, which delete the
ended job.
If the job has not ended (i.e. its `status` property is not "canceled"
or "complete"), :exc:`~zhmcclient.HTTPError` is raised with status code
409 and reason code 40.
If the job no longer exists, :exc:`~zhmcclient.HTTPError` is raised
with status code 404 and reason code 1.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ClientAuthError`
:exc:`~zhmcclient.ServerAuthError`
:exc:`~zhmcclient.ConnectionError`
"""
try:
self.session.delete(self.uri)
except Error as exc:
HMC_LOGGER.debug("Request: DELETE %s failed with %s: %s",
self.uri, exc.__class__.__name__, exc)
raise
@logged_api_call
def check_for_completion(self):
"""
Check once for completion of the job and return completion status and
result if it has completed.
If the job completed in error, an :exc:`~zhmcclient.HTTPError`
exception is raised.
Returns:
: A tuple (status, result) with:
* status (:term:`string`): Completion status of the job, as
returned in the ``status`` field of the response body of the
"Query Job Status" HMC operation, as follows:
* ``"complete"``: Job completed (successfully).
* any other value: Job is not yet complete.
* result (:term:`json object` or `None`): `None` for incomplete
jobs. For completed jobs, the result of the original asynchronous
operation that was performed by the job, from the ``job-results``
field of the response body of the "Query Job Status" HMC
operation. That result is a :term:`json object` as described
for the asynchronous operation, or `None` if the operation has no
result.
Raises:
:exc:`~zhmcclient.HTTPError`: The job completed in error, or the job
status cannot be retrieved, or the job cannot be deleted.
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ClientAuthError`
:exc:`~zhmcclient.ServerAuthError`
:exc:`~zhmcclient.ConnectionError`
"""
try:
job_result_obj = self.session.get(self.uri)
except Error as exc:
HMC_LOGGER.debug("Request: GET %s failed with %s: %s",
self.uri, exc.__class__.__name__, exc)
raise
job_status = job_result_obj['status']
if job_status == 'complete':
self.session.delete(self.uri)
op_status_code = job_result_obj['job-status-code']
if op_status_code in (200, 201):
op_result_obj = job_result_obj.get('job-results', None)
elif op_status_code == 204:
# No content
op_result_obj = None
else:
error_result_obj = job_result_obj.get('job-results', None)
if not error_result_obj:
message = None
elif 'message' in error_result_obj:
message = error_result_obj['message']
elif 'error' in error_result_obj:
message = error_result_obj['error']
else:
message = None
error_obj = {
'http-status': op_status_code,
'reason': job_result_obj['job-reason-code'],
'message': message,
'request-method': self.op_method,
'request-uri': self.op_uri,
}
raise HTTPError(error_obj)
else:
op_result_obj = None
return job_status, op_result_obj
@logged_api_call
def wait_for_completion(self, operation_timeout=None):
"""
Wait for completion of the job, then delete the job on the HMC and
return the result of the original asynchronous HMC operation, if it
completed successfully.
If the job completed in error, an :exc:`~zhmcclient.HTTPError`
exception is raised.
Parameters:
operation_timeout (:term:`number`):
Timeout in seconds, when waiting for completion of the job. The
special value 0 means that no timeout is set. `None` means that the
default async operation timeout of the session is used.
If the timeout expires, a :exc:`~zhmcclient.OperationTimeout`
is raised.
This method gives completion of the job priority over strictly
achieving the timeout. This may cause a slightly longer duration of
the method than prescribed by the timeout.
Returns:
:term:`json object` or `None`:
The result of the original asynchronous operation that was
performed by the job, from the ``job-results`` field of the
response body of the "Query Job Status" HMC operation. That result
is a :term:`json object` as described for the asynchronous
operation, or `None` if the operation has no result.
Raises:
:exc:`~zhmcclient.HTTPError`: The job completed in error, or the job
status cannot be retrieved, or the job cannot be deleted.
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ClientAuthError`
:exc:`~zhmcclient.ServerAuthError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for job completion.
"""
if operation_timeout is None:
operation_timeout = \
self.session.retry_timeout_config.operation_timeout
if operation_timeout > 0:
start_time = time.time()
while True:
try:
job_status, op_result_obj = self.check_for_completion()
except ConnectionError:
HMC_LOGGER.debug("Retrying after ConnectionError while waiting"
" for completion of job %s. This could be "
"because HMC is restarting.", self.uri)
job_status = None
# We give completion of status priority over strictly achieving
# the timeout, so we check status first. This may cause a longer
# duration of the method than prescribed by the timeout.
if job_status == 'complete':
return op_result_obj
if operation_timeout > 0:
current_time = time.time()
if current_time > start_time + operation_timeout:
raise OperationTimeout(
f"Waiting for completion of job {self.uri} timed out "
f"(operation timeout: {operation_timeout} s)",
operation_timeout)
time.sleep(10) # Avoid hot spin loop
@logged_api_call
def cancel(self):
"""
Attempt to cancel this job.
This method performs the "Cancel Job" operation on the job.
The specific nature of the job and its current state of execution can
affect the success of the cancellation.
Not all jobs support cancellation; this is described in each zhmcclient
method that can return a job.
If the job exists, supports cancellation and has not yet completed (i.e.
its `status` property is "running"), the cancellation is made pending
for the job and its `status` property is changed to "cancel-pending".
If the operation performed by the job does not support cancellation,
:exc:`~zhmcclient.HTTPError` is raised with status code 404 and reason
code 4.
If the job supports cancellation and exists, but already has a
cancellation request pending (i.e. its `status` property is
"cancel-pending"), :exc:`~zhmcclient.HTTPError` is raised with status
409 and reason code 42.
If the job supports cancellation and exists, but already has ended
(i.e. its `status` property is "complete" or "canceled"),
:exc:`~zhmcclient.HTTPError` is raised with status code 409 and reason
code 41.
If the job supports cancellation but no longer exists,
:exc:`~zhmcclient.HTTPError` is raised with status code 404 and reason
code 1.
Raises:
:exc:`~zhmcclient.HTTPError`: The job cancellation attempt failed.
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ClientAuthError`
:exc:`~zhmcclient.ServerAuthError`
:exc:`~zhmcclient.ConnectionError`
"""
uri = f'{self.uri}/operations/cancel'
try:
self.session.post(uri)
except Error as exc:
HMC_LOGGER.debug("Request: POST %s failed with %s: %s",
uri, exc.__class__.__name__, exc)
raise
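# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal sketch of polling a Job obtained from
# Session.post(..., wait_for_completion=False), using only the Job methods
# defined above. The poll interval and maximum number of polls are arbitrary
# example values.
def _example_job_polling(job, max_polls=30, poll_interval=10):
    """Poll 'job' until it completes; cancel it if it takes too long."""
    for _ in range(max_polls):
        job_status, op_result = job.check_for_completion()
        if job_status == 'complete':
            # check_for_completion() has already deleted the completed job
            return op_result
        time.sleep(poll_interval)
    job.cancel()
    return None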
def _text_repr(text, max_len=1000):
"""
Return the input text as a Python string representation (i.e. using repr())
that is limited to a maximum length.
"""
if text is None:
text_repr = 'None'
elif len(text) > max_len:
text_repr = repr(text[0:max_len]) + '...'
else:
text_repr = repr(text)
return text_repr
def _result_object(result):
"""
Return the JSON payload in the HTTP response as a Python dict.
Parameters:
result (requests.Response): HTTP response object.
Raises:
:exc:`~zhmcclient.ParseError`: Error parsing the returned JSON.
"""
content_type = result.headers.get('content-type', None)
if content_type is None or content_type.startswith('application/json'):
# This function is only called when there is content expected.
# Therefore, a response without content will result in a ParseError.
try:
return result.json(object_pairs_hook=OrderedDict)
except ValueError as exc:
new_exc = ParseError(
f"JSON parse error in HTTP response: {exc.args[0]}. "
f"HTTP request: {result.request.method} {result.request.url}. "
f"Response status {result.status_code}. "
f"Response content-type: {content_type!r}. "
f"Content (max.1000, decoded using {result.encoding}): "
f"{_text_repr(result.text, 1000)}")
new_exc.__cause__ = None
raise new_exc # zhmcclient.ParseError
if content_type.startswith('text/html'):
# We are in some error situation. The HMC returns HTML content
# for some 5xx status codes. We try to deal with it somehow,
# but we are not going as far as real HTML parsing.
m = re.search(r'charset=([^;,]+)', content_type)
if m:
encoding = m.group(1) # e.g. RFC "ISO-8859-1"
else:
encoding = 'utf-8'
try:
html_uni = result.content.decode(encoding)
except LookupError:
html_uni = result.content.decode()
# We convert to one line to be regexp-friendly.
html_oneline = html_uni.replace('\r\n', '\\n').replace('\r', '\\n').\
replace('\n', '\\n')
# Check for some well-known errors:
if re.search(r'javax\.servlet\.ServletException: '
r'Web Services are not enabled\.', html_oneline):
html_title = "Console Configuration Error"
html_details = "Web Services API is not enabled on the HMC."
html_reason = HTML_REASON_WEB_SERVICES_DISABLED
else:
m = re.search(
r'<title>([^<]*)</title>.*'
r'<h2>Details:</h2>(.*)(<hr size="1" noshade>)?</body>',
html_oneline)
if m:
html_title = m.group(1)
# Spend a reasonable effort to make the HTML readable:
html_details = m.group(2).replace('<p>', '\\n').\
replace('<br>', '\\n').replace('\\n\\n', '\\n').strip()
else:
html_title = "Console Internal Error"
html_details = f"Response body: {html_uni!r}"
html_reason = HTML_REASON_OTHER
message = f"{html_title}: {html_details}"
# We create a minimal JSON error object (to the extent we use it
# when processing it):
result_obj = {
'http-status': result.status_code,
'reason': html_reason,
'message': message,
'request-uri': result.request.url,
'request-method': result.request.method,
}
return result_obj
if content_type.startswith('application/vnd.ibm-z-zmanager-metrics'):
content_bytes = result.content
assert isinstance(content_bytes, bytes)
return content_bytes.decode('utf-8') # as a unicode object
raise ParseError(
f"Unknown content type in HTTP response: {content_type}. "
f"HTTP request: {result.request.method} {result.request.url}. "
f"Response status {result.status_code}. "
f"Content (max.1000, decoded using {result.encoding}): "
f"{_text_repr(result.text, 1000)}")
def json2dict(json_str):
"""
Convert a JSON string into a dict.
Parameters:
json_str (string): Unicode or binary string in JSON format.
Returns:
dict: JSON string converted to a dict.
Raises:
ValueError: Cannot parse JSON string
"""
json_dict = json.loads(json_str) # May raise ValueError
return json_dict
def dict2json(json_dict):
"""
Convert a dict into a JSON string.
Parameters:
json_dict (dict): The dict.
Returns:
str: Dict converted to a JSON string.
"""
json_str = json.dumps(json_dict)
return json_str
|
GHSA-p57h-3cmc-xpjq
|
zhmcclient/_user.py
|
@@ -149,7 +149,7 @@ def list(self, full_properties=False, filter_args=None):
return self._list_with_operation(
list_uri, result_prop, full_properties, filter_args, None)
- @logged_api_call
+ @logged_api_call(blanked_properties=['password'], properties_pos=1)
def create(self, properties):
"""
Create a new User in this HMC.
@@ -256,7 +256,7 @@ def delete(self):
self.get_properties_local(self.manager._name_prop, None))
self.cease_existence_local()
- @logged_api_call
+ @logged_api_call(blanked_properties=['password'], properties_pos=1)
def update_properties(self, properties):
"""
Update writeable properties of this User.
|
# Copyright 2017,2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A :term:`User` resource represents a user configured in the HMC.
"""
import copy
from ._manager import BaseManager
from ._resource import BaseResource
from ._logging import logged_api_call
from ._utils import RC_USER
__all__ = ['UserManager', 'User']
class UserManager(BaseManager):
"""
Manager providing access to the :term:`User` resources of a HMC.
Derived from :class:`~zhmcclient.BaseManager`; see there for common methods
and attributes.
Objects of this class are not directly created by the user; they are
accessible via the following instance variable of a
:class:`~zhmcclient.Console` object:
* :attr:`zhmcclient.Console.users`
HMC/SE version requirements:
* HMC version >= 2.13.0
"""
def __init__(self, console):
# This function should not go into the docs.
# Parameters:
# console (:class:`~zhmcclient.Console`):
# Console object representing the HMC.
# Resource properties that are supported as filter query parameters.
# If the support for a resource property changes within the set of HMC
# versions that support this type of resource, this list must be set up
# for the version of the HMC this session is connected to.
# Because this resource has case-insensitive names, this list must
# contain the name property.
query_props = [
'name',
'type',
]
super().__init__(
resource_class=User,
class_name=RC_USER,
session=console.manager.session,
parent=console,
base_uri='/api/users',
oid_prop='object-id',
uri_prop='object-uri',
name_prop='name',
query_props=query_props,
case_insensitive_names=True)
@property
def console(self):
"""
:class:`~zhmcclient.Console`: :term:`Console` defining the scope for
this manager.
"""
return self._parent
@logged_api_call
def list(self, full_properties=False, filter_args=None):
"""
List the :term:`User` resources representing the users defined in this
HMC.
Any resource property may be specified in a filter argument. For
details about filter arguments, see :ref:`Filtering`.
The listing of resources is handled in an optimized way:
* If this manager is enabled for :ref:`auto-updating`, a locally
maintained resource list is used (which is automatically updated via
inventory notifications from the HMC) and the provided filter
arguments are applied.
* Otherwise, if the filter arguments specify the resource name as a
single filter argument with a straight match string (i.e. without
regular expressions), an optimized lookup is performed based on a
locally maintained name-URI cache.
* Otherwise, the HMC List operation is performed with the subset of the
provided filter arguments that can be handled on the HMC side and the
remaining filter arguments are applied on the client side on the list
result.
HMC/SE version requirements:
* HMC version >= 2.13.0
Authorization requirements:
* User-related-access permission to the User object included in the
result, or, depending on the type of User object, task permission to
the "Manage Users" task or the "Manage User Templates" task.
Parameters:
full_properties (bool):
Controls whether the full set of resource properties should be
retrieved, vs. only the short set as returned by the list
operation.
filter_args (dict):
Filter arguments that narrow the list of returned resources to
those that match the specified filter arguments. For details, see
:ref:`Filtering`.
`None` causes no filtering to happen, i.e. all resources are
returned.
Returns:
: A list of :class:`~zhmcclient.User` objects.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
result_prop = 'users'
list_uri = f'{self.console.uri}/users'
return self._list_with_operation(
list_uri, result_prop, full_properties, filter_args, None)
@logged_api_call
def create(self, properties):
"""
Create a new User in this HMC.
HMC/SE version requirements:
* HMC version >= 2.13.0
Authorization requirements:
* Task permission to the "Manage Users" task to create a standard user
or the "Manage User Templates" task to create a template user.
Parameters:
properties (dict): Initial property values.
Allowable properties are defined in section 'Request body contents'
in section 'Create User' in the :term:`HMC API` book.
Returns:
User:
The resource object for the new User.
The object will have its 'object-uri' property set as returned by
the HMC, and will also have the input properties set.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
result = self.session.post(self.console.uri + '/users',
body=properties)
# There should not be overlaps, but just in case there are, the
# returned props should overwrite the input props:
props = copy.deepcopy(properties)
props.update(result)
props.pop('password', None)
name = props.get(self._name_prop, None)
uri = props[self._uri_prop]
user = User(self, uri, name, props)
self._name_uri_cache.update(name, uri)
return user
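# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal sketch of driving UserManager.create() from a Console object
# (e.g. obtained via client.consoles.console). The property names and values
# are illustrative assumptions; the authoritative list is in the
# 'Create User' section of the HMC API book.
def _example_create_user(console, name, password):
    """Create a standard, locally authenticated user (illustrative only)."""
    properties = {
        'name': name,
        'type': 'standard',
        'authentication-type': 'local',
        'password': password,
    }
    return console.users.create(properties)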
class User(BaseResource):
"""
Representation of a :term:`User`.
Derived from :class:`~zhmcclient.BaseResource`; see there for common
methods and attributes.
Objects of this class are not directly created by the user; they are
returned from creation or list functions on their manager object
(in this case, :class:`~zhmcclient.UserManager`).
HMC/SE version requirements:
* HMC version >= 2.13.0
"""
def __init__(self, manager, uri, name=None, properties=None):
# This function should not go into the docs.
# manager (:class:`~zhmcclient.UserManager`):
# Manager object for this resource object.
# uri (string):
# Canonical URI path of the resource.
# name (string):
# Name of the resource.
# properties (dict):
# Properties to be set for this resource object. May be `None` or
# empty.
assert isinstance(manager, UserManager), (
f"User init: Expected manager type {UserManager}, "
f"got {type(manager)}")
super().__init__(manager, uri, name, properties)
@logged_api_call
def delete(self):
"""
Delete this User.
HMC/SE version requirements:
* HMC version >= 2.13.0
Authorization requirements:
* Task permission to the "Manage Users" task to delete a non-template
user, or the "Manage User Templates" task to delete a template user.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
# pylint: disable=protected-access
self.manager.session.delete(self.uri, resource=self)
self.manager._name_uri_cache.delete(
self.get_properties_local(self.manager._name_prop, None))
self.cease_existence_local()
@logged_api_call
def update_properties(self, properties):
"""
Update writeable properties of this User.
This method serializes with other methods that access or change
properties on the same Python object.
HMC/SE version requirements:
* HMC version >= 2.13.0
Authorization requirements:
* Task permission to the "Manage Users" task to update a non-template
user, or the "Manage User Templates" task to update a template user.
* For a user to update their own password or default-group-uri
property, user-related-access permission to the user represented
by this User object, or task permission to the "Manage Users" task is
required.
Parameters:
properties (dict): New values for the properties to be updated.
Properties not to be updated are omitted.
Allowable properties are the properties with qualifier (w) in
section 'Data model' in section 'User object' in the
:term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
# pylint: disable=protected-access
self.manager.session.post(self.uri, resource=self, body=properties)
# The name of Users cannot be updated. An attempt to do so should cause
# HTTPError to be raised in the POST above, so we assert that here,
# because we omit the extra code for handling name updates:
assert self.manager._name_prop not in properties
props = copy.deepcopy(properties)
props.pop('password', None)
self.update_properties_local(props)
@logged_api_call
def add_user_role(self, user_role):
"""
Add the specified User Role to this User.
This User must not be a system-defined or pattern-based user.
HMC/SE version requirements:
* HMC version >= 2.13.0
Authorization requirements:
* Task permission to the "Manage Users" task to modify a standard user
or the "Manage User Templates" task to modify a template user.
Parameters:
user_role (:class:`~zhmcclient.UserRole`): User Role to be added.
Must not be `None`.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {
'user-role-uri': user_role.uri
}
self.manager.session.post(
self.uri + '/operations/add-user-role', resource=self,
body=body)
@logged_api_call
def remove_user_role(self, user_role):
"""
Remove the specified User Role from this User.
This User must not be a system-defined or pattern-based user.
HMC/SE version requirements:
* HMC version >= 2.13.0
Authorization requirements:
* Task permission to the "Manage Users" task to modify a standard user
or the "Manage User Templates" task to modify a template user.
Parameters:
user_role (:class:`~zhmcclient.UserRole`): User Role to be removed.
Must not be `None`.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {
'user-role-uri': user_role.uri
}
self.manager.session.post(
self.uri + '/operations/remove-user-role', resource=self,
body=body)
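# --- Illustrative usage sketch (not part of the zhmcclient source above) ---
# Assumes the zhmcclient package is installed and an HMC is reachable; the
# host, credentials, role name and user properties below are hypothetical
# placeholders (see the 'Create User' section of the HMC API book for the
# authoritative property list).
import zhmcclient

session = zhmcclient.Session('hmc.example.com', 'hmcuser', 'hmcpassword')
client = zhmcclient.Client(session)
console = client.consoles.console
user = console.users.create({
    'name': 'operator1',
    'type': 'standard',
    'authentication-type': 'local',
    'password': 'Initial-Pw-1',
})
user.update_properties({'description': 'operator account'})
role = console.user_roles.find(name='hmc-operator-tasks')
user.add_user_role(role)
user.remove_user_role(role)
user.delete()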
|
GHSA-p57h-3cmc-xpjq
|
pymdownx/__meta__.py
|
@@ -185,5 +185,5 @@ def parse_version(ver, pre=False):
return Version(major, minor, micro, release, pre, post, dev)
-__version_info__ = Version(9, 11, 0, "final")
+__version_info__ = Version(10, 0, 0, "final")
__version__ = __version_info__._get_canonical()
|
"""Meta related things."""
from collections import namedtuple
import re
RE_VER = re.compile(
r'''(?x)
(?P<major>\d+)(?:\.(?P<minor>\d+))?(?:\.(?P<micro>\d+))?
(?:(?P<type>a|b|rc)(?P<pre>\d+))?
(?:\.post(?P<post>\d+))?
(?:\.dev(?P<dev>\d+))?
'''
)
REL_MAP = {
".dev": "",
".dev-alpha": "a",
".dev-beta": "b",
".dev-candidate": "rc",
"alpha": "a",
"beta": "b",
"candidate": "rc",
"final": ""
}
DEV_STATUS = {
".dev": "2 - Pre-Alpha",
".dev-alpha": "2 - Pre-Alpha",
".dev-beta": "2 - Pre-Alpha",
".dev-candidate": "2 - Pre-Alpha",
"alpha": "3 - Alpha",
"beta": "4 - Beta",
"candidate": "4 - Beta",
"final": "5 - Production/Stable"
}
PRE_REL_MAP = {"a": 'alpha', "b": 'beta', "rc": 'candidate'}
class Version(namedtuple("Version", ["major", "minor", "micro", "release", "pre", "post", "dev"])):
"""
Get the version (PEP 440).
A biased approach to the PEP 440 semantic version.
Provides a tuple structure which is sorted for comparisons `v1 > v2` etc.
(major, minor, micro, release type, pre-release build, post-release build, development release build)
Release types are named in such a way that they are comparable with ease.
Accessors check whether the build is a development, pre-release, or post-release build. Also provides an
accessor to get the development status for setup files.
How it works (currently):
- You must specify a release type as either `final`, `alpha`, `beta`, or `candidate`.
- To define a development release, you can use either `.dev`, `.dev-alpha`, `.dev-beta`, or `.dev-candidate`.
The dot is used to ensure all development specifiers are sorted before `alpha`.
You can specify a `dev` number for development builds, but do not have to as implicit development releases
are allowed.
- You must specify a `pre` value greater than zero if using a prerelease as this project (not PEP 440) does not
allow implicit prereleases.
- You can optionally set `post` to a value greater than zero to make the build a post release. While post releases
are technically allowed in prereleases, it is strongly discouraged, so we are rejecting them. It should be
noted that we do not allow `post0` even though PEP 440 does not restrict this. This project specifically
does not allow implicit post releases.
- It should be noted that we do not support epochs `1!` or local versions `+some-custom.version-1`.
Acceptable version releases:
```
Version(1, 0, 0, "final") 1.0
Version(1, 2, 0, "final") 1.2
Version(1, 2, 3, "final") 1.2.3
Version(1, 2, 0, ".dev-alpha", pre=4) 1.2a4
Version(1, 2, 0, ".dev-beta", pre=4) 1.2b4
Version(1, 2, 0, ".dev-candidate", pre=4) 1.2rc4
Version(1, 2, 0, "final", post=1) 1.2.post1
Version(1, 2, 3, ".dev") 1.2.3.dev0
Version(1, 2, 3, ".dev", dev=1) 1.2.3.dev1
```
"""
def __new__(cls, major, minor, micro, release="final", pre=0, post=0, dev=0):
"""Validate version info."""
# Ensure all parts are positive integers.
for value in (major, minor, micro, pre, post):
if not (isinstance(value, int) and value >= 0):
raise ValueError("All version parts except 'release' should be integers.")
if release not in REL_MAP:
raise ValueError("'{}' is not a valid release type.".format(release))
# Ensure valid pre-release (we do not allow implicit pre-releases).
if ".dev-candidate" < release < "final":
if pre == 0:
raise ValueError("Implicit pre-releases not allowed.")
elif dev:
raise ValueError("Version is not a development release.")
elif post:
raise ValueError("Post-releases are not allowed with pre-releases.")
# Ensure valid development or development/pre release
elif release < "alpha":
if release > ".dev" and pre == 0:
raise ValueError("Implicit pre-release not allowed.")
elif post:
raise ValueError("Post-releases are not allowed with pre-releases.")
# Ensure a valid normal release
else:
if pre:
raise ValueError("Version is not a pre-release.")
elif dev:
raise ValueError("Version is not a development release.")
return super(Version, cls).__new__(cls, major, minor, micro, release, pre, post, dev)
def _is_pre(self):
"""Is prerelease."""
return self.pre > 0
def _is_dev(self):
"""Is development."""
return bool(self.release < "alpha")
def _is_post(self):
"""Is post."""
return self.post > 0
def _get_dev_status(self): # pragma: no cover
"""Get development status string."""
return DEV_STATUS[self.release]
def _get_canonical(self):
"""Get the canonical output string."""
# Assemble major, minor, micro version and append `pre`, `post`, or `dev` if needed.
if self.micro == 0:
ver = "{}.{}".format(self.major, self.minor)
else:
ver = "{}.{}.{}".format(self.major, self.minor, self.micro)
if self._is_pre():
ver += '{}{}'.format(REL_MAP[self.release], self.pre)
if self._is_post():
ver += ".post{}".format(self.post)
if self._is_dev():
ver += ".dev{}".format(self.dev)
return ver
def parse_version(ver, pre=False):
"""Parse version into a comparable Version tuple."""
m = RE_VER.match(ver)
# Handle major, minor, micro
major = int(m.group('major'))
minor = int(m.group('minor')) if m.group('minor') else 0
micro = int(m.group('micro')) if m.group('micro') else 0
# Handle pre releases
if m.group('type'):
release = PRE_REL_MAP[m.group('type')]
pre = int(m.group('pre'))
else:
release = "final"
pre = 0
# Handle development releases
dev = m.group('dev') if m.group('dev') else 0
if m.group('dev'):
dev = int(m.group('dev'))
release = '.dev-' + release if pre else '.dev'
else:
dev = 0
# Handle post
post = int(m.group('post')) if m.group('post') else 0
return Version(major, minor, micro, release, pre, post, dev)
__version_info__ = Version(9, 11, 0, "final")
__version__ = __version_info__._get_canonical()
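# --- Illustrative usage sketch (not part of the module above); assumes the
# module is importable as `pymdownx.__meta__`.
from pymdownx.__meta__ import Version, parse_version

beta = parse_version("1.2b4")
final = Version(1, 2, 0, "final")
assert beta == Version(1, 2, 0, "beta", pre=4)
assert beta < final                      # named-tuple ordering: "beta" < "final"
assert final._get_canonical() == "1.2"   # micro == 0, so it is omitted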
|
GHSA-jh85-wwv9-24hv
|
pymdownx/snippets.py
|
@@ -82,7 +82,8 @@ def __init__(self, config, md):
base = config.get('base_path')
if isinstance(base, str):
base = [base]
- self.base_path = base
+ self.base_path = [os.path.abspath(b) for b in base]
+ self.restrict_base_path = config['restrict_base_path']
self.encoding = config.get('encoding')
self.check_paths = config.get('check_paths')
self.auto_append = config.get('auto_append')
@@ -159,18 +160,22 @@ def get_snippet_path(self, path):
for base in self.base_path:
if os.path.exists(base):
if os.path.isdir(base):
- filename = os.path.join(base, path)
+ if self.restrict_base_path:
+ filename = os.path.abspath(os.path.join(base, path))
+ # If the absolute path is no longer under the specified base path, reject the file
+ if not os.path.samefile(base, os.path.dirname(filename)):
+ continue
+ else:
+ filename = os.path.join(base, path)
if os.path.exists(filename):
snippet = filename
break
else:
- basename = os.path.basename(base)
dirname = os.path.dirname(base)
- if basename.lower() == path.lower():
- filename = os.path.join(dirname, path)
- if os.path.exists(filename):
- snippet = filename
- break
+ filename = os.path.join(dirname, path)
+ if os.path.exists(filename) and os.path.samefile(filename, base):
+ snippet = filename
+ break
return snippet
@functools.lru_cache()
@@ -367,6 +372,10 @@ def __init__(self, *args, **kwargs):
self.config = {
'base_path': [["."], "Base path for snippet paths - Default: [\".\"]"],
+ 'restrict_base_path': [
+ True,
+ "Restrict snippet paths such that they are under the base paths - Default: True"
+ ],
'encoding': ["utf-8", "Encoding of snippets - Default: \"utf-8\""],
'check_paths': [False, "Make the build fail if a snippet can't be found - Default: \"False\""],
"auto_append": [
|
"""
Snippet ---8<---.
pymdownx.snippet
Inject snippets
MIT license.
Copyright (c) 2017 Isaac Muse <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from markdown import Extension
from markdown.preprocessors import Preprocessor
import functools
import urllib
import re
import codecs
import os
from . import util
import textwrap
MI = 1024 * 1024 # mebibyte (MiB)
DEFAULT_URL_SIZE = MI * 32
DEFAULT_URL_TIMEOUT = 10.0 # in seconds
DEFAULT_URL_REQUEST_HEADERS = {}
class SnippetMissingError(Exception):
"""Snippet missing exception."""
class SnippetPreprocessor(Preprocessor):
"""Handle snippets in Markdown content."""
RE_ALL_SNIPPETS = re.compile(
r'''(?x)
^(?P<space>[ \t]*)
(?P<escape>;*)
(?P<all>
(?P<inline_marker>-{1,}8<-{1,}[ \t]+)
(?P<snippet>(?:"(?:\\"|[^"\n\r])+?"|'(?:\\'|[^'\n\r])+?'))(?![ \t]) |
(?P<block_marker>-{1,}8<-{1,})(?![ \t])
)\r?$
'''
)
RE_SNIPPET = re.compile(
r'''(?x)
^(?P<space>[ \t]*)
(?P<snippet>.*?)\r?$
'''
)
RE_SNIPPET_SECTION = re.compile(
r'''(?xi)
^(?P<pre>.*?)
(?P<escape>;*)
(?P<inline_marker>-{1,}8<-{1,}[ \t]+)
(?P<section>\[[ \t]*(?P<type>start|end)[ \t]*:[ \t]*(?P<name>[a-z][-_0-9a-z]*)[ \t]*\])
(?P<post>.*?)$
'''
)
RE_SNIPPET_FILE = re.compile(r'(?i)(.*?)(?:(:[0-9]*)?(:[0-9]*)?|(:[a-z][-_0-9a-z]*)?)$')
def __init__(self, config, md):
"""Initialize."""
base = config.get('base_path')
if isinstance(base, str):
base = [base]
self.base_path = base
self.encoding = config.get('encoding')
self.check_paths = config.get('check_paths')
self.auto_append = config.get('auto_append')
self.url_download = config['url_download']
self.url_max_size = config['url_max_size']
self.url_timeout = config['url_timeout']
self.url_request_headers = config['url_request_headers']
self.dedent_subsections = config['dedent_subsections']
self.tab_length = md.tab_length
super(SnippetPreprocessor, self).__init__()
def extract_section(self, section, lines):
"""Extract the specified section from the lines."""
new_lines = []
start = False
found = False
for l in lines:
# Found a snippet section marker with our specified name
m = self.RE_SNIPPET_SECTION.match(l)
# Handle escaped line
if m and start and m.group('escape'):
l = (
m.group('pre') + m.group('escape').replace(';', '', 1) + m.group('inline_marker') +
m.group('section') + m.group('post')
)
# Found a section we are looking for.
elif m is not None and m.group('name') == section:
# We found the start
if not start and m.group('type') == 'start':
start = True
found = True
continue
# Ignore duplicate start
elif start and m.group('type') == 'start':
continue
# We found the end
elif start and m.group('type') == 'end':
start = False
break
# We found an end, but no start
else:
break
# Found a section we don't care about, so ignore it.
elif m and start:
continue
# We are currently in a section, so append the line
if start:
new_lines.append(l)
if not found and self.check_paths:
raise SnippetMissingError("Snippet section '{}' could not be located".format(section))
return self.dedent(new_lines) if self.dedent_subsections else new_lines
def dedent(self, lines):
"""De-indent lines."""
return textwrap.dedent('\n'.join(lines)).split('\n')
def get_snippet_path(self, path):
"""Get snippet path."""
snippet = None
for base in self.base_path:
if os.path.exists(base):
if os.path.isdir(base):
filename = os.path.join(base, path)
if os.path.exists(filename):
snippet = filename
break
else:
basename = os.path.basename(base)
dirname = os.path.dirname(base)
if basename.lower() == path.lower():
filename = os.path.join(dirname, path)
if os.path.exists(filename):
snippet = filename
break
return snippet
@functools.lru_cache()
def download(self, url):
"""
Actually download the snippet pointed to by the passed URL.
The most recently used files are kept in a cache until the next reset.
"""
http_request = urllib.request.Request(url, headers=self.url_request_headers)
timeout = None if self.url_timeout == 0 else self.url_timeout
with urllib.request.urlopen(http_request, timeout=timeout) as response:
# Fail if status is not OK
status = response.status if util.PY39 else response.code
if status != 200:
raise SnippetMissingError("Cannot download snippet '{}'".format(url))
# We provide some basic protection against absurdly large files.
# 32MB is chosen as an arbitrary upper limit. This can be raised if desired.
length = response.headers.get("content-length")
if length is None:
raise ValueError("Missing content-length header")
content_length = int(length)
if self.url_max_size != 0 and content_length >= self.url_max_size:
raise ValueError("refusing to read payloads larger than or equal to {}".format(self.url_max_size))
# Nothing to return
if content_length == 0:
return ['']
# Process lines
return [l.decode(self.encoding).rstrip('\r\n') for l in response.readlines()]
def parse_snippets(self, lines, file_name=None, is_url=False):
"""Parse snippets snippet."""
if file_name:
# Track this file.
self.seen.add(file_name)
new_lines = []
inline = False
block = False
for line in lines:
# Check for snippets on line
inline = False
m = self.RE_ALL_SNIPPETS.match(line)
if m:
if m.group('escape'):
# The snippet has been escaped, replace first `;` and continue.
new_lines.append(line.replace(';', '', 1))
continue
if block and m.group('inline_marker'):
# Don't use inline notation directly under a block.
# It's okay if inline is used again in sub file though.
continue
elif m.group('inline_marker'):
# Inline
inline = True
else:
# Block
block = not block
continue
elif not block:
# Not in snippet, and we didn't find an inline,
# so just a normal line
new_lines.append(line)
continue
if block and not inline:
# We are in a block and we didn't just find a nested inline
# So check if a block path
m = self.RE_SNIPPET.match(line)
if m:
# Get spaces and snippet path. Remove quotes if inline.
space = m.group('space').expandtabs(self.tab_length)
path = m.group('snippet')[1:-1].strip() if inline else m.group('snippet').strip()
if not inline:
# Block path handling
if not path:
# Empty path line, insert a blank line
new_lines.append('')
continue
# Ignore commented out lines
if path.startswith(';'):
continue
# Get line numbers (if specified)
end = None
start = None
section = None
m = self.RE_SNIPPET_FILE.match(path)
path = m.group(1).strip()
# Looks like we have an empty file and only lines specified
if not path:
if self.check_paths:
raise SnippetMissingError("Snippet at path '{}' could not be found".format(path))
else:
continue
ending = m.group(3)
if ending and len(ending) > 1:
end = int(ending[1:])
starting = m.group(2)
if starting and len(starting) > 1:
start = max(1, int(starting[1:]) - 1)
section_name = m.group(4)
if section_name:
section = section_name[1:]
# Ignore path links if we are in external, downloaded content
is_link = path.lower().startswith(('https://', 'http://'))
if is_url and not is_link:
continue
# If this is a link, and we are allowing URLs, set `url` to true.
# Make sure we don't process `path` as a local file reference.
url = self.url_download and is_link
snippet = self.get_snippet_path(path) if not url else path
if snippet:
# This is in the stack and we don't want an infinite loop!
if snippet in self.seen:
continue
if not url:
# Read file content
with codecs.open(snippet, 'r', encoding=self.encoding) as f:
s_lines = [l.rstrip('\r\n') for l in f]
if start is not None or end is not None:
s = slice(start, end)
s_lines = self.dedent(s_lines[s]) if self.dedent_subsections else s_lines[s]
elif section:
s_lines = self.extract_section(section, s_lines)
else:
# Read URL content
try:
s_lines = self.download(snippet)
if start is not None or end is not None:
s = slice(start, end)
s_lines = self.dedent(s_lines[s]) if self.dedent_subsections else s_lines[s]
elif section:
s_lines = self.extract_section(section, s_lines)
except SnippetMissingError:
if self.check_paths:
raise
s_lines = []
# Process lines looking for more snippets
new_lines.extend(
[
space + l2 for l2 in self.parse_snippets(
s_lines,
snippet,
is_url=url
)
]
)
elif self.check_paths:
raise SnippetMissingError("Snippet at path '{}' could not be found".format(path))
# Pop the current file name out of the cache
if file_name:
self.seen.remove(file_name)
return new_lines
def run(self, lines):
"""Process snippets."""
self.seen = set()
if self.auto_append:
lines.extend("\n\n-8<-\n{}\n-8<-\n".format('\n\n'.join(self.auto_append)).split('\n'))
return self.parse_snippets(lines)
class SnippetExtension(Extension):
"""Snippet extension."""
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {
'base_path': [["."], "Base path for snippet paths - Default: [\".\"]"],
'encoding': ["utf-8", "Encoding of snippets - Default: \"utf-8\""],
'check_paths': [False, "Make the build fail if a snippet can't be found - Default: \"False\""],
"auto_append": [
[],
"A list of snippets (relative to the 'base_path') to auto append to the Markdown content - Default: []"
],
'url_download': [False, "Download external URLs as snippets - Default: \"False\""],
'url_max_size': [DEFAULT_URL_SIZE, "External URL max size (0 means no limit) - Default: 32 MiB"],
'url_timeout': [DEFAULT_URL_TIMEOUT, 'Default URL timeout (0 means no timeout) - Default: 10 sec'],
'url_request_headers': [DEFAULT_URL_REQUEST_HEADERS, "Extra request Headers - Default: {}"],
'dedent_subsections': [False, "Dedent subsection extractions e.g. 'sections' and/or 'lines'."]
}
super(SnippetExtension, self).__init__(*args, **kwargs)
def extendMarkdown(self, md):
"""Register the extension."""
self.md = md
md.registerExtension(self)
config = self.getConfigs()
snippet = SnippetPreprocessor(config, md)
md.preprocessors.register(snippet, "snippet", 32)
def reset(self):
"""Reset."""
self.md.preprocessors['snippet'].download.cache_clear()
def makeExtension(*args, **kwargs):
"""Return extension."""
return SnippetExtension(*args, **kwargs)
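# --- Illustrative sketch of the base-path restriction introduced by the
# GHSA-jh85-wwv9-24hv patch above; standalone code, not part of the extension.
# `base` is assumed to be an existing directory.
import os

def resolves_inside_base(base, path):
    """Return True only if `path` resolves to an existing file directly under `base`."""
    base = os.path.abspath(base)
    candidate = os.path.abspath(os.path.join(base, path))
    if not os.path.exists(candidate):
        return False
    # Reject traversal such as "../b.txt": the resolved parent must be the base itself.
    return os.path.samefile(base, os.path.dirname(candidate))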
|
GHSA-jh85-wwv9-24hv
|
tests/test_extensions/test_snippets.py
|
@@ -481,6 +481,63 @@ def test_user(self):
)
+class TestSnippetsNested(util.MdCase):
+ """Test nested restriction."""
+
+ extension = [
+ 'pymdownx.snippets',
+ ]
+
+ extension_configs = {
+ 'pymdownx.snippets': {
+ 'base_path': os.path.join(BASE, '_snippets', 'nested'),
+ 'check_paths': True
+ }
+ }
+
+ def test_restricted(self):
+ """Test file restriction."""
+
+ with self.assertRaises(SnippetMissingError):
+ self.check_markdown(
+ R'''
+ --8<-- "../b.txt"
+ ''',
+ '''
+ <p>Snippet</p>
+ ''',
+ True
+ )
+
+
+class TestSnippetsNestedUnrestricted(util.MdCase):
+ """Test nested no bounds."""
+
+ extension = [
+ 'pymdownx.snippets',
+ ]
+
+ extension_configs = {
+ 'pymdownx.snippets': {
+ 'base_path': os.path.join(BASE, '_snippets', 'nested'),
+ 'restrict_base_path': False
+ }
+ }
+
+ def test_restricted(self):
+ """Test file restriction."""
+
+ self.check_markdown(
+ R'''
+ --8<-- "../b.txt"
+ ''',
+ '''
+ <p>Snippet</p>
+ ''',
+ True
+ )
+
+
class TestSnippetsAutoAppend(util.MdCase):
"""Test snippet file case."""
|
"""Test cases for Snippets."""
from .. import util
import os
from pymdownx.snippets import SnippetMissingError
from unittest.mock import patch, MagicMock
BASE = os.path.abspath(os.path.dirname(__file__))
class TestSnippetDedent(util.MdCase):
"""Test snippet cases."""
extension = [
'pymdownx.snippets', 'pymdownx.superfences'
]
extension_configs = {
'pymdownx.snippets': {
'base_path': [os.path.join(BASE, '_snippets')],
'dedent_subsections': True
}
}
def test_dedent_section(self):
"""Test dedenting sections."""
self.check_markdown(
R'''
```text
---8<--- "indented.txt:py-section"
```
''', # noqa: W291
R'''
<div class="highlight"><pre><span></span><code>def some_method(self, param):
"""Docstring."""
return param
</code></pre></div>
''',
True
)
def test_dedent_lines(self):
"""Test dedenting lines."""
self.check_markdown(
R'''
```text
---8<--- "indented.txt:5:8"
```
''', # noqa: W291
R'''
<div class="highlight"><pre><span></span><code>def some_method(self, param):
"""Docstring."""
return param
</code></pre></div>
''',
True
)
def test_dedent_indented(self):
"""Test dedenting sections that has indented insertion."""
self.check_markdown(
R'''
Paragraph
---8<--- "indented.txt:py-section"
''', # noqa: W291
R'''
<p>Paragraph</p>
<pre><code>def some_method(self, param):
"""Docstring."""
return param
</code></pre>
''',
True
)
class TestSnippets(util.MdCase):
"""Test snippet cases."""
extension = [
'pymdownx.snippets', 'pymdownx.superfences'
]
extension_configs = {
'pymdownx.snippets': {
'base_path': [os.path.join(BASE, '_snippets')]
}
}
def test_inline(self):
"""Test inline."""
self.check_markdown(
R'''
---8<--- "loop.txt"
---8<--- "a.txt"
---8<--- "b.txt"
;---8<--- "b.txt"
- Testing indentation
---8<--- "b.txt"
''', # noqa: W291
R'''
<p>Snippet
Snippet
---8<--- "b.txt"</p>
<ul>
<li>
<p>Testing indentation</p>
<p>Snippet</p>
</li>
</ul>
''',
True
)
def test_block(self):
"""Test block."""
self.check_markdown(
R'''
---8<---
loop_block.txt
c.txt
d.txt
---8<---
;---8<---
d.txt
;---8<---
- Testing indentation
---8<---
d.txt
; d.txt
# Nested inline won't work
--8<-- "a.txt"
--8<-- "; b.txt"
---8<---
# Un-nested Inline
--8<-- "a.txt"
--8<-- "; b.txt"
''', # noqa: W291
R'''
<p>Snippet</p>
<p>Snippet</p>
<p>---8<---
d.txt
---8<---</p>
<ul>
<li>
<p>Testing indentation</p>
<p>Snippet</p>
<h1>Un-nested Inline</h1>
<p>Snippet</p>
</li>
</ul>
''', # noqa: W291
True
)
def test_mixed(self):
"""Test mixed."""
self.check_markdown(
R'''
---8<--- "a.txt"
---8<---
loop_block.txt
c.txt
d.txt
---8<---
''',
R'''
<p>Snippet</p>
<p>Snippet</p>
<p>Snippet</p>
''',
True
)
def test_start_line_inline(self):
"""Test starting line with inline syntax."""
self.check_markdown(
R'''
---8<--- "lines.txt:4"
''',
'''
<p>Content resides on various lines.
If we use line specifiers,
we can select any number of lines we want.</p>
<p>This is the end of the file.
There is no more.</p>
''',
True
)
def test_end_line_inline(self):
"""Test ending line with inline syntax."""
self.check_markdown(
R'''
---8<--- "lines.txt::6"
''',
'''
<p>This is a multi-line
snippet.</p>
<p>Content resides on various lines.
If we use line specifiers,
we can select any number of lines we want.</p>
''',
True
)
def test_start_end_line_inline(self):
"""Test starting and ending line with inline syntax."""
self.check_markdown(
R'''
---8<--- "lines.txt:4:6"
''',
'''
<p>Content resides on various lines.
If we use line specifiers,
we can select any number of lines we want.</p>
''',
True
)
def test_start_line_block(self):
"""Test starting line with block syntax."""
self.check_markdown(
R'''
--8<--
lines.txt:4
--8<--
''',
'''
<p>Content resides on various lines.
If we use line specifiers,
we can select any number of lines we want.</p>
<p>This is the end of the file.
There is no more.</p>
''',
True
)
def test_end_line_block(self):
"""Test ending line with block syntax."""
self.check_markdown(
R'''
--8<--
lines.txt::6
--8<--
''',
'''
<p>This is a multi-line
snippet.</p>
<p>Content resides on various lines.
If we use line specifiers,
we can select any number of lines we want.</p>
''',
True
)
def test_start_end_line_block(self):
"""Test starting and ending line with block syntax."""
self.check_markdown(
R'''
--8<--
lines.txt:4:6
--8<--
''',
'''
<p>Content resides on various lines.
If we use line specifiers,
we can select any number of lines we want.</p>
''',
True
)
def test_section_inline(self):
"""Test section partial in inline snippet."""
self.check_markdown(
R'''
```
--8<-- "section.txt:css-section"
```
''',
'''
<div class="highlight"><pre><span></span><code>div {
color: red;
}
</code></pre></div>
''',
True
)
def test_section_inline_min(self):
"""Test section partial in inline snippet using minimum tokens."""
self.check_markdown(
R'''
```
-8<- "section.txt:css-section"
```
''',
'''
<div class="highlight"><pre><span></span><code>div {
color: red;
}
</code></pre></div>
''',
True
)
def test_section_inline_ignore_other_section(self):
"""Test nested sections."""
self.check_markdown(
R'''
```
-8<- "section_nested.txt:css-section"
```
''',
'''
<div class="highlight"><pre><span></span><code>div {
color: red;
background-color: white;
padding: 16px
}
</code></pre></div>
''',
True
)
def test_section_inline_escaped_other_section(self):
"""Test nested escaped sections."""
self.check_markdown(
R'''
```
-8<- "section_nested.txt:css-section3"
```
''',
'''
<div class="highlight"><pre><span></span><code>div {
color: red;
/* --8<-- [start: css-section4] */
background-color: white;
padding: 16px
/* --8<-- [end: css-section4] */
}
</code></pre></div>
''',
True
)
def test_section_ignore_double_start_section(self):
"""Test nested sections."""
self.check_markdown(
R'''
```
-8<- "section_nested.txt:css-section5"
```
''',
'''
<div class="highlight"><pre><span></span><code>div {
color: red;
background-color: white;
padding: 16px
}
</code></pre></div>
''',
True
)
def test_section_block(self):
"""Test section partial in block snippet."""
self.check_markdown(
R'''
--8<--
section.txt:html-section
--8<--
''',
'''
<div><p>content</p></div>
''',
True
)
def test_section_block_min(self):
"""Test section partial in block snippet using minimum tokens."""
self.check_markdown(
R'''
-8<-
section.txt:html-section
-8<-
''',
'''
<div><p>content</p></div>
''',
True
)
def test_section_end_first(self):
"""Test section when the end is specified first."""
self.check_markdown(
R'''
--8<--
section.txt:css-section2
--8<--
''',
'''
''',
True
)
def test_section_no_end(self):
"""Test section when the end is not specified."""
self.check_markdown(
R'''
--8<--
section.txt:html-section2
--8<--
''',
'''
<div><p>content</p></div>
''',
True
)
class TestSnippetsFile(util.MdCase):
"""Test snippet file case."""
extension = [
'pymdownx.snippets',
]
extension_configs = {
'pymdownx.snippets': {
'base_path': os.path.join(BASE, '_snippets', 'b.txt')
}
}
def test_user(self):
"""Test file."""
self.check_markdown(
R'''
--8<-- "b.txt"
''',
'''
<p>Snippet</p>
''',
True
)
class TestSnippetsAutoAppend(util.MdCase):
"""Test snippet file case."""
extension = [
'pymdownx.snippets',
]
extension_configs = {
'pymdownx.snippets': {
'base_path': [os.path.join(BASE, '_snippets')],
'auto_append': ['b.txt']
}
}
def test_auto_append(self):
"""Test auto append."""
self.check_markdown(
R'''
Test
''',
'''
<p>Test</p>
<p>Snippet</p>
''',
True
)
class TestSnippetsMissing(util.MdCase):
"""Test snippet file case."""
extension = [
'pymdownx.snippets',
]
extension_configs = {
'pymdownx.snippets': {
'base_path': [os.path.join(BASE, '_snippets')],
'check_paths': True
}
}
def test_good(self):
"""Test found file."""
self.check_markdown(
'''
--8<--- "d.txt"
''',
'''
<p>Snippet</p>
''',
True
)
def test_top_level(self):
"""Test top level."""
with self.assertRaises(SnippetMissingError):
self.check_markdown(
R'''
--8<-- "not-here.txt"
''',
'''
''',
True
)
def test_nested(self):
"""Test nested."""
with self.assertRaises(SnippetMissingError):
self.check_markdown(
R'''
--8<-- "missing.txt"
''',
'''
''',
True
)
def test_missing_file_lines(self):
"""Test missing file with line numbers."""
with self.assertRaises(SnippetMissingError):
self.check_markdown(
R'''
--8<-- ":3:4"
''',
'''
''',
True
)
def test_missing_section(self):
"""Test missing section."""
with self.assertRaises(SnippetMissingError):
self.check_markdown(
R'''
--8<-- "section.txt:missing-section"
''',
'''
''',
True
)
class TestSnippetsGracefulMissing(util.MdCase):
"""Test snippet file case."""
extension = [
'pymdownx.snippets',
]
extension_configs = {
'pymdownx.snippets': {
'base_path': [os.path.join(BASE, '_snippets')]
}
}
def test_top_level(self):
"""Test top level."""
self.check_markdown(
R'''
--8<-- "not-here.txt"
''',
'''
''',
True
)
def test_nested(self):
"""Test nested."""
self.check_markdown(
R'''
--8<-- "missing.txt"
''',
'''
''',
True
)
def test_missing_lines(self):
"""Test missing file with lines."""
self.check_markdown(
R'''
--8<-- ":3:4"
''',
'''
''',
True
)
class TestURLSnippets(util.MdCase):
"""Test snippet URL cases."""
extension = [
'pymdownx.snippets',
]
extension_configs = {
'pymdownx.snippets': {
'base_path': [os.path.join(BASE, '_snippets')],
'url_download': True
}
}
@patch('urllib.request.urlopen')
def test_url(self, mock_urlopen):
"""Test URL."""
cm = MagicMock()
cm.status = 200
cm.code = 200
cm.readlines.return_value = [b'contents']
cm.headers = {'content-length': '8'}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
self.check_markdown(
R'''
--8<-- "https://test.com/myfile.md"
''',
'''
<p>contents</p>
''',
True
)
@patch('urllib.request.urlopen')
def test_url_nested(self, mock_urlopen):
"""Test nested URLs."""
cm = MagicMock()
cm.status = 200
cm.code = 200
cm.readlines.side_effect = [[b'content', b'', b'--8<-- "https://test.com/myfile2.md"'], [b'other']]
cm.headers = {'content-length': '8'}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
self.check_markdown(
R'''
--8<-- "https://test.com/myfile.md"
''',
'''
<p>content</p>
<p>other</p>
''',
True
)
@patch('urllib.request.urlopen')
def test_url_nested_duplicate(self, mock_urlopen):
"""Test nested duplicate file."""
cm = MagicMock()
cm.status = 200
cm.code = 200
cm.readlines.side_effect = [[b'content', b'', b'--8<-- "https://test.com/myfile.md"'], [b'other']]
cm.headers = {'content-length': '8'}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
self.check_markdown(
R'''
--8<-- "https://test.com/myfile.md"
''',
'''
<p>content</p>
''',
True
)
@patch('urllib.request.urlopen')
def test_url_nested_file(self, mock_urlopen):
"""Test nested file in URL."""
cm = MagicMock()
cm.status = 200
cm.code = 200
cm.readlines.return_value = [b'content', b'', b'--8<-- "b.txt"']
cm.headers = {'content-length': '8'}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
self.check_markdown(
R'''
--8<-- "https://test.com/myfile.md"
''',
'''
<p>content</p>
''',
True
)
@patch('urllib.request.urlopen')
def test_url_lines(self, mock_urlopen):
"""Test specifying specific lines in a URL."""
content = []
length = 0
with open('tests/test_extensions/_snippets/lines.txt', 'rb') as f:
for l in f:
length += len(l)
content.append(l)
cm = MagicMock()
cm.status = 200
cm.code = 200
cm.readlines.return_value = content
cm.headers = {'content-length': length}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
self.check_markdown(
R'''
--8<-- "https://test.com/myfile.md:4:6"
''',
'''
<p>Content resides on various lines.
If we use line specifiers,
we can select any number of lines we want.</p>
''',
True
)
@patch('urllib.request.urlopen')
def test_missing(self, mock_urlopen):
"""Test missing URL."""
cm = MagicMock()
cm.status = 404
cm.code = 404
cm.readlines.return_value = []
cm.headers = {'content-length': '0'}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
self.check_markdown(
R'''
--8<-- "https://test.com/myfile.md"
''',
'',
True
)
@patch('urllib.request.urlopen')
def test_missing_content_length(self, mock_urlopen):
"""Test missing content length header."""
cm = MagicMock()
cm.status = 200
cm.code = 200
cm.readlines.return_value = []
cm.headers = {}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
with self.assertRaises(ValueError):
self.check_markdown(
R'''
--8<-- "https://test.com/myfile.md"
''',
'''
''',
True
)
@patch('urllib.request.urlopen')
def test_missing_content_length_too_big(self, mock_urlopen):
"""Test content length too big."""
cm = MagicMock()
cm.status = 200
cm.code = 200
cm.readlines.return_value = []
cm.headers = {'content-length': str(1024 * 1024 * 48)}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
with self.assertRaises(ValueError):
self.check_markdown(
R'''
--8<-- "https://test.com/myfile.md"
''',
'''
''',
True
)
@patch('urllib.request.urlopen')
def test_content_length_zero(self, mock_urlopen):
"""Test empty content."""
cm = MagicMock()
cm.status = 200
cm.code = 200
cm.readlines.return_value = []
cm.headers = {'content-length': '0'}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
self.check_markdown(
R'''
--8<-- "https://test.com/myfile.md"
''',
'',
True
)
@patch('urllib.request.urlopen')
def test_url_sections(self, mock_urlopen):
"""Test specifying a section in a URL."""
content = []
length = 0
with open('tests/test_extensions/_snippets/section.txt', 'rb') as f:
for l in f:
length += len(l)
content.append(l)
cm = MagicMock()
cm.status = 200
cm.code = 200
cm.readlines.return_value = content
cm.headers = {'content-length': length}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
self.check_markdown(
R'''
--8<-- "https://test.com/myfile.md:html-section"
''',
'''
<div><p>content</p></div>
''',
True
)
class TestURLDedentSnippets(util.MdCase):
"""Test snippet URL cases."""
extension = [
'pymdownx.snippets', 'pymdownx.superfences'
]
extension_configs = {
'pymdownx.snippets': {
'base_path': [os.path.join(BASE, '_snippets')],
'url_download': True,
'dedent_subsections': True
}
}
@patch('urllib.request.urlopen')
def test_url_sections(self, mock_urlopen):
"""Test specifying a section in a URL."""
content = []
length = 0
with open('tests/test_extensions/_snippets/indented.txt', 'rb') as f:
for l in f:
length += len(l)
content.append(l)
cm = MagicMock()
cm.status = 200
cm.code = 200
cm.readlines.return_value = content
cm.headers = {'content-length': length}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
self.check_markdown(
R'''
```
--8<-- "https://test.com/myfile.md:py-section"
```
''',
'''
<div class="highlight"><pre><span></span><code>def some_method(self, param):
"""Docstring."""
return param
</code></pre></div>
''',
True
)
class TestURLSnippetsNoMax(util.MdCase):
"""Test snippet URL cases no max size."""
extension = [
'pymdownx.snippets',
]
extension_configs = {
'pymdownx.snippets': {
'base_path': [os.path.join(BASE, '_snippets')],
'url_download': True,
'url_max_size': 0
}
}
@patch('urllib.request.urlopen')
def test_content_length_zero(self, mock_urlopen):
"""Test empty content."""
cm = MagicMock()
cm.status = 200
cm.code = 200
cm.readlines.return_value = [b'contents']
cm.headers = {'content-length': str(1024 * 1024 * 48)}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
self.check_markdown(
R'''
--8<-- "https://test.com/myfile.md"
''',
'''
<p>contents</p>
''',
True
)
class TestURLSnippetsMissing(util.MdCase):
"""Test snippet URL cases with missing URL and 'check paths'."""
extension = [
'pymdownx.snippets',
]
extension_configs = {
'pymdownx.snippets': {
'base_path': [os.path.join(BASE, '_snippets')],
'url_download': True,
'url_max_size': 0,
'check_paths': True
}
}
@patch('urllib.request.urlopen')
def test_missing(self, mock_urlopen):
"""Test missing URL."""
cm = MagicMock()
cm.status = 404
cm.code = 404
cm.readlines.return_value = []
cm.headers = {'content-length': '0'}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
with self.assertRaises(SnippetMissingError):
self.check_markdown(
R'''
--8<-- "https://test.com/myfile.md"
''',
'',
True
)
|
GHSA-jh85-wwv9-24hv
|
wagtail/core/forms.py
|
@@ -1,4 +1,5 @@
from django import forms
+from django.utils.crypto import constant_time_compare
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
@@ -13,7 +14,7 @@ def __init__(self, *args, **kwargs):
def clean_password(self):
data = self.cleaned_data['password']
- if data != self.restriction.password:
+ if not constant_time_compare(data, self.restriction.password):
raise forms.ValidationError(_("The password you have entered is not correct. Please try again."))
return data
|
from django import forms
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
class PasswordViewRestrictionForm(forms.Form):
password = forms.CharField(label=ugettext_lazy("Password"), widget=forms.PasswordInput)
return_url = forms.CharField(widget=forms.HiddenInput)
def __init__(self, *args, **kwargs):
self.restriction = kwargs.pop('instance')
super().__init__(*args, **kwargs)
def clean_password(self):
data = self.cleaned_data['password']
if data != self.restriction.password:
raise forms.ValidationError(_("The password you have entered is not correct. Please try again."))
return data
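# --- Illustrative sketch of the timing-safe comparison used in the
# GHSA-jjjr-3jcw-f8v6 patch above. The standard library's hmac.compare_digest
# gives the same constant-time property that django.utils.crypto.constant_time_compare
# relies on; this is standalone code, not Wagtail's.
import hmac

def password_matches(submitted, expected):
    # A plain `submitted != expected` can leak how long the matching prefix is
    # through response timing; compare_digest runs in time independent of that.
    return hmac.compare_digest(submitted.encode('utf-8'), expected.encode('utf-8'))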
|
GHSA-jjjr-3jcw-f8v6
|
tensorflow/python/kernel_tests/image_ops/extract_image_patches_op_test.py
|
@@ -17,7 +17,9 @@
import numpy as np
from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@@ -139,6 +141,17 @@ def testComplexDataTypes(self):
padding=padding,
patches=patches)
+ def testInvalidAttributes(self):
+ """Test for passing weird things into ksizes."""
+ with self.assertRaisesRegex(TypeError, "Expected list"):
+ image = constant_op.constant([0.0])
+ ksizes = math_ops.cast(
+ constant_op.constant(dtype=dtypes.int16, value=[[1, 4], [5, 2]]),
+ dtype=dtypes.qint16)
+ strides = [1, 1, 1, 1]
+ self.evaluate(
+ array_ops.extract_image_patches(
+ image, ksizes=ksizes, strides=strides, padding="SAME"))
if __name__ == "__main__":
test.main()
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ExtractImagePatches op."""
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ExtractImagePatches(test.TestCase):
"""Functional tests for ExtractImagePatches op."""
def _VerifyValues(self, image, ksizes, strides, rates, padding, patches):
"""Tests input-output pairs for the ExtractImagePatches op.
Args:
image: Input tensor with shape: [batch, in_rows, in_cols, depth].
ksizes: Patch size specified as: [ksize_rows, ksize_cols].
strides: Output strides, specified as [stride_rows, stride_cols].
rates: Atrous rates, specified as [rate_rows, rate_cols].
padding: Padding type.
patches: Expected output.
"""
ksizes = [1] + ksizes + [1]
strides = [1] + strides + [1]
rates = [1] + rates + [1]
out_tensor = array_ops.extract_image_patches(
constant_op.constant(image),
ksizes=ksizes,
strides=strides,
rates=rates,
padding=padding,
name="im2col")
self.assertAllClose(patches, self.evaluate(out_tensor))
def testKsize1x1Stride1x1Rate1x1(self):
"""Verifies that for 1x1 kernel the output equals the input."""
# [2, 3, 4, 5]
image = np.reshape(range(120), [2, 3, 4, 5])
# [2, 3, 4, 5]
patches = np.reshape(range(120), [2, 3, 4, 5])
for padding in ["VALID", "SAME"]:
self._VerifyValues(
image,
ksizes=[1, 1],
strides=[1, 1],
rates=[1, 1],
padding=padding,
patches=patches)
def testKsize1x1Stride2x3Rate1x1(self):
"""Test for 1x1 kernel and strides."""
# [2, 4, 5, 3]
image = np.reshape(range(120), [2, 4, 5, 3])
# [2, 2, 2, 3]
patches = image[:, ::2, ::3, :]
for padding in ["VALID", "SAME"]:
self._VerifyValues(
image,
ksizes=[1, 1],
strides=[2, 3],
rates=[1, 1],
padding=padding,
patches=patches)
def testKsize2x2Stride1x1Rate1x1Valid(self):
"""Test for 2x2 kernel with VALID padding."""
# [1, 2, 2, 1]
image = [[[[1], [2]], [[3], [4]]]]
# [1, 1, 1, 4]
patches = [[[[1, 2, 3, 4]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
patches=patches)
def testKsize2x2Stride1x1Rate1x1Same(self):
"""Test for 2x2 kernel with SAME padding."""
# [1, 2, 2, 1]
image = [[[[1], [2]], [[3], [4]]]]
# [1, 2, 2, 4]
patches = [[[[1, 2, 3, 4], [2, 0, 4, 0]], [[3, 4, 0, 0], [4, 0, 0, 0]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
patches=patches)
def testKsize2x2Stride1x1Rate2x2Valid(self):
"""Test for 2x2 kernel with 2x2 dilation."""
# [1, 2, 2, 1]
image = np.arange(16).reshape(1, 4, 4, 1).astype(np.float32)
# [1, 2, 2, 4]
patches = [[[[0, 2, 8, 10], [1, 3, 9, 11]],
[[4, 6, 12, 14], [5, 7, 13, 15]]]]
self._VerifyValues(
image,
ksizes=[2, 2],
strides=[1, 1],
rates=[2, 2],
padding="VALID",
patches=patches)
def testComplexDataTypes(self):
"""Test for complex data types"""
for dtype in [np.complex64, np.complex128]:
image = (
np.reshape(range(120), [2, 3, 4, 5]).astype(dtype) +
np.reshape(range(120, 240), [2, 3, 4, 5]).astype(dtype) * 1j)
patches = (
np.reshape(range(120), [2, 3, 4, 5]).astype(dtype) +
np.reshape(range(120, 240), [2, 3, 4, 5]).astype(dtype) * 1j)
for padding in ["VALID", "SAME"]:
self._VerifyValues(
image,
ksizes=[1, 1],
strides=[1, 1],
rates=[1, 1],
padding=padding,
patches=patches)
if __name__ == "__main__":
test.main()
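# --- Illustrative numpy-only sketch (not TensorFlow code) of the 2x2
# VALID-padding case verified in testKsize2x2Stride1x1Rate1x1Valid above.
import numpy as np

image = np.array([[[[1], [2]], [[3], [4]]]])   # shape [1, 2, 2, 1]
# With a 2x2 kernel, stride 1 and VALID padding there is exactly one patch,
# flattened along the last (depth) axis:
patches = image.reshape(1, 1, 1, 4)
assert patches.tolist() == [[[[1, 2, 3, 4]]]]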
|
GHSA-xxcj-rhqg-m46g
|
invenio_drafts_resources/services/records/service.py
|
@@ -267,10 +267,9 @@ def publish(self, id_, identity, uow=None):
into records)
- Create or update associated (published) record with data
"""
- self.require_permission(identity, "publish")
-
# Get the draft
draft = self.draft_cls.pid.resolve(id_, registered_only=False)
+ self.require_permission(identity, "publish", record=draft)
# Validate the draft strictly - since a draft can be saved with errors
# we do a strict validation here to make sure only valid drafts can be
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020-2021 CERN.
# Copyright (C) 2020 Northwestern University.
#
# Invenio-Drafts-Resources is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Primary service for working with records and drafts."""
from elasticsearch_dsl.query import Q
from invenio_db import db
from invenio_records_resources.services import LinksTemplate
from invenio_records_resources.services import \
RecordService as RecordServiceBase
from invenio_records_resources.services import ServiceSchemaWrapper
from invenio_records_resources.services.uow import RecordCommitOp, \
RecordDeleteOp, RecordIndexOp, unit_of_work
from sqlalchemy.orm.exc import NoResultFound
class RecordService(RecordServiceBase):
"""Record and draft service interface.
This service provides an interface to business logic for published and
draft records.
"""
def __init__(self, config, files_service=None, draft_files_service=None):
"""Constructor for RecordService."""
super().__init__(config)
self._files = files_service
self._draft_files = draft_files_service
#
# Subservices
#
@property
def files(self):
"""Record files service."""
return self._files
@property
def draft_files(self):
"""Draft files service."""
return self._draft_files
#
# Properties
#
@property
def schema_parent(self):
"""Schema for parent records."""
return ServiceSchemaWrapper(self, schema=self.config.schema_parent)
@property
def draft_cls(self):
"""Factory for creating a record class."""
return self.config.draft_cls
# High-level API
# Inherits record search, read, create, delete and update
def update(self, *args, **kwargs):
"""Do not use."""
raise NotImplementedError("Records should be updated via their draft.")
def search_drafts(self, identity, params=None, es_preference=None,
**kwargs):
"""Search for drafts records matching the querystring."""
self.require_permission(identity, 'search_drafts')
# Prepare and execute the search
params = params or {}
search_result = self._search(
'search_drafts',
identity,
params,
es_preference,
record_cls=self.draft_cls,
search_opts=self.config.search_drafts,
# `has_draft` systemfield is not defined here. This is not ideal
# but it helps avoid overriding the method. See how is used in
# https://github.com/inveniosoftware/invenio-rdm-records
extra_filter=Q('term', has_draft=False),
permission_action='read_draft',
**kwargs
).execute()
return self.result_list(
self,
identity,
search_result,
params,
links_tpl=LinksTemplate(self.config.links_search_drafts, context={
"args": params
}),
links_item_tpl=self.links_item_tpl,
)
def search_versions(self, id_, identity, params=None, es_preference=None,
**kwargs):
"""Search for record's versions."""
try:
record = self.record_cls.pid.resolve(id_, registered_only=False)
except NoResultFound:
record = self.draft_cls.pid.resolve(id_, registered_only=False)
self.require_permission(identity, "read", record=record)
# Prepare and execute the search
params = params or {}
search_result = self._search(
'search_versions',
identity,
params,
es_preference,
record_cls=self.record_cls,
search_opts=self.config.search_versions,
extra_filter=Q(
'term', **{'parent.id': str(record.parent.pid.pid_value)}),
permission_action='read',
**kwargs
).execute()
return self.result_list(
self,
identity,
search_result,
params,
links_tpl=LinksTemplate(
self.config.links_search_versions,
context={"id": id_, "args": params}
),
links_item_tpl=self.links_item_tpl,
)
def read_draft(self, id_, identity):
"""Retrieve a draft."""
# Resolve and require permission
draft = self.draft_cls.pid.resolve(id_, registered_only=False)
self.require_permission(identity, "read_draft", record=draft)
# Run components
for component in self.components:
if hasattr(component, 'read_draft'):
component.read_draft(identity, draft=draft)
return self.result_item(
self, identity, draft, links_tpl=self.links_item_tpl)
def read_latest(self, id_, identity):
"""Retrieve latest record."""
# Resolve and require permission
record = self.record_cls.pid.resolve(id_)
# Retrieve the latest version if this record is not the latest
if not record.versions.is_latest:
record = self.record_cls.get_record(record.versions.latest_id)
self.require_permission(identity, "read", record=record)
return self.result_item(
self, identity, record, links_tpl=self.links_item_tpl)
@unit_of_work()
def update_draft(self, id_, identity, data, revision_id=None, uow=None):
"""Replace a draft."""
draft = self.draft_cls.pid.resolve(id_, registered_only=False)
self.check_revision_id(draft, revision_id)
# Permissions
self.require_permission(identity, "update_draft", record=draft)
# Load data with service schema
data, errors = self.schema.load(
data,
context=dict(
identity=identity,
pid=draft.pid,
record=draft,
),
# Saving a draft only saves valid metadata and reports
# (doesn't raise) errors
raise_errors=False
)
# Run components
self.run_components(
'update_draft', identity, record=draft, data=data,
errors=errors, uow=uow
)
# Commit and index
uow.register(RecordCommitOp(draft, indexer=self.indexer))
return self.result_item(
self,
identity,
draft,
links_tpl=self.links_item_tpl,
errors=errors
)
@unit_of_work()
def create(self, identity, data, uow=None):
"""Create a draft for a new record.
It does NOT eagerly create the associated record.
"""
res = self._create(
self.draft_cls,
identity,
data,
raise_errors=False,
uow=uow,
)
uow.register(RecordCommitOp(res._record.parent))
return res
@unit_of_work()
def edit(self, id_, identity, uow=None):
"""Create a new revision or a draft for an existing record.
:param id_: record PID value.
"""
# Draft exists - return it
try:
draft = self.draft_cls.pid.resolve(id_, registered_only=False)
self.require_permission(identity, "edit", record=draft)
return self.result_item(
self, identity, draft, links_tpl=self.links_item_tpl)
except NoResultFound:
pass
# Draft does not exist - so get the main record we want to edit and
# create a draft from it
record = self.record_cls.pid.resolve(id_)
self.require_permission(identity, "edit", record=record)
draft = self.draft_cls.edit(record)
# Run components
self.run_components(
"edit", identity, draft=draft, record=record, uow=uow)
uow.register(RecordCommitOp(draft, indexer=self.indexer))
# Reindex the record to trigger update of computed values in the
# available dumpers of the record.
uow.register(RecordIndexOp(record, indexer=self.indexer))
return self.result_item(
self, identity, draft, links_tpl=self.links_item_tpl)
@unit_of_work()
def publish(self, id_, identity, uow=None):
"""Publish a draft.
Idea:
- Get the draft from the data layer (draft is not passed in)
- Validate it more strictly than when it was originally saved
(drafts can be incomplete but only complete drafts can be turned
into records)
- Create or update associated (published) record with data
"""
self.require_permission(identity, "publish")
# Get the draft
draft = self.draft_cls.pid.resolve(id_, registered_only=False)
# Validate the draft strictly - since a draft can be saved with errors
# we do a strict validation here to make sure only valid drafts can be
# published.
self._validate_draft(identity, draft)
# Create the record from the draft
latest_id = draft.versions.latest_id
record = self.record_cls.publish(draft)
# Run components
self.run_components(
'publish', identity, draft=draft, record=record, uow=uow)
# Commit and index
uow.register(RecordCommitOp(record, indexer=self.indexer))
uow.register(RecordDeleteOp(draft, force=False, indexer=self.indexer))
if latest_id:
self._reindex_latest(latest_id, uow=uow)
return self.result_item(
self, identity, record, links_tpl=self.links_item_tpl)
@unit_of_work()
def new_version(self, id_, identity, uow=None):
"""Create a new version of a record."""
# Get the record - i.e. you can only create a new version if
# at least one published record already exists.
record = self.record_cls.pid.resolve(id_)
# Check permissions
self.require_permission(identity, "new_version", record=record)
# Draft for new version already exists? if so return it
if record.versions.next_draft_id:
next_draft = self.draft_cls.get_record(
record.versions.next_draft_id)
return self.result_item(
self, identity, next_draft, links_tpl=self.links_item_tpl)
# Draft for new version does not exist, so create it
next_draft = self.draft_cls.new_version(record)
# Get the latest published record if it's not the current one.
if not record.versions.is_latest:
record = self.record_cls.get_record(record.versions.latest_id)
# Run components
self.run_components(
'new_version', identity, draft=next_draft, record=record, uow=uow)
# Commit and index
uow.register(RecordCommitOp(next_draft, indexer=self.indexer))
self._reindex_latest(
next_draft.versions.latest_id, record=record, uow=uow)
return self.result_item(
self, identity, next_draft, links_tpl=self.links_item_tpl)
@unit_of_work()
def delete_draft(self, id_, identity, revision_id=None, uow=None):
"""Delete a record from database and search indexes."""
draft = self.draft_cls.pid.resolve(id_, registered_only=False)
latest_id = draft.versions.latest_id
self.check_revision_id(draft, revision_id)
# Permissions
self.require_permission(identity, "delete_draft", record=draft)
# Get published record if exists
try:
record = self.record_cls.get_record(draft.id)
except NoResultFound:
record = None
# We soft-delete a draft when a published record exists, in order to
# keep the version_id counter around for optimistic concurrency
# control (both for ES indexing and for REST API clients)
force = False if record else True
# Run components
self.run_components(
'delete_draft', identity, draft=draft, record=record,
force=force, uow=uow
)
# Note, the parent record deletion logic is implemented in the
# ParentField and will automatically take care of deleting the parent
# record in case this is the only draft that exists for the parent.
# We refresh the index because users are usually redirected to a
# search result immediately after, and we don't want the users to see
# their just deleted draft.
uow.register(RecordDeleteOp(
draft, indexer=self.indexer, force=force, index_refresh=True))
if force:
# Case 1: We deleted a new draft (without a published record) or a
# new version draft (without a published record).
# In this case, we reindex the latest published record/draft
self._reindex_latest(latest_id, refresh=True, uow=uow)
else:
# Case 2: We deleted a draft for a published record.
# In this case we reindex just the published record to trigger an
# update of computed values.
uow.register(RecordIndexOp(
record, indexer=self.indexer, index_refresh=True))
return True
@unit_of_work()
def import_files(self, id_, identity, uow=None):
"""Import files from previous record version."""
if self.draft_files is None:
raise RuntimeError("Files support is not enabled.")
# Read draft
draft = self.draft_cls.pid.resolve(id_, registered_only=False)
self.require_permission(identity, "draft_create_files", record=draft)
# Retrieve latest record
record = self.record_cls.get_record(draft.versions.latest_id)
self.require_permission(identity, "read_files", record=record)
# Run components
self.run_components(
'import_files', identity, draft=draft, record=record, uow=uow)
# Commit and index
uow.register(RecordCommitOp(draft, indexer=self.indexer))
return self.draft_files.file_result_list(
self.draft_files,
identity,
results=draft.files.values(),
record=draft,
links_tpl=self.draft_files.file_links_list_tpl(id_),
links_item_tpl=self.draft_files.file_links_item_tpl(id_),
)
def rebuild_index(self, identity):
"""Reindex all records and drafts.
Note: Skips (soft) deleted records and drafts.
"""
ret_val = super().rebuild_index(identity)
for draft_meta in self.draft_cls.model_cls.query.all():
draft = self.draft_cls(draft_meta.data, model=draft_meta)
if not draft.is_deleted:
self.indexer.index(draft)
return ret_val
def validate_draft(self, identity, id_):
"""Validate a draft."""
draft = self.draft_cls.pid.resolve(id_, registered_only=False)
self._validate_draft(identity, draft)
def _validate_draft(self, identity, draft):
"""Validate a draft.
This method is internal because it works with a data access layer
draft, and thus should not be called from outside the service.
"""
# Convert to draft into service layer draft result item (a record
# projection for the given identity). This way we can load and validate
# the data with the service schema.
draft_item = self.result_item(self, identity, draft)
# Validate the data - will raise ValidationError if not valid.
self.schema.load(
data=draft_item.data,
context=dict(
identity=identity,
pid=draft.pid,
record=draft,
),
raise_errors=True # this is the default, but might as well be
# explicit
)
@unit_of_work()
def _reindex_latest(self, latest_id, record=None, draft=None,
refresh=False, uow=None):
"""Reindex the latest published record and draft.
This triggers an update of computed values in the index, such as
"is_latest".
This method is internal because it works with a data access layer
record/draft, and thus should not be called from outside the service.
"""
# We only have a draft, no latest to index
if not latest_id:
return
# Note, the record may not be the latest published record, and we only
# want to index the latest published.
if record is None or latest_id != record.id:
record = self.record_cls.get_record(latest_id)
uow.register(
RecordIndexOp(record, indexer=self.indexer, index_refresh=refresh))
# Note, a draft may or may not exist for a published record (depending
# on if it's being edited).
try:
draft = self.draft_cls.get_record(latest_id)
uow.register(RecordIndexOp(
draft, indexer=self.indexer, index_refresh=refresh))
except NoResultFound:
pass
def _get_record_and_parent_by_id(self, id_):
"""Resolve the record and its parent, by the given ID.
If the ID belongs to a parent record, no child record will be
resolved.
"""
record = self.record_cls.pid.resolve(id_, registered_only=False)
parent = record.parent
return record, parent
def _get_draft_and_parent_by_id(self, id_):
"""Resolve the draft and its parent, by the given ID."""
draft = self.draft_cls.pid.resolve(id_, registered_only=False)
parent = draft.parent
return draft, parent
@unit_of_work()
def _index_related_records(self, record, parent, uow=None):
"""Index all records that are related to the specified ones."""
siblings = self.record_cls.get_records_by_parent(
parent or record.parent
)
# TODO only index the current record immediately;
# all siblings should be sent to a high-priority celery task
# instead (requires bulk indexing to work)
for sibling in siblings:
uow.register(RecordIndexOp(sibling, indexer=self.indexer))
@unit_of_work()
def cleanup_drafts(self, timedelta, uow=None):
"""Hard delete of soft deleted drafts.
:param timedelta: time that must have passed since
the last update of the draft for it to be hard deleted.
"""
self.draft_cls.cleanup_drafts(timedelta)
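Note on the pattern above: most of these methods do not index or delete eagerly; they register operations such as RecordIndexOp and RecordDeleteOp on the unit of work injected by the @unit_of_work() decorator, and everything is committed together once the business logic has succeeded. A minimal, self-contained sketch of that idea follows; the names (SimpleUnitOfWork) are hypothetical and are not Invenio's actual API.

# Hypothetical, simplified unit-of-work sketch (not Invenio code).
from dataclasses import dataclass, field
from typing import Callable, List


@dataclass
class SimpleUnitOfWork:
    """Collect operations and run them together at commit time."""
    _ops: List[Callable[[], None]] = field(default_factory=list)

    def register(self, op: Callable[[], None]) -> None:
        # Operations are only recorded here, nothing runs yet.
        self._ops.append(op)

    def commit(self) -> None:
        # Run all registered operations in order, then reset.
        for op in self._ops:
            op()
        self._ops.clear()


uow = SimpleUnitOfWork()
uow.register(lambda: print("index draft"))
uow.register(lambda: print("refresh index"))
uow.commit()  # both operations run only after the surrounding logic succeeded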
|
PYSEC-2021-837
|
django/utils/translation/trans_real.py
|
@@ -30,6 +30,11 @@
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
+# Maximum number of characters that will be parsed from the Accept-Language
+# header to prevent possible denial of service or memory exhaustion attacks.
+# About 10x longer than the longest value shown on MDN’s Accept-Language page.
+ACCEPT_LANGUAGE_HEADER_MAX_LENGTH = 500
+
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = _lazy_re_compile(r'''
@@ -556,7 +561,7 @@ def get_language_from_request(request, check_path=False):
@functools.lru_cache(maxsize=1000)
-def parse_accept_lang_header(lang_string):
+def _parse_accept_lang_header(lang_string):
"""
Parse the lang_string, which is the body of an HTTP Accept-Language
header, and return a tuple of (lang, q-value), ordered by 'q' values.
@@ -578,3 +583,28 @@ def parse_accept_lang_header(lang_string):
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return tuple(result)
+
+
+def parse_accept_lang_header(lang_string):
+ """
+ Parse the value of the Accept-Language header up to a maximum length.
+
+ The value of the header is truncated to a maximum length to avoid potential
+ denial of service and memory exhaustion attacks. Excessive memory could be
+ used if the raw value is very large as it would be cached due to the use of
+ functools.lru_cache() to avoid repetitive parsing of common header values.
+ """
+ # If the header value doesn't exceed the maximum allowed length, parse it.
+ if len(lang_string) <= ACCEPT_LANGUAGE_HEADER_MAX_LENGTH:
+ return _parse_accept_lang_header(lang_string)
+
+ # If there is at least one comma in the value, parse up to the last comma
+ # before the max length, skipping any truncated parts at the end of the
+ # header value.
+ index = lang_string.rfind(",", 0, ACCEPT_LANGUAGE_HEADER_MAX_LENGTH)
+ if index > 0:
+ return _parse_accept_lang_header(lang_string[:index])
+
+ # Don't attempt to parse if there is only one language-range value which is
+ # longer than the maximum allowed length and so truncated.
+ return ()
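The wrapper introduced by this patch caps how much of the Accept-Language header is parsed (and therefore cached), and it truncates at the last comma before the limit so that only complete language-range entries are kept. A small standalone sketch of the same idea, with hypothetical names that are not Django's internals:

# Standalone sketch of the truncate-at-last-comma defense from the patch above.
# MAX_LEN and _parse are stand-ins, not Django's actual constants or functions.
from functools import lru_cache

MAX_LEN = 500


@lru_cache(maxsize=1000)
def _parse(header: str) -> tuple:
    # Placeholder for the real (expensive, cached) parser.
    return tuple(part.strip() for part in header.split(",") if part.strip())


def parse_bounded(header: str) -> tuple:
    if len(header) <= MAX_LEN:
        return _parse(header)
    # Keep only complete entries: cut at the last comma before the limit.
    cut = header.rfind(",", 0, MAX_LEN)
    if cut > 0:
        return _parse(header[:cut])
    # A single oversized language-range value: refuse to parse it.
    return ()


print(parse_bounded("en; q=1.0, fr; q=0.5"))
print(len(parse_bounded(", ".join(["en; q=1.0"] * 500))))  # bounded result, small cache keys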
|
"""Translation helper functions."""
import functools
import gettext as gettext_module
import os
import re
import sys
import warnings
from asgiref.local import Local
from django.apps import apps
from django.conf import settings
from django.conf.locale import LANG_INFO
from django.core.exceptions import AppRegistryNotReady
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils.regex_helper import _lazy_re_compile
from django.utils.safestring import SafeData, mark_safe
from . import to_language, to_locale
# Translations are cached in a dictionary for every language.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = Local()
# The default translation is based on the settings file.
_default = None
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = _lazy_re_compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*"
(?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
language_code_re = _lazy_re_compile(
r'^[a-z]{1,8}(?:-[a-z0-9]{1,8})*(?:@[a-z0-9]{1,20})?$',
re.IGNORECASE
)
language_code_prefix_re = _lazy_re_compile(r'^/(\w+([@-]\w+)?)(/|$)')
@receiver(setting_changed)
def reset_cache(**kwargs):
"""
Reset global state when LANGUAGES setting has been changed, as some
languages should no longer be accepted.
"""
if kwargs['setting'] in ('LANGUAGES', 'LANGUAGE_CODE'):
check_for_language.cache_clear()
get_languages.cache_clear()
get_supported_language_variant.cache_clear()
class TranslationCatalog:
"""
Simulate a dict for DjangoTranslation._catalog so that multiple catalogs
with different plural equations are kept separate.
"""
def __init__(self, trans=None):
self._catalogs = [trans._catalog.copy()] if trans else [{}]
self._plurals = [trans.plural] if trans else [lambda n: int(n != 1)]
def __getitem__(self, key):
for cat in self._catalogs:
try:
return cat[key]
except KeyError:
pass
raise KeyError(key)
def __setitem__(self, key, value):
self._catalogs[0][key] = value
def __contains__(self, key):
return any(key in cat for cat in self._catalogs)
def items(self):
for cat in self._catalogs:
yield from cat.items()
def keys(self):
for cat in self._catalogs:
yield from cat.keys()
def update(self, trans):
# Merge if plural function is the same, else prepend.
for cat, plural in zip(self._catalogs, self._plurals):
if trans.plural.__code__ == plural.__code__:
cat.update(trans._catalog)
break
else:
self._catalogs.insert(0, trans._catalog.copy())
self._plurals.insert(0, trans.plural)
def get(self, key, default=None):
missing = object()
for cat in self._catalogs:
result = cat.get(key, missing)
if result is not missing:
return result
return default
def plural(self, msgid, num):
for cat, plural in zip(self._catalogs, self._plurals):
tmsg = cat.get((msgid, plural(num)))
if tmsg is not None:
return tmsg
raise KeyError
class DjangoTranslation(gettext_module.GNUTranslations):
"""
Set up the GNUTranslations context with regard to output charset.
This translation object will be constructed out of multiple GNUTranslations
objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
domain = 'django'
def __init__(self, language, domain=None, localedirs=None):
"""Create a GNUTranslations() using many locale directories"""
gettext_module.GNUTranslations.__init__(self)
if domain is not None:
self.domain = domain
self.__language = language
self.__to_language = to_language(language)
self.__locale = to_locale(language)
self._catalog = None
# If a language doesn't have a catalog, use the Germanic default for
# pluralization: anything except one is pluralized.
self.plural = lambda n: int(n != 1)
if self.domain == 'django':
if localedirs is not None:
# A module-level cache is used for caching 'django' translations
warnings.warn("localedirs is ignored when domain is 'django'.", RuntimeWarning)
localedirs = None
self._init_translation_catalog()
if localedirs:
for localedir in localedirs:
translation = self._new_gnu_trans(localedir)
self.merge(translation)
else:
self._add_installed_apps_translations()
self._add_local_translations()
if self.__language == settings.LANGUAGE_CODE and self.domain == 'django' and self._catalog is None:
# default lang should have at least one translation file available.
raise OSError('No translation files found for default language %s.' % settings.LANGUAGE_CODE)
self._add_fallback(localedirs)
if self._catalog is None:
# No catalogs found for this language, set an empty catalog.
self._catalog = TranslationCatalog()
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def _new_gnu_trans(self, localedir, use_null_fallback=True):
"""
Return a mergeable gettext.GNUTranslations instance.
A convenience wrapper. By default gettext uses 'fallback=False'.
Using param `use_null_fallback` to avoid confusion with any other
references to 'fallback'.
"""
return gettext_module.translation(
domain=self.domain,
localedir=localedir,
languages=[self.__locale],
fallback=use_null_fallback,
)
def _init_translation_catalog(self):
"""Create a base catalog using global django translations."""
settingsfile = sys.modules[settings.__module__].__file__
localedir = os.path.join(os.path.dirname(settingsfile), 'locale')
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_installed_apps_translations(self):
"""Merge translations from each installed app."""
try:
app_configs = reversed(list(apps.get_app_configs()))
except AppRegistryNotReady:
raise AppRegistryNotReady(
"The translation infrastructure cannot be initialized before the "
"apps registry is ready. Check that you don't make non-lazy "
"gettext calls at import time.")
for app_config in app_configs:
localedir = os.path.join(app_config.path, 'locale')
if os.path.exists(localedir):
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_local_translations(self):
"""Merge translations defined in LOCALE_PATHS."""
for localedir in reversed(settings.LOCALE_PATHS):
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_fallback(self, localedirs=None):
"""Set the GNUTranslations() fallback with the default language."""
# Don't set a fallback for the default language or any English variant
# (as it's empty, so it'll ALWAYS fall back to the default language)
if self.__language == settings.LANGUAGE_CODE or self.__language.startswith('en'):
return
if self.domain == 'django':
# Get from cache
default_translation = translation(settings.LANGUAGE_CODE)
else:
default_translation = DjangoTranslation(
settings.LANGUAGE_CODE, domain=self.domain, localedirs=localedirs
)
self.add_fallback(default_translation)
def merge(self, other):
"""Merge another translation into this catalog."""
if not getattr(other, '_catalog', None):
return # NullTranslations() has no _catalog
if self._catalog is None:
# Take plural and _info from first catalog found (generally Django's).
self.plural = other.plural
self._info = other._info.copy()
self._catalog = TranslationCatalog(other)
else:
self._catalog.update(other)
if other._fallback:
self.add_fallback(other._fallback)
def language(self):
"""Return the translation language."""
return self.__language
def to_language(self):
"""Return the translation language name."""
return self.__to_language
def ngettext(self, msgid1, msgid2, n):
try:
tmsg = self._catalog.plural(msgid1, n)
except KeyError:
if self._fallback:
return self._fallback.ngettext(msgid1, msgid2, n)
if n == 1:
tmsg = msgid1
else:
tmsg = msgid2
return tmsg
def translation(language):
"""
Return a translation object in the default 'django' domain.
"""
global _translations
if language not in _translations:
_translations[language] = DjangoTranslation(language)
return _translations[language]
def activate(language):
"""
Fetch the translation object for a given language and install it as the
current translation object for the current thread.
"""
if not language:
return
_active.value = translation(language)
def deactivate():
"""
Uninstall the active translation object so that further _() calls resolve
to the default translation object.
"""
if hasattr(_active, "value"):
del _active.value
def deactivate_all():
"""
Make the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active.value = gettext_module.NullTranslations()
_active.value.to_language = lambda *args: None
def get_language():
"""Return the currently selected language."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t.to_language()
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Return selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
lang = get_language()
if lang is None:
return False
else:
base_lang = get_language().split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Return the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default
t = getattr(_active, "value", None)
if t is not None:
return t
if _default is None:
_default = translation(settings.LANGUAGE_CODE)
return _default
def gettext(message):
"""
Translate the 'message' string. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
eol_message = message.replace('\r\n', '\n').replace('\r', '\n')
if eol_message:
_default = _default or translation(settings.LANGUAGE_CODE)
translation_object = getattr(_active, "value", _default)
result = translation_object.gettext(eol_message)
else:
# Return an empty value of the corresponding type if an empty message
# is given, instead of metadata, which is the default gettext behavior.
result = type(message)('')
if isinstance(message, SafeData):
return mark_safe(result)
return result
def pgettext(context, message):
msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
result = gettext(msg_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = message
elif isinstance(message, SafeData):
result = mark_safe(result)
return result
def gettext_noop(message):
"""
Mark strings for translation but don't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default
t = getattr(_active, "value", None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Return a string of the translation of either the singular or plural,
based on the number.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
def npgettext(context, singular, plural, number):
msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
"%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
number)
result = ngettext(*msgs_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = ngettext(singular, plural, number)
return result
def all_locale_paths():
"""
Return a list of paths to user-provided language files.
"""
globalpath = os.path.join(
os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
app_paths = []
for app_config in apps.get_app_configs():
locale_path = os.path.join(app_config.path, 'locale')
if os.path.exists(locale_path):
app_paths.append(locale_path)
return [globalpath, *settings.LOCALE_PATHS, *app_paths]
@functools.lru_cache(maxsize=1000)
def check_for_language(lang_code):
"""
Check whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available.
lru_cache should have a maxsize to prevent memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
# First, a quick check to make sure lang_code is well-formed (#21458)
if lang_code is None or not language_code_re.search(lang_code):
return False
return any(
gettext_module.find('django', path, [to_locale(lang_code)]) is not None
for path in all_locale_paths()
)
@functools.lru_cache()
def get_languages():
"""
Cache of settings.LANGUAGES in a dictionary for easy lookups by key.
"""
return dict(settings.LANGUAGES)
@functools.lru_cache(maxsize=1000)
def get_supported_language_variant(lang_code, strict=False):
"""
Return the language code that's listed in supported languages, possibly
selecting a more generic variant. Raise LookupError if nothing is found.
If `strict` is False (the default), look for a country-specific variant
when neither the language code nor its generic variant is found.
lru_cache should have a maxsize to prevent memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
if lang_code:
# If 'fr-ca' is not supported, try special fallback or language-only 'fr'.
possible_lang_codes = [lang_code]
try:
possible_lang_codes.extend(LANG_INFO[lang_code]['fallback'])
except KeyError:
pass
generic_lang_code = lang_code.split('-')[0]
possible_lang_codes.append(generic_lang_code)
supported_lang_codes = get_languages()
for code in possible_lang_codes:
if code in supported_lang_codes and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in supported_lang_codes:
if supported_code.startswith(generic_lang_code + '-'):
return supported_code
raise LookupError(lang_code)
def get_language_from_path(path, strict=False):
"""
Return the language code if there's a valid language code found in `path`.
If `strict` is False (the default), look for a country-specific variant
when neither the language code nor its generic variant is found.
"""
regex_match = language_code_prefix_re.match(path)
if not regex_match:
return None
lang_code = regex_match[1]
try:
return get_supported_language_variant(lang_code, strict=strict)
except LookupError:
return None
def get_language_from_request(request, check_path=False):
"""
Analyze the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
If check_path is True, the URL path prefix will be checked for a language
code, otherwise this is skipped for backwards compatibility.
"""
if check_path:
lang_code = get_language_from_path(request.path_info)
if lang_code is not None:
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
if lang_code is not None and lang_code in get_languages() and check_for_language(lang_code):
return lang_code
try:
return get_supported_language_variant(lang_code)
except LookupError:
pass
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
if not language_code_re.search(accept_lang):
continue
try:
return get_supported_language_variant(accept_lang)
except LookupError:
continue
try:
return get_supported_language_variant(settings.LANGUAGE_CODE)
except LookupError:
return settings.LANGUAGE_CODE
@functools.lru_cache(maxsize=1000)
def parse_accept_lang_header(lang_string):
"""
Parse the lang_string, which is the body of an HTTP Accept-Language
header, and return a tuple of (lang, q-value), ordered by 'q' values.
Return an empty tuple if there are any format errors in lang_string.
"""
result = []
pieces = accept_language_re.split(lang_string.lower())
if pieces[-1]:
return ()
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i:i + 3]
if first:
return ()
if priority:
priority = float(priority)
else:
priority = 1.0
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return tuple(result)
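Why the unpatched parse_accept_lang_header above is the PYSEC-2021-837 vector: lru_cache(maxsize=1000) bounds the number of cached entries, but each cache key is the raw, attacker-controlled header string, so a modest number of very large headers can pin a lot of memory, and each new value must first be run through the regex over its full length. A small illustration under those assumptions, with hypothetical names rather than Django code:

# Illustration of how an lru_cache keyed on raw request headers can pin
# attacker-controlled data in memory. Hypothetical sketch, not Django code.
from functools import lru_cache
import sys


@lru_cache(maxsize=1000)
def cached_parse(header: str) -> int:
    # Stand-in for an expensive regex-based parse.
    return len(header.split(","))


# 1000 distinct ~1 MiB headers would keep roughly 1 GiB of strings alive
# in the cache; the demo loop below only sends a handful.
sample = "x" * (1024 * 1024)
for i in range(5):
    cached_parse(f"{i}-{sample}")

print(cached_parse.cache_info())      # entries accumulate up to maxsize
print(sys.getsizeof(f"0-{sample}"))   # each cached key is about 1 MiB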
|
GHSA-q2jf-h9jm-m7p4
|
tests/i18n/tests.py
|
@@ -1352,6 +1352,14 @@ def test_parse_spec_http_header(self):
('de;q=0.', [('de', 0.0)]),
('en; q=1,', [('en', 1.0)]),
('en; q=1.0, * ; q=0.5', [('en', 1.0), ('*', 0.5)]),
+ (
+ 'en' + '-x' * 20,
+ [('en-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x', 1.0)],
+ ),
+ (
+ ', '.join(['en; q=1.0'] * 20),
+ [('en', 1.0)] * 20,
+ ),
# Bad headers
('en-gb;q=1.0000', []),
('en;q=0.1234', []),
@@ -1367,6 +1375,10 @@ def test_parse_spec_http_header(self):
('12-345', []),
('', []),
('en;q=1e0', []),
+ # Invalid as language-range value too long.
+ ('xxxxxxxx' + '-xxxxxxxx' * 500, []),
+ # Header value too long, only parse up to limit.
+ (', '.join(['en; q=1.0'] * 500), [('en', 1.0)] * 45),
]
for value, expected in tests:
with self.subTest(value=value):
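A quick sanity check on the last expectation added above: each joined entry 'en; q=1.0' is 9 characters plus a 2-character ', ' separator, so the comma after the k-th entry sits at index 11k - 2. With ACCEPT_LANGUAGE_HEADER_MAX_LENGTH = 500, the last comma before the limit is at index 11 * 45 - 2 = 493, so truncation keeps exactly 45 complete entries, matching [('en', 1.0)] * 45.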
|
import datetime
import decimal
import gettext as gettext_module
import os
import pickle
import re
import tempfile
from contextlib import contextmanager
from importlib import import_module
from pathlib import Path
from unittest import mock
from asgiref.local import Local
from django import forms
from django.apps import AppConfig
from django.conf import settings
from django.conf.locale import LANG_INFO
from django.conf.urls.i18n import i18n_patterns
from django.template import Context, Template
from django.test import (
RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from django.utils import translation
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.formats import (
date_format, get_format, get_format_modules, iter_format_modules, localize,
localize_input, reset_format_cache, sanitize_separators, time_format,
)
from django.utils.numberformat import format as nformat
from django.utils.safestring import SafeString, mark_safe
from django.utils.translation import (
LANGUAGE_SESSION_KEY, activate, check_for_language, deactivate,
get_language, get_language_bidi, get_language_from_request,
get_language_info, gettext, gettext_lazy, ngettext, ngettext_lazy,
npgettext, npgettext_lazy, pgettext, round_away_from_one, to_language,
to_locale, trans_null, trans_real, ugettext, ugettext_lazy, ugettext_noop,
ungettext, ungettext_lazy,
)
from django.utils.translation.reloader import (
translation_file_changed, watch_for_translation_changes,
)
from .forms import CompanyForm, I18nForm, SelectDateForm
from .models import Company, TestModel
here = os.path.dirname(os.path.abspath(__file__))
extended_locale_paths = settings.LOCALE_PATHS + [
os.path.join(here, 'other', 'locale'),
]
class AppModuleStub:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
@contextmanager
def patch_formats(lang, **settings):
from django.utils.formats import _format_cache
# Populate _format_cache with temporary values
for key, value in settings.items():
_format_cache[(key, lang)] = value
try:
yield
finally:
reset_format_cache()
class TranslationTests(SimpleTestCase):
@translation.override('de')
def test_legacy_aliases(self):
"""
Pre-Django 2.0 aliases with u prefix are still available.
"""
msg = (
'django.utils.translation.ugettext_noop() is deprecated in favor '
'of django.utils.translation.gettext_noop().'
)
with self.assertWarnsMessage(RemovedInDjango40Warning, msg):
self.assertEqual(ugettext_noop("Image"), "Image")
msg = (
'django.utils.translation.ugettext() is deprecated in favor of '
'django.utils.translation.gettext().'
)
with self.assertWarnsMessage(RemovedInDjango40Warning, msg):
self.assertEqual(ugettext("Image"), "Bild")
msg = (
'django.utils.translation.ugettext_lazy() is deprecated in favor '
'of django.utils.translation.gettext_lazy().'
)
with self.assertWarnsMessage(RemovedInDjango40Warning, msg):
self.assertEqual(ugettext_lazy("Image"), gettext_lazy("Image"))
msg = (
'django.utils.translation.ungettext() is deprecated in favor of '
'django.utils.translation.ngettext().'
)
with self.assertWarnsMessage(RemovedInDjango40Warning, msg):
self.assertEqual(ungettext("%d year", "%d years", 0) % 0, "0 Jahre")
msg = (
'django.utils.translation.ungettext_lazy() is deprecated in favor '
'of django.utils.translation.ngettext_lazy().'
)
with self.assertWarnsMessage(RemovedInDjango40Warning, msg):
self.assertEqual(
ungettext_lazy("%d year", "%d years", 0) % 0,
ngettext_lazy("%d year", "%d years", 0) % 0,
)
@translation.override('fr')
def test_plural(self):
"""
Test plurals with ngettext. French differs from English in that 0 is singular.
"""
self.assertEqual(ngettext("%d year", "%d years", 0) % 0, "0 année")
self.assertEqual(ngettext("%d year", "%d years", 2) % 2, "2 années")
self.assertEqual(ngettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}, "0 octet")
self.assertEqual(ngettext("%(size)d byte", "%(size)d bytes", 2) % {'size': 2}, "2 octets")
def test_plural_null(self):
g = trans_null.ngettext
self.assertEqual(g('%d year', '%d years', 0) % 0, '0 years')
self.assertEqual(g('%d year', '%d years', 1) % 1, '1 year')
self.assertEqual(g('%d year', '%d years', 2) % 2, '2 years')
@override_settings(LOCALE_PATHS=extended_locale_paths)
@translation.override('fr')
def test_multiple_plurals_per_language(self):
"""
Normally, French has 2 plurals. As other/locale/fr/LC_MESSAGES/django.po
has a different plural equation with 3 plurals, this tests if those
plural forms are honored.
"""
self.assertEqual(ngettext("%d singular", "%d plural", 0) % 0, "0 pluriel1")
self.assertEqual(ngettext("%d singular", "%d plural", 1) % 1, "1 singulier")
self.assertEqual(ngettext("%d singular", "%d plural", 2) % 2, "2 pluriel2")
french = trans_real.catalog()
# Internal _catalog can query subcatalogs (from different po files).
self.assertEqual(french._catalog[('%d singular', 0)], '%d singulier')
self.assertEqual(french._catalog[('%d hour', 0)], '%d heure')
def test_override(self):
activate('de')
try:
with translation.override('pl'):
self.assertEqual(get_language(), 'pl')
self.assertEqual(get_language(), 'de')
with translation.override(None):
self.assertIsNone(get_language())
with translation.override('pl'):
pass
self.assertIsNone(get_language())
self.assertEqual(get_language(), 'de')
finally:
deactivate()
def test_override_decorator(self):
@translation.override('pl')
def func_pl():
self.assertEqual(get_language(), 'pl')
@translation.override(None)
def func_none():
self.assertIsNone(get_language())
try:
activate('de')
func_pl()
self.assertEqual(get_language(), 'de')
func_none()
self.assertEqual(get_language(), 'de')
finally:
deactivate()
def test_override_exit(self):
"""
The language restored is the one used when the function was
called, not the one used when the decorator was initialized (#23381).
"""
activate('fr')
@translation.override('pl')
def func_pl():
pass
deactivate()
try:
activate('en')
func_pl()
self.assertEqual(get_language(), 'en')
finally:
deactivate()
def test_lazy_objects(self):
"""
Format string interpolation should work with *_lazy objects.
"""
s = gettext_lazy('Add %(name)s')
d = {'name': 'Ringo'}
self.assertEqual('Add Ringo', s % d)
with translation.override('de', deactivate=True):
self.assertEqual('Ringo hinzuf\xfcgen', s % d)
with translation.override('pl'):
self.assertEqual('Dodaj Ringo', s % d)
# It should be possible to compare *_lazy objects.
s1 = gettext_lazy('Add %(name)s')
self.assertEqual(s, s1)
s2 = gettext_lazy('Add %(name)s')
s3 = gettext_lazy('Add %(name)s')
self.assertEqual(s2, s3)
self.assertEqual(s, s2)
s4 = gettext_lazy('Some other string')
self.assertNotEqual(s, s4)
def test_lazy_pickle(self):
s1 = gettext_lazy("test")
self.assertEqual(str(s1), "test")
s2 = pickle.loads(pickle.dumps(s1))
self.assertEqual(str(s2), "test")
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_ngettext_lazy(self):
simple_with_format = ngettext_lazy('%d good result', '%d good results')
simple_context_with_format = npgettext_lazy('Exclamation', '%d good result', '%d good results')
simple_without_format = ngettext_lazy('good result', 'good results')
with translation.override('de'):
self.assertEqual(simple_with_format % 1, '1 gutes Resultat')
self.assertEqual(simple_with_format % 4, '4 guten Resultate')
self.assertEqual(simple_context_with_format % 1, '1 gutes Resultat!')
self.assertEqual(simple_context_with_format % 4, '4 guten Resultate!')
self.assertEqual(simple_without_format % 1, 'gutes Resultat')
self.assertEqual(simple_without_format % 4, 'guten Resultate')
complex_nonlazy = ngettext_lazy('Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 4)
complex_deferred = ngettext_lazy(
'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 'num'
)
complex_context_nonlazy = npgettext_lazy(
'Greeting', 'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 4
)
complex_context_deferred = npgettext_lazy(
'Greeting', 'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 'num'
)
with translation.override('de'):
self.assertEqual(complex_nonlazy % {'num': 4, 'name': 'Jim'}, 'Hallo Jim, 4 guten Resultate')
self.assertEqual(complex_deferred % {'name': 'Jim', 'num': 1}, 'Hallo Jim, 1 gutes Resultat')
self.assertEqual(complex_deferred % {'name': 'Jim', 'num': 5}, 'Hallo Jim, 5 guten Resultate')
with self.assertRaisesMessage(KeyError, 'Your dictionary lacks key'):
complex_deferred % {'name': 'Jim'}
self.assertEqual(complex_context_nonlazy % {'num': 4, 'name': 'Jim'}, 'Willkommen Jim, 4 guten Resultate')
self.assertEqual(complex_context_deferred % {'name': 'Jim', 'num': 1}, 'Willkommen Jim, 1 gutes Resultat')
self.assertEqual(complex_context_deferred % {'name': 'Jim', 'num': 5}, 'Willkommen Jim, 5 guten Resultate')
with self.assertRaisesMessage(KeyError, 'Your dictionary lacks key'):
complex_context_deferred % {'name': 'Jim'}
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_ngettext_lazy_format_style(self):
simple_with_format = ngettext_lazy('{} good result', '{} good results')
simple_context_with_format = npgettext_lazy('Exclamation', '{} good result', '{} good results')
with translation.override('de'):
self.assertEqual(simple_with_format.format(1), '1 gutes Resultat')
self.assertEqual(simple_with_format.format(4), '4 guten Resultate')
self.assertEqual(simple_context_with_format.format(1), '1 gutes Resultat!')
self.assertEqual(simple_context_with_format.format(4), '4 guten Resultate!')
complex_nonlazy = ngettext_lazy('Hi {name}, {num} good result', 'Hi {name}, {num} good results', 4)
complex_deferred = ngettext_lazy(
'Hi {name}, {num} good result', 'Hi {name}, {num} good results', 'num'
)
complex_context_nonlazy = npgettext_lazy(
'Greeting', 'Hi {name}, {num} good result', 'Hi {name}, {num} good results', 4
)
complex_context_deferred = npgettext_lazy(
'Greeting', 'Hi {name}, {num} good result', 'Hi {name}, {num} good results', 'num'
)
with translation.override('de'):
self.assertEqual(complex_nonlazy.format(num=4, name='Jim'), 'Hallo Jim, 4 guten Resultate')
self.assertEqual(complex_deferred.format(name='Jim', num=1), 'Hallo Jim, 1 gutes Resultat')
self.assertEqual(complex_deferred.format(name='Jim', num=5), 'Hallo Jim, 5 guten Resultate')
with self.assertRaisesMessage(KeyError, 'Your dictionary lacks key'):
complex_deferred.format(name='Jim')
self.assertEqual(complex_context_nonlazy.format(num=4, name='Jim'), 'Willkommen Jim, 4 guten Resultate')
self.assertEqual(complex_context_deferred.format(name='Jim', num=1), 'Willkommen Jim, 1 gutes Resultat')
self.assertEqual(complex_context_deferred.format(name='Jim', num=5), 'Willkommen Jim, 5 guten Resultate')
with self.assertRaisesMessage(KeyError, 'Your dictionary lacks key'):
complex_context_deferred.format(name='Jim')
def test_ngettext_lazy_bool(self):
self.assertTrue(ngettext_lazy('%d good result', '%d good results'))
self.assertFalse(ngettext_lazy('', ''))
def test_ngettext_lazy_pickle(self):
s1 = ngettext_lazy('%d good result', '%d good results')
self.assertEqual(s1 % 1, '1 good result')
self.assertEqual(s1 % 8, '8 good results')
s2 = pickle.loads(pickle.dumps(s1))
self.assertEqual(s2 % 1, '1 good result')
self.assertEqual(s2 % 8, '8 good results')
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_pgettext(self):
trans_real._active = Local()
trans_real._translations = {}
with translation.override('de'):
self.assertEqual(pgettext("unexisting", "May"), "May")
self.assertEqual(pgettext("month name", "May"), "Mai")
self.assertEqual(pgettext("verb", "May"), "Kann")
self.assertEqual(npgettext("search", "%d result", "%d results", 4) % 4, "4 Resultate")
def test_empty_value(self):
"""Empty value must stay empty after being translated (#23196)."""
with translation.override('de'):
self.assertEqual('', gettext(''))
s = mark_safe('')
self.assertEqual(s, gettext(s))
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_safe_status(self):
"""
Translating a string requiring no auto-escaping with gettext or pgettext
shouldn't change the "safe" status.
"""
trans_real._active = Local()
trans_real._translations = {}
s1 = mark_safe('Password')
s2 = mark_safe('May')
with translation.override('de', deactivate=True):
self.assertIs(type(gettext(s1)), SafeString)
self.assertIs(type(pgettext('month name', s2)), SafeString)
self.assertEqual('aPassword', SafeString('a') + s1)
self.assertEqual('Passworda', s1 + SafeString('a'))
self.assertEqual('Passworda', s1 + mark_safe('a'))
self.assertEqual('aPassword', mark_safe('a') + s1)
self.assertEqual('as', mark_safe('a') + mark_safe('s'))
def test_maclines(self):
"""
Translations in files with Mac or DOS line endings will be converted
to Unix line endings in .po catalogs.
"""
ca_translation = trans_real.translation('ca')
ca_translation._catalog['Mac\nEOF\n'] = 'Catalan Mac\nEOF\n'
ca_translation._catalog['Win\nEOF\n'] = 'Catalan Win\nEOF\n'
with translation.override('ca', deactivate=True):
self.assertEqual('Catalan Mac\nEOF\n', gettext('Mac\rEOF\r'))
self.assertEqual('Catalan Win\nEOF\n', gettext('Win\r\nEOF\r\n'))
def test_to_locale(self):
tests = (
('en', 'en'),
('EN', 'en'),
('en-us', 'en_US'),
('EN-US', 'en_US'),
# With > 2 characters after the dash.
('sr-latn', 'sr_Latn'),
('sr-LATN', 'sr_Latn'),
# With private use subtag (x-informal).
('nl-nl-x-informal', 'nl_NL-x-informal'),
('NL-NL-X-INFORMAL', 'nl_NL-x-informal'),
('sr-latn-x-informal', 'sr_Latn-x-informal'),
('SR-LATN-X-INFORMAL', 'sr_Latn-x-informal'),
)
for lang, locale in tests:
with self.subTest(lang=lang):
self.assertEqual(to_locale(lang), locale)
def test_to_language(self):
self.assertEqual(to_language('en_US'), 'en-us')
self.assertEqual(to_language('sr_Lat'), 'sr-lat')
def test_language_bidi(self):
self.assertIs(get_language_bidi(), False)
with translation.override(None):
self.assertIs(get_language_bidi(), False)
def test_language_bidi_null(self):
self.assertIs(trans_null.get_language_bidi(), False)
with override_settings(LANGUAGE_CODE='he'):
self.assertIs(get_language_bidi(), True)
class TranslationLoadingTests(SimpleTestCase):
def setUp(self):
"""Clear translation state."""
self._old_language = get_language()
self._old_translations = trans_real._translations
deactivate()
trans_real._translations = {}
def tearDown(self):
trans_real._translations = self._old_translations
activate(self._old_language)
@override_settings(
USE_I18N=True,
LANGUAGE_CODE='en',
LANGUAGES=[
('en', 'English'),
('en-ca', 'English (Canada)'),
('en-nz', 'English (New Zealand)'),
('en-au', 'English (Australia)'),
],
LOCALE_PATHS=[os.path.join(here, 'loading')],
INSTALLED_APPS=['i18n.loading_app'],
)
def test_translation_loading(self):
"""
"loading_app" does not have translations for all languages provided by
"loading". Catalogs are merged correctly.
"""
tests = [
('en', 'local country person'),
('en_AU', 'aussie'),
('en_NZ', 'kiwi'),
('en_CA', 'canuck'),
]
# Load all relevant translations.
for language, _ in tests:
activate(language)
# Catalogs are merged correctly.
for language, nickname in tests:
with self.subTest(language=language):
activate(language)
self.assertEqual(gettext('local country person'), nickname)
class TranslationThreadSafetyTests(SimpleTestCase):
def setUp(self):
self._old_language = get_language()
self._translations = trans_real._translations
# here we rely on .split() being called inside the _fetch()
# in trans_real.translation()
class sideeffect_str(str):
def split(self, *args, **kwargs):
res = str.split(self, *args, **kwargs)
trans_real._translations['en-YY'] = None
return res
trans_real._translations = {sideeffect_str('en-XX'): None}
def tearDown(self):
trans_real._translations = self._translations
activate(self._old_language)
def test_bug14894_translation_activate_thread_safety(self):
translation_count = len(trans_real._translations)
# May raise RuntimeError if translation.activate() isn't thread-safe.
translation.activate('pl')
# make sure sideeffect_str actually added a new translation
self.assertLess(translation_count, len(trans_real._translations))
@override_settings(USE_L10N=True)
class FormattingTests(SimpleTestCase):
def setUp(self):
super().setUp()
self.n = decimal.Decimal('66666.666')
self.f = 99999.999
self.d = datetime.date(2009, 12, 31)
self.dt = datetime.datetime(2009, 12, 31, 20, 50)
self.t = datetime.time(10, 15, 48)
self.long = 10000
self.ctxt = Context({
'n': self.n,
't': self.t,
'd': self.d,
'dt': self.dt,
'f': self.f,
'l': self.long,
})
def test_all_format_strings(self):
all_locales = LANG_INFO.keys()
some_date = datetime.date(2017, 10, 14)
some_datetime = datetime.datetime(2017, 10, 14, 10, 23)
for locale in all_locales:
with self.subTest(locale=locale), translation.override(locale):
self.assertIn('2017', date_format(some_date)) # Uses DATE_FORMAT by default
self.assertIn('23', time_format(some_datetime)) # Uses TIME_FORMAT by default
self.assertIn('2017', date_format(some_datetime, format=get_format('DATETIME_FORMAT')))
self.assertIn('2017', date_format(some_date, format=get_format('YEAR_MONTH_FORMAT')))
self.assertIn('14', date_format(some_date, format=get_format('MONTH_DAY_FORMAT')))
self.assertIn('2017', date_format(some_date, format=get_format('SHORT_DATE_FORMAT')))
self.assertIn('2017', date_format(some_datetime, format=get_format('SHORT_DATETIME_FORMAT')))
def test_locale_independent(self):
"""
Localization of numbers
"""
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666.66', nformat(self.n, decimal_sep='.', decimal_pos=2, grouping=3, thousand_sep=','))
self.assertEqual('66666A6', nformat(self.n, decimal_sep='A', decimal_pos=1, grouping=1, thousand_sep='B'))
self.assertEqual('66666', nformat(self.n, decimal_sep='X', decimal_pos=0, grouping=1, thousand_sep='Y'))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual(
'66,666.66',
nformat(self.n, decimal_sep='.', decimal_pos=2, grouping=3, thousand_sep=',')
)
self.assertEqual(
'6B6B6B6B6A6',
nformat(self.n, decimal_sep='A', decimal_pos=1, grouping=1, thousand_sep='B')
)
self.assertEqual('-66666.6', nformat(-66666.666, decimal_sep='.', decimal_pos=1))
self.assertEqual('-66666.0', nformat(int('-66666'), decimal_sep='.', decimal_pos=1))
self.assertEqual('10000.0', nformat(self.long, decimal_sep='.', decimal_pos=1))
self.assertEqual(
'10,00,00,000.00',
nformat(100000000.00, decimal_sep='.', decimal_pos=2, grouping=(3, 2, 0), thousand_sep=',')
)
self.assertEqual(
'1,0,00,000,0000.00',
nformat(10000000000.00, decimal_sep='.', decimal_pos=2, grouping=(4, 3, 2, 1, 0), thousand_sep=',')
)
self.assertEqual(
'10000,00,000.00',
nformat(1000000000.00, decimal_sep='.', decimal_pos=2, grouping=(3, 2, -1), thousand_sep=',')
)
# This unusual grouping/force_grouping combination may be triggered by the intcomma filter (#17414)
self.assertEqual(
'10000',
nformat(self.long, decimal_sep='.', decimal_pos=0, grouping=0, force_grouping=True)
)
# date filter
self.assertEqual('31.12.2009 в 20:50', Template('{{ dt|date:"d.m.Y в H:i" }}').render(self.ctxt))
self.assertEqual('⌚ 10:15', Template('{{ t|time:"⌚ H:i" }}').render(self.ctxt))
@override_settings(USE_L10N=False)
def test_l10n_disabled(self):
"""
Catalan locale with format localization (L10N) disabled: translations
will be used, but not formats.
"""
with translation.override('ca', deactivate=True):
self.maxDiff = 3000
self.assertEqual('N j, Y', get_format('DATE_FORMAT'))
self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
self.assertEqual('.', get_format('DECIMAL_SEPARATOR'))
self.assertEqual('10:15 a.m.', time_format(self.t))
self.assertEqual('Des. 31, 2009', date_format(self.d))
self.assertEqual('desembre 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
self.assertEqual('12/31/2009 8:50 p.m.', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
self.assertEqual('No localizable', localize('No localizable'))
self.assertEqual('66666.666', localize(self.n))
self.assertEqual('99999.999', localize(self.f))
self.assertEqual('10000', localize(self.long))
self.assertEqual('Des. 31, 2009', localize(self.d))
self.assertEqual('Des. 31, 2009, 8:50 p.m.', localize(self.dt))
self.assertEqual('66666.666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99999.999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('Des. 31, 2009', Template('{{ d }}').render(self.ctxt))
self.assertEqual('Des. 31, 2009, 8:50 p.m.', Template('{{ dt }}').render(self.ctxt))
self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
self.assertEqual(
'66666.67',
Template('{{ n|floatformat:"2g" }}').render(self.ctxt),
)
self.assertEqual(
'100000.0',
Template('{{ f|floatformat:"g" }}').render(self.ctxt),
)
self.assertEqual('10:15 a.m.', Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt))
self.assertEqual('12/31/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
self.assertEqual(
'12/31/2009 8:50 p.m.', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt)
)
form = I18nForm({
'decimal_field': '66666,666',
'float_field': '99999,999',
'date_field': '31/12/2009',
'datetime_field': '31/12/2009 20:50',
'time_field': '20:50',
'integer_field': '1.234',
})
self.assertFalse(form.is_valid())
self.assertEqual(['Introdu\xefu un n\xfamero.'], form.errors['float_field'])
self.assertEqual(['Introdu\xefu un n\xfamero.'], form.errors['decimal_field'])
self.assertEqual(['Introdu\xefu una data v\xe0lida.'], form.errors['date_field'])
self.assertEqual(['Introdu\xefu una data/hora v\xe0lides.'], form.errors['datetime_field'])
self.assertEqual(['Introdu\xefu un n\xfamero enter.'], form.errors['integer_field'])
form2 = SelectDateForm({
'date_field_month': '12',
'date_field_day': '31',
'date_field_year': '2009'
})
self.assertTrue(form2.is_valid())
self.assertEqual(datetime.date(2009, 12, 31), form2.cleaned_data['date_field'])
self.assertHTMLEqual(
'<select name="mydate_month" id="id_mydate_month">'
'<option value="">---</option>'
'<option value="1">gener</option>'
'<option value="2">febrer</option>'
'<option value="3">mar\xe7</option>'
'<option value="4">abril</option>'
'<option value="5">maig</option>'
'<option value="6">juny</option>'
'<option value="7">juliol</option>'
'<option value="8">agost</option>'
'<option value="9">setembre</option>'
'<option value="10">octubre</option>'
'<option value="11">novembre</option>'
'<option value="12" selected>desembre</option>'
'</select>'
'<select name="mydate_day" id="id_mydate_day">'
'<option value="">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected>31</option>'
'</select>'
'<select name="mydate_year" id="id_mydate_year">'
'<option value="">---</option>'
'<option value="2009" selected>2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
'</select>',
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
)
# We shouldn't change the behavior of the floatformat filter re:
# thousand separator and grouping when USE_L10N is False even
# if the USE_THOUSAND_SEPARATOR, NUMBER_GROUPING and
# THOUSAND_SEPARATOR settings are specified
with self.settings(USE_THOUSAND_SEPARATOR=True, NUMBER_GROUPING=1, THOUSAND_SEPARATOR='!'):
self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
def test_false_like_locale_formats(self):
"""
The active locale's formats take precedence over the default settings
even if they would be interpreted as False in a conditional test
(e.g. 0 or empty string) (#16938).
"""
with translation.override('fr'):
with self.settings(USE_THOUSAND_SEPARATOR=True, THOUSAND_SEPARATOR='!'):
self.assertEqual('\xa0', get_format('THOUSAND_SEPARATOR'))
# Even a second time (after the format has been cached)...
self.assertEqual('\xa0', get_format('THOUSAND_SEPARATOR'))
with self.settings(FIRST_DAY_OF_WEEK=0):
self.assertEqual(1, get_format('FIRST_DAY_OF_WEEK'))
# Even a second time (after the format has been cached)...
self.assertEqual(1, get_format('FIRST_DAY_OF_WEEK'))
def test_l10n_enabled(self):
self.maxDiff = 3000
# Catalan locale
with translation.override('ca', deactivate=True):
self.assertEqual(r'j \d\e F \d\e Y', get_format('DATE_FORMAT'))
self.assertEqual(1, get_format('FIRST_DAY_OF_WEEK'))
self.assertEqual(',', get_format('DECIMAL_SEPARATOR'))
self.assertEqual('10:15', time_format(self.t))
self.assertEqual('31 de desembre de 2009', date_format(self.d))
self.assertEqual('desembre del 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
self.assertEqual('31/12/2009 20:50', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
self.assertEqual('No localizable', localize('No localizable'))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66.666,666', localize(self.n))
self.assertEqual('99.999,999', localize(self.f))
self.assertEqual('10.000', localize(self.long))
self.assertEqual('True', localize(True))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666,666', localize(self.n))
self.assertEqual('99999,999', localize(self.f))
self.assertEqual('10000', localize(self.long))
self.assertEqual('31 de desembre de 2009', localize(self.d))
self.assertEqual('31 de desembre de 2009 a les 20:50', localize(self.dt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66.666,666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99.999,999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('10.000', Template('{{ l }}').render(self.ctxt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
form3 = I18nForm({
'decimal_field': '66.666,666',
'float_field': '99.999,999',
'date_field': '31/12/2009',
'datetime_field': '31/12/2009 20:50',
'time_field': '20:50',
'integer_field': '1.234',
})
self.assertTrue(form3.is_valid())
self.assertEqual(decimal.Decimal('66666.666'), form3.cleaned_data['decimal_field'])
self.assertEqual(99999.999, form3.cleaned_data['float_field'])
self.assertEqual(datetime.date(2009, 12, 31), form3.cleaned_data['date_field'])
self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form3.cleaned_data['datetime_field'])
self.assertEqual(datetime.time(20, 50), form3.cleaned_data['time_field'])
self.assertEqual(1234, form3.cleaned_data['integer_field'])
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666,666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99999,999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('31 de desembre de 2009', Template('{{ d }}').render(self.ctxt))
self.assertEqual('31 de desembre de 2009 a les 20:50', Template('{{ dt }}').render(self.ctxt))
self.assertEqual('66666,67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
self.assertEqual('100000,0', Template('{{ f|floatformat }}').render(self.ctxt))
self.assertEqual(
'66.666,67',
Template('{{ n|floatformat:"2g" }}').render(self.ctxt),
)
self.assertEqual(
'100.000,0',
Template('{{ f|floatformat:"g" }}').render(self.ctxt),
)
self.assertEqual('10:15', Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt))
self.assertEqual('31/12/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
self.assertEqual(
'31/12/2009 20:50',
Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt)
)
self.assertEqual(date_format(datetime.datetime.now(), "DATE_FORMAT"),
Template('{% now "DATE_FORMAT" %}').render(self.ctxt))
with self.settings(USE_THOUSAND_SEPARATOR=False):
form4 = I18nForm({
'decimal_field': '66666,666',
'float_field': '99999,999',
'date_field': '31/12/2009',
'datetime_field': '31/12/2009 20:50',
'time_field': '20:50',
'integer_field': '1234',
})
self.assertTrue(form4.is_valid())
self.assertEqual(decimal.Decimal('66666.666'), form4.cleaned_data['decimal_field'])
self.assertEqual(99999.999, form4.cleaned_data['float_field'])
self.assertEqual(datetime.date(2009, 12, 31), form4.cleaned_data['date_field'])
self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form4.cleaned_data['datetime_field'])
self.assertEqual(datetime.time(20, 50), form4.cleaned_data['time_field'])
self.assertEqual(1234, form4.cleaned_data['integer_field'])
form5 = SelectDateForm({
'date_field_month': '12',
'date_field_day': '31',
'date_field_year': '2009'
})
self.assertTrue(form5.is_valid())
self.assertEqual(datetime.date(2009, 12, 31), form5.cleaned_data['date_field'])
self.assertHTMLEqual(
'<select name="mydate_day" id="id_mydate_day">'
'<option value="">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected>31</option>'
'</select>'
'<select name="mydate_month" id="id_mydate_month">'
'<option value="">---</option>'
'<option value="1">gener</option>'
'<option value="2">febrer</option>'
'<option value="3">mar\xe7</option>'
'<option value="4">abril</option>'
'<option value="5">maig</option>'
'<option value="6">juny</option>'
'<option value="7">juliol</option>'
'<option value="8">agost</option>'
'<option value="9">setembre</option>'
'<option value="10">octubre</option>'
'<option value="11">novembre</option>'
'<option value="12" selected>desembre</option>'
'</select>'
'<select name="mydate_year" id="id_mydate_year">'
'<option value="">---</option>'
'<option value="2009" selected>2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
'</select>',
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
)
# Russian locale (with E as month)
with translation.override('ru', deactivate=True):
self.assertHTMLEqual(
'<select name="mydate_day" id="id_mydate_day">'
'<option value="">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected>31</option>'
'</select>'
'<select name="mydate_month" id="id_mydate_month">'
'<option value="">---</option>'
'<option value="1">\u042f\u043d\u0432\u0430\u0440\u044c</option>'
'<option value="2">\u0424\u0435\u0432\u0440\u0430\u043b\u044c</option>'
'<option value="3">\u041c\u0430\u0440\u0442</option>'
'<option value="4">\u0410\u043f\u0440\u0435\u043b\u044c</option>'
'<option value="5">\u041c\u0430\u0439</option>'
'<option value="6">\u0418\u044e\u043d\u044c</option>'
'<option value="7">\u0418\u044e\u043b\u044c</option>'
'<option value="8">\u0410\u0432\u0433\u0443\u0441\u0442</option>'
'<option value="9">\u0421\u0435\u043d\u0442\u044f\u0431\u0440\u044c</option>'
'<option value="10">\u041e\u043a\u0442\u044f\u0431\u0440\u044c</option>'
'<option value="11">\u041d\u043e\u044f\u0431\u0440\u044c</option>'
'<option value="12" selected>\u0414\u0435\u043a\u0430\u0431\u0440\u044c</option>'
'</select>'
'<select name="mydate_year" id="id_mydate_year">'
'<option value="">---</option>'
'<option value="2009" selected>2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
'</select>',
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
)
# English locale
with translation.override('en', deactivate=True):
self.assertEqual('N j, Y', get_format('DATE_FORMAT'))
self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
self.assertEqual('.', get_format('DECIMAL_SEPARATOR'))
self.assertEqual('Dec. 31, 2009', date_format(self.d))
self.assertEqual('December 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
self.assertEqual('12/31/2009 8:50 p.m.', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
self.assertEqual('No localizable', localize('No localizable'))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66,666.666', localize(self.n))
self.assertEqual('99,999.999', localize(self.f))
self.assertEqual('10,000', localize(self.long))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666.666', localize(self.n))
self.assertEqual('99999.999', localize(self.f))
self.assertEqual('10000', localize(self.long))
self.assertEqual('Dec. 31, 2009', localize(self.d))
self.assertEqual('Dec. 31, 2009, 8:50 p.m.', localize(self.dt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66,666.666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99,999.999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('10,000', Template('{{ l }}').render(self.ctxt))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666.666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99999.999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('Dec. 31, 2009', Template('{{ d }}').render(self.ctxt))
self.assertEqual('Dec. 31, 2009, 8:50 p.m.', Template('{{ dt }}').render(self.ctxt))
self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
self.assertEqual(
'66,666.67',
Template('{{ n|floatformat:"2g" }}').render(self.ctxt),
)
self.assertEqual(
'100,000.0',
Template('{{ f|floatformat:"g" }}').render(self.ctxt),
)
self.assertEqual('12/31/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
self.assertEqual(
'12/31/2009 8:50 p.m.',
Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt)
)
form5 = I18nForm({
'decimal_field': '66666.666',
'float_field': '99999.999',
'date_field': '12/31/2009',
'datetime_field': '12/31/2009 20:50',
'time_field': '20:50',
'integer_field': '1234',
})
self.assertTrue(form5.is_valid())
self.assertEqual(decimal.Decimal('66666.666'), form5.cleaned_data['decimal_field'])
self.assertEqual(99999.999, form5.cleaned_data['float_field'])
self.assertEqual(datetime.date(2009, 12, 31), form5.cleaned_data['date_field'])
self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form5.cleaned_data['datetime_field'])
self.assertEqual(datetime.time(20, 50), form5.cleaned_data['time_field'])
self.assertEqual(1234, form5.cleaned_data['integer_field'])
form6 = SelectDateForm({
'date_field_month': '12',
'date_field_day': '31',
'date_field_year': '2009'
})
self.assertTrue(form6.is_valid())
self.assertEqual(datetime.date(2009, 12, 31), form6.cleaned_data['date_field'])
self.assertHTMLEqual(
'<select name="mydate_month" id="id_mydate_month">'
'<option value="">---</option>'
'<option value="1">January</option>'
'<option value="2">February</option>'
'<option value="3">March</option>'
'<option value="4">April</option>'
'<option value="5">May</option>'
'<option value="6">June</option>'
'<option value="7">July</option>'
'<option value="8">August</option>'
'<option value="9">September</option>'
'<option value="10">October</option>'
'<option value="11">November</option>'
'<option value="12" selected>December</option>'
'</select>'
'<select name="mydate_day" id="id_mydate_day">'
'<option value="">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected>31</option>'
'</select>'
'<select name="mydate_year" id="id_mydate_year">'
'<option value="">---</option>'
'<option value="2009" selected>2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
'</select>',
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
)
def test_sub_locales(self):
"""
Check if sublocales fall back to the main locale
"""
with self.settings(USE_THOUSAND_SEPARATOR=True):
with translation.override('de-at', deactivate=True):
self.assertEqual('66.666,666', Template('{{ n }}').render(self.ctxt))
with translation.override('es-us', deactivate=True):
self.assertEqual('31 de Diciembre de 2009', date_format(self.d))
def test_localized_input(self):
"""
Tests if form input is correctly localized
"""
self.maxDiff = 1200
with translation.override('de-at', deactivate=True):
form6 = CompanyForm({
'name': 'acme',
'date_added': datetime.datetime(2009, 12, 31, 6, 0, 0),
'cents_paid': decimal.Decimal('59.47'),
'products_delivered': 12000,
})
self.assertTrue(form6.is_valid())
self.assertHTMLEqual(
form6.as_ul(),
'<li><label for="id_name">Name:</label>'
'<input id="id_name" type="text" name="name" value="acme" maxlength="50" required></li>'
'<li><label for="id_date_added">Date added:</label>'
'<input type="text" name="date_added" value="31.12.2009 06:00:00" id="id_date_added" required></li>'
'<li><label for="id_cents_paid">Cents paid:</label>'
'<input type="text" name="cents_paid" value="59,47" id="id_cents_paid" required></li>'
'<li><label for="id_products_delivered">Products delivered:</label>'
'<input type="text" name="products_delivered" value="12000" id="id_products_delivered" required>'
'</li>'
)
self.assertEqual(localize_input(datetime.datetime(2009, 12, 31, 6, 0, 0)), '31.12.2009 06:00:00')
self.assertEqual(datetime.datetime(2009, 12, 31, 6, 0, 0), form6.cleaned_data['date_added'])
with self.settings(USE_THOUSAND_SEPARATOR=True):
# Checking for the localized "products_delivered" field
self.assertInHTML(
'<input type="text" name="products_delivered" '
'value="12.000" id="id_products_delivered" required>',
form6.as_ul()
)
def test_localized_input_func(self):
tests = (
(True, 'True'),
(datetime.date(1, 1, 1), '0001-01-01'),
(datetime.datetime(1, 1, 1), '0001-01-01 00:00:00'),
)
with self.settings(USE_THOUSAND_SEPARATOR=True):
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(localize_input(value), expected)
def test_sanitize_separators(self):
"""
Tests django.utils.formats.sanitize_separators.
"""
# Non-strings are untouched
self.assertEqual(sanitize_separators(123), 123)
with translation.override('ru', deactivate=True):
# Russian locale has non-breaking space (\xa0) as thousand separator
# Usual space is accepted too when sanitizing inputs
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual(sanitize_separators('1\xa0234\xa0567'), '1234567')
self.assertEqual(sanitize_separators('77\xa0777,777'), '77777.777')
self.assertEqual(sanitize_separators('12 345'), '12345')
self.assertEqual(sanitize_separators('77 777,777'), '77777.777')
with self.settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=False):
self.assertEqual(sanitize_separators('12\xa0345'), '12\xa0345')
with self.settings(USE_THOUSAND_SEPARATOR=True):
with patch_formats(get_language(), THOUSAND_SEPARATOR='.', DECIMAL_SEPARATOR=','):
self.assertEqual(sanitize_separators('10.234'), '10234')
# Suspicion that user entered dot as decimal separator (#22171)
self.assertEqual(sanitize_separators('10.10'), '10.10')
with self.settings(USE_L10N=False, DECIMAL_SEPARATOR=','):
self.assertEqual(sanitize_separators('1001,10'), '1001.10')
self.assertEqual(sanitize_separators('1001.10'), '1001.10')
with self.settings(
USE_L10N=False, DECIMAL_SEPARATOR=',', USE_THOUSAND_SEPARATOR=True,
THOUSAND_SEPARATOR='.'
):
self.assertEqual(sanitize_separators('1.001,10'), '1001.10')
self.assertEqual(sanitize_separators('1001,10'), '1001.10')
self.assertEqual(sanitize_separators('1001.10'), '1001.10')
self.assertEqual(sanitize_separators('1,001.10'), '1.001.10') # Invalid output
def test_iter_format_modules(self):
"""
Tests the iter_format_modules function.
"""
# Importing some format modules so that we can compare the returned
# modules with these expected modules
default_mod = import_module('django.conf.locale.de.formats')
test_mod = import_module('i18n.other.locale.de.formats')
test_mod2 = import_module('i18n.other2.locale.de.formats')
with translation.override('de-at', deactivate=True):
# Should return the correct default module when no setting is set
self.assertEqual(list(iter_format_modules('de')), [default_mod])
# When the setting is a string, should return the given module and
# the default module
self.assertEqual(
list(iter_format_modules('de', 'i18n.other.locale')),
[test_mod, default_mod])
# When setting is a list of strings, should return the given
# modules and the default module
self.assertEqual(
list(iter_format_modules('de', ['i18n.other.locale', 'i18n.other2.locale'])),
[test_mod, test_mod2, default_mod])
def test_iter_format_modules_stability(self):
"""
Tests the iter_format_modules function always yields format modules in
a stable and correct order in presence of both base ll and ll_CC formats.
"""
en_format_mod = import_module('django.conf.locale.en.formats')
en_gb_format_mod = import_module('django.conf.locale.en_GB.formats')
self.assertEqual(list(iter_format_modules('en-gb')), [en_gb_format_mod, en_format_mod])
def test_get_format_modules_lang(self):
with translation.override('de', deactivate=True):
self.assertEqual('.', get_format('DECIMAL_SEPARATOR', lang='en'))
def test_get_format_modules_stability(self):
with self.settings(FORMAT_MODULE_PATH='i18n.other.locale'):
with translation.override('de', deactivate=True):
old = "%r" % get_format_modules(reverse=True)
new = "%r" % get_format_modules(reverse=True) # second try
self.assertEqual(new, old, 'Value returned by get_format_modules() must be preserved between calls.')
def test_localize_templatetag_and_filter(self):
"""
Test the {% localize %} templatetag and the localize/unlocalize filters.
"""
context = Context({'int': 1455, 'float': 3.14, 'date': datetime.date(2016, 12, 31)})
template1 = Template(
'{% load l10n %}{% localize %}{{ int }}/{{ float }}/{{ date }}{% endlocalize %}; '
'{% localize on %}{{ int }}/{{ float }}/{{ date }}{% endlocalize %}'
)
template2 = Template(
'{% load l10n %}{{ int }}/{{ float }}/{{ date }}; '
'{% localize off %}{{ int }}/{{ float }}/{{ date }};{% endlocalize %} '
'{{ int }}/{{ float }}/{{ date }}'
)
template3 = Template(
'{% load l10n %}{{ int }}/{{ float }}/{{ date }}; '
'{{ int|unlocalize }}/{{ float|unlocalize }}/{{ date|unlocalize }}'
)
template4 = Template(
'{% load l10n %}{{ int }}/{{ float }}/{{ date }}; '
'{{ int|localize }}/{{ float|localize }}/{{ date|localize }}'
)
expected_localized = '1.455/3,14/31. Dezember 2016'
expected_unlocalized = '1455/3.14/Dez. 31, 2016'
output1 = '; '.join([expected_localized, expected_localized])
output2 = '; '.join([expected_localized, expected_unlocalized, expected_localized])
output3 = '; '.join([expected_localized, expected_unlocalized])
output4 = '; '.join([expected_unlocalized, expected_localized])
with translation.override('de', deactivate=True):
with self.settings(USE_L10N=False, USE_THOUSAND_SEPARATOR=True):
self.assertEqual(template1.render(context), output1)
self.assertEqual(template4.render(context), output4)
with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True):
self.assertEqual(template1.render(context), output1)
self.assertEqual(template2.render(context), output2)
self.assertEqual(template3.render(context), output3)
def test_localized_off_numbers(self):
"""A string representation is returned for unlocalized numbers."""
template = Template(
'{% load l10n %}{% localize off %}'
'{{ int }}/{{ float }}/{{ decimal }}{% endlocalize %}'
)
context = Context(
{'int': 1455, 'float': 3.14, 'decimal': decimal.Decimal('24.1567')}
)
for use_l10n in [True, False]:
with self.subTest(use_l10n=use_l10n), self.settings(
USE_L10N=use_l10n,
DECIMAL_SEPARATOR=',',
USE_THOUSAND_SEPARATOR=True,
THOUSAND_SEPARATOR='°',
NUMBER_GROUPING=2,
):
self.assertEqual(template.render(context), '1455/3.14/24.1567')
def test_localized_as_text_as_hidden_input(self):
"""
Tests if form input with 'as_hidden' or 'as_text' is correctly localized. Ticket #18777
"""
self.maxDiff = 1200
with translation.override('de-at', deactivate=True):
template = Template('{% load l10n %}{{ form.date_added }}; {{ form.cents_paid }}')
template_as_text = Template('{% load l10n %}{{ form.date_added.as_text }}; {{ form.cents_paid.as_text }}')
template_as_hidden = Template(
'{% load l10n %}{{ form.date_added.as_hidden }}; {{ form.cents_paid.as_hidden }}'
)
form = CompanyForm({
'name': 'acme',
'date_added': datetime.datetime(2009, 12, 31, 6, 0, 0),
'cents_paid': decimal.Decimal('59.47'),
'products_delivered': 12000,
})
context = Context({'form': form})
self.assertTrue(form.is_valid())
self.assertHTMLEqual(
template.render(context),
'<input id="id_date_added" name="date_added" type="text" value="31.12.2009 06:00:00" required>;'
'<input id="id_cents_paid" name="cents_paid" type="text" value="59,47" required>'
)
self.assertHTMLEqual(
template_as_text.render(context),
'<input id="id_date_added" name="date_added" type="text" value="31.12.2009 06:00:00" required>;'
' <input id="id_cents_paid" name="cents_paid" type="text" value="59,47" required>'
)
self.assertHTMLEqual(
template_as_hidden.render(context),
'<input id="id_date_added" name="date_added" type="hidden" value="31.12.2009 06:00:00">;'
'<input id="id_cents_paid" name="cents_paid" type="hidden" value="59,47">'
)
def test_format_arbitrary_settings(self):
self.assertEqual(get_format('DEBUG'), 'DEBUG')
def test_get_custom_format(self):
with self.settings(FORMAT_MODULE_PATH='i18n.other.locale'):
with translation.override('fr', deactivate=True):
self.assertEqual('d/m/Y CUSTOM', get_format('CUSTOM_DAY_FORMAT'))
def test_admin_javascript_supported_input_formats(self):
"""
The first input format for DATE_INPUT_FORMATS, TIME_INPUT_FORMATS, and
DATETIME_INPUT_FORMATS must not contain %f since that's unsupported by
the admin's time picker widget.
"""
regex = re.compile('%([^BcdHImMpSwxXyY%])')
for language_code, language_name in settings.LANGUAGES:
for format_name in ('DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'):
with self.subTest(language=language_code, format=format_name):
formatter = get_format(format_name, lang=language_code)[0]
self.assertEqual(
regex.findall(formatter), [],
"%s locale's %s uses an unsupported format code." % (language_code, format_name)
)
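# Hedged illustration added for this write-up (not part of the original Django
# test suite): a minimal, standalone sketch of how the whitelist regex used in
# test_admin_javascript_supported_input_formats flags strftime codes, such as
# %f, that the admin time picker widget cannot parse. The helper name below is
# hypothetical.
def _unsupported_format_code_demo():
    import re
    unsupported = re.compile('%([^BcdHImMpSwxXyY%])')
    # every code in this format string is in the whitelist, so nothing is flagged
    assert unsupported.findall('%d.%m.%Y %H:%M:%S') == []
    # %f is outside the whitelist and is reported as unsupported
    assert unsupported.findall('%H:%M:%S.%f') == ['f']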
class MiscTests(SimpleTestCase):
rf = RequestFactory()
@override_settings(LANGUAGE_CODE='de')
def test_english_fallback(self):
"""
With a non-English LANGUAGE_CODE and if the active language is English
or one of its variants, the untranslated string should be returned
(instead of falling back to LANGUAGE_CODE) (See #24413).
"""
self.assertEqual(gettext("Image"), "Bild")
with translation.override('en'):
self.assertEqual(gettext("Image"), "Image")
with translation.override('en-us'):
self.assertEqual(gettext("Image"), "Image")
with translation.override('en-ca'):
self.assertEqual(gettext("Image"), "Image")
def test_parse_spec_http_header(self):
"""
Testing HTTP header parsing. First, we test that we can parse the
values according to the spec (and that we extract all the pieces in
the right order).
"""
tests = [
# Good headers
('de', [('de', 1.0)]),
('en-AU', [('en-au', 1.0)]),
('es-419', [('es-419', 1.0)]),
('*;q=1.00', [('*', 1.0)]),
('en-AU;q=0.123', [('en-au', 0.123)]),
('en-au;q=0.5', [('en-au', 0.5)]),
('en-au;q=1.0', [('en-au', 1.0)]),
('da, en-gb;q=0.25, en;q=0.5', [('da', 1.0), ('en', 0.5), ('en-gb', 0.25)]),
('en-au-xx', [('en-au-xx', 1.0)]),
('de,en-au;q=0.75,en-us;q=0.5,en;q=0.25,es;q=0.125,fa;q=0.125',
[('de', 1.0), ('en-au', 0.75), ('en-us', 0.5), ('en', 0.25), ('es', 0.125), ('fa', 0.125)]),
('*', [('*', 1.0)]),
('de;q=0.', [('de', 0.0)]),
('en; q=1,', [('en', 1.0)]),
('en; q=1.0, * ; q=0.5', [('en', 1.0), ('*', 0.5)]),
# Bad headers
('en-gb;q=1.0000', []),
('en;q=0.1234', []),
('en;q=.2', []),
('abcdefghi-au', []),
('**', []),
('en,,gb', []),
('en-au;q=0.1.0', []),
(('X' * 97) + 'Z,en', []),
('da, en-gb;q=0.8, en;q=0.7,#', []),
('de;q=2.0', []),
('de;q=0.a', []),
('12-345', []),
('', []),
('en;q=1e0', []),
]
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(trans_real.parse_accept_lang_header(value), tuple(expected))
def test_parse_literal_http_header(self):
"""
Now test that we parse a literal HTTP header correctly.
"""
g = get_language_from_request
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'}
self.assertEqual('pt-br', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt'}
self.assertEqual('pt', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'es,de'}
self.assertEqual('es', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-ar,de'}
self.assertEqual('es-ar', g(r))
# This test assumes there won't be a Django translation to a US
# variation of the Spanish language, a safe assumption. When the
# user sets it as the preferred language, the main 'es'
# translation should be selected instead.
r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-us'}
self.assertEqual(g(r), 'es')
# This tests the following scenario: there isn't a main language (zh)
# translation of Django but there is a translation to variation (zh-hans)
# the user sets zh-hans as the preferred language, it should be selected
# by Django without falling back nor ignoring it.
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hans,de'}
self.assertEqual(g(r), 'zh-hans')
r.META = {'HTTP_ACCEPT_LANGUAGE': 'NL'}
self.assertEqual('nl', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'fy'}
self.assertEqual('fy', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'ia'}
self.assertEqual('ia', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'sr-latn'}
self.assertEqual('sr-latn', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hans'}
self.assertEqual('zh-hans', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hant'}
self.assertEqual('zh-hant', g(r))
@override_settings(
LANGUAGES=[
('en', 'English'),
('zh-hans', 'Simplified Chinese'),
('zh-hant', 'Traditional Chinese'),
]
)
def test_support_for_deprecated_chinese_language_codes(self):
"""
Some browsers (Firefox, IE, etc.) use deprecated language codes. As these
language codes will be removed in Django 1.9, these will be incorrectly
matched. For example zh-tw (traditional) will be interpreted as zh-hans
(simplified), which is wrong. So we should also accept these deprecated
language codes.
refs #18419 -- this is explicitly for browser compatibility
"""
g = get_language_from_request
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-cn,en'}
self.assertEqual(g(r), 'zh-hans')
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-tw,en'}
self.assertEqual(g(r), 'zh-hant')
def test_special_fallback_language(self):
"""
Some languages may have special fallbacks that don't follow the simple
'fr-ca' -> 'fr' logic (notably Chinese codes).
"""
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-my,en'}
self.assertEqual(get_language_from_request(r), 'zh-hans')
def test_parse_language_cookie(self):
"""
Now test that we parse language preferences stored in a cookie correctly.
"""
g = get_language_from_request
r = self.rf.get('/')
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt-br'}
r.META = {}
self.assertEqual('pt-br', g(r))
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt'}
r.META = {}
self.assertEqual('pt', g(r))
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es'}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}
self.assertEqual('es', g(r))
# This test assumes there won't be a Django translation to a US
# variation of the Spanish language, a safe assumption. When the
# user sets it as the preferred language, the main 'es'
# translation should be selected instead.
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es-us'}
r.META = {}
self.assertEqual(g(r), 'es')
# This tests the following scenario: there isn't a main language (zh)
# translation of Django but there is a translation to variation (zh-hans)
# the user sets zh-hans as the preferred language, it should be selected
# by Django without falling back nor ignoring it.
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'zh-hans'}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}
self.assertEqual(g(r), 'zh-hans')
@override_settings(
USE_I18N=True,
LANGUAGES=[
('en', 'English'),
('de', 'German'),
('de-at', 'Austrian German'),
('pt-br', 'Portuguese (Brazil)'),
],
)
def test_get_supported_language_variant_real(self):
g = trans_real.get_supported_language_variant
self.assertEqual(g('en'), 'en')
self.assertEqual(g('en-gb'), 'en')
self.assertEqual(g('de'), 'de')
self.assertEqual(g('de-at'), 'de-at')
self.assertEqual(g('de-ch'), 'de')
self.assertEqual(g('pt-br'), 'pt-br')
self.assertEqual(g('pt'), 'pt-br')
self.assertEqual(g('pt-pt'), 'pt-br')
with self.assertRaises(LookupError):
g('pt', strict=True)
with self.assertRaises(LookupError):
g('pt-pt', strict=True)
with self.assertRaises(LookupError):
g('xyz')
with self.assertRaises(LookupError):
g('xy-zz')
def test_get_supported_language_variant_null(self):
g = trans_null.get_supported_language_variant
self.assertEqual(g(settings.LANGUAGE_CODE), settings.LANGUAGE_CODE)
with self.assertRaises(LookupError):
g('pt')
with self.assertRaises(LookupError):
g('de')
with self.assertRaises(LookupError):
g('de-at')
with self.assertRaises(LookupError):
g('de', strict=True)
with self.assertRaises(LookupError):
g('de-at', strict=True)
with self.assertRaises(LookupError):
g('xyz')
@override_settings(
LANGUAGES=[
('en', 'English'),
('de', 'German'),
('de-at', 'Austrian German'),
('pl', 'Polish'),
],
)
def test_get_language_from_path_real(self):
g = trans_real.get_language_from_path
self.assertEqual(g('/pl/'), 'pl')
self.assertEqual(g('/pl'), 'pl')
self.assertIsNone(g('/xyz/'))
self.assertEqual(g('/en/'), 'en')
self.assertEqual(g('/en-gb/'), 'en')
self.assertEqual(g('/de/'), 'de')
self.assertEqual(g('/de-at/'), 'de-at')
self.assertEqual(g('/de-ch/'), 'de')
self.assertIsNone(g('/de-simple-page/'))
def test_get_language_from_path_null(self):
g = trans_null.get_language_from_path
self.assertIsNone(g('/pl/'))
self.assertIsNone(g('/pl'))
self.assertIsNone(g('/xyz/'))
def test_cache_resetting(self):
"""
After setting LANGUAGES, the cache should be cleared and languages
previously valid should not be used (#14170).
"""
g = get_language_from_request
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'}
self.assertEqual('pt-br', g(r))
with self.settings(LANGUAGES=[('en', 'English')]):
self.assertNotEqual('pt-br', g(r))
def test_i18n_patterns_returns_list(self):
with override_settings(USE_I18N=False):
self.assertIsInstance(i18n_patterns([]), list)
with override_settings(USE_I18N=True):
self.assertIsInstance(i18n_patterns([]), list)
class ResolutionOrderI18NTests(SimpleTestCase):
def setUp(self):
super().setUp()
activate('de')
def tearDown(self):
deactivate()
super().tearDown()
def assertGettext(self, msgid, msgstr):
result = gettext(msgid)
self.assertIn(
msgstr, result,
"The string '%s' isn't in the translation of '%s'; the actual result is '%s'."
% (msgstr, msgid, result)
)
class AppResolutionOrderI18NTests(ResolutionOrderI18NTests):
@override_settings(LANGUAGE_CODE='de')
def test_app_translation(self):
# Original translation.
self.assertGettext('Date/time', 'Datum/Zeit')
# Different translation.
with self.modify_settings(INSTALLED_APPS={'append': 'i18n.resolution'}):
# Force refreshing translations.
activate('de')
# Doesn't work because it's added later in the list.
self.assertGettext('Date/time', 'Datum/Zeit')
with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.admin.apps.SimpleAdminConfig'}):
# Force refreshing translations.
activate('de')
# Unless the original is removed from the list.
self.assertGettext('Date/time', 'Datum/Zeit (APP)')
@override_settings(LOCALE_PATHS=extended_locale_paths)
class LocalePathsResolutionOrderI18NTests(ResolutionOrderI18NTests):
def test_locale_paths_translation(self):
self.assertGettext('Time', 'LOCALE_PATHS')
def test_locale_paths_override_app_translation(self):
with self.settings(INSTALLED_APPS=['i18n.resolution']):
self.assertGettext('Time', 'LOCALE_PATHS')
class DjangoFallbackResolutionOrderI18NTests(ResolutionOrderI18NTests):
def test_django_fallback(self):
self.assertEqual(gettext('Date/time'), 'Datum/Zeit')
@override_settings(INSTALLED_APPS=['i18n.territorial_fallback'])
class TranslationFallbackI18NTests(ResolutionOrderI18NTests):
def test_sparse_territory_catalog(self):
"""
Untranslated strings for territorial language variants use the
translations of the generic language. In this case, the de-de
translation falls back to de.
"""
with translation.override('de-de'):
self.assertGettext('Test 1 (en)', '(de-de)')
self.assertGettext('Test 2 (en)', '(de)')
class TestModels(TestCase):
def test_lazy(self):
tm = TestModel()
tm.save()
def test_safestr(self):
c = Company(cents_paid=12, products_delivered=1)
c.name = SafeString('Iñtërnâtiônàlizætiøn1')
c.save()
class TestLanguageInfo(SimpleTestCase):
def test_localized_language_info(self):
li = get_language_info('de')
self.assertEqual(li['code'], 'de')
self.assertEqual(li['name_local'], 'Deutsch')
self.assertEqual(li['name'], 'German')
self.assertIs(li['bidi'], False)
def test_unknown_language_code(self):
with self.assertRaisesMessage(KeyError, "Unknown language code xx"):
get_language_info('xx')
with translation.override('xx'):
# A language with no translation catalogs should fallback to the
# untranslated string.
self.assertEqual(gettext("Title"), "Title")
def test_unknown_only_country_code(self):
li = get_language_info('de-xx')
self.assertEqual(li['code'], 'de')
self.assertEqual(li['name_local'], 'Deutsch')
self.assertEqual(li['name'], 'German')
self.assertIs(li['bidi'], False)
def test_unknown_language_code_and_country_code(self):
with self.assertRaisesMessage(KeyError, "Unknown language code xx-xx and xx"):
get_language_info('xx-xx')
def test_fallback_language_code(self):
"""
get_language_info() returns the first fallback language info if the lang_info
struct does not contain the 'name' key.
"""
li = get_language_info('zh-my')
self.assertEqual(li['code'], 'zh-hans')
li = get_language_info('zh-hans')
self.assertEqual(li['code'], 'zh-hans')
@override_settings(
USE_I18N=True,
LANGUAGES=[
('en', 'English'),
('fr', 'French'),
],
MIDDLEWARE=[
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
ROOT_URLCONF='i18n.urls',
)
class LocaleMiddlewareTests(TestCase):
def test_streaming_response(self):
# Regression test for #5241
response = self.client.get('/fr/streaming/')
self.assertContains(response, "Oui/Non")
response = self.client.get('/en/streaming/')
self.assertContains(response, "Yes/No")
@override_settings(
MIDDLEWARE=[
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
)
def test_language_not_saved_to_session(self):
"""
The current language isn't automatically saved to the session on every
request (#21473).
"""
self.client.get('/fr/simple/')
self.assertNotIn(LANGUAGE_SESSION_KEY, self.client.session)
@override_settings(
USE_I18N=True,
LANGUAGES=[
('en', 'English'),
('de', 'German'),
('fr', 'French'),
],
MIDDLEWARE=[
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
ROOT_URLCONF='i18n.urls_default_unprefixed',
LANGUAGE_CODE='en',
)
class UnprefixedDefaultLanguageTests(SimpleTestCase):
def test_default_lang_without_prefix(self):
"""
With i18n_patterns(..., prefix_default_language=False), the default
language (settings.LANGUAGE_CODE) should be accessible without a prefix.
"""
response = self.client.get('/simple/')
self.assertEqual(response.content, b'Yes')
def test_other_lang_with_prefix(self):
response = self.client.get('/fr/simple/')
self.assertEqual(response.content, b'Oui')
def test_unprefixed_language_other_than_accept_language(self):
response = self.client.get('/simple/', HTTP_ACCEPT_LANGUAGE='fr')
self.assertEqual(response.content, b'Yes')
def test_page_with_dash(self):
# A page starting with /de* shouldn't match the 'de' language code.
response = self.client.get('/de-simple-page/')
self.assertEqual(response.content, b'Yes')
def test_no_redirect_on_404(self):
"""
A request for a nonexistent URL shouldn't cause a redirect to
/<default_language>/<request_url> when prefix_default_language=False and
/<default_language>/<request_url> has a URL match (#27402).
"""
# A match for /group1/group2/ must exist for this to act as a
# regression test.
response = self.client.get('/group1/group2/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/nonexistent/')
self.assertEqual(response.status_code, 404)
@override_settings(
USE_I18N=True,
LANGUAGES=[
('bg', 'Bulgarian'),
('en-us', 'English'),
('pt-br', 'Portuguese (Brazil)'),
],
MIDDLEWARE=[
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
ROOT_URLCONF='i18n.urls'
)
class CountrySpecificLanguageTests(SimpleTestCase):
rf = RequestFactory()
def test_check_for_language(self):
self.assertTrue(check_for_language('en'))
self.assertTrue(check_for_language('en-us'))
self.assertTrue(check_for_language('en-US'))
self.assertFalse(check_for_language('en_US'))
self.assertTrue(check_for_language('be'))
self.assertTrue(check_for_language('be@latin'))
self.assertTrue(check_for_language('sr-RS@latin'))
self.assertTrue(check_for_language('sr-RS@12345'))
self.assertFalse(check_for_language('en-ü'))
self.assertFalse(check_for_language('en\x00'))
self.assertFalse(check_for_language(None))
self.assertFalse(check_for_language('be@ '))
# Specifying encoding is not supported (Django enforces UTF-8)
self.assertFalse(check_for_language('tr-TR.UTF-8'))
self.assertFalse(check_for_language('tr-TR.UTF8'))
self.assertFalse(check_for_language('de-DE.utf-8'))
def test_check_for_language_null(self):
self.assertIs(trans_null.check_for_language('en'), True)
def test_get_language_from_request(self):
# issue 19919
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8,bg;q=0.6,ru;q=0.4'}
lang = get_language_from_request(r)
self.assertEqual('en-us', lang)
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'bg-bg,en-US;q=0.8,en;q=0.6,ru;q=0.4'}
lang = get_language_from_request(r)
self.assertEqual('bg', lang)
def test_get_language_from_request_null(self):
lang = trans_null.get_language_from_request(None)
self.assertEqual(lang, 'en')
with override_settings(LANGUAGE_CODE='de'):
lang = trans_null.get_language_from_request(None)
self.assertEqual(lang, 'de')
def test_specific_language_codes(self):
# issue 11915
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt,en-US;q=0.8,en;q=0.6,ru;q=0.4'}
lang = get_language_from_request(r)
self.assertEqual('pt-br', lang)
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-pt,en-US;q=0.8,en;q=0.6,ru;q=0.4'}
lang = get_language_from_request(r)
self.assertEqual('pt-br', lang)
class TranslationFilesMissing(SimpleTestCase):
def setUp(self):
super().setUp()
self.gettext_find_builtin = gettext_module.find
def tearDown(self):
gettext_module.find = self.gettext_find_builtin
super().tearDown()
def patchGettextFind(self):
gettext_module.find = lambda *args, **kw: None
def test_failure_finding_default_mo_files(self):
"""OSError is raised if the default language is unparseable."""
self.patchGettextFind()
trans_real._translations = {}
with self.assertRaises(OSError):
activate('en')
class NonDjangoLanguageTests(SimpleTestCase):
"""
A language not present in the default Django languages can still be
installed/used by a Django project.
"""
@override_settings(
USE_I18N=True,
LANGUAGES=[
('en-us', 'English'),
('xxx', 'Somelanguage'),
],
LANGUAGE_CODE='xxx',
LOCALE_PATHS=[os.path.join(here, 'commands', 'locale')],
)
def test_non_django_language(self):
self.assertEqual(get_language(), 'xxx')
self.assertEqual(gettext("year"), "reay")
@override_settings(USE_I18N=True)
def test_check_for_language(self):
with tempfile.TemporaryDirectory() as app_dir:
os.makedirs(os.path.join(app_dir, 'locale', 'dummy_Lang', 'LC_MESSAGES'))
open(os.path.join(app_dir, 'locale', 'dummy_Lang', 'LC_MESSAGES', 'django.mo'), 'w').close()
app_config = AppConfig('dummy_app', AppModuleStub(__path__=[app_dir]))
with mock.patch('django.apps.apps.get_app_configs', return_value=[app_config]):
self.assertIs(check_for_language('dummy-lang'), True)
@override_settings(
USE_I18N=True,
LANGUAGES=[
('en-us', 'English'),
# xyz language has no locale files
('xyz', 'XYZ'),
],
)
@translation.override('xyz')
def test_plural_non_django_language(self):
self.assertEqual(get_language(), 'xyz')
self.assertEqual(ngettext('year', 'years', 2), 'years')
@override_settings(USE_I18N=True)
class WatchForTranslationChangesTests(SimpleTestCase):
@override_settings(USE_I18N=False)
def test_i18n_disabled(self):
mocked_sender = mock.MagicMock()
watch_for_translation_changes(mocked_sender)
mocked_sender.watch_dir.assert_not_called()
def test_i18n_enabled(self):
mocked_sender = mock.MagicMock()
watch_for_translation_changes(mocked_sender)
self.assertGreater(mocked_sender.watch_dir.call_count, 1)
def test_i18n_locale_paths(self):
mocked_sender = mock.MagicMock()
with tempfile.TemporaryDirectory() as app_dir:
with self.settings(LOCALE_PATHS=[app_dir]):
watch_for_translation_changes(mocked_sender)
mocked_sender.watch_dir.assert_any_call(Path(app_dir), '**/*.mo')
def test_i18n_app_dirs(self):
mocked_sender = mock.MagicMock()
with self.settings(INSTALLED_APPS=["i18n.sampleproject"]):
watch_for_translation_changes(mocked_sender)
project_dir = Path(__file__).parent / 'sampleproject' / 'locale'
mocked_sender.watch_dir.assert_any_call(project_dir, '**/*.mo')
def test_i18n_app_dirs_ignore_django_apps(self):
mocked_sender = mock.MagicMock()
with self.settings(INSTALLED_APPS=['django.contrib.admin']):
watch_for_translation_changes(mocked_sender)
mocked_sender.watch_dir.assert_called_once_with(Path('locale'), '**/*.mo')
def test_i18n_local_locale(self):
mocked_sender = mock.MagicMock()
watch_for_translation_changes(mocked_sender)
locale_dir = Path(__file__).parent / 'locale'
mocked_sender.watch_dir.assert_any_call(locale_dir, '**/*.mo')
class TranslationFileChangedTests(SimpleTestCase):
def setUp(self):
self.gettext_translations = gettext_module._translations.copy()
self.trans_real_translations = trans_real._translations.copy()
def tearDown(self):
gettext_module._translations = self.gettext_translations
trans_real._translations = self.trans_real_translations
def test_ignores_non_mo_files(self):
gettext_module._translations = {'foo': 'bar'}
path = Path('test.py')
self.assertIsNone(translation_file_changed(None, path))
self.assertEqual(gettext_module._translations, {'foo': 'bar'})
def test_resets_cache_with_mo_files(self):
gettext_module._translations = {'foo': 'bar'}
trans_real._translations = {'foo': 'bar'}
trans_real._default = 1
trans_real._active = False
path = Path('test.mo')
self.assertIs(translation_file_changed(None, path), True)
self.assertEqual(gettext_module._translations, {})
self.assertEqual(trans_real._translations, {})
self.assertIsNone(trans_real._default)
self.assertIsInstance(trans_real._active, Local)
class UtilsTests(SimpleTestCase):
def test_round_away_from_one(self):
tests = [
(0, 0),
(0., 0),
(0.25, 0),
(0.5, 0),
(0.75, 0),
(1, 1),
(1., 1),
(1.25, 2),
(1.5, 2),
(1.75, 2),
(-0., 0),
(-0.25, -1),
(-0.5, -1),
(-0.75, -1),
(-1, -1),
(-1., -1),
(-1.25, -2),
(-1.5, -2),
(-1.75, -2),
]
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(round_away_from_one(value), expected)
|
GHSA-q2jf-h9jm-m7p4
|
keystone/api/credentials.py
|
@@ -101,13 +101,22 @@ def _list_credentials(self):
# If the request was filtered, make sure to return only the
# credentials specific to that user. This makes it so that users with
# roles on projects can't see credentials that aren't theirs.
- if (not self.oslo_context.system_scope and
- CONF.oslo_policy.enforce_scope):
- filtered_refs = []
- for ref in refs:
- if ref['user_id'] == target['credential']['user_id']:
- filtered_refs.append(ref)
- refs = filtered_refs
+ filtered_refs = []
+ for ref in refs:
+ # Check each credential again to make sure the user has access to
+ # it, either by owning it, being a project admin with
+ # enforce_scope=false, being a system user, or having some other
+ # custom policy that allows access.
+ try:
+ cred = PROVIDERS.credential_api.get_credential(ref['id'])
+ ENFORCER.enforce_call(
+ action='identity:get_credential',
+ target_attr={'credential': cred}
+ )
+ filtered_refs.append(ref)
+ except exception.Forbidden:
+ pass
+ refs = filtered_refs
refs = [self._blob_to_json(r) for r in refs]
return self.wrap_collection(refs, hints=hints)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This file handles all flask-restful resources for /v3/credentials
import hashlib
import flask
from oslo_serialization import jsonutils
from six.moves import http_client
from keystone.common import provider_api
from keystone.common import rbac_enforcer
from keystone.common import validation
import keystone.conf
from keystone.credential import schema
from keystone import exception
from keystone.i18n import _
from keystone.server import flask as ks_flask
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
ENFORCER = rbac_enforcer.RBACEnforcer
def _build_target_enforcement():
target = {}
try:
target['credential'] = PROVIDERS.credential_api.get_credential(
flask.request.view_args.get('credential_id')
)
except exception.NotFound: # nosec
# Defer the existence check in the event the credential doesn't exist; we'll
# check this later anyway.
pass
return target
class CredentialResource(ks_flask.ResourceBase):
collection_key = 'credentials'
member_key = 'credential'
@staticmethod
def _blob_to_json(ref):
# credentials stored via ec2tokens before the fix for #1259584
# need json serializing, as that's the documented API format
blob = ref.get('blob')
if isinstance(blob, dict):
ref = ref.copy()
ref['blob'] = jsonutils.dumps(blob)
return ref
def _assign_unique_id(self, ref, trust_id=None):
# Generates and assigns a unique identifier to a credential reference.
if ref.get('type', '').lower() == 'ec2':
try:
blob = jsonutils.loads(ref.get('blob'))
except (ValueError, TypeError):
raise exception.ValidationError(
message=_('Invalid blob in credential'))
if not blob or not isinstance(blob, dict):
raise exception.ValidationError(attribute='blob',
target='credential')
if blob.get('access') is None:
raise exception.ValidationError(attribute='access',
target='credential')
ref = ref.copy()
ref['id'] = hashlib.sha256(
blob['access'].encode('utf8')).hexdigest()
# update the blob with the trust_id, so credentials created with
# a trust scoped token will result in trust scoped tokens when
# authentication via ec2tokens happens
if trust_id is not None:
blob['trust_id'] = trust_id
ref['blob'] = jsonutils.dumps(blob)
return ref
else:
return super(CredentialResource, self)._assign_unique_id(ref)
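# Hedged illustration added for this write-up (not part of keystone): the ec2
# branch above derives a deterministic credential id by hashing the blob's
# 'access' key, so re-registering the same EC2 access key always maps to the
# same credential record. The helper name and the sample blob below are
# hypothetical.
def _ec2_credential_id_demo():
    import hashlib
    from oslo_serialization import jsonutils
    blob = {'access': 'AKIAEXAMPLE', 'secret': 'not part of the derived id'}
    # the id is the SHA-256 hex digest of the access key only
    derived_id = hashlib.sha256(blob['access'].encode('utf8')).hexdigest()
    # the blob itself is stored JSON-serialized alongside that id
    return derived_id, jsonutils.dumps(blob)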
def _list_credentials(self):
filters = ['user_id', 'type']
if not self.oslo_context.system_scope:
target = {'credential': {'user_id': self.oslo_context.user_id}}
else:
target = None
ENFORCER.enforce_call(action='identity:list_credentials',
filters=filters, target_attr=target)
hints = self.build_driver_hints(filters)
refs = PROVIDERS.credential_api.list_credentials(hints)
# If the request was filtered, make sure to return only the
# credentials specific to that user. This makes it so that users with
# roles on projects can't see credentials that aren't theirs.
if (not self.oslo_context.system_scope and
CONF.oslo_policy.enforce_scope):
filtered_refs = []
for ref in refs:
if ref['user_id'] == target['credential']['user_id']:
filtered_refs.append(ref)
refs = filtered_refs
refs = [self._blob_to_json(r) for r in refs]
return self.wrap_collection(refs, hints=hints)
def _get_credential(self, credential_id):
ENFORCER.enforce_call(
action='identity:get_credential',
build_target=_build_target_enforcement
)
credential = PROVIDERS.credential_api.get_credential(credential_id)
return self.wrap_member(self._blob_to_json(credential))
def get(self, credential_id=None):
# Get Credential or List of credentials.
if credential_id is None:
# No Parameter passed means that we're doing a LIST action.
return self._list_credentials()
else:
return self._get_credential(credential_id)
def post(self):
# Create a new credential
credential = self.request_body_json.get('credential', {})
target = {}
target['credential'] = credential
ENFORCER.enforce_call(
action='identity:create_credential', target_attr=target
)
validation.lazy_validate(schema.credential_create, credential)
trust_id = getattr(self.oslo_context, 'trust_id', None)
ref = self._assign_unique_id(
self._normalize_dict(credential), trust_id=trust_id)
ref = PROVIDERS.credential_api.create_credential(ref['id'], ref,
initiator=self.audit_initiator)
return self.wrap_member(ref), http_client.CREATED
def patch(self, credential_id):
# Update Credential
ENFORCER.enforce_call(
action='identity:update_credential',
build_target=_build_target_enforcement
)
PROVIDERS.credential_api.get_credential(credential_id)
credential = self.request_body_json.get('credential', {})
validation.lazy_validate(schema.credential_update, credential)
self._require_matching_id(credential)
ref = PROVIDERS.credential_api.update_credential(
credential_id, credential)
return self.wrap_member(ref)
def delete(self, credential_id):
# Delete credentials
ENFORCER.enforce_call(
action='identity:delete_credential',
build_target=_build_target_enforcement
)
return (PROVIDERS.credential_api.delete_credential(credential_id,
initiator=self.audit_initiator),
http_client.NO_CONTENT)
class CredentialAPI(ks_flask.APIBase):
_name = 'credentials'
_import_name = __name__
resource_mapping = []
resources = [CredentialResource]
APIs = (CredentialAPI,)
|
GHSA-2j23-fwqm-mgwr
|
keystone/tests/protection/v3/test_credentials.py
|
@@ -1138,3 +1138,115 @@ def _override_policy(self):
'identity:delete_credential': bp.SYSTEM_ADMIN_OR_CRED_OWNER
}
f.write(jsonutils.dumps(overridden_policies))
+
+
+class ProjectReaderTestsEnforceScopeFalse(base_classes.TestCaseWithBootstrap,
+ common_auth.AuthTestMixin,
+ _UserCredentialTests,
+ _ProjectUsersTests):
+
+ def setUp(self):
+ super(ProjectReaderTestsEnforceScopeFalse, self).setUp()
+ self.loadapp()
+ self.useFixture(ksfixtures.Policy(self.config_fixture))
+ self.config_fixture.config(group='oslo_policy', enforce_scope=False)
+
+ project_reader = unit.new_user_ref(
+ domain_id=CONF.identity.default_domain_id
+ )
+ self.user_id = PROVIDERS.identity_api.create_user(
+ project_reader
+ )['id']
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id
+ )
+ self.project_id = PROVIDERS.resource_api.create_project(
+ project['id'], project
+ )['id']
+ PROVIDERS.assignment_api.create_grant(
+ self.bootstrapper.reader_role_id, user_id=self.user_id,
+ project_id=self.project_id
+ )
+
+ auth = self.build_authentication_request(
+ user_id=self.user_id,
+ password=project_reader['password'],
+ project_id=self.project_id
+ )
+
+ # Grab a token using the persona we're testing and prepare headers
+ # for requests we'll be making in the tests.
+ with self.test_client() as c:
+ r = c.post('/v3/auth/tokens', json=auth)
+ self.token_id = r.headers['X-Subject-Token']
+ self.headers = {'X-Auth-Token': self.token_id}
+
+
+class ProjectMemberTestsEnforceScopeFalse(base_classes.TestCaseWithBootstrap,
+ common_auth.AuthTestMixin,
+ _UserCredentialTests,
+ _ProjectUsersTests):
+
+ def setUp(self):
+ super(ProjectMemberTestsEnforceScopeFalse, self).setUp()
+ self.loadapp()
+ self.useFixture(ksfixtures.Policy(self.config_fixture))
+ self.config_fixture.config(group='oslo_policy', enforce_scope=False)
+
+ project_member = unit.new_user_ref(
+ domain_id=CONF.identity.default_domain_id
+ )
+ self.user_id = PROVIDERS.identity_api.create_user(
+ project_member
+ )['id']
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id
+ )
+ self.project_id = PROVIDERS.resource_api.create_project(
+ project['id'], project
+ )['id']
+ PROVIDERS.assignment_api.create_grant(
+ self.bootstrapper.member_role_id, user_id=self.user_id,
+ project_id=self.project_id
+ )
+
+ auth = self.build_authentication_request(
+ user_id=self.user_id,
+ password=project_member['password'],
+ project_id=self.project_id
+ )
+
+ # Grab a token using the persona we're testing and prepare headers
+ # for requests we'll be making in the tests.
+ with self.test_client() as c:
+ r = c.post('/v3/auth/tokens', json=auth)
+ self.token_id = r.headers['X-Subject-Token']
+ self.headers = {'X-Auth-Token': self.token_id}
+
+
+class ProjectAdminTestsEnforceScopeFalse(base_classes.TestCaseWithBootstrap,
+ common_auth.AuthTestMixin,
+ _UserCredentialTests,
+ _SystemUserCredentialTests):
+
+ def setUp(self):
+ super(ProjectAdminTestsEnforceScopeFalse, self).setUp()
+ self.loadapp()
+ self.useFixture(ksfixtures.Policy(self.config_fixture))
+ self.config_fixture.config(group='oslo_policy', enforce_scope=False)
+
+ # Reuse the system administrator account created during
+ # ``keystone-manage bootstrap``
+ self.user_id = self.bootstrapper.admin_user_id
+ auth = self.build_authentication_request(
+ user_id=self.user_id,
+ password=self.bootstrapper.admin_password,
+ project_id=self.bootstrapper.project_id
+ )
+
+ # Grab a token using the persona we're testing and prepare headers
+ # for requests we'll be making in the tests.
+ with self.test_client() as c:
+ r = c.post('/v3/auth/tokens', json=auth)
+ self.token_id = r.headers['X-Subject-Token']
+ self.headers = {'X-Auth-Token': self.token_id}
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_serialization import jsonutils
from six.moves import http_client
from keystone.common.policies import base as bp
from keystone.common import provider_api
import keystone.conf
from keystone.tests.common import auth as common_auth
from keystone.tests import unit
from keystone.tests.unit import base_classes
from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import temporaryfile
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
class _UserCredentialTests(object):
"""Test cases for anyone that has a valid user token."""
def test_user_can_create_credentials_for_themselves(self):
create = {
'credential': {
'blob': uuid.uuid4().hex,
'user_id': self.user_id,
'type': uuid.uuid4().hex
}
}
with self.test_client() as c:
c.post('/v3/credentials', json=create, headers=self.headers)
def test_user_can_get_their_credentials(self):
with self.test_client() as c:
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': self.user_id
}
}
r = c.post('/v3/credentials', json=create, headers=self.headers)
credential_id = r.json['credential']['id']
path = '/v3/credentials/%s' % credential_id
r = c.get(path, headers=self.headers)
self.assertEqual(
self.user_id, r.json['credential']['user_id']
)
def test_user_can_list_their_credentials(self):
with self.test_client() as c:
expected = []
for _ in range(2):
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': self.user_id
}
}
r = c.post(
'/v3/credentials', json=create, headers=self.headers
)
expected.append(r.json['credential'])
r = c.get('/v3/credentials', headers=self.headers)
for credential in expected:
self.assertIn(credential, r.json['credentials'])
def test_user_can_filter_their_credentials_by_type_and_user(self):
with self.test_client() as c:
credential_type = uuid.uuid4().hex
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': credential_type,
'user_id': self.user_id
}
}
r = c.post(
'/v3/credentials', json=create, headers=self.headers
)
expected_credential_id = r.json['credential']['id']
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': self.user_id
}
}
r = c.post(
'/v3/credentials', json=create, headers=self.headers
)
path = '/v3/credentials?type=%s' % credential_type
r = c.get(path, headers=self.headers)
self.assertEqual(
expected_credential_id, r.json['credentials'][0]['id']
)
path = '/v3/credentials?user=%s' % self.user_id
r = c.get(path, headers=self.headers)
self.assertEqual(
expected_credential_id, r.json['credentials'][0]['id']
)
def test_user_can_update_their_credential(self):
with self.test_client() as c:
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': self.user_id
}
}
r = c.post('/v3/credentials', json=create, headers=self.headers)
credential_id = r.json['credential']['id']
updated_blob = uuid.uuid4().hex
update = {'credential': {'blob': updated_blob}}
path = '/v3/credentials/%s' % credential_id
r = c.patch(path, json=update, headers=self.headers)
self.assertEqual(updated_blob, r.json['credential']['blob'])
def test_user_can_delete_their_credentials(self):
with self.test_client() as c:
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': self.user_id
}
}
r = c.post('/v3/credentials', json=create, headers=self.headers)
credential_id = r.json['credential']['id']
path = '/v3/credentials/%s' % credential_id
c.delete(path, headers=self.headers)
class _ProjectUsersTests(object):
"""Users who have project role authorization observe the same behavior."""
def test_user_cannot_get_credentials_for_other_users(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_password = user['password']
user = PROVIDERS.identity_api.create_user(user)
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
project = PROVIDERS.resource_api.create_project(project['id'], project)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=user['id'],
project_id=project['id']
)
user_auth = self.build_authentication_request(
user_id=user['id'], password=user_password,
project_id=project['id']
)
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=user_auth)
token_id = r.headers['X-Subject-Token']
headers = {'X-Auth-Token': token_id}
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
r = c.post('/v3/credentials', json=create, headers=headers)
credential_id = r.json['credential']['id']
with self.test_client() as c:
path = '/v3/credentials/%s' % credential_id
c.get(
path, headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_get_non_existant_credential_forbidden(self):
with self.test_client() as c:
c.get(
'/v3/credentials/%s' % uuid.uuid4().hex, headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_list_credentials_for_other_users(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_password = user['password']
user = PROVIDERS.identity_api.create_user(user)
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
project = PROVIDERS.resource_api.create_project(project['id'], project)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=user['id'],
project_id=project['id']
)
user_auth = self.build_authentication_request(
user_id=user['id'], password=user_password,
project_id=project['id']
)
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=user_auth)
token_id = r.headers['X-Subject-Token']
headers = {'X-Auth-Token': token_id}
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
c.post('/v3/credentials', json=create, headers=headers)
with self.test_client() as c:
path = '/v3/credentials?user_id=%s' % user['id']
r = c.get(path, headers=self.headers)
self.assertEqual([], r.json['credentials'])
def test_user_cannot_filter_credentials_by_type_for_others(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_password = user['password']
user = PROVIDERS.identity_api.create_user(user)
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
project = PROVIDERS.resource_api.create_project(project['id'], project)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=user['id'],
project_id=project['id']
)
user_auth = self.build_authentication_request(
user_id=user['id'], password=user_password,
project_id=project['id']
)
credential_type = uuid.uuid4().hex
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=user_auth)
token_id = r.headers['X-Subject-Token']
headers = {'X-Auth-Token': token_id}
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': credential_type,
'user_id': user['id']
}
}
c.post('/v3/credentials', json=create, headers=headers)
with self.test_client() as c:
path = '/v3/credentials?type=%s' % credential_type
r = c.get(path, headers=self.headers)
self.assertEqual(0, len(r.json['credentials']))
def test_user_cannot_filter_credentials_by_user_for_others(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_password = user['password']
user = PROVIDERS.identity_api.create_user(user)
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
project = PROVIDERS.resource_api.create_project(project['id'], project)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=user['id'],
project_id=project['id']
)
user_auth = self.build_authentication_request(
user_id=user['id'], password=user_password,
project_id=project['id']
)
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=user_auth)
token_id = r.headers['X-Subject-Token']
headers = {'X-Auth-Token': token_id}
expected_cred_ids = []
for _ in range(2):
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
r = c.post('/v3/credentials', json=create, headers=headers)
expected_cred_ids.append(r.json['credential']['id'])
with self.test_client() as c:
path = '/v3/credentials?user_id=%s' % user['id']
r = c.get(path, headers=self.headers)
self.assertEqual([], r.json['credentials'])
def test_user_cannot_update_credentials_for_others(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_password = user['password']
user = PROVIDERS.identity_api.create_user(user)
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
project = PROVIDERS.resource_api.create_project(project['id'], project)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=user['id'],
project_id=project['id']
)
user_auth = self.build_authentication_request(
user_id=user['id'], password=user_password,
project_id=project['id']
)
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=user_auth)
token_id = r.headers['X-Subject-Token']
headers = {'X-Auth-Token': token_id}
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
r = c.post('/v3/credentials', json=create, headers=headers)
credential_id = r.json['credential']['id']
with self.test_client() as c:
update = {'credential': {'blob': uuid.uuid4().hex}}
path = '/v3/credentials/%s' % credential_id
c.patch(
path, json=update, headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_update_non_existant_credential_forbidden(self):
with self.test_client() as c:
update = {'credential': {'blob': uuid.uuid4().hex}}
c.patch(
'/v3/credentials/%s' % uuid.uuid4().hex, json=update,
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_create_credentials_for_other_users(self):
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
)
with self.test_client() as c:
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
c.post(
'/v3/credentials', json=create, headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_delete_credentials_for_others(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_password = user['password']
user = PROVIDERS.identity_api.create_user(user)
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
project = PROVIDERS.resource_api.create_project(project['id'], project)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=user['id'],
project_id=project['id']
)
user_auth = self.build_authentication_request(
user_id=user['id'], password=user_password,
project_id=project['id']
)
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=user_auth)
token_id = r.headers['X-Subject-Token']
headers = {'X-Auth-Token': token_id}
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
r = c.post('/v3/credentials', json=create, headers=headers)
credential_id = r.json['credential']['id']
with self.test_client() as c:
path = '/v3/credentials/%s' % credential_id
c.delete(
path, headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_delete_non_existant_credential_forbidden(self):
with self.test_client() as c:
c.delete(
'/v3/credentials/%s' % uuid.uuid4().hex, headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
class _SystemUserCredentialTests(object):
"""Tests that are common across all system users."""
def test_user_can_list_credentials_for_other_users(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_password = user['password']
user = PROVIDERS.identity_api.create_user(user)
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
project = PROVIDERS.resource_api.create_project(project['id'], project)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=user['id'],
project_id=project['id']
)
user_auth = self.build_authentication_request(
user_id=user['id'], password=user_password,
project_id=project['id']
)
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=user_auth)
token_id = r.headers['X-Subject-Token']
headers = {'X-Auth-Token': token_id}
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
r = c.post('/v3/credentials', json=create, headers=headers)
credential_id = r.json['credential']['id']
with self.test_client() as c:
r = c.get('/v3/credentials', headers=self.headers)
self.assertEqual(1, len(r.json['credentials']))
self.assertEqual(credential_id, r.json['credentials'][0]['id'])
self.assertEqual(user['id'], r.json['credentials'][0]['user_id'])
def test_user_cannot_get_non_existant_credential_not_found(self):
with self.test_client() as c:
c.get(
'/v3/credentials/%s' % uuid.uuid4().hex, headers=self.headers,
expected_status_code=http_client.NOT_FOUND
)
def test_user_can_filter_credentials_by_type_for_others(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_password = user['password']
user = PROVIDERS.identity_api.create_user(user)
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
project = PROVIDERS.resource_api.create_project(project['id'], project)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=user['id'],
project_id=project['id']
)
user_auth = self.build_authentication_request(
user_id=user['id'], password=user_password,
project_id=project['id']
)
credential_type = uuid.uuid4().hex
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=user_auth)
token_id = r.headers['X-Subject-Token']
headers = {'X-Auth-Token': token_id}
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': credential_type,
'user_id': user['id']
}
}
r = c.post('/v3/credentials', json=create, headers=headers)
credential_id = r.json['credential']['id']
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
c.post('/v3/credentials', json=create, headers=headers)
with self.test_client() as c:
path = '/v3/credentials?type=%s' % credential_type
r = c.get(path, headers=self.headers)
self.assertEqual(1, len(r.json['credentials']))
self.assertEqual(credential_id, r.json['credentials'][0]['id'])
self.assertEqual(user['id'], r.json['credentials'][0]['user_id'])
def test_user_can_filter_credentials_by_user_for_others(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_password = user['password']
user = PROVIDERS.identity_api.create_user(user)
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
project = PROVIDERS.resource_api.create_project(project['id'], project)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=user['id'],
project_id=project['id']
)
user_auth = self.build_authentication_request(
user_id=user['id'], password=user_password,
project_id=project['id']
)
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=user_auth)
token_id = r.headers['X-Subject-Token']
headers = {'X-Auth-Token': token_id}
expected_cred_ids = []
for _ in range(2):
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
r = c.post('/v3/credentials', json=create, headers=headers)
expected_cred_ids.append(r.json['credential']['id'])
with self.test_client() as c:
path = '/v3/credentials?user_id=%s' % user['id']
r = c.get(path, headers=self.headers)
self.assertEqual(2, len(r.json['credentials']))
for credential in r.json['credentials']:
self.assertIn(credential['id'], expected_cred_ids)
self.assertEqual(user['id'], credential['user_id'])
class SystemReaderTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_UserCredentialTests,
_SystemUserCredentialTests):
def setUp(self):
super(SystemReaderTests, self).setUp()
self.loadapp()
self.useFixture(ksfixtures.Policy(self.config_fixture))
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
system_reader = unit.new_user_ref(
domain_id=CONF.identity.default_domain_id
)
self.user_id = PROVIDERS.identity_api.create_user(
system_reader
)['id']
PROVIDERS.assignment_api.create_system_grant_for_user(
self.user_id, self.bootstrapper.reader_role_id
)
auth = self.build_authentication_request(
user_id=self.user_id, password=system_reader['password'],
system=True
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
def test_user_cannot_create_credentials_for_other_users(self):
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
)
with self.test_client() as c:
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
c.post(
'/v3/credentials', json=create, headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_update_credentials_for_others(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_password = user['password']
user = PROVIDERS.identity_api.create_user(user)
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
project = PROVIDERS.resource_api.create_project(project['id'], project)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=user['id'],
project_id=project['id']
)
user_auth = self.build_authentication_request(
user_id=user['id'], password=user_password,
project_id=project['id']
)
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=user_auth)
token_id = r.headers['X-Subject-Token']
headers = {'X-Auth-Token': token_id}
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
r = c.post('/v3/credentials', json=create, headers=headers)
credential_id = r.json['credential']['id']
with self.test_client() as c:
update = {'credential': {'blob': uuid.uuid4().hex}}
path = '/v3/credentials/%s' % credential_id
c.patch(
path, json=update, headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_update_non_existant_credential_forbidden(self):
with self.test_client() as c:
update = {'credential': {'blob': uuid.uuid4().hex}}
c.patch(
'/v3/credentials/%s' % uuid.uuid4().hex, json=update,
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_delete_credentials_for_others(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_password = user['password']
user = PROVIDERS.identity_api.create_user(user)
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
project = PROVIDERS.resource_api.create_project(project['id'], project)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=user['id'],
project_id=project['id']
)
user_auth = self.build_authentication_request(
user_id=user['id'], password=user_password,
project_id=project['id']
)
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=user_auth)
token_id = r.headers['X-Subject-Token']
headers = {'X-Auth-Token': token_id}
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
r = c.post('/v3/credentials', json=create, headers=headers)
credential_id = r.json['credential']['id']
with self.test_client() as c:
path = '/v3/credentials/%s' % credential_id
c.delete(
path, headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_delete_non_existant_credential_forbidden(self):
with self.test_client() as c:
c.delete(
'/v3/credentials/%s' % uuid.uuid4().hex, headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
class SystemMemberTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_UserCredentialTests,
_SystemUserCredentialTests):
def setUp(self):
super(SystemMemberTests, self).setUp()
self.loadapp()
self.useFixture(ksfixtures.Policy(self.config_fixture))
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
system_member = unit.new_user_ref(
domain_id=CONF.identity.default_domain_id
)
self.user_id = PROVIDERS.identity_api.create_user(
system_member
)['id']
PROVIDERS.assignment_api.create_system_grant_for_user(
self.user_id, self.bootstrapper.member_role_id
)
auth = self.build_authentication_request(
user_id=self.user_id, password=system_member['password'],
system=True
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
def test_user_cannot_create_credentials_for_other_users(self):
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
)
with self.test_client() as c:
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
c.post(
'/v3/credentials', json=create, headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_update_credentials_for_others(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_password = user['password']
user = PROVIDERS.identity_api.create_user(user)
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
project = PROVIDERS.resource_api.create_project(project['id'], project)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=user['id'],
project_id=project['id']
)
user_auth = self.build_authentication_request(
user_id=user['id'], password=user_password,
project_id=project['id']
)
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=user_auth)
token_id = r.headers['X-Subject-Token']
headers = {'X-Auth-Token': token_id}
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
r = c.post('/v3/credentials', json=create, headers=headers)
credential_id = r.json['credential']['id']
with self.test_client() as c:
update = {'credential': {'blob': uuid.uuid4().hex}}
path = '/v3/credentials/%s' % credential_id
c.patch(
path, json=update, headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_update_non_existant_credential_forbidden(self):
with self.test_client() as c:
update = {'credential': {'blob': uuid.uuid4().hex}}
c.patch(
'/v3/credentials/%s' % uuid.uuid4().hex, json=update,
headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_delete_credentials_for_others(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_password = user['password']
user = PROVIDERS.identity_api.create_user(user)
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
project = PROVIDERS.resource_api.create_project(project['id'], project)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=user['id'],
project_id=project['id']
)
user_auth = self.build_authentication_request(
user_id=user['id'], password=user_password,
project_id=project['id']
)
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=user_auth)
token_id = r.headers['X-Subject-Token']
headers = {'X-Auth-Token': token_id}
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
r = c.post('/v3/credentials', json=create, headers=headers)
credential_id = r.json['credential']['id']
with self.test_client() as c:
path = '/v3/credentials/%s' % credential_id
c.delete(
path, headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
def test_user_cannot_delete_non_existant_credential_forbidden(self):
with self.test_client() as c:
c.delete(
'/v3/credentials/%s' % uuid.uuid4().hex, headers=self.headers,
expected_status_code=http_client.FORBIDDEN
)
class SystemAdminTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_UserCredentialTests,
_SystemUserCredentialTests):
def setUp(self):
super(SystemAdminTests, self).setUp()
self.loadapp()
self.useFixture(ksfixtures.Policy(self.config_fixture))
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
# Reuse the system administrator account created during
# ``keystone-manage bootstrap``
self.user_id = self.bootstrapper.admin_user_id
auth = self.build_authentication_request(
user_id=self.user_id,
password=self.bootstrapper.admin_password,
system=True
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
def test_user_can_create_credentials_for_other_users(self):
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
)
with self.test_client() as c:
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
c.post('/v3/credentials', json=create, headers=self.headers)
def test_user_can_update_credentials_for_others(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_password = user['password']
user = PROVIDERS.identity_api.create_user(user)
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
project = PROVIDERS.resource_api.create_project(project['id'], project)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=user['id'],
project_id=project['id']
)
user_auth = self.build_authentication_request(
user_id=user['id'], password=user_password,
project_id=project['id']
)
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=user_auth)
token_id = r.headers['X-Subject-Token']
headers = {'X-Auth-Token': token_id}
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
r = c.post('/v3/credentials', json=create, headers=headers)
credential_id = r.json['credential']['id']
with self.test_client() as c:
path = '/v3/credentials/%s' % credential_id
updated_blob = uuid.uuid4().hex
update = {'credential': {'blob': updated_blob}}
r = c.patch(path, json=update, headers=self.headers)
self.assertEqual(updated_blob, r.json['credential']['blob'])
self.assertEqual(user['id'], r.json['credential']['user_id'])
def test_user_cannot_update_non_existant_credential_not_found(self):
with self.test_client() as c:
update = {'credential': {'blob': uuid.uuid4().hex}}
c.patch(
'/v3/credentials/%s' % uuid.uuid4().hex, json=update,
headers=self.headers,
expected_status_code=http_client.NOT_FOUND
)
def test_user_can_delete_credentials_for_others(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_password = user['password']
user = PROVIDERS.identity_api.create_user(user)
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
project = PROVIDERS.resource_api.create_project(project['id'], project)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=user['id'],
project_id=project['id']
)
user_auth = self.build_authentication_request(
user_id=user['id'], password=user_password,
project_id=project['id']
)
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=user_auth)
token_id = r.headers['X-Subject-Token']
headers = {'X-Auth-Token': token_id}
create = {
'credential': {
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'user_id': user['id']
}
}
r = c.post('/v3/credentials', json=create, headers=headers)
credential_id = r.json['credential']['id']
with self.test_client() as c:
path = '/v3/credentials/%s' % credential_id
c.delete(path, headers=self.headers)
def test_user_cannot_delete_non_existant_credential_not_found(self):
with self.test_client() as c:
c.delete(
'/v3/credentials/%s' % uuid.uuid4().hex, headers=self.headers,
expected_status_code=http_client.NOT_FOUND
)
class ProjectReaderTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_UserCredentialTests,
_ProjectUsersTests):
def setUp(self):
super(ProjectReaderTests, self).setUp()
self.loadapp()
self.useFixture(ksfixtures.Policy(self.config_fixture))
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
project_reader = unit.new_user_ref(
domain_id=CONF.identity.default_domain_id
)
self.user_id = PROVIDERS.identity_api.create_user(
project_reader
)['id']
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
self.project_id = PROVIDERS.resource_api.create_project(
project['id'], project
)['id']
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=self.user_id,
project_id=self.project_id
)
auth = self.build_authentication_request(
user_id=self.user_id,
password=project_reader['password'],
project_id=self.project_id
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
class ProjectMemberTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_UserCredentialTests,
_ProjectUsersTests):
def setUp(self):
super(ProjectMemberTests, self).setUp()
self.loadapp()
self.useFixture(ksfixtures.Policy(self.config_fixture))
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
project_member = unit.new_user_ref(
domain_id=CONF.identity.default_domain_id
)
self.user_id = PROVIDERS.identity_api.create_user(
project_member
)['id']
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id
)
self.project_id = PROVIDERS.resource_api.create_project(
project['id'], project
)['id']
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=self.user_id,
project_id=self.project_id
)
auth = self.build_authentication_request(
user_id=self.user_id,
password=project_member['password'],
project_id=self.project_id
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
class ProjectAdminTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_UserCredentialTests,
_ProjectUsersTests):
def setUp(self):
super(ProjectAdminTests, self).setUp()
self.loadapp()
self.policy_file = self.useFixture(temporaryfile.SecureTempFile())
self.policy_file_name = self.policy_file.file_name
self.useFixture(
ksfixtures.Policy(
self.config_fixture, policy_file=self.policy_file_name
)
)
self._override_policy()
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
# Reuse the system administrator account created during
# ``keystone-manage bootstrap``
self.user_id = self.bootstrapper.admin_user_id
auth = self.build_authentication_request(
user_id=self.user_id,
password=self.bootstrapper.admin_password,
project_id=self.bootstrapper.project_id
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
def _override_policy(self):
# TODO(lbragstad): Remove this once the deprecated policies in
# keystone.common.policies.credentials have been removed. This is only
# here to make sure we test the new policies instead of the deprecated
# ones. Oslo.policy will OR deprecated policies with new policies to
# maintain compatibility and give operators a chance to update
# permissions or update policies without breaking users. This will
# cause these specific tests to fail since we're trying to correct this
# broken behavior with better scope checking.
with open(self.policy_file_name, 'w') as f:
overridden_policies = {
'identity:get_credential': bp.SYSTEM_READER_OR_CRED_OWNER,
'identity:list_credentials': bp.SYSTEM_READER_OR_CRED_OWNER,
'identity:create_credential': bp.SYSTEM_ADMIN_OR_CRED_OWNER,
'identity:update_credential': bp.SYSTEM_ADMIN_OR_CRED_OWNER,
'identity:delete_credential': bp.SYSTEM_ADMIN_OR_CRED_OWNER
}
f.write(jsonutils.dumps(overridden_policies))
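# Illustrative sketch (assumption, not keystone or oslo.policy code): why _override_policy()
# above writes explicit rules to the policy file. For a deprecated default, oslo.policy
# enforces roughly "new rule OR deprecated rule" to stay backwards compatible; pinning the
# new rule in an override file removes that OR so only the scope-aware rule is checked.
def _effective_decision(new_rule_passes, deprecated_rule_passes, overridden):
    # With an override only the new rule counts; without one, either rule grants access.
    if overridden:
        return new_rule_passes
    return new_rule_passes or deprecated_rule_passes

if __name__ == '__main__':
    # A request that only satisfies the deprecated rule slips through without the override
    # and is correctly denied once the override is in place.
    print(_effective_decision(False, True, overridden=False))  # True
    print(_effective_decision(False, True, overridden=True))   # False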
|
GHSA-2j23-fwqm-mgwr
|
django/contrib/auth/tests/views.py
|
@@ -326,8 +326,10 @@ def test_security_check(self, password='password'):
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
+ 'http:///example.com',
'https://example.com',
'ftp://exampel.com',
+ '///example.com',
'//example.com',
'javascript:alert("XSS")'):
@@ -349,8 +351,8 @@ def test_security_check(self, password='password'):
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
- 'https:///',
- 'HTTPS:///',
+ 'https://testserver/',
+ 'HTTPS://testserver/',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
@@ -521,8 +523,10 @@ def test_security_check(self, password='password'):
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
+ 'http:///example.com',
'https://example.com',
'ftp://exampel.com',
+ '///example.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
@@ -542,8 +546,8 @@ def test_security_check(self, password='password'):
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
- 'https:///',
- 'HTTPS:///',
+ 'https://testserver/',
+ 'HTTPS://testserver/',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
|
import os
import re
from django.conf import global_settings, settings
from django.contrib.sites.models import Site, RequestSite
from django.contrib.auth.models import User
from django.core import mail
from django.core.exceptions import SuspiciousOperation
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import QueryDict, HttpRequest
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils._os import upath
from django.test import TestCase
from django.test.utils import override_settings
from django.middleware.csrf import CsrfViewMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm,
SetPasswordForm, PasswordResetForm)
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.views import login as login_view
@override_settings(
LANGUAGES=(
('en', 'English'),
),
LANGUAGE_CODE='en',
TEMPLATE_LOADERS=global_settings.TEMPLATE_LOADERS,
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
USE_TZ=False,
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class AuthViewsTestCase(TestCase):
"""
Helper base class for all the following test cases.
"""
fixtures = ['authtestdata.json']
urls = 'django.contrib.auth.tests.urls'
def login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith(settings.LOGIN_REDIRECT_URL))
self.assertTrue(SESSION_KEY in self.client.session)
def assertContainsEscaped(self, response, text, **kwargs):
return self.assertContains(response, escape(force_text(text)), **kwargs)
@skipIfCustomUser
class AuthViewNamedURLTests(AuthViewsTestCase):
urls = 'django.contrib.auth.urls'
def test_named_urls(self):
"Named URLs should be reversible"
expected_named_urls = [
('login', [], {}),
('logout', [], {}),
('password_change', [], {}),
('password_change_done', [], {}),
('password_reset', [], {}),
('password_reset_done', [], {}),
('password_reset_confirm', [], {
'uidb36': 'aaaaaaa',
'token': '1111-aaaaa',
}),
('password_reset_complete', [], {}),
]
for name, args, kwargs in expected_named_urls:
try:
reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)
@skipIfCustomUser
class PasswordResetTest(AuthViewsTestCase):
def test_email_not_found(self):
"Error is raised if the provided email address isn't currently registered"
response = self.client.get('/password_reset/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertContainsEscaped(response, PasswordResetForm.error_messages['unknown'])
self.assertEqual(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("http://" in mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
def test_email_found_custom_from(self):
"Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
response = self.client.post('/password_reset_from_email/', {'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual("[email protected]", mail.outbox[0].from_email)
@override_settings(ALLOWED_HOSTS=['adminsite.com'])
def test_admin_reset(self):
"If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
response = self.client.post('/admin_password_reset/',
{'email': '[email protected]'},
HTTP_HOST='adminsite.com'
)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("http://adminsite.com" in mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host(self):
"Poisoned HTTP_HOST headers can't be used for reset emails"
# This attack is based on the way browsers handle URLs. The colon
# should be used to separate the port, but if the URL contains an @,
# the colon is interpreted as part of a username for login purposes,
# making 'evil.com' the request domain. Since HTTP_HOST is used to
# produce a meaningful reset URL, we need to be certain that the
# HTTP_HOST header isn't poisoned. This is done as a check when get_host()
# is invoked, but we check here as a practical consequence. A short illustration of the
# parsing behaviour is sketched at the end of this module.
with self.assertRaises(SuspiciousOperation):
self.client.post('/password_reset/',
{'email': '[email protected]'},
HTTP_HOST='www.example:[email protected]'
)
self.assertEqual(len(mail.outbox), 0)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host_admin_site(self):
"Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
with self.assertRaises(SuspiciousOperation):
self.client.post('/admin_password_reset/',
{'email': '[email protected]'},
HTTP_HOST='www.example:[email protected]'
)
self.assertEqual(len(mail.outbox), 0)
def _test_confirm_start(self):
# Start by creating the email
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertTrue(urlmatch is not None, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
def test_confirm_invalid(self):
url, path = self._test_confirm_start()
# Let's munge the token in the path, but keep the same length,
# in case the URLconf will reject a different length.
path = path[:-5] + ("0" * 4) + path[-1]
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_user(self):
# Ensure that we get a 200 response for a non-existent user, not a 404
response = self.client.get('/reset/123456-1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_overflow_user(self):
# Ensure that we get a 200 response for a base36 user id that overflows int
response = self.client.get('/reset/zzzzzzzzzzzzz-1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_post(self):
# Same as test_confirm_invalid, but trying
# to do a POST instead.
url, path = self._test_confirm_start()
path = path[:-5] + ("0" * 4) + path[-1]
self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': ' anewpassword',
})
# Check the password has not been changed
u = User.objects.get(email='[email protected]')
self.assertTrue(not u.check_password("anewpassword"))
def test_confirm_complete(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
# It redirects us to a 'complete' page:
self.assertEqual(response.status_code, 302)
# Check the password has been changed
u = User.objects.get(email='[email protected]')
self.assertTrue(u.check_password("anewpassword"))
# Check we can't use the link again
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_different_passwords(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'x'})
self.assertContainsEscaped(response, SetPasswordForm.error_messages['password_mismatch'])
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
fixtures = ['custom_user.json']
def _test_confirm_start(self):
# Start by creating the email
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertTrue(urlmatch is not None, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid_custom_user(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
@skipIfCustomUser
class ChangePasswordTest(AuthViewsTestCase):
def fail_login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password,
})
self.assertContainsEscaped(response, AuthenticationForm.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})
def logout(self):
response = self.client.get('/logout/')
def test_password_change_fails_with_invalid_old_password(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'donuts',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertContainsEscaped(response, PasswordChangeForm.error_messages['password_incorrect'])
def test_password_change_fails_with_mismatched_passwords(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'donuts',
})
self.assertContainsEscaped(response, SetPasswordForm.error_messages['password_mismatch'])
def test_password_change_succeeds(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/password_change/done/'))
self.fail_login()
self.login(password='password1')
def test_password_change_done_succeeds(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/password_change/done/'))
def test_password_change_done_fails(self):
with self.settings(LOGIN_URL='/login/'):
response = self.client.get('/password_change/done/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/login/?next=/password_change/done/'))
@skipIfCustomUser
class LoginTest(AuthViewsTestCase):
def test_current_site_in_context_after_login(self):
response = self.client.get(reverse('django.contrib.auth.views.login'))
self.assertEqual(response.status_code, 200)
if Site._meta.installed:
site = Site.objects.get_current()
self.assertEqual(response.context['site'], site)
self.assertEqual(response.context['site_name'], site.name)
else:
self.assertIsInstance(response.context['site'], RequestSite)
self.assertTrue(isinstance(response.context['form'], AuthenticationForm),
'Login form is not an AuthenticationForm')
def test_security_check(self, password='password'):
login_url = reverse('django.contrib.auth.views.login')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'https://example.com',
'ftp://exampel.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urlquote(bad_url),
}
response = self.client.post(nasty_url, {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertFalse(bad_url in response['Location'],
"%s should be blocked" % bad_url)
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https:///',
'HTTPS:///',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urlquote(good_url),
}
response = self.client.post(safe_url, {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertTrue(good_url in response['Location'],
"%s should be allowed" % good_url)
def test_login_csrf_rotate(self, password='password'):
"""
Makes sure that a login rotates the currently-used CSRF token.
"""
# Do a GET to establish a CSRF token
# TestClient isn't used here as we're testing middleware, essentially.
req = HttpRequest()
CsrfViewMiddleware().process_view(req, login_view, (), {})
req.META["CSRF_COOKIE_USED"] = True
resp = login_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token1 = csrf_cookie.coded_value
# Prepare the POST request
req = HttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
req.method = "POST"
req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1}
req.REQUEST = req.POST
# Use POST request to log in
SessionMiddleware().process_request(req)
CsrfViewMiddleware().process_view(req, login_view, (), {})
req.META["SERVER_NAME"] = "testserver" # Required to have redirect work in login view
req.META["SERVER_PORT"] = 80
resp = login_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token2 = csrf_cookie.coded_value
# Check the CSRF token switched
self.assertNotEqual(token1, token2)
@skipIfCustomUser
class LoginURLSettings(AuthViewsTestCase):
def setUp(self):
super(LoginURLSettings, self).setUp()
self.old_LOGIN_URL = settings.LOGIN_URL
def tearDown(self):
super(LoginURLSettings, self).tearDown()
settings.LOGIN_URL = self.old_LOGIN_URL
def get_login_required_url(self, login_url):
settings.LOGIN_URL = login_url
response = self.client.get('/login_required/')
self.assertEqual(response.status_code, 302)
return response['Location']
def test_standard_login_url(self):
login_url = '/login/'
login_required_url = self.get_login_required_url(login_url)
querystring = QueryDict('', mutable=True)
querystring['next'] = '/login_required/'
self.assertEqual(login_required_url, 'http://testserver%s?%s' %
(login_url, querystring.urlencode('/')))
def test_remote_login_url(self):
login_url = 'http://remote.example.com/login'
login_required_url = self.get_login_required_url(login_url)
querystring = QueryDict('', mutable=True)
querystring['next'] = 'http://testserver/login_required/'
self.assertEqual(login_required_url,
'%s?%s' % (login_url, querystring.urlencode('/')))
def test_https_login_url(self):
login_url = 'https:///login/'
login_required_url = self.get_login_required_url(login_url)
querystring = QueryDict('', mutable=True)
querystring['next'] = 'http://testserver/login_required/'
self.assertEqual(login_required_url,
'%s?%s' % (login_url, querystring.urlencode('/')))
def test_login_url_with_querystring(self):
login_url = '/login/?pretty=1'
login_required_url = self.get_login_required_url(login_url)
querystring = QueryDict('pretty=1', mutable=True)
querystring['next'] = '/login_required/'
self.assertEqual(login_required_url, 'http://testserver/login/?%s' %
querystring.urlencode('/'))
def test_remote_login_url_with_next_querystring(self):
login_url = 'http://remote.example.com/login/'
login_required_url = self.get_login_required_url('%s?next=/default/' %
login_url)
querystring = QueryDict('', mutable=True)
querystring['next'] = 'http://testserver/login_required/'
self.assertEqual(login_required_url, '%s?%s' % (login_url,
querystring.urlencode('/')))
@skipIfCustomUser
class LogoutTest(AuthViewsTestCase):
def confirm_logged_out(self):
self.assertTrue(SESSION_KEY not in self.client.session)
def test_logout_default(self):
"Logout without next_page option renders the default template"
self.login()
response = self.client.get('/logout/')
self.assertContains(response, 'Logged out')
self.confirm_logged_out()
def test_14377(self):
# Bug 14377
self.login()
response = self.client.get('/logout/')
self.assertTrue('site' in response.context)
def test_logout_with_overridden_redirect_url(self):
# Bug 11223
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/somewhere/'))
response = self.client.get('/logout/next_page/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/login/'))
self.confirm_logged_out()
def test_logout_with_next_page_specified(self):
"Logout with next_page option given redirects to specified resource"
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/somewhere/'))
self.confirm_logged_out()
def test_logout_with_redirect_argument(self):
"Logout with query string redirects to specified resource"
self.login()
response = self.client.get('/logout/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/login/'))
self.confirm_logged_out()
def test_logout_with_custom_redirect_argument(self):
"Logout with custom query string redirects to specified resource"
self.login()
response = self.client.get('/logout/custom_query/?follow=/somewhere/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/somewhere/'))
self.confirm_logged_out()
def test_security_check(self, password='password'):
logout_url = reverse('django.contrib.auth.views.logout')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'https://example.com',
'ftp://exampel.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urlquote(bad_url),
}
self.login()
response = self.client.get(nasty_url)
self.assertEqual(response.status_code, 302)
self.assertFalse(bad_url in response['Location'],
"%s should be blocked" % bad_url)
self.confirm_logged_out()
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https:///',
'HTTPS:///',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urlquote(good_url),
}
self.login()
response = self.client.get(safe_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(good_url in response['Location'],
"%s should be allowed" % good_url)
self.confirm_logged_out()
@skipIfCustomUser
class ChangelistTests(AuthViewsTestCase):
urls = 'django.contrib.auth.tests.urls_admin'
# #20078 - users shouldn't be allowed to guess password hashes via
# repeated password__startswith queries.
def test_changelist_disallows_password_lookups(self):
# Make me a superuser before logging in.
User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
self.login()
# A lookup that tries to filter on password isn't OK
with self.assertRaises(SuspiciousOperation):
response = self.client.get('/admin/auth/user/?password__startswith=sha1$')
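# Illustrative sketch (not part of Django's test suite): what makes the poisoned HTTP_HOST
# in test_poisoned_http_host above dangerous. The "@" turns everything before it into
# userinfo, so the attacker-controlled domain becomes the parsed hostname that would end
# up in the password-reset URL.
try:
    from urllib.parse import urlsplit
except ImportError:  # Python 2, matching the era of this test module
    from urlparse import urlsplit

if __name__ == '__main__':
    parts = urlsplit('http://www.example:[email protected]/reset/abc123/')
    print(parts.hostname)  # attacker.com, not www.example.com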
|
GHSA-vq3h-3q7v-9prw
|
django/utils/http.py
|
@@ -237,6 +237,18 @@ def is_safe_url(url, host=None):
"""
if not url:
return False
+ # Chrome treats \ completely as /
+ url = url.replace('\\', '/')
+ # Chrome considers any URL with more than two slashes to be absolute, but
+ # urlparse is not so flexible. Treat any URL with three slashes as unsafe.
+ if url.startswith('///'):
+ return False
url_info = urllib_parse.urlparse(url)
+ # Forbid URLs like http:///example.com - with a scheme, but without a hostname.
+ # In that URL, example.com is not the hostname but a path component. However,
+ # Chrome will still consider example.com to be the hostname, so we must not
+ # allow this syntax.
+ if not url_info.netloc and url_info.scheme:
+ return False
return (not url_info.netloc or url_info.netloc == host) and \
(not url_info.scheme or url_info.scheme in ['http', 'https'])
|
from __future__ import unicode_literals
import calendar
import datetime
import re
import sys
try:
from urllib import parse as urllib_parse
except ImportError: # Python 2
import urllib as urllib_parse
import urlparse
urllib_parse.urlparse = urlparse.urlparse
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils import six
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_text(urllib_parse.quote(force_str(url), force_str(safe)))
urlquote = allow_lazy(urlquote, six.text_type)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_text(urllib_parse.quote_plus(force_str(url), force_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_text(urllib_parse.unquote(force_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, six.text_type)
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_text(urllib_parse.unquote_plus(force_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return urllib_parse.urlencode(
[(force_str(k),
[force_str(i) for i in v] if isinstance(v, (list,tuple)) else force_str(v))
for k, v in query],
doseq)
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s GMT' % rfcdate[:25]
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC2616 section 3.3.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Returns an integer expressed in seconds since the epoch, in UTC.
"""
# email.utils.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
raise ValueError("%r is not a valid date" % date)
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int to avoid
# returning a long (#15067). The long type was removed in Python 3.
if not six.PY3 and value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
factor = 0
if i < 0:
raise ValueError("Negative base36 conversion input.")
if not six.PY3:
if not isinstance(i, six.integer_types):
raise TypeError("Non-integer base36 conversion input.")
if i > sys.maxint:
raise ValueError("Base36 conversion input too large.")
# Find starting factor
while True:
factor += 1
if i < 36 ** factor:
factor -= 1
break
base36 = []
# Construct base36 representation
while factor >= 0:
j = 36 ** factor
base36.append(digits[i // j])
i = i % j
factor -= 1
return ''.join(base36)
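# Illustrative sketch (not part of this module): round-tripping through the two helpers
# above. 13 base36 digits are enough for any 64-bit value, which is why base36_to_int
# rejects longer strings outright.
def _base36_roundtrip_example():
    # 818469960 encodes to 'django'; every value survives the round trip.
    assert int_to_base36(818469960) == 'django'
    for n in (0, 1, 42, 818469960):
        assert base36_to_int(int_to_base36(n)) == n
    return int_to_base36(818469960)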
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
return etags
def quote_etag(etag):
"""
Wraps a string in double quotes escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urllib_parse.urlparse(url1), urllib_parse.urlparse(url2)
return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
def is_safe_url(url, host=None):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always returns ``False`` on an empty url.
"""
if not url:
return False
url_info = urllib_parse.urlparse(url)
return (not url_info.netloc or url_info.netloc == host) and \
(not url_info.scheme or url_info.scheme in ['http', 'https'])
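# Illustrative sketch (not part of this file): the hardened check introduced by the patch
# above, shown standalone so the new rejections are easy to see. Backslashes are folded
# into slashes, a leading "///" is rejected, and a scheme without a netloc (for example
# "http:///example.com") is rejected, because browsers would still infer a host from it.
def _is_safe_url_patched(url, host=None):
    if not url:
        return False
    url = url.replace('\\', '/')
    if url.startswith('///'):
        return False
    url_info = urllib_parse.urlparse(url)
    if not url_info.netloc and url_info.scheme:
        return False
    return ((not url_info.netloc or url_info.netloc == host) and
            (not url_info.scheme or url_info.scheme in ['http', 'https']))

if __name__ == '__main__':
    for candidate in ('http:///example.com', '///example.com', r'\\example.com',
                      'https://testserver/', '/url%20with%20spaces/'):
        print(candidate, _is_safe_url_patched(candidate, host='testserver'))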
|
GHSA-vq3h-3q7v-9prw
|
tests/regressiontests/utils/http.py
|
@@ -91,6 +91,35 @@ def test_base36(self):
self.assertEqual(http.int_to_base36(n), b36)
self.assertEqual(http.base36_to_int(b36), n)
+ def test_is_safe_url(self):
+ for bad_url in ('http://example.com',
+ 'http:///example.com',
+ 'https://example.com',
+ 'ftp://exampel.com',
+ r'\\example.com',
+ r'\\\example.com',
+ r'/\\/example.com',
+ r'\\\example.com',
+ r'\\example.com',
+ r'\\//example.com',
+ r'/\/example.com',
+ r'\/example.com',
+ r'/\example.com',
+ 'http:///example.com',
+ 'http:/\//example.com',
+ 'http:\/example.com',
+ 'http:/\example.com',
+ 'javascript:alert("XSS")'):
+ self.assertFalse(http.is_safe_url(bad_url, host='testserver'), "%s should be blocked" % bad_url)
+ for good_url in ('/view/?param=http://example.com',
+ '/view/?param=https://example.com',
+ '/view?param=ftp://exampel.com',
+ 'view/?param=//example.com',
+ 'https://testserver/',
+ 'HTTPS://testserver/',
+ '//testserver/',
+ '/url%20with%20spaces/'):
+ self.assertTrue(http.is_safe_url(good_url, host='testserver'), "%s should be allowed" % good_url)
class ETagProcessingTests(unittest.TestCase):
def testParsing(self):
|
from datetime import datetime
import sys
from django.http import HttpResponse, utils
from django.test import RequestFactory
from django.utils.datastructures import MultiValueDict
from django.utils import http
from django.utils import six
from django.utils import unittest
class TestUtilsHttp(unittest.TestCase):
def test_same_origin_true(self):
# Identical
self.assertTrue(http.same_origin('http://foo.com/', 'http://foo.com/'))
# One with trailing slash - see #15617
self.assertTrue(http.same_origin('http://foo.com', 'http://foo.com/'))
self.assertTrue(http.same_origin('http://foo.com/', 'http://foo.com'))
# With port
self.assertTrue(http.same_origin('https://foo.com:8000', 'https://foo.com:8000/'))
def test_same_origin_false(self):
# Different scheme
self.assertFalse(http.same_origin('http://foo.com', 'https://foo.com'))
# Different host
self.assertFalse(http.same_origin('http://foo.com', 'http://goo.com'))
# Different host again
self.assertFalse(http.same_origin('http://foo.com', 'http://foo.com.evil.com'))
# Different port
self.assertFalse(http.same_origin('http://foo.com:8000', 'http://foo.com:8001'))
def test_urlencode(self):
# 2-tuples (the norm)
result = http.urlencode((('a', 1), ('b', 2), ('c', 3)))
self.assertEqual(result, 'a=1&b=2&c=3')
# A dictionary
result = http.urlencode({ 'a': 1, 'b': 2, 'c': 3})
acceptable_results = [
# Need to allow all of these as dictionaries have to be treated as
# unordered
'a=1&b=2&c=3',
'a=1&c=3&b=2',
'b=2&a=1&c=3',
'b=2&c=3&a=1',
'c=3&a=1&b=2',
'c=3&b=2&a=1'
]
self.assertTrue(result in acceptable_results)
result = http.urlencode({'a': [1, 2]}, doseq=False)
self.assertEqual(result, 'a=%5B%271%27%2C+%272%27%5D')
result = http.urlencode({'a': [1, 2]}, doseq=True)
self.assertEqual(result, 'a=1&a=2')
result = http.urlencode({'a': []}, doseq=True)
self.assertEqual(result, '')
# A MultiValueDict
result = http.urlencode(MultiValueDict({
'name': ['Adrian', 'Simon'],
'position': ['Developer']
}), doseq=True)
acceptable_results = [
# MultiValueDicts are similarly unordered
'name=Adrian&name=Simon&position=Developer',
'position=Developer&name=Adrian&name=Simon'
]
self.assertTrue(result in acceptable_results)
def test_base36(self):
# reciprocity works
for n in [0, 1, 1000, 1000000]:
self.assertEqual(n, http.base36_to_int(http.int_to_base36(n)))
if not six.PY3:
self.assertEqual(sys.maxint, http.base36_to_int(http.int_to_base36(sys.maxint)))
# bad input
self.assertRaises(ValueError, http.int_to_base36, -1)
if not six.PY3:
self.assertRaises(ValueError, http.int_to_base36, sys.maxint + 1)
for n in ['1', 'foo', {1: 2}, (1, 2, 3), 3.141]:
self.assertRaises(TypeError, http.int_to_base36, n)
for n in ['#', ' ']:
self.assertRaises(ValueError, http.base36_to_int, n)
for n in [123, {1: 2}, (1, 2, 3), 3.141]:
self.assertRaises(TypeError, http.base36_to_int, n)
# more explicit output testing
for n, b36 in [(0, '0'), (1, '1'), (42, '16'), (818469960, 'django')]:
self.assertEqual(http.int_to_base36(n), b36)
self.assertEqual(http.base36_to_int(b36), n)
class ETagProcessingTests(unittest.TestCase):
def testParsing(self):
etags = http.parse_etags(r'"", "etag", "e\"t\"ag", "e\\tag", W/"weak"')
self.assertEqual(etags, ['', 'etag', 'e"t"ag', r'e\tag', 'weak'])
def testQuoting(self):
quoted_etag = http.quote_etag(r'e\t"ag')
self.assertEqual(quoted_etag, r'"e\\t\"ag"')
class HttpDateProcessingTests(unittest.TestCase):
def testParsingRfc1123(self):
parsed = http.parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')
self.assertEqual(datetime.utcfromtimestamp(parsed),
datetime(1994, 11, 6, 8, 49, 37))
def testParsingRfc850(self):
parsed = http.parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT')
self.assertEqual(datetime.utcfromtimestamp(parsed),
datetime(1994, 11, 6, 8, 49, 37))
def testParsingAsctime(self):
parsed = http.parse_http_date('Sun Nov 6 08:49:37 1994')
self.assertEqual(datetime.utcfromtimestamp(parsed),
datetime(1994, 11, 6, 8, 49, 37))
|
GHSA-vq3h-3q7v-9prw
|
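The tests above exercise Django's URL helpers, including the is_safe_url assertions added by the patch. As a minimal sketch of how a view would typically use that helper to validate a user-supplied redirect target, assuming the older Django signature with a host= keyword as used in the tests (newer releases renamed the helper to url_has_allowed_host_and_scheme with allowed_hosts=):

from django.http import HttpResponseRedirect
from django.utils import http

def safe_redirect(request, fallback="/"):
    # "next" is attacker-controllable; never redirect to it unchecked.
    target = request.GET.get("next", fallback)
    # Reject targets that point at another host or use an unexpected scheme.
    if not http.is_safe_url(target, host=request.get_host()):
        target = fallback
    return HttpResponseRedirect(target)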
s3file/forms.py
|
@@ -4,6 +4,7 @@
import uuid
from django.conf import settings
+from django.core import signing
from django.utils.functional import cached_property
from storages.utils import safe_join
@@ -16,10 +17,14 @@ class S3FileInputMixin:
"""FileInput that uses JavaScript to directly upload to Amazon S3."""
needs_multipart_form = False
- upload_path = str(
- getattr(settings, "S3FILE_UPLOAD_PATH", pathlib.PurePosixPath("tmp", "s3file"))
+ upload_path = safe_join(
+ str(storage.aws_location),
+ str(
+ getattr(
+ settings, "S3FILE_UPLOAD_PATH", pathlib.PurePosixPath("tmp", "s3file")
+ )
+ ),
)
- upload_path = safe_join(str(storage.location), upload_path)
expires = settings.SESSION_COOKIE_AGE
@property
@@ -45,6 +50,11 @@ def build_attrs(self, *args, **kwargs):
"data-fields-%s" % key: value for key, value in response["fields"].items()
}
defaults["data-url"] = response["url"]
+ signer = signing.Signer(
+ salt=f"{S3FileInputMixin.__module__}.{S3FileInputMixin.__name__}"
+ )
+ print(self.upload_folder)
+ defaults["data-s3f-signature"] = signer.signature(self.upload_folder)
defaults.update(attrs)
try:
|
import base64
import logging
import pathlib
import uuid
from django.conf import settings
from django.utils.functional import cached_property
from storages.utils import safe_join
from s3file.storages import storage
logger = logging.getLogger("s3file")
class S3FileInputMixin:
"""FileInput that uses JavaScript to directly upload to Amazon S3."""
needs_multipart_form = False
upload_path = str(
getattr(settings, "S3FILE_UPLOAD_PATH", pathlib.PurePosixPath("tmp", "s3file"))
)
upload_path = safe_join(str(storage.location), upload_path)
expires = settings.SESSION_COOKIE_AGE
@property
def bucket_name(self):
return storage.bucket.name
@property
def client(self):
return storage.connection.meta.client
def build_attrs(self, *args, **kwargs):
attrs = super().build_attrs(*args, **kwargs)
accept = attrs.get("accept")
response = self.client.generate_presigned_post(
self.bucket_name,
str(pathlib.PurePosixPath(self.upload_folder, "${filename}")),
Conditions=self.get_conditions(accept),
ExpiresIn=self.expires,
)
defaults = {
"data-fields-%s" % key: value for key, value in response["fields"].items()
}
defaults["data-url"] = response["url"]
defaults.update(attrs)
try:
defaults["class"] += " s3file"
except KeyError:
defaults["class"] = "s3file"
return defaults
def get_conditions(self, accept):
conditions = [
{"bucket": self.bucket_name},
["starts-with", "$key", str(self.upload_folder)],
{"success_action_status": "201"},
]
if accept and "," not in accept:
top_type, sub_type = accept.split("/", 1)
if sub_type == "*":
conditions.append(["starts-with", "$Content-Type", "%s/" % top_type])
else:
conditions.append({"Content-Type": accept})
else:
conditions.append(["starts-with", "$Content-Type", ""])
return conditions
@cached_property
def upload_folder(self):
return str(
pathlib.PurePosixPath(
self.upload_path,
base64.urlsafe_b64encode(uuid.uuid4().bytes)
.decode("utf-8")
.rstrip("=\n"),
)
) # S3 uses POSIX paths
class Media:
js = ("s3file/js/s3file.js" if settings.DEBUG else "s3file/js/s3file.min.js",)
|
GHSA-4w8f-hjm9-xwgf
|
s3file/middleware.py
|
@@ -1,9 +1,13 @@
import logging
import pathlib
-from s3file.storages import local_dev, storage
+from django.core import signing
+from django.core.exceptions import PermissionDenied, SuspiciousFileOperation
+from django.utils.crypto import constant_time_compare
from . import views
+from .forms import S3FileInputMixin
+from .storages import local_dev, storage
logger = logging.getLogger("s3file")
@@ -15,25 +19,50 @@ def __init__(self, get_response):
def __call__(self, request):
file_fields = request.POST.getlist("s3file")
for field_name in file_fields:
+
paths = request.POST.getlist(field_name)
- request.FILES.setlist(field_name, list(self.get_files_from_storage(paths)))
+ if paths:
+ try:
+ signature = request.POST[f"{field_name}-s3f-signature"]
+ except KeyError:
+ raise PermissionDenied("No signature provided.")
+ try:
+ request.FILES.setlist(
+ field_name, list(self.get_files_from_storage(paths, signature))
+ )
+ except SuspiciousFileOperation as e:
+ raise PermissionDenied("Illegal file name!") from e
if local_dev and request.path == "/__s3_mock__/":
return views.S3MockView.as_view()(request)
return self.get_response(request)
@staticmethod
- def get_files_from_storage(paths):
+ def get_files_from_storage(paths, signature):
"""Return S3 file where the name does not include the path."""
+ try:
+ location = storage.aws_location
+ except AttributeError:
+ location = storage.location
+ signer = signing.Signer(
+ salt=f"{S3FileInputMixin.__module__}.{S3FileInputMixin.__name__}"
+ )
for path in paths:
path = pathlib.PurePosixPath(path)
+ print(path)
+ print(signer.signature(path.parent), signature)
+ if not constant_time_compare(signer.signature(path.parent), signature):
+ raise PermissionDenied("Illegal signature!")
try:
- location = storage.aws_location
- except AttributeError:
- location = storage.location
+ relative_path = str(path.relative_to(location))
+ except ValueError as e:
+ raise SuspiciousFileOperation(
+ f"Path is not inside the designated upload location: {path}"
+ ) from e
+
try:
- f = storage.open(str(path.relative_to(location)))
+ f = storage.open(relative_path)
f.name = path.name
yield f
except (OSError, ValueError):
|
import logging
import pathlib
from s3file.storages import local_dev, storage
from . import views
logger = logging.getLogger("s3file")
class S3FileMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
file_fields = request.POST.getlist("s3file")
for field_name in file_fields:
paths = request.POST.getlist(field_name)
request.FILES.setlist(field_name, list(self.get_files_from_storage(paths)))
if local_dev and request.path == "/__s3_mock__/":
return views.S3MockView.as_view()(request)
return self.get_response(request)
@staticmethod
def get_files_from_storage(paths):
"""Return S3 file where the name does not include the path."""
for path in paths:
path = pathlib.PurePosixPath(path)
try:
location = storage.aws_location
except AttributeError:
location = storage.location
try:
f = storage.open(str(path.relative_to(location)))
f.name = path.name
yield f
except (OSError, ValueError):
logger.exception("File not found: %s", path)
|
GHSA-4w8f-hjm9-xwgf
|
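The forms.py and middleware.py patches above implement a sign-then-verify handshake: the widget emits a data-s3f-signature derived from its upload folder, and the middleware only accepts submitted keys whose parent directory reproduces that signature under a constant-time comparison. A minimal self-contained sketch of the same pattern with Django's signing.Signer follows; the helper names are illustrative, and the salt mirrors the module-qualified class name the patch derives:

import pathlib

from django.core import signing
from django.utils.crypto import constant_time_compare

SALT = "s3file.forms.S3FileInputMixin"  # must be identical when signing and verifying

def sign_upload_folder(upload_folder: str) -> str:
    # Rendered into the form alongside the presigned S3 POST fields.
    return signing.Signer(salt=SALT).signature(upload_folder)

def verify_uploaded_key(key: str, signature: str) -> bool:
    # The client controls `key`; accept it only if its parent directory is the
    # exact folder that was signed when the widget was rendered.
    parent = str(pathlib.PurePosixPath(key).parent)
    expected = signing.Signer(salt=SALT).signature(parent)
    return constant_time_compare(expected, signature)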
s3file/views.py
|
@@ -2,6 +2,7 @@
import hashlib
import hmac
import logging
+from pathlib import Path
from django import http
from django.conf import settings
|
import base64
import hashlib
import hmac
import logging
from django import http
from django.conf import settings
from django.core.files.storage import default_storage
from django.views import generic
logger = logging.getLogger("s3file")
class S3MockView(generic.View):
def post(self, request):
success_action_status = request.POST.get("success_action_status", 201)
try:
file = request.FILES["file"]
key = request.POST["key"]
date = request.POST["x-amz-date"]
signature = request.POST["x-amz-signature"]
policy = request.POST["policy"]
except KeyError:
logger.exception("bad request")
return http.HttpResponseBadRequest()
try:
signature = base64.b64decode(signature.encode())
policy = base64.b64decode(policy.encode())
calc_sign = hmac.new(
settings.SECRET_KEY.encode(), policy + date.encode(), "sha256"
).digest()
except ValueError:
logger.exception("bad request")
return http.HttpResponseBadRequest()
if not hmac.compare_digest(signature, calc_sign):
logger.warning("bad signature")
return http.HttpResponseForbidden()
key = key.replace("${filename}", file.name)
etag = hashlib.md5(file.read()).hexdigest() # nosec
file.seek(0)
key = default_storage.save(key, file)
return http.HttpResponse(
'<?xml version="1.0" encoding="UTF-8"?>'
"<PostResponse>"
f"<Location>{settings.MEDIA_URL}{key}</Location>"
f"<Bucket>{getattr(settings, 'AWS_STORAGE_BUCKET_NAME')}</Bucket>"
f"<Key>{key}</Key>"
f'<ETag>"{etag}"</ETag>'
"</PostResponse>",
status=success_action_status,
)
|
GHSA-4w8f-hjm9-xwgf
|
tests/conftest.py
|
@@ -1,12 +1,14 @@
-import os
import tempfile
+from pathlib import Path
import pytest
from django.core.files.base import ContentFile
from django.utils.encoding import force_str
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
+from s3file.storages import storage
+
@pytest.fixture(scope="session")
def driver():
@@ -22,30 +24,49 @@ def driver():
@pytest.fixture
-def upload_file(request):
- path = tempfile.mkdtemp()
- file_name = os.path.join(path, "%s.txt" % request.node.name)
- with open(file_name, "w") as f:
+def freeze_upload_folder(monkeypatch):
+ """Freeze datetime and UUID."""
+ upload_folder = Path(storage.aws_location) / "tmp" / "s3file"
+ monkeypatch.setattr(
+ "s3file.forms.S3FileInputMixin.upload_folder",
+ str(upload_folder),
+ )
+ return upload_folder
+
+
[email protected]
+def upload_file(request, freeze_upload_folder):
+ path = Path(tempfile.mkdtemp()) / freeze_upload_folder / f"{request.node.name}.txt"
+ path.parent.mkdir(parents=True, exist_ok=True)
+ with path.open("w") as f:
f.write(request.node.name)
- return file_name
+ return str(path.absolute())
@pytest.fixture
-def another_upload_file(request):
- path = tempfile.mkdtemp()
- file_name = os.path.join(path, "another_%s.txt" % request.node.name)
- with open(file_name, "w") as f:
+def another_upload_file(request, freeze_upload_folder):
+ path = (
+ Path(tempfile.mkdtemp())
+ / freeze_upload_folder
+ / f"another_{request.node.name}.txt"
+ )
+ path.parent.mkdir(parents=True, exist_ok=True)
+ with path.open("w") as f:
f.write(request.node.name)
- return file_name
+ return str(path.absolute())
@pytest.fixture
-def yet_another_upload_file(request):
- path = tempfile.mkdtemp()
- file_name = os.path.join(path, "yet_another_%s.txt" % request.node.name)
- with open(file_name, "w") as f:
+def yet_another_upload_file(request, freeze_upload_folder):
+ path = (
+ Path(tempfile.mkdtemp())
+ / freeze_upload_folder
+ / f"yet_another_{request.node.name}.txt"
+ )
+ path.parent.mkdir(parents=True, exist_ok=True)
+ with path.open("w") as f:
f.write(request.node.name)
- return file_name
+ return str(path.absolute())
@pytest.fixture
|
import os
import tempfile
import pytest
from django.core.files.base import ContentFile
from django.utils.encoding import force_str
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
@pytest.fixture(scope="session")
def driver():
chrome_options = webdriver.ChromeOptions()
chrome_options.headless = True
try:
b = webdriver.Chrome(options=chrome_options)
except WebDriverException as e:
pytest.skip(force_str(e))
else:
yield b
b.quit()
@pytest.fixture
def upload_file(request):
path = tempfile.mkdtemp()
file_name = os.path.join(path, "%s.txt" % request.node.name)
with open(file_name, "w") as f:
f.write(request.node.name)
return file_name
@pytest.fixture
def another_upload_file(request):
path = tempfile.mkdtemp()
file_name = os.path.join(path, "another_%s.txt" % request.node.name)
with open(file_name, "w") as f:
f.write(request.node.name)
return file_name
@pytest.fixture
def yet_another_upload_file(request):
path = tempfile.mkdtemp()
file_name = os.path.join(path, "yet_another_%s.txt" % request.node.name)
with open(file_name, "w") as f:
f.write(request.node.name)
return file_name
@pytest.fixture
def filemodel(request, db):
from tests.testapp.models import FileModel
return FileModel.objects.create(
file=ContentFile(request.node.name, "%s.txt" % request.node.name)
)
|
GHSA-4w8f-hjm9-xwgf
|
tests/test_forms.py
|
@@ -31,23 +31,15 @@ class TestS3FileInput:
def url(self):
return reverse("upload")
- @pytest.fixture
- def freeze(self, monkeypatch):
- """Freeze datetime and UUID."""
- monkeypatch.setattr(
- "s3file.forms.S3FileInputMixin.upload_folder",
- os.path.join(storage.aws_location, "tmp"),
- )
-
- def test_value_from_datadict(self, client, upload_file):
- print(storage.location)
+ def test_value_from_datadict(self, freeze_upload_folder, client, upload_file):
with open(upload_file) as f:
- uploaded_file = storage.save("test.jpg", f)
+ uploaded_file = storage.save(freeze_upload_folder / "test.jpg", f)
response = client.post(
reverse("upload"),
{
- "file": json.dumps([uploaded_file]),
- "s3file": '["file"]',
+ "file": f"custom/location/{uploaded_file}",
+ "file-s3f-signature": "m94qBxBsnMIuIICiY133kX18KkllSPMVbhGAdAwNn1A",
+ "s3file": "file",
},
)
@@ -82,7 +74,7 @@ def test_clear(self, filemodel):
assert form.is_valid()
assert not form.cleaned_data["file"]
- def test_build_attr(self):
+ def test_build_attr(self, freeze_upload_folder):
assert set(ClearableFileInput().build_attrs({}).keys()) == {
"class",
"data-url",
@@ -92,21 +84,26 @@ def test_build_attr(self):
"data-fields-x-amz-credential",
"data-fields-policy",
"data-fields-key",
+ "data-s3f-signature",
}
+ assert (
+ ClearableFileInput().build_attrs({})["data-s3f-signature"]
+ == "tFV9nGZlq9WX1I5Sotit18z1f4C_3lPnj33_zo4LZRc"
+ )
assert ClearableFileInput().build_attrs({})["class"] == "s3file"
assert (
ClearableFileInput().build_attrs({"class": "my-class"})["class"]
== "my-class s3file"
)
- def test_get_conditions(self, freeze):
+ def test_get_conditions(self, freeze_upload_folder):
conditions = ClearableFileInput().get_conditions(None)
assert all(
condition in conditions
for condition in [
{"bucket": "test-bucket"},
{"success_action_status": "201"},
- ["starts-with", "$key", "custom/location/tmp"],
+ ["starts-with", "$key", "custom/location/tmp/s3file"],
["starts-with", "$Content-Type", ""],
]
), conditions
@@ -145,20 +142,24 @@ def test_no_js_error(self, driver, live_server):
error = driver.find_element(By.XPATH, "//body[@JSError]")
pytest.fail(error.get_attribute("JSError"))
- def test_file_insert(self, request, driver, live_server, upload_file, freeze):
+ def test_file_insert(
+ self, request, driver, live_server, upload_file, freeze_upload_folder
+ ):
driver.get(live_server + self.url)
file_input = driver.find_element(By.XPATH, "//input[@name='file']")
file_input.send_keys(upload_file)
assert file_input.get_attribute("name") == "file"
with wait_for_page_load(driver, timeout=10):
file_input.submit()
- assert storage.exists("tmp/%s.txt" % request.node.name)
+ assert storage.exists("tmp/s3file/%s.txt" % request.node.name)
with pytest.raises(NoSuchElementException):
error = driver.find_element(By.XPATH, "//body[@JSError]")
pytest.fail(error.get_attribute("JSError"))
- def test_file_insert_submit_value(self, driver, live_server, upload_file, freeze):
+ def test_file_insert_submit_value(
+ self, driver, live_server, upload_file, freeze_upload_folder
+ ):
driver.get(live_server + self.url)
file_input = driver.find_element(By.XPATH, "//input[@name='file']")
file_input.send_keys(upload_file)
@@ -178,7 +179,7 @@ def test_file_insert_submit_value(self, driver, live_server, upload_file, freeze
assert "save_continue" in driver.page_source
assert "continue_value" in driver.page_source
- def test_progress(self, driver, live_server, upload_file, freeze):
+ def test_progress(self, driver, live_server, upload_file, freeze_upload_folder):
driver.get(live_server + self.url)
file_input = driver.find_element(By.XPATH, "//input[@name='file']")
file_input.send_keys(upload_file)
@@ -202,16 +203,23 @@ def test_multi_file(
self,
driver,
live_server,
- freeze,
+ freeze_upload_folder,
upload_file,
another_upload_file,
yet_another_upload_file,
):
driver.get(live_server + self.url)
file_input = driver.find_element(By.XPATH, "//input[@name='file']")
- file_input.send_keys(" \n ".join([upload_file, another_upload_file]))
+ file_input.send_keys(
+ " \n ".join(
+ [
+ str(freeze_upload_folder / upload_file),
+ str(freeze_upload_folder / another_upload_file),
+ ]
+ )
+ )
file_input = driver.find_element(By.XPATH, "//input[@name='other_file']")
- file_input.send_keys(yet_another_upload_file)
+ file_input.send_keys(str(freeze_upload_folder / yet_another_upload_file))
save_button = driver.find_element(By.XPATH, "//input[@name='save']")
with wait_for_page_load(driver, timeout=10):
save_button.click()
|
import json
import os
from contextlib import contextmanager
import pytest
from django.forms import ClearableFileInput
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.expected_conditions import staleness_of
from selenium.webdriver.support.wait import WebDriverWait
from s3file.storages import storage
from tests.testapp.forms import UploadForm
try:
from django.urls import reverse
except ImportError:
# Django 1.8 support
from django.core.urlresolvers import reverse
@contextmanager
def wait_for_page_load(driver, timeout=30):
old_page = driver.find_element(By.TAG_NAME, "html")
yield
WebDriverWait(driver, timeout).until(staleness_of(old_page))
class TestS3FileInput:
@property
def url(self):
return reverse("upload")
@pytest.fixture
def freeze(self, monkeypatch):
"""Freeze datetime and UUID."""
monkeypatch.setattr(
"s3file.forms.S3FileInputMixin.upload_folder",
os.path.join(storage.aws_location, "tmp"),
)
def test_value_from_datadict(self, client, upload_file):
print(storage.location)
with open(upload_file) as f:
uploaded_file = storage.save("test.jpg", f)
response = client.post(
reverse("upload"),
{
"file": json.dumps([uploaded_file]),
"s3file": '["file"]',
},
)
assert response.status_code == 201
def test_value_from_datadict_initial_data(self, filemodel):
form = UploadForm(instance=filemodel)
assert filemodel.file.name in form.as_p(), form.as_p()
assert not form.is_valid()
def test_file_does_not_exist_no_fallback(self, filemodel):
form = UploadForm(
data={"file": "foo.bar", "s3file": "file"},
instance=filemodel,
)
assert form.is_valid()
assert form.cleaned_data["file"] == filemodel.file
def test_initial_no_file_uploaded(self, filemodel):
form = UploadForm(data={"file": ""}, instance=filemodel)
assert form.is_valid(), form.errors
assert not form.has_changed()
assert form.cleaned_data["file"] == filemodel.file
def test_initial_fallback(self, filemodel):
form = UploadForm(data={"file": ""}, instance=filemodel)
assert form.is_valid()
assert form.cleaned_data["file"] == filemodel.file
def test_clear(self, filemodel):
form = UploadForm(data={"file-clear": "1"}, instance=filemodel)
assert form.is_valid()
assert not form.cleaned_data["file"]
def test_build_attr(self):
assert set(ClearableFileInput().build_attrs({}).keys()) == {
"class",
"data-url",
"data-fields-x-amz-algorithm",
"data-fields-x-amz-date",
"data-fields-x-amz-signature",
"data-fields-x-amz-credential",
"data-fields-policy",
"data-fields-key",
}
assert ClearableFileInput().build_attrs({})["class"] == "s3file"
assert (
ClearableFileInput().build_attrs({"class": "my-class"})["class"]
== "my-class s3file"
)
def test_get_conditions(self, freeze):
conditions = ClearableFileInput().get_conditions(None)
assert all(
condition in conditions
for condition in [
{"bucket": "test-bucket"},
{"success_action_status": "201"},
["starts-with", "$key", "custom/location/tmp"],
["starts-with", "$Content-Type", ""],
]
), conditions
def test_accept(self):
widget = ClearableFileInput()
assert "accept" not in widget.render(name="file", value="test.jpg")
assert ["starts-with", "$Content-Type", ""] in widget.get_conditions(None)
widget = ClearableFileInput(attrs={"accept": "image/*"})
assert 'accept="image/*"' in widget.render(name="file", value="test.jpg")
assert ["starts-with", "$Content-Type", "image/"] in widget.get_conditions(
"image/*"
)
widget = ClearableFileInput(attrs={"accept": "image/jpeg"})
assert 'accept="image/jpeg"' in widget.render(name="file", value="test.jpg")
assert {"Content-Type": "image/jpeg"} in widget.get_conditions("image/jpeg")
widget = ClearableFileInput(attrs={"accept": "application/pdf,image/*"})
assert 'accept="application/pdf,image/*"' in widget.render(
name="file",
value="test.jpg",
)
assert ["starts-with", "$Content-Type", ""] in widget.get_conditions(
"application/pdf,image/*"
)
assert {"Content-Type": "application/pdf"} not in widget.get_conditions(
"application/pdf,image/*"
)
def test_no_js_error(self, driver, live_server):
driver.get(live_server + self.url)
with pytest.raises(NoSuchElementException):
error = driver.find_element(By.XPATH, "//body[@JSError]")
pytest.fail(error.get_attribute("JSError"))
def test_file_insert(self, request, driver, live_server, upload_file, freeze):
driver.get(live_server + self.url)
file_input = driver.find_element(By.XPATH, "//input[@name='file']")
file_input.send_keys(upload_file)
assert file_input.get_attribute("name") == "file"
with wait_for_page_load(driver, timeout=10):
file_input.submit()
assert storage.exists("tmp/%s.txt" % request.node.name)
with pytest.raises(NoSuchElementException):
error = driver.find_element(By.XPATH, "//body[@JSError]")
pytest.fail(error.get_attribute("JSError"))
def test_file_insert_submit_value(self, driver, live_server, upload_file, freeze):
driver.get(live_server + self.url)
file_input = driver.find_element(By.XPATH, "//input[@name='file']")
file_input.send_keys(upload_file)
assert file_input.get_attribute("name") == "file"
save_button = driver.find_element(By.XPATH, "//input[@name='save']")
with wait_for_page_load(driver, timeout=10):
save_button.click()
assert "save" in driver.page_source
driver.get(live_server + self.url)
file_input = driver.find_element(By.XPATH, "//input[@name='file']")
file_input.send_keys(upload_file)
assert file_input.get_attribute("name") == "file"
save_button = driver.find_element(By.XPATH, "//button[@name='save_continue']")
with wait_for_page_load(driver, timeout=10):
save_button.click()
assert "save_continue" in driver.page_source
assert "continue_value" in driver.page_source
def test_progress(self, driver, live_server, upload_file, freeze):
driver.get(live_server + self.url)
file_input = driver.find_element(By.XPATH, "//input[@name='file']")
file_input.send_keys(upload_file)
assert file_input.get_attribute("name") == "file"
save_button = driver.find_element(By.XPATH, "//input[@name='save']")
with wait_for_page_load(driver, timeout=10):
save_button.click()
assert "save" in driver.page_source
driver.get(live_server + self.url)
file_input = driver.find_element(By.XPATH, "//input[@name='file']")
file_input.send_keys(upload_file)
assert file_input.get_attribute("name") == "file"
save_button = driver.find_element(By.XPATH, "//button[@name='save_continue']")
with wait_for_page_load(driver, timeout=10):
save_button.click()
response = json.loads(driver.find_elements(By.CSS_SELECTOR, "pre")[0].text)
assert response["POST"]["progress"] == "1"
def test_multi_file(
self,
driver,
live_server,
freeze,
upload_file,
another_upload_file,
yet_another_upload_file,
):
driver.get(live_server + self.url)
file_input = driver.find_element(By.XPATH, "//input[@name='file']")
file_input.send_keys(" \n ".join([upload_file, another_upload_file]))
file_input = driver.find_element(By.XPATH, "//input[@name='other_file']")
file_input.send_keys(yet_another_upload_file)
save_button = driver.find_element(By.XPATH, "//input[@name='save']")
with wait_for_page_load(driver, timeout=10):
save_button.click()
response = json.loads(driver.find_elements(By.CSS_SELECTOR, "pre")[0].text)
assert response["FILES"] == {
"file": [
os.path.basename(upload_file),
os.path.basename(another_upload_file),
],
"other_file": [os.path.basename(yet_another_upload_file)],
}
def test_media(self):
assert ClearableFileInput().media._js == ["s3file/js/s3file.js"]
def test_upload_folder(self):
assert "custom/location/tmp/s3file/" in ClearableFileInput().upload_folder
assert len(os.path.basename(ClearableFileInput().upload_folder)) == 22
|
GHSA-4w8f-hjm9-xwgf
|
tests/test_middleware.py
|
@@ -1,5 +1,7 @@
import os
+import pytest
+from django.core.exceptions import PermissionDenied, SuspiciousFileOperation
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
@@ -8,18 +10,19 @@
class TestS3FileMiddleware:
- def test_get_files_from_storage(self):
+ def test_get_files_from_storage(self, freeze_upload_folder):
content = b"test_get_files_from_storage"
name = storage.save(
"tmp/s3file/test_get_files_from_storage", ContentFile(content)
)
files = S3FileMiddleware.get_files_from_storage(
- [os.path.join(storage.aws_location, name)]
+ [os.path.join(storage.aws_location, name)],
+ "tFV9nGZlq9WX1I5Sotit18z1f4C_3lPnj33_zo4LZRc",
)
file = next(files)
assert file.read() == content
- def test_process_request(self, rf):
+ def test_process_request(self, freeze_upload_folder, rf):
uploaded_file = SimpleUploadedFile("uploaded_file.txt", b"uploaded")
request = rf.post("/", data={"file": uploaded_file})
S3FileMiddleware(lambda x: None)(request)
@@ -32,13 +35,28 @@ def test_process_request(self, rf):
data={
"file": "custom/location/tmp/s3file/s3_file.txt",
"s3file": "file",
+ "file-s3f-signature": "tFV9nGZlq9WX1I5Sotit18z1f4C_3lPnj33_zo4LZRc",
},
)
S3FileMiddleware(lambda x: None)(request)
assert request.FILES.getlist("file")
assert request.FILES.get("file").read() == b"s3file"
- def test_process_request__multiple_files(self, rf):
+ def test_process_request__location_escape(self, freeze_upload_folder, rf):
+ storage.save("secrets/passwords.txt", ContentFile(b"keep this secret"))
+ request = rf.post(
+ "/",
+ data={
+ "file": "custom/location/secrets/passwords.txt",
+ "s3file": "file",
+ "file-s3f-signature": "tFV9nGZlq9WX1I5Sotit18z1f4C_3lPnj33_zo4LZRc",
+ },
+ )
+ with pytest.raises(PermissionDenied) as e:
+ S3FileMiddleware(lambda x: None)(request)
+ assert "Illegal signature!" in str(e.value)
+
+ def test_process_request__multiple_files(self, freeze_upload_folder, rf):
storage.save("tmp/s3file/s3_file.txt", ContentFile(b"s3file"))
storage.save("tmp/s3file/s3_other_file.txt", ContentFile(b"other s3file"))
request = rf.post(
@@ -48,6 +66,8 @@ def test_process_request__multiple_files(self, rf):
"custom/location/tmp/s3file/s3_file.txt",
"custom/location/tmp/s3file/s3_other_file.txt",
],
+ "file-s3f-signature": "tFV9nGZlq9WX1I5Sotit18z1f4C_3lPnj33_zo4LZRc",
+ "other_file-s3f-signature": "tFV9nGZlq9WX1I5Sotit18z1f4C_3lPnj33_zo4LZRc",
"s3file": ["file", "other_file"],
},
)
@@ -56,7 +76,7 @@ def test_process_request__multiple_files(self, rf):
assert files[0].read() == b"s3file"
assert files[1].read() == b"other s3file"
- def test_process_request__no_location(self, rf, settings):
+ def test_process_request__no_location(self, freeze_upload_folder, rf, settings):
settings.AWS_LOCATION = ""
uploaded_file = SimpleUploadedFile("uploaded_file.txt", b"uploaded")
request = rf.post("/", data={"file": uploaded_file})
@@ -66,14 +86,48 @@ def test_process_request__no_location(self, rf, settings):
storage.save("tmp/s3file/s3_file.txt", ContentFile(b"s3file"))
request = rf.post(
- "/", data={"file": "tmp/s3file/s3_file.txt", "s3file": "file"}
+ "/",
+ data={
+ "file": f"tmp/s3file/s3_file.txt",
+ "s3file": "file",
+ "file-s3f-signature": "scjzm3N8njBQIVSGEhOchtM0TkGyb2U6OXGLVlRUZhY",
+ },
)
S3FileMiddleware(lambda x: None)(request)
assert request.FILES.getlist("file")
assert request.FILES.get("file").read() == b"s3file"
- def test_process_request__no_file(self, rf, caplog):
- request = rf.post("/", data={"file": "does_not_exist.txt", "s3file": "file"})
+ def test_process_request__no_file(self, freeze_upload_folder, rf, caplog):
+ request = rf.post(
+ "/",
+ data={
+ "file": "custom/location/tmp/s3file/does_not_exist.txt",
+ "s3file": "file",
+ "file-s3f-signature": "tFV9nGZlq9WX1I5Sotit18z1f4C_3lPnj33_zo4LZRc",
+ },
+ )
S3FileMiddleware(lambda x: None)(request)
assert not request.FILES.getlist("file")
- assert "File not found: does_not_exist.txt" in caplog.text
+ assert (
+ "File not found: custom/location/tmp/s3file/does_not_exist.txt"
+ in caplog.text
+ )
+
+ def test_process_request__no_signature(self, rf, caplog):
+ request = rf.post(
+ "/", data={"file": "tmp/s3file/does_not_exist.txt", "s3file": "file"}
+ )
+ with pytest.raises(PermissionDenied) as e:
+ S3FileMiddleware(lambda x: None)(request)
+
+ def test_process_request__wrong_signature(self, rf, caplog):
+ request = rf.post(
+ "/",
+ data={
+ "file": "tmp/s3file/does_not_exist.txt",
+ "s3file": "file",
+ "file-s3f-signature": "fake",
+ },
+ )
+ with pytest.raises(PermissionDenied) as e:
+ S3FileMiddleware(lambda x: None)(request)
|
import os
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
from s3file.middleware import S3FileMiddleware
from s3file.storages import storage
class TestS3FileMiddleware:
def test_get_files_from_storage(self):
content = b"test_get_files_from_storage"
name = storage.save(
"tmp/s3file/test_get_files_from_storage", ContentFile(content)
)
files = S3FileMiddleware.get_files_from_storage(
[os.path.join(storage.aws_location, name)]
)
file = next(files)
assert file.read() == content
def test_process_request(self, rf):
uploaded_file = SimpleUploadedFile("uploaded_file.txt", b"uploaded")
request = rf.post("/", data={"file": uploaded_file})
S3FileMiddleware(lambda x: None)(request)
assert request.FILES.getlist("file")
assert request.FILES.get("file").read() == b"uploaded"
storage.save("tmp/s3file/s3_file.txt", ContentFile(b"s3file"))
request = rf.post(
"/",
data={
"file": "custom/location/tmp/s3file/s3_file.txt",
"s3file": "file",
},
)
S3FileMiddleware(lambda x: None)(request)
assert request.FILES.getlist("file")
assert request.FILES.get("file").read() == b"s3file"
def test_process_request__multiple_files(self, rf):
storage.save("tmp/s3file/s3_file.txt", ContentFile(b"s3file"))
storage.save("tmp/s3file/s3_other_file.txt", ContentFile(b"other s3file"))
request = rf.post(
"/",
data={
"file": [
"custom/location/tmp/s3file/s3_file.txt",
"custom/location/tmp/s3file/s3_other_file.txt",
],
"s3file": ["file", "other_file"],
},
)
S3FileMiddleware(lambda x: None)(request)
files = request.FILES.getlist("file")
assert files[0].read() == b"s3file"
assert files[1].read() == b"other s3file"
def test_process_request__no_location(self, rf, settings):
settings.AWS_LOCATION = ""
uploaded_file = SimpleUploadedFile("uploaded_file.txt", b"uploaded")
request = rf.post("/", data={"file": uploaded_file})
S3FileMiddleware(lambda x: None)(request)
assert request.FILES.getlist("file")
assert request.FILES.get("file").read() == b"uploaded"
storage.save("tmp/s3file/s3_file.txt", ContentFile(b"s3file"))
request = rf.post(
"/", data={"file": "tmp/s3file/s3_file.txt", "s3file": "file"}
)
S3FileMiddleware(lambda x: None)(request)
assert request.FILES.getlist("file")
assert request.FILES.get("file").read() == b"s3file"
def test_process_request__no_file(self, rf, caplog):
request = rf.post("/", data={"file": "does_not_exist.txt", "s3file": "file"})
S3FileMiddleware(lambda x: None)(request)
assert not request.FILES.getlist("file")
assert "File not found: does_not_exist.txt" in caplog.text
|
GHSA-4w8f-hjm9-xwgf
|
docassemble_base/docassemble/base/error.py
|
@@ -27,6 +27,9 @@ def __str__(self):
return str(self.value)
+class DASourceError(DAError):
+ pass
+
class DANotFoundError(Exception):
pass
|
import re
valid_variable_match = re.compile(r'^[^\d][A-Za-z0-9\_]*$')
match_brackets_or_dot = re.compile(r'(\[.+?\]|\.[a-zA-Z_][a-zA-Z0-9_]*)')
class DAIndexError(IndexError):
pass
class DAAttributeError(AttributeError):
pass
class DAException(Exception):
pass
class DAError(Exception):
def __init__(self, value, code=501):
self.value = value
self.error_code = code
super().__init__(value)
def __str__(self):
return str(self.value)
class DANotFoundError(Exception):
pass
class DAInvalidFilename(Exception):
pass
class DAValidationError(Exception):
"""This is an Exception object that is used when raising an exception inside input validation code."""
def __init__(self, *pargs, field=None):
self.field = field
super().__init__(*pargs)
class CodeExecute(Exception):
def __init__(self, compute, question):
if isinstance(compute, list):
self.compute = "\n".join(compute)
else:
self.compute = compute
self.question = question
super().__init__()
class ForcedReRun(Exception):
pass
class LazyNameError(NameError):
pass
class DANameError(NameError):
pass
def invalid_variable_name(varname):
if not isinstance(varname, str):
return True
if re.search(r'[\n\r\(\)\{\}\*\^\#]', varname):
return True
varname = re.sub(r'[\.\[].*', '', varname)
if not valid_variable_match.match(varname):
return True
return False
def intrinsic_name_of(var_name, the_user_dict):
from docassemble.base.util import DAObject # pylint: disable=import-outside-toplevel
expression_as_list = [x for x in match_brackets_or_dot.split(var_name) if x != '']
n = len(expression_as_list)
i = n
while i > 0:
try:
item = eval(var_name, the_user_dict)
if isinstance(item, DAObject) and item.has_nonrandom_instance_name:
var_name = item.instanceName
break
except:
pass
i -= 1
var_name = ''.join(expression_as_list[0:i])
return var_name + (''.join(expression_as_list[i:n]))
class ForcedNameError(NameError):
def __init__(self, *pargs, **kwargs):
super().__init__()
the_args = list(pargs)
if len(the_args) == 0:
raise DAError("ForcedNameError must have at least one argument")
the_context = {}
the_user_dict = kwargs.get('user_dict', {})
for var_name in ('x', 'i', 'j', 'k', 'l', 'm', 'n'):
if var_name in the_user_dict:
the_context[var_name] = the_user_dict[var_name]
first_is_plain = bool(isinstance(the_args[0], str))
self.next_action = []
evaluate = kwargs.get('evaluate', False)
while len(the_args) > 0:
arg = the_args.pop(0)
if isinstance(arg, dict):
if (len(arg.keys()) == 2 and 'action' in arg and 'arguments' in arg) or (len(arg.keys()) == 1 and 'action' in arg):
arg['context'] = the_context
self.set_action(arg)
elif len(arg) == 1 and ('undefine' in arg or 'invalidate' in arg or 'recompute' in arg or 'set' in arg or 'follow up' in arg):
if 'set' in arg:
if isinstance(arg['set'], dict):
arg['set'] = [arg['set']]
if not isinstance(arg['set'], list):
raise DAError("force_ask: the set statement must refer to a list.")
clean_list = []
for the_dict in arg['set']:
if not isinstance(the_dict, dict):
raise DAError("force_ask: a set command must refer to a list of dicts.")
for the_var, the_val in the_dict.items():
if not isinstance(the_var, str):
raise DAError("force_ask: a set command must refer to a list of dicts with keys as variable names. ")
the_var_stripped = the_var.strip()
if invalid_variable_name(the_var_stripped):
raise DAError("force_ask: missing or invalid variable name " + repr(the_var) + ".")
clean_list.append([the_var_stripped, the_val])
self.set_action({'action': '_da_set', 'arguments': {'variables': clean_list}, 'context': the_context})
if 'follow up' in arg:
if isinstance(arg['follow up'], str):
arg['follow up'] = [arg['follow up']]
if not isinstance(arg['follow up'], list):
raise DAError("force_ask: the follow up statement must refer to a list.")
for var in arg['follow up']:
if not isinstance(var, str):
raise DAError("force_ask: invalid variable name " + repr(var) + " in follow up.")
var_saveas = var.strip()
if invalid_variable_name(var_saveas):
raise DAError("force_ask: missing or invalid variable name " + repr(var_saveas) + ".")
if evaluate:
var = intrinsic_name_of(var, the_user_dict)
self.set_action({'action': var, 'arguments': {}, 'context': the_context})
for command in ('undefine', 'invalidate', 'recompute'):
if command not in arg:
continue
if isinstance(arg[command], str):
arg[command] = [arg[command]]
if not isinstance(arg[command], list):
raise DAError("force_ask: the " + command + " statement must refer to a list. ")
clean_list = []
for undef_var in arg[command]:
if not isinstance(undef_var, str):
raise DAError("force_ask: invalid variable name " + repr(undef_var) + " in " + command + ".")
undef_saveas = undef_var.strip()
if invalid_variable_name(undef_saveas):
raise DAError("force_ask: missing or invalid variable name " + repr(undef_saveas) + ".")
if evaluate:
undef_saveas = intrinsic_name_of(undef_saveas, the_user_dict)
clean_list.append(undef_saveas)
if command == 'invalidate':
self.set_action({'action': '_da_invalidate', 'arguments': {'variables': clean_list}, 'context': the_context})
else:
self.set_action({'action': '_da_undefine', 'arguments': {'variables': clean_list}, 'context': the_context})
if command == 'recompute':
self.set_action({'action': '_da_compute', 'arguments': {'variables': clean_list}, 'context': the_context})
else:
raise DAError("Dictionaries passed to force_ask must have keys of 'action' and 'argument' only.")
else:
if evaluate:
arg = intrinsic_name_of(arg, the_user_dict)
self.set_action({'action': arg, 'arguments': {}, 'context': the_context})
if kwargs.get('gathering', False):
self.next_action = None
if first_is_plain:
self.arguments = None
def set_action(self, data):
if (not hasattr(self, 'name')) or self.name is None:
if isinstance(data, dict) and 'action' in data and (len(data) == 1 or 'arguments' in data):
self.name = data['action']
self.arguments = data.get('arguments', {})
self.context = data.get('context', {})
else:
raise DAError("force_ask: invalid parameter " + repr(data))
self.next_action.append(data)
class DAErrorNoEndpoint(DAError):
pass
class DAErrorMissingVariable(DAError):
def __init__(self, value, variable=None, code=501):
self.value = value
self.variable = variable
self.error_code = code
super().__init__(value)
class DAErrorCompileError(DAError):
pass
class MandatoryQuestion(Exception):
def __init__(self):
self.value = 'Mandatory Question'
super().__init__()
def __str__(self):
return str(self.value)
class QuestionError(Exception):
def __init__(self, *pargs, **kwargs):
if len(pargs) >= 1:
self.question = pargs[0]
elif 'question' in kwargs:
self.question = kwargs['question']
else:
self.question = "Question not specified"
if len(pargs) >= 2:
self.subquestion = pargs[1]
elif 'subquestion' in kwargs:
self.subquestion = kwargs['subquestion']
else:
self.subquestion = None
if len(pargs) >= 3:
self.url = pargs[2]
elif 'url' in kwargs:
self.url = kwargs['url']
else:
self.url = None
if 'show_leave' in kwargs:
self.show_leave = kwargs['show_leave']
else:
self.show_leave = None
if 'show_exit' in kwargs:
self.show_exit = kwargs['show_exit']
else:
self.show_exit = None
if 'reload' in kwargs:
self.reload = kwargs['reload']
else:
self.reload = None
if 'show_restart' in kwargs:
self.show_restart = kwargs['show_restart']
else:
self.show_restart = None
if 'buttons' in kwargs:
self.buttons = kwargs['buttons']
else:
self.buttons = None
if 'dead_end' in kwargs:
self.dead_end = kwargs['dead_end']
else:
self.dead_end = None
super().__init__()
def __str__(self):
return str(self.question)
class BackgroundResponseError(Exception):
def __init__(self, *pargs, **kwargs):
if len(pargs) > 0 and len(kwargs) > 0:
self.backgroundresponse = {'pargs': list(pargs), 'kwargs': kwargs}
elif len(pargs) > 1:
self.backgroundresponse = list(pargs)
elif len(pargs) == 1:
self.backgroundresponse = pargs[0]
else:
self.backgroundresponse = kwargs
if 'sleep' in kwargs:
self.sleep = kwargs['sleep']
super().__init__()
def __str__(self):
if hasattr(self, 'backgroundresponse'):
return str(self.backgroundresponse)
return "A BackgroundResponseError exception was thrown"
class BackgroundResponseActionError(Exception):
def __init__(self, *pargs, **kwargs):
self.action = {'arguments': {}}
if len(pargs) == 0:
self.action['action'] = None
else:
self.action['action'] = pargs[0]
for key, val in kwargs.items():
self.action['arguments'][key] = val
super().__init__()
def __str__(self):
if hasattr(self, 'action'):
return str(self.action)
return "A BackgroundResponseActionError exception was thrown"
class ResponseError(Exception):
def __init__(self, *pargs, **kwargs):
if len(pargs) == 0 and not ('response' in kwargs or 'binaryresponse' in kwargs or 'all_variables' in kwargs or 'file' in kwargs or 'url' in kwargs or 'null' in kwargs):
self.response = "Empty Response"
if len(pargs) > 0:
self.response = pargs[0]
elif 'response' in kwargs:
self.response = kwargs['response']
elif 'binaryresponse' in kwargs:
self.binaryresponse = kwargs['binaryresponse']
elif 'file' in kwargs:
self.filename = kwargs['file']
elif 'url' in kwargs:
self.url = kwargs['url']
elif 'null' in kwargs:
self.nullresponse = kwargs['null']
if 'response_code' in kwargs and kwargs['response_code'] is not None:
self.response_code = kwargs['response_code']
if 'sleep' in kwargs:
self.sleep = kwargs['sleep']
if 'all_variables' in kwargs:
self.all_variables = kwargs['all_variables']
if 'include_internal' in kwargs:
self.include_internal = kwargs['include_internal']
if 'content_type' in kwargs:
self.content_type = kwargs['content_type']
super().__init__()
def __str__(self):
if hasattr(self, 'response'):
return str(self.response)
return "A ResponseError exception was thrown"
class CommandError(Exception):
def __init__(self, *pargs, **kwargs):
if len(pargs) > 0:
self.return_type = pargs[0]
elif 'type' in kwargs:
self.return_type = kwargs['type']
else:
self.return_type = "exit"
self.url = kwargs.get('url', '')
self.sleep = kwargs.get('sleep', None)
super().__init__()
def __str__(self):
return str(self.return_type)
class DAWebError(Exception):
def __init__(self, **kwargs):
for key, val in kwargs.items():
setattr(self, key, val)
super().__init__()
|
GHSA-pcfx-g2j2-f6f6
|
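The ForcedNameError constructor in the entry above accepts either plain variable names or dicts describing follow-up actions ('action'/'arguments', 'undefine', 'invalidate', 'recompute', 'set', 'follow up'). Based only on the validation logic shown, and not on docassemble's documentation, a hypothetical invocation could look like this:

from docassemble.base.error import ForcedNameError

# Illustrative values only; the variable and action names are made up.
ForcedNameError(
    'user.favorite_color',                          # plain name: ask for this variable
    {'recompute': ['user.summary']},                # re-run the code behind these variables
    {'set': [{'user.reviewed': True}]},             # assign literal values
    {'action': 'review_answers', 'arguments': {}},  # queue a named action
)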
docassemble_base/docassemble/base/pandoc.py
|
@@ -10,16 +10,16 @@
import random
import mimetypes
import urllib.request
+import convertapi
+import requests
+from pikepdf import Pdf
import docassemble.base.filter
import docassemble.base.functions
from docassemble.base.config import daconfig
from docassemble.base.logger import logmessage
from docassemble.base.pdfa import pdf_to_pdfa
from docassemble.base.pdftk import pdf_encrypt
from docassemble.base.error import DAError, DAException
-import convertapi
-import requests
-from pikepdf import Pdf
style_find = re.compile(r'{\s*(\\s([1-9])[^\}]+)\\sbasedon[^\}]+heading ([0-9])', flags=re.DOTALL)
PANDOC_PATH = daconfig.get('pandoc', 'pandoc')
@@ -802,13 +802,31 @@ def concatenate_files(path_list, pdfa=False, password=None, owner_password=None)
new_path_list.append(path)
if len(new_path_list) == 0:
raise DAError("concatenate_files: no valid files to concatenate")
+
if len(new_path_list) == 1:
shutil.copyfile(new_path_list[0], pdf_file.name)
else:
with Pdf.open(new_path_list[0]) as original:
+ need_appearances = False
+ try:
+ if original.Root.AcroForm.NeedAppearances:
+ need_appearances = True
+ except:
+ pass
for additional_file in new_path_list[1:]:
with Pdf.open(additional_file) as additional_pdf:
+ if need_appearances is False:
+ try:
+ if additional_pdf.Root.AcroForm.NeedAppearances:
+ need_appearances = True
+ except:
+ pass
original.pages.extend(additional_pdf.pages)
+ if need_appearances:
+ try:
+ original.Root.AcroForm.NeedAppearances = True
+ except:
+ logmessage("concatenate_files: an additional file had an AcroForm with NeedAppearances but setting NeedAppearances on the final document resulted in an error")
original.save(pdf_file.name)
if pdfa:
pdf_to_pdfa(pdf_file.name)
|
import os
import os.path
import subprocess
import tempfile
import filecmp
import shutil
# import sys
import re
import time
import random
import mimetypes
import urllib.request
import docassemble.base.filter
import docassemble.base.functions
from docassemble.base.config import daconfig
from docassemble.base.logger import logmessage
from docassemble.base.pdfa import pdf_to_pdfa
from docassemble.base.pdftk import pdf_encrypt
from docassemble.base.error import DAError, DAException
import convertapi
import requests
from pikepdf import Pdf
style_find = re.compile(r'{\s*(\\s([1-9])[^\}]+)\\sbasedon[^\}]+heading ([0-9])', flags=re.DOTALL)
PANDOC_PATH = daconfig.get('pandoc', 'pandoc')
def copy_if_different(source, destination):
if (not os.path.isfile(destination)) or filecmp.cmp(source, destination) is False:
shutil.copyfile(source, destination)
def gotenberg_to_pdf(from_file, to_file, pdfa, password, owner_password):
if pdfa:
data = {'nativePdfFormat': 'PDF/A-1a'}
else:
data = {}
r = requests.post(daconfig['gotenberg url'] + '/forms/libreoffice/convert', data=data, files={'files': open(from_file, 'rb')}, timeout=6000)
if r.status_code != 200:
logmessage("call to " + daconfig['gotenberg url'] + " returned status code " + str(r.status_code))
logmessage(r.text)
raise DAException("Call to gotenberg did not succeed")
with open(to_file, 'wb') as fp:
fp.write(r.content)
if password or owner_password:
pdf_encrypt(to_file, password, owner_password)
def cloudconvert_to_pdf(in_format, from_file, to_file, pdfa, password):
headers = {"Authorization": "Bearer " + daconfig.get('cloudconvert secret').strip()}
data = {
"tasks": {
"import-1": {
"operation": "import/upload"
},
"task-1": {
"operation": "convert",
"input_format": in_format,
"output_format": "pdf",
"engine": "office",
"input": [
"import-1"
],
"optimize_print": True,
"pdf_a": pdfa,
"filename": "myoutput.docx"
},
"export-1": {
"operation": "export/url",
"input": [
"task-1"
],
"inline": False,
"archive_multiple_files": False
}
}
}
if password:
data['tasks']['task-1']['password'] = password
r = requests.post("https://api.cloudconvert.com/v2/jobs", json=data, headers=headers, timeout=6000)
resp = r.json()
if 'data' not in resp:
logmessage("cloudconvert_to_pdf: create job returned " + repr(r.text))
raise DAException("cloudconvert_to_pdf: failed to create job")
uploaded = False
for task in resp['data']['tasks']:
if task['name'] == 'import-1':
r = requests.post(task['result']['form']['url'], data=task['result']['form']['parameters'], files={'file': open(from_file, 'rb')}, timeout=6000)
uploaded = True
if not uploaded:
raise DAException("cloudconvert_to_pdf: failed to upload")
r = requests.get("https://sync.api.cloudconvert.com/v2/jobs/%s" % (resp['data']['id'],), headers=headers, timeout=60)
wait_resp = r.json()
if 'data' not in wait_resp:
logmessage("cloudconvert_to_pdf: wait returned " + repr(r.text))
raise DAException("Failed to wait on job")
ok = False
for task in wait_resp['data']['tasks']:
if task['operation'] == "export/url":
for file_result in task['result']['files']:
urllib.request.urlretrieve(file_result['url'], to_file)
ok = True
if not ok:
raise DAException("cloudconvert failed")
def convertapi_to_pdf(from_file, to_file):
convertapi.api_secret = daconfig.get('convertapi secret')
result = convertapi.convert('pdf', {'File': from_file})
result.file.save(to_file)
def get_pandoc_version():
p = subprocess.Popen(
[PANDOC_PATH, '--version'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE
)
version_content = p.communicate()[0].decode('utf-8')
version_content = re.sub(r'\n.*', '', version_content)
version_content = re.sub(r'^pandoc ', '', version_content)
return version_content
PANDOC_INITIALIZED = False
PANDOC_OLD = False
PANDOC_ENGINE = '--pdf-engine=' + daconfig.get('pandoc engine', 'pdflatex')
def initialize_pandoc():
global PANDOC_OLD
global PANDOC_ENGINE
global PANDOC_INITIALIZED
if PANDOC_INITIALIZED:
return
PANDOC_VERSION = get_pandoc_version()
if PANDOC_VERSION.startswith('1'):
PANDOC_OLD = True
PANDOC_ENGINE = '--latex-engine=' + daconfig.get('pandoc engine', 'pdflatex')
else:
PANDOC_OLD = False
try:
subprocess.check_output(['lualatex', '--help'], stderr=subprocess.STDOUT)
assert os.path.isfile('/usr/share/texlive/texmf-dist/tex/luatex/luatexbase/luatexbase.sty')
lualatex_supported = True
except:
lualatex_supported = False
if lualatex_supported:
PANDOC_ENGINE = '--pdf-engine=' + daconfig.get('pandoc engine', 'lualatex')
else:
PANDOC_ENGINE = '--pdf-engine=' + daconfig.get('pandoc engine', 'pdflatex')
PANDOC_INITIALIZED = True
UNOCONV_PATH = daconfig.get('unoconv path', '/usr/bin/daunoconv')
UNOCONV_AVAILABLE = bool('enable unoconv' in daconfig and daconfig['enable unoconv'] is True and os.path.isfile(UNOCONV_PATH) and os.access(UNOCONV_PATH, os.X_OK))
UNOCONV_FILTERS = {'pdfa': ['-e', 'SelectPdfVersion=1', '-e', 'UseTaggedPDF=true'], 'tagged': ['-e', 'UseTaggedPDF=true'], 'default': []}
LIBREOFFICE_PATH = daconfig.get('libreoffice', 'libreoffice')
LIBREOFFICE_MACRO_PATH = daconfig.get('libreoffice macro file', '/var/www/.config/libreoffice/4/user/basic/Standard/Module1.xba')
LIBREOFFICE_INITIALIZED = False
convertible_mimetypes = {"application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx", "application/vnd.oasis.opendocument.text": "odt"}
convertible_extensions = {"docx": "docx", "odt": "odt"}
if daconfig.get('libreoffice', 'libreoffice') is not None:
convertible_mimetypes.update({"application/msword": "doc", "application/rtf": "rtf"})
convertible_extensions.update({"doc": "doc", "rtf": "rtf"})
# fontfamily: zi4, mathptmx, courier
# \ttfamily
# \renewcommand{\thesubsubsubsection}{\alph{subsubsubsection}.}
# \renewcommand{\thesubsubsubsubsection}{\roman{subsubsubsubsection}.}
# - \newenvironment{allcaps}{\startallcaps}{}
# - \def\startallcaps#1\end{\uppercase{#1}\end}
class MyPandoc:
def __init__(self, **kwargs):
initialize_pandoc()
if 'pdfa' in kwargs and kwargs['pdfa']:
self.pdfa = True
else:
self.pdfa = False
self.password = kwargs.get('password', None)
self.owner_password = kwargs.get('owner_password', None)
self.input_content = None
self.output_content = None
self.input_format = 'markdown'
self.output_format = 'rtf'
self.output_extension = 'rtf'
self.output_filename = None
self.template_file = None
self.reference_file = None
self.metadata = {}
self.initial_yaml = []
self.additional_yaml = []
self.arguments = []
def convert_to_file(self, question):
metadata_as_dict = {}
if isinstance(self.metadata, dict):
metadata_as_dict = self.metadata
elif isinstance(self.metadata, list):
for data in self.metadata:
if isinstance(data, dict):
for key in data:
metadata_as_dict[key] = data[key]
if self.output_format == 'rtf to docx':
self.output_extension = 'rtf'
else:
self.output_extension = self.output_format
if self.output_format in ('rtf', 'rtf to docx') and self.template_file is None:
self.template_file = docassemble.base.functions.standard_template_filename('Legal-Template.rtf')
if self.output_format == 'docx' and self.reference_file is None:
self.reference_file = docassemble.base.functions.standard_template_filename('Legal-Template.docx')
if self.output_format in ('pdf', 'tex') and self.template_file is None:
self.template_file = docassemble.base.functions.standard_template_filename('Legal-Template.tex')
yaml_to_use = []
if self.output_format in ('rtf', 'rtf to docx'):
# logmessage("pre input content is " + str(self.input_content))
self.input_content = docassemble.base.filter.rtf_prefilter(self.input_content)
# logmessage("post input content is " + str(self.input_content))
if self.output_format == 'docx':
self.input_content = docassemble.base.filter.docx_filter(self.input_content, metadata=metadata_as_dict, question=question)
if self.output_format in ('pdf', 'tex'):
if len(self.initial_yaml) == 0:
standard_file = docassemble.base.functions.standard_template_filename('Legal-Template.yml')
if standard_file is not None:
self.initial_yaml.append(standard_file)
for yaml_file in self.initial_yaml:
if yaml_file is not None:
yaml_to_use.append(yaml_file)
for yaml_file in self.additional_yaml:
if yaml_file is not None:
yaml_to_use.append(yaml_file)
# logmessage("Before: " + repr(self.input_content))
self.input_content = docassemble.base.filter.pdf_filter(self.input_content, metadata=metadata_as_dict, question=question)
# logmessage("After: " + repr(self.input_content))
if not re.search(r'[^\s]', self.input_content):
self.input_content = "\\textbf{}\n"
temp_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="w", suffix=".md", delete=False, encoding='utf-8')
temp_file.write(self.input_content)
temp_file.close()
temp_outfile = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix="." + str(self.output_extension), delete=False)
temp_outfile.close()
latex_conversion_directory = os.path.join(tempfile.gettempdir(), 'conv')
if not os.path.isdir(latex_conversion_directory):
os.makedirs(latex_conversion_directory, exist_ok=True)
if not os.path.isdir(latex_conversion_directory):
raise DAException("Could not create latex conversion directory")
icc_profile_in_temp = os.path.join(tempfile.gettempdir(), 'sRGB_IEC61966-2-1_black_scaled.icc')
if not os.path.isfile(icc_profile_in_temp):
shutil.copyfile(docassemble.base.functions.standard_template_filename('sRGB_IEC61966-2-1_black_scaled.icc'), icc_profile_in_temp)
subprocess_arguments = [PANDOC_PATH, PANDOC_ENGINE]
if PANDOC_OLD:
subprocess_arguments.append("--smart")
subprocess_arguments.extend(['-M', 'latextmpdir=' + os.path.join('.', 'conv'), '-M', 'pdfa=' + ('true' if self.pdfa else 'false')])
if len(yaml_to_use) > 0:
subprocess_arguments.extend(yaml_to_use)
if self.template_file is not None:
subprocess_arguments.extend(['--template=%s' % self.template_file])
if self.reference_file is not None:
if PANDOC_OLD:
subprocess_arguments.extend(['--reference-docx=%s' % self.reference_file])
else:
subprocess_arguments.extend(['--reference-doc=%s' % self.reference_file])
if self.output_format in ('pdf', 'tex'):
subprocess_arguments.extend(['--from=markdown+raw_tex-latex_macros'])
subprocess_arguments.extend(['-s', '-o', temp_outfile.name])
subprocess_arguments.extend([temp_file.name])
subprocess_arguments.extend(self.arguments)
# logmessage("Arguments are " + str(subprocess_arguments) + " and directory is " + tempfile.gettempdir())
try:
msg = subprocess.check_output(subprocess_arguments, cwd=tempfile.gettempdir(), stderr=subprocess.STDOUT).decode('utf-8', 'ignore')
except subprocess.CalledProcessError as err:
raise DAException("Failed to assemble file: " + err.output.decode())
if msg:
self.pandoc_message = msg
os.remove(temp_file.name)
if os.path.exists(temp_outfile.name):
if self.output_format in ('rtf', 'rtf to docx'):
with open(temp_outfile.name, encoding='utf-8') as the_file:
file_contents = the_file.read()
# with open('/tmp/asdf.rtf', 'w') as deb_file:
# deb_file.write(file_contents)
file_contents = docassemble.base.filter.rtf_filter(file_contents, metadata=metadata_as_dict, styles=get_rtf_styles(self.template_file), question=question)
with open(temp_outfile.name, "wb") as the_file:
the_file.write(bytearray(file_contents, encoding='utf-8'))
if self.output_format == 'rtf to docx':
docx_outfile = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".docx", delete=False)
success = rtf_to_docx(temp_outfile.name, docx_outfile.name)
if not success:
raise DAException("Could not convert RTF to DOCX.")
temp_outfile = docx_outfile
if self.output_filename is not None:
shutil.copyfile(temp_outfile.name, self.output_filename)
else:
self.output_filename = temp_outfile.name
self.output_content = None
if self.output_format == 'pdf' and (self.password or self.owner_password):
pdf_encrypt(self.output_filename, self.password, self.owner_password)
else:
raise IOError("Failed creating file: %s" % temp_outfile.name)
def convert(self, question):
latex_conversion_directory = os.path.join(tempfile.gettempdir(), 'conv')
if not os.path.isdir(latex_conversion_directory):
try:
os.makedirs(latex_conversion_directory, exist_ok=True)
except:
pass
if not os.path.isdir(latex_conversion_directory):
raise DAException("Could not create latex conversion directory")
if self.output_format in ("pdf", "tex", "rtf", "rtf to docx", "epub", "docx"):
self.convert_to_file(question)
else:
subprocess_arguments = [PANDOC_PATH, PANDOC_ENGINE]
if PANDOC_OLD:
input_format = self.input_format
subprocess_arguments.append("--smart")
else:
if self.input_format == 'markdown':
input_format = "markdown+smart"
if self.output_format in ('pdf', 'tex'):
input_format += '+raw_tex-latex_macros'
subprocess_arguments.extend(['-M', 'latextmpdir=' + os.path.join('.', 'conv'), '--from=%s' % input_format, '--to=%s' % self.output_format])
if self.output_format == 'html':
subprocess_arguments.append('--ascii')
subprocess_arguments.extend(self.arguments)
# logmessage("Arguments are " + str(subprocess_arguments))
p = subprocess.Popen(
subprocess_arguments,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=tempfile.gettempdir()
)
self.output_filename = None
# logmessage("input content is a " + self.input_content.__class__.__name__)
# with open('/tmp/moocow1', 'wb') as fp:
# fp.write(bytearray(self.input_content, encoding='utf-8'))
self.output_content = p.communicate(bytearray(self.input_content, encoding='utf-8'))[0]
# with open('/tmp/moocow2', 'wb') as fp:
# fp.write(self.output_content)
self.output_content = self.output_content.decode()
def word_to_pdf(in_file, in_format, out_file, pdfa=False, password=None, owner_password=None, update_refs=False, tagged=False, filename=None, retry=True):
if filename is None:
filename = 'file'
filename = docassemble.base.functions.secure_filename(filename)
tempdir = tempfile.mkdtemp(prefix='SavedFile')
from_file = os.path.join(tempdir, "file." + in_format)
to_file = os.path.join(tempdir, "file.pdf")
shutil.copyfile(in_file, from_file)
tries = 0
if pdfa:
method = 'pdfa'
elif tagged:
method = 'tagged'
else:
method = 'default'
if retry:
num_tries = 5
else:
num_tries = 1
while tries < num_tries:
completed_process = None
use_libreoffice = True
if update_refs:
if daconfig.get('gotenberg url', None) is not None:
update_references(from_file)
try:
gotenberg_to_pdf(from_file, to_file, pdfa, password, owner_password)
result = 0
except Exception as err:
logmessage("Call to gotenberg failed")
logmessage(err.__class__.__name__ + ": " + str(err))
result = 1
use_libreoffice = False
password = False
owner_password = False
elif daconfig.get('convertapi secret', None) is not None:
update_references(from_file)
try:
convertapi_to_pdf(from_file, to_file)
result = 0
except:
logmessage("Call to convertapi failed")
result = 1
use_libreoffice = False
elif daconfig.get('cloudconvert secret', None) is not None:
update_references(from_file)
try:
cloudconvert_to_pdf(in_format, from_file, to_file, pdfa, password)
result = 0
except Exception as err:
logmessage("Call to cloudconvert failed")
logmessage(err.__class__.__name__ + ": " + str(err))
result = 1
use_libreoffice = False
password = False
owner_password = False
else:
if UNOCONV_AVAILABLE:
subprocess_arguments = [UNOCONV_PATH, '-f', 'pdf'] + UNOCONV_FILTERS[method] + ['-e', 'PDFViewSelection=2', '-o', to_file, from_file]
else:
subprocess_arguments = [LIBREOFFICE_PATH, '--headless', '--invisible', 'macro:///Standard.Module1.ConvertToPdf(' + from_file + ',' + to_file + ',True,' + method + ')']
elif daconfig.get('gotenberg url', None) is not None:
try:
gotenberg_to_pdf(from_file, to_file, pdfa, password, owner_password)
result = 0
except Exception as err:
logmessage("Call to gotenberg failed")
logmessage(err.__class__.__name__ + ": " + str(err))
result = 1
use_libreoffice = False
password = False
owner_password = False
elif daconfig.get('convertapi secret', None) is not None:
try:
convertapi_to_pdf(from_file, to_file)
result = 0
except:
logmessage("Call to convertapi failed")
result = 1
use_libreoffice = False
elif daconfig.get('cloudconvert secret', None) is not None:
try:
cloudconvert_to_pdf(in_format, from_file, to_file, pdfa, password)
result = 0
except Exception as err:
logmessage("Call to cloudconvert failed")
logmessage(err.__class__.__name__ + ": " + str(err))
result = 1
use_libreoffice = False
password = False
owner_password = False
else:
if method == 'default':
if UNOCONV_AVAILABLE:
subprocess_arguments = [UNOCONV_PATH, '-f', 'pdf'] + UNOCONV_FILTERS[method] + ['-e', 'PDFViewSelection=2', '-o', to_file, from_file]
else:
subprocess_arguments = [LIBREOFFICE_PATH, '--headless', '--invisible', 'macro:///Standard.Module1.ConvertToPdf(' + from_file + ',' + to_file + ',False,' + method + ')']
else:
if UNOCONV_AVAILABLE:
subprocess_arguments = [UNOCONV_PATH, '-f', 'pdf'] + UNOCONV_FILTERS[method] + ['-e', 'PDFViewSelection=2', '-o', to_file, from_file]
else:
subprocess_arguments = [LIBREOFFICE_PATH, '--headless', '--invisible', 'macro:///Standard.Module1.ConvertToPdf(' + from_file + ',' + to_file + ',False,' + method + ')']
if use_libreoffice:
start_time = time.time()
if UNOCONV_AVAILABLE:
docassemble.base.functions.server.applock('obtain', 'unoconv', maxtime=6)
logmessage("Trying unoconv with " + repr(subprocess_arguments))
try:
completed_process = subprocess.run(subprocess_arguments, cwd=tempdir, timeout=120, check=False, capture_output=True)
result = completed_process.returncode
except subprocess.TimeoutExpired:
logmessage("word_to_pdf: unoconv took too long")
result = 1
tries = 5
docassemble.base.functions.server.applock('release', 'unoconv', maxtime=6)
logmessage("Finished unoconv after {:.4f} seconds.".format(time.time() - start_time))
else:
initialize_libreoffice()
logmessage("Trying libreoffice with " + repr(subprocess_arguments))
docassemble.base.functions.server.applock('obtain', 'libreoffice')
logmessage("Obtained libreoffice lock after {:.4f} seconds.".format(time.time() - start_time))
try:
completed_process = subprocess.run(subprocess_arguments, cwd=tempdir, timeout=120, check=False, capture_output=True)
result = completed_process.returncode
except subprocess.TimeoutExpired:
logmessage("word_to_pdf: libreoffice took too long")
result = 1
tries = 5
logmessage("Finished libreoffice after {:.4f} seconds.".format(time.time() - start_time))
docassemble.base.functions.server.applock('release', 'libreoffice')
if result == 0:
time.sleep(0.1)
if os.path.isfile(to_file) and os.path.getsize(to_file) > 0:
break
time.sleep(0.1)
if os.path.isfile(to_file) and os.path.getsize(to_file) > 0:
break
time.sleep(0.1)
if os.path.isfile(to_file) and os.path.getsize(to_file) > 0:
break
time.sleep(0.1)
if os.path.isfile(to_file) and os.path.getsize(to_file) > 0:
break
time.sleep(0.1)
if os.path.isfile(to_file) and os.path.getsize(to_file) > 0:
break
result = 1
tries += 1
if tries < num_tries:
if use_libreoffice:
error_msg = (f": {completed_process.stderr}") if completed_process else ""
if UNOCONV_AVAILABLE:
logmessage(f"Didn't get file ({error_msg}), Retrying unoconv with " + repr(subprocess_arguments))
else:
logmessage(f"Didn't get file ({error_msg}), Retrying libreoffice with " + repr(subprocess_arguments))
elif daconfig.get('gotenberg url', None) is not None:
logmessage("Retrying gotenberg")
elif daconfig.get('convertapi secret', None) is not None:
logmessage("Retrying convertapi")
else:
logmessage("Retrying cloudconvert")
time.sleep(tries*random.random())
if os.path.isfile(to_file) and os.path.getsize(to_file) == 0:
result = 1
if result == 0:
if password:
pdf_encrypt(to_file, password, owner_password)
shutil.copyfile(to_file, out_file)
if tempdir is not None:
shutil.rmtree(tempdir)
if result != 0:
return False
return True
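# rtf_to_docx: convert an RTF file to DOCX with unoconv or LibreOffice, retrying up to five times;
# returns True on success.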
def rtf_to_docx(in_file, out_file):
tempdir = tempfile.mkdtemp(prefix='SavedFile')
from_file = os.path.join(tempdir, "file.rtf")
to_file = os.path.join(tempdir, "file.docx")
shutil.copyfile(in_file, from_file)
if UNOCONV_AVAILABLE:
subprocess_arguments = [UNOCONV_PATH, '-f', 'docx', '-o', to_file, from_file]
else:
initialize_libreoffice()
subprocess_arguments = [LIBREOFFICE_PATH, '--headless', '--invisible', '--convert-to', 'docx', from_file, '--outdir', tempdir]
# logmessage("rtf_to_docx: creating " + to_file + " by doing " + " ".join(subprocess_arguments))
tries = 0
while tries < 5:
if UNOCONV_AVAILABLE:
try:
result = subprocess.run(subprocess_arguments, cwd=tempdir, timeout=120, check=False).returncode
except subprocess.TimeoutExpired:
logmessage("rtf_to_docx: call to unoconv took too long")
result = 1
tries = 5
if result != 0:
logmessage("rtf_to_docx: call to unoconv returned non-zero response")
else:
docassemble.base.functions.server.applock('obtain', 'libreoffice')
try:
result = subprocess.run(subprocess_arguments, cwd=tempdir, timeout=120, check=False).returncode
except subprocess.TimeoutExpired:
logmessage("rtf_to_docx: call to LibreOffice took too long")
result = 1
tries = 5
docassemble.base.functions.server.applock('release', 'libreoffice')
if result != 0:
logmessage("rtf_to_docx: call to LibreOffice returned non-zero response")
if result == 0 and os.path.isfile(to_file):
break
result = 1
tries += 1
if tries < 5:
if UNOCONV_AVAILABLE:
logmessage("rtf_to_docx: retrying unoconv")
else:
logmessage("rtf_to_docx: retrying LibreOffice")
time.sleep(0.5 + tries*random.random())
if result == 0:
shutil.copyfile(to_file, out_file)
if tempdir is not None:
shutil.rmtree(tempdir)
if result != 0:
return False
return True
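# convert_file: generic conversion between the given input and output extensions using unoconv or
# LibreOffice; returns True on success.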
def convert_file(in_file, out_file, input_extension, output_extension):
if not UNOCONV_AVAILABLE:
initialize_libreoffice()
tempdir1 = tempfile.mkdtemp(prefix='SavedFile')
tempdir2 = tempfile.mkdtemp(prefix='SavedFile')
from_file = os.path.join(tempdir1, "file." + input_extension)
to_file = os.path.join(tempdir2, "file." + output_extension)
shutil.copyfile(in_file, from_file)
if UNOCONV_AVAILABLE:
subprocess_arguments = [UNOCONV_PATH, '-f', output_extension, '-o', to_file, from_file]
else:
subprocess_arguments = [LIBREOFFICE_PATH, '--headless', '--invisible', '--convert-to', output_extension, from_file, '--outdir', tempdir2]
# logmessage("convert_to: creating " + to_file + " by doing " + " ".join(subprocess_arguments))
tries = 0
while tries < 5:
if UNOCONV_AVAILABLE:
try:
result = subprocess.run(subprocess_arguments, cwd=tempdir1, timeout=120, check=False).returncode
except subprocess.TimeoutExpired:
logmessage("convert_file: unoconv took too long")
result = 1
tries = 5
if result != 0:
logmessage("convert_file: call to unoconv returned non-zero response")
else:
docassemble.base.functions.server.applock('obtain', 'libreoffice')
try:
result = subprocess.run(subprocess_arguments, cwd=tempdir1, timeout=120, check=False).returncode
except subprocess.TimeoutExpired:
logmessage("convert_file: libreoffice took too long")
result = 1
tries = 5
docassemble.base.functions.server.applock('release', 'libreoffice')
if result != 0:
logmessage("convert_file: call to LibreOffice returned non-zero response")
if result == 0 and os.path.isfile(to_file):
break
result = 1
tries += 1
if tries < 5:
if UNOCONV_AVAILABLE:
logmessage("convert_file: retrying unoconv")
else:
logmessage("convert_file: retrying libreoffice")
time.sleep(0.5 + tries*random.random())
if result == 0:
shutil.copyfile(to_file, out_file)
if tempdir1 is not None:
shutil.rmtree(tempdir1)
if tempdir2 is not None:
shutil.rmtree(tempdir2)
if result != 0:
return False
return True
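# word_to_markdown: convert a word-processing file to Markdown via Pandoc, first converting it to
# DOCX with unoconv or LibreOffice if necessary; returns a NamedTemporaryFile with the result, or
# None on failure.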
def word_to_markdown(in_file, in_format):
if not UNOCONV_AVAILABLE:
initialize_libreoffice()
temp_file = tempfile.NamedTemporaryFile(mode="wb", suffix=".md")
if in_format not in ['docx', 'odt']:
tempdir = tempfile.mkdtemp(prefix='SavedFile')
from_file = os.path.join(tempdir, "file." + in_format)
to_file = os.path.join(tempdir, "file.docx")
shutil.copyfile(in_file, from_file)
if UNOCONV_AVAILABLE:
subprocess_arguments = [UNOCONV_PATH, '-f', 'docx', '-o', to_file, from_file]
else:
subprocess_arguments = [LIBREOFFICE_PATH, '--headless', '--invisible', '--convert-to', 'docx', from_file, '--outdir', tempdir]
tries = 0
while tries < 5:
if UNOCONV_AVAILABLE:
if tries > 0:
logmessage("word_to_markdown: retrying unoconv")
try:
result = subprocess.run(subprocess_arguments, cwd=tempdir, timeout=120, check=False).returncode
except subprocess.TimeoutExpired:
logmessage("word_to_markdown: unoconv took too long")
result = 1
tries = 5
if result != 0:
logmessage("word_to_markdown: call to unoconv returned non-zero response")
else:
docassemble.base.functions.server.applock('obtain', 'libreoffice')
try:
result = subprocess.run(subprocess_arguments, cwd=tempdir, timeout=120, check=False).returncode
except subprocess.TimeoutExpired:
logmessage("word_to_markdown: libreoffice took too long")
result = 1
tries = 5
docassemble.base.functions.server.applock('release', 'libreoffice')
if result != 0:
logmessage("word_to_markdown: call to LibreOffice returned non-zero response")
if result == 0 and os.path.isfile(to_file):
break
result = 1
tries += 1
if tries < 5:
if UNOCONV_AVAILABLE:
logmessage("word_to_markdown: retrying unoconv")
else:
logmessage("word_to_markdown: retrying LibreOffice")
time.sleep(0.5 + tries*random.random())
if result != 0:
return None
in_file_to_use = to_file
in_format_to_use = 'docx'
else:
in_file_to_use = in_file
in_format_to_use = in_format
tempdir = None
subprocess_arguments = [PANDOC_PATH, PANDOC_ENGINE]
if PANDOC_OLD:
subprocess_arguments.append("--smart")
else:
if in_format_to_use == 'markdown':
in_format_to_use = "markdown+smart"
subprocess_arguments.extend(['--from=%s' % str(in_format_to_use), '--to=markdown_phpextra', str(in_file_to_use), '-o', str(temp_file.name)])
try:
result = subprocess.run(subprocess_arguments, timeout=60, check=False).returncode
except subprocess.TimeoutExpired:
result = 1
if tempdir is not None:
shutil.rmtree(tempdir)
if result == 0:
final_file = tempfile.NamedTemporaryFile(mode="wb", suffix=".md")
with open(temp_file.name, 'r', encoding='utf-8') as the_file:
file_contents = the_file.read()
file_contents = re.sub(r'\\([\$\[\]])', lambda x: x.group(1), file_contents)
with open(final_file.name, "w", encoding='utf-8') as the_file:
the_file.write(file_contents)
return final_file
return None
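# get_rtf_styles: scan an RTF template for heading style definitions and return a mapping from
# heading number to style string.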
def get_rtf_styles(filename):
file_contents = ''
styles = {}
with open(filename, 'r', encoding='utf-8') as the_file:
file_contents = the_file.read()
for (style_string, style_number, heading_number) in re.findall(style_find, file_contents): # pylint: disable=unused-variable
style_string = re.sub(r'\s+', ' ', style_string, flags=re.DOTALL)
# logmessage("heading " + str(heading_number) + " is style " + str(style_number))
styles[heading_number] = style_string
return styles
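# update_references: refresh indexes and cross-references in a DOCX file, either through a
# docx-to-docx pass with unoconv or by running the LibreOffice PysIndexer macro; returns True on
# success.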
def update_references(filename):
if UNOCONV_AVAILABLE:
with tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".docx", delete=False) as temp_file:
logmessage("update_references: converting docx to docx")
result = convert_file(filename, temp_file.name, 'docx', 'docx')
if result:
shutil.copyfile(temp_file.name, filename)
return result
initialize_libreoffice()
subprocess_arguments = [LIBREOFFICE_PATH, '--headless', '--invisible', 'macro:///Standard.Module1.PysIndexer(' + filename + ')']
tries = 0
while tries < 5:
docassemble.base.functions.server.applock('obtain', 'libreoffice')
try:
result = subprocess.run(subprocess_arguments, cwd=tempfile.gettempdir(), timeout=120, check=False).returncode
except subprocess.TimeoutExpired:
result = 1
tries = 5
docassemble.base.functions.server.applock('release', 'libreoffice')
if result == 0:
break
logmessage("update_references: call to LibreOffice returned non-zero response")
tries += 1
if tries < 5:
logmessage("update_references: retrying LibreOffice")
time.sleep(0.5 + tries*random.random())
if result != 0:
return False
return True
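# initialize_libreoffice: one-time setup that runs a throwaway DOCX-to-PDF conversion (apparently
# to create the LibreOffice user profile) and then copies the Module1.xba macro into place.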
def initialize_libreoffice():
global LIBREOFFICE_INITIALIZED
if LIBREOFFICE_INITIALIZED:
return
LIBREOFFICE_INITIALIZED = True
if not os.path.isfile(LIBREOFFICE_MACRO_PATH):
logmessage("No LibreOffice macro path exists")
temp_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf")
word_file = docassemble.base.functions.package_template_filename('docassemble.demo:data/templates/template_test.docx')
word_to_pdf(word_file, 'docx', temp_file.name, pdfa=False, password=None, owner_password=None, retry=False)
del temp_file
del word_file
orig_path = docassemble.base.functions.package_template_filename('docassemble.base:data/macros/Module1.xba')
try:
assert os.path.isdir(os.path.dirname(LIBREOFFICE_MACRO_PATH))
# logmessage("Copying LibreOffice macro from " + orig_path)
copy_if_different(orig_path, LIBREOFFICE_MACRO_PATH)
except:
logmessage("Could not copy LibreOffice macro into place")
def concatenate_files(path_list, pdfa=False, password=None, owner_password=None):
pdf_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
new_path_list = []
for path in path_list:
mimetype, encoding = mimetypes.guess_type(path) # pylint: disable=unused-variable
if mimetype.startswith('image'):
new_pdf_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
args = [daconfig.get('imagemagick', 'convert'), path, new_pdf_file.name]
try:
result = subprocess.run(args, timeout=60, check=False).returncode
except subprocess.TimeoutExpired:
logmessage("concatenate_files: convert took too long")
result = 1
if result != 0:
logmessage("failed to convert image to PDF: " + " ".join(args))
continue
new_path_list.append(new_pdf_file.name)
elif mimetype in ('application/rtf', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'application/msword', 'application/vnd.oasis.opendocument.text'):
new_pdf_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
if mimetype == 'application/rtf':
ext = 'rtf'
elif mimetype == 'application/vnd.openxmlformats-officedocument.wordprocessingml.document':
ext = 'docx'
elif mimetype == 'application/msword':
ext = 'doc'
elif mimetype == 'application/vnd.oasis.opendocument.text':
ext = 'odt'
if not word_to_pdf(path, ext, new_pdf_file.name, pdfa=False):
raise DAException('Failure to convert DOCX to PDF')
new_path_list.append(new_pdf_file.name)
elif mimetype == 'application/pdf':
new_path_list.append(path)
if len(new_path_list) == 0:
raise DAError("concatenate_files: no valid files to concatenate")
if len(new_path_list) == 1:
shutil.copyfile(new_path_list[0], pdf_file.name)
else:
with Pdf.open(new_path_list[0]) as original:
for additional_file in new_path_list[1:]:
with Pdf.open(additional_file) as additional_pdf:
original.pages.extend(additional_pdf.pages)
original.save(pdf_file.name)
if pdfa:
pdf_to_pdfa(pdf_file.name)
if password or owner_password:
pdf_encrypt(pdf_file.name, password, owner_password)
return pdf_file.name
|
GHSA-pcfx-g2j2-f6f6
|
docassemble_base/docassemble/base/pdftk.py
|
@@ -160,7 +160,7 @@ def recursively_add_fields(fields, id_to_page, outfields, prefix='', parent_ft=N
outfields.append((prefix, default, pageno, rect, field_type, export_value))
-def fill_template(template, data_strings=None, data_names=None, hidden=None, readonly=None, images=None, pdf_url=None, editable=True, pdfa=False, password=None, owner_password=None, template_password=None, default_export_value=None, replacement_font=None):
+def fill_template(template, data_strings=None, data_names=None, hidden=None, readonly=None, images=None, pdf_url=None, editable=True, pdfa=False, password=None, owner_password=None, template_password=None, default_export_value=None, replacement_font=None, use_pdftk=False):
if data_strings is None:
data_strings = []
if data_names is None:
@@ -219,7 +219,7 @@ def fill_template(template, data_strings=None, data_names=None, hidden=None, rea
for key, val in data_strings:
data_dict[key] = val
pdf_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
- if pdfa or not editable:
+ if pdfa or not editable or use_pdftk:
fdf = Xfdf(pdf_url, data_dict)
# fdf = fdfgen.forge_fdf(pdf_url, data_strings, data_names, hidden, readonly)
fdf_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".xfdf", delete=False)
@@ -251,7 +251,10 @@ def fill_template(template, data_strings=None, data_names=None, hidden=None, rea
if len(images) > 0:
subprocess_arguments.append('need_appearances')
else:
- subprocess_arguments.append('flatten')
+ if pdfa or not editable:
+ subprocess_arguments.append('flatten')
+ else:
+ subprocess_arguments.append('need_appearances')
completed_process = None
try:
completed_process = subprocess.run(subprocess_arguments, timeout=600, check=False, capture_output=True)
@@ -272,7 +275,6 @@ def fill_template(template, data_strings=None, data_names=None, hidden=None, rea
pdf = Pdf.open(template, password=template_password)
else:
pdf = Pdf.open(template)
- pdf.Root.AcroForm.NeedAppearances = True
for page in pdf.pages:
if not hasattr(page, 'Annots'):
continue
@@ -320,6 +322,9 @@ def fill_template(template, data_strings=None, data_names=None, hidden=None, rea
the_string = pikepdf.String(value)
annot.V = the_string
annot.DV = the_string
+ pdf.Root.AcroForm.NeedAppearances = True
+ pdf.generate_appearance_streams()
+ pdf.Root.AcroForm.NeedAppearances = True
if len(images) == 0:
pdf.save(pdf_file.name)
pdf.close()
|
import subprocess
import tempfile
import shutil
import re
import os
import string
import codecs
import logging
from io import BytesIO
import packaging
from xfdfgen import Xfdf
import pikepdf
import img2pdf
from pikepdf import Pdf
from PIL import Image
from docassemble.base.error import DAError, DAException
from docassemble.base.pdfa import pdf_to_pdfa
from docassemble.base.logger import logmessage
from docassemble.base.functions import word
from docassemble.base.config import daconfig
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdftypes import resolve1, PDFObjRef
from pdfminer.pdfpage import PDFPage
logging.getLogger('pdfminer').setLevel(logging.ERROR)
PDFTK_PATH = 'pdftk'
QPDF_PATH = 'qpdf'
SYSTEM_VERSION = daconfig.get('system version', None)
REPLACEMENT_FONT_SUPPORTED = SYSTEM_VERSION is not None and packaging.version.parse(SYSTEM_VERSION) >= packaging.version.parse("1.4.73")
REPLACEMENT_FONT_SUPPORTED = False
DEFAULT_RENDERING_FONT = daconfig.get('default rendering font', None)
if REPLACEMENT_FONT_SUPPORTED and DEFAULT_RENDERING_FONT and os.path.isfile(DEFAULT_RENDERING_FONT):
DEFAULT_FONT_ARGUMENTS = ['replacement_font', DEFAULT_RENDERING_FONT]
else:
DEFAULT_FONT_ARGUMENTS = []
def set_pdftk_path(path):
global PDFTK_PATH
PDFTK_PATH = path
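# read_fields: use pdfminer to walk the AcroForm field tree of a PDF and return
# (name, default, page, rect, type, export value) tuples sorted by position.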
def read_fields(pdffile):
outfields = []
fp = open(pdffile, 'rb')
id_to_page = {}
parser = PDFParser(fp)
doc = PDFDocument(parser)
pageno = 1
for page in PDFPage.create_pages(doc):
id_to_page[page.pageid] = pageno
pageno += 1
if 'AcroForm' not in doc.catalog:
return []
fields = resolve1(doc.catalog['AcroForm'])['Fields']
recursively_add_fields(fields, id_to_page, outfields)
return sorted(outfields, key=fieldsorter)
def fieldsorter(x):
if x[3] and isinstance(x[3], list):
x_coord = x[3][0]
y_coord = -1 * x[3][1]
else:
x_coord = 0
y_coord = 0
return (x[2], y_coord, x_coord)
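# recursively_add_fields: descend through AcroForm fields and their kids, resolving indirect
# references and appending one tuple per terminal field to outfields.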
def recursively_add_fields(fields, id_to_page, outfields, prefix='', parent_ft=None):
if isinstance(fields, PDFObjRef):
fields = resolve1(fields)
for i in fields:
field = resolve1(i)
if isinstance(field, PDFObjRef):
field = resolve1(field)
try:
name, value, rect, page, field_type = field.get('T'), field.get('V'), field.get('Rect'), field.get('P'), field.get('FT')
if field_type is None:
widget_type = str(field.get("Type"))
if widget_type in ("/'Annot'", "/Annot"):
field_type = parent_ft
except:
logmessage("Skipping field " + repr(field))
continue
if isinstance(rect, PDFObjRef):
rect = resolve1(rect)
if isinstance(rect, list):
new_list = []
for item in rect:
if isinstance(item, PDFObjRef):
new_list.append(resolve1(item))
else:
new_list.append(item)
rect = new_list
else:
rect = []
if name is not None:
if not isinstance(name, bytes):
name = bytes(str(name), encoding='utf-8')
name = remove_nonprintable_bytes_limited(name)
if value is not None:
if not isinstance(value, bytes):
value = bytes(str(value), encoding='utf-8')
value = remove_nonprintable_bytes_limited(value)
# logmessage("name is " + repr(name) + " and FT is |" + repr(str(field_type)) + "| and value is " + repr(value))
if page is not None and hasattr(page, 'objid'):
try:
pageno = id_to_page[page.objid]
except:
pageno = 1
else:
pageno = 1
export_value = None
if str(field_type) in ('/Btn', "/'Btn'"):
export_value = 'Yes'
try:
for key in list(field['AP']['N'].keys()):
if key in ('Off', 'off'): # , 'No', 'no'
continue
export_value = key
break
except:
pass
if value == '/Yes':
default = export_value
else:
default = "No"
elif str(field_type) in ('/Sig', "/'Sig'"):
default = '${ user.signature }'
else:
if value is not None:
# for val in value:
# logmessage("Got a " + str(ord(val)))
# logmessage(repr(value.decode('utf8')))
# default = re.sub(r'^\xc3\xbe\xc3\xbf', '', value)
default = value
if not default:
default = word("something")
else:
default = word("something")
kids = field.get('Kids')
if kids:
if name is None:
recursively_add_fields(kids, id_to_page, outfields, prefix=prefix, parent_ft=field_type)
else:
if prefix == '':
recursively_add_fields(kids, id_to_page, outfields, prefix=name, parent_ft=field_type)
else:
recursively_add_fields(kids, id_to_page, outfields, prefix=prefix + '.' + name, parent_ft=field_type)
else:
if prefix != '' and name is not None:
outfields.append((prefix + '.' + name, default, pageno, rect, field_type, export_value))
elif prefix == '':
outfields.append((name, default, pageno, rect, field_type, export_value))
else:
outfields.append((prefix, default, pageno, rect, field_type, export_value))
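# fill_template: fill the form fields of a PDF template, either through pdftk with an XFDF file
# (when a flattened or PDF/A result is needed) or directly with pikepdf; then handle signature
# images, optional flattening, PDF/A conversion, and encryption, and return the path of the
# filled PDF. A hypothetical call might look like
# fill_template('form.pdf', data_strings=[('name', 'Jane Doe')], editable=False).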
def fill_template(template, data_strings=None, data_names=None, hidden=None, readonly=None, images=None, pdf_url=None, editable=True, pdfa=False, password=None, owner_password=None, template_password=None, default_export_value=None, replacement_font=None):
if data_strings is None:
data_strings = []
if data_names is None:
data_names = []
if hidden is None:
hidden = []
if readonly is None:
readonly = []
if images is None:
images = []
if pdf_url is None:
pdf_url = 'file.pdf'
if not pdf_url.endswith('.pdf'):
pdf_url += '.pdf'
the_fields = read_fields(template)
if len(the_fields) == 0:
raise DAError("PDF template has no fields in it.")
export_values = {}
for field, default, pageno, rect, field_type, export_value in the_fields: # pylint: disable=unused-variable
field_type = re.sub(r'[^/A-Za-z]', '', str(field_type))
if field_type in ('/Btn', "/'Btn'"):
if field in export_values:
export_values[field].append(export_value or default_export_value or 'Yes')
else:
export_values[field] = [export_value or default_export_value or 'Yes']
if len(export_values) > 0:
new_data_strings = []
for key, val in data_strings:
if key in export_values and len(export_values[key]) > 0:
if len(export_values[key]) > 1:
# Implies a radio button, so val should stay the same. Check for yes vs True, since
# parse.py turns "true" into "yes".
# Just turn things off if it doesn't match any value
if 'True' in export_values[key] and val in ('Yes', 'yes'):
val = 'True'
if 'False' in export_values[key] and val in ('No', 'no'):
val = 'False'
if val not in export_values[key]:
val = 'Off'
else:
export_val = export_values[key][0]
if str(val) in ('Yes', 'yes', 'True', 'true', 'On', 'on', export_val):
val = export_val
else:
if export_val == 'On':
val = 'Off'
elif export_val == 'on':
val = 'off'
elif export_val == 'yes':
val = 'no'
else:
val = 'No'
new_data_strings.append((key, val))
data_strings = new_data_strings
data_dict = {}
for key, val in data_strings:
data_dict[key] = val
pdf_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
if pdfa or not editable:
fdf = Xfdf(pdf_url, data_dict)
# fdf = fdfgen.forge_fdf(pdf_url, data_strings, data_names, hidden, readonly)
fdf_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".xfdf", delete=False)
# fdf_file.write(fdf)
fdf_file.close()
fdf.write_xfdf(fdf_file.name)
if template_password is not None:
template_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
qpdf_subprocess_arguments = [QPDF_PATH, '--decrypt', '--password=' + template_password, template, template_file.name]
try:
result = subprocess.run(qpdf_subprocess_arguments, timeout=60, check=False).returncode
except subprocess.TimeoutExpired:
result = 1
logmessage("fill_template: call to qpdf took too long")
if result != 0:
logmessage("Failed to decrypt PDF template " + str(template))
raise DAError("Call to qpdf failed for template " + str(template) + " where arguments were " + " ".join(qpdf_subprocess_arguments))
template = template_file.name
if replacement_font:
if REPLACEMENT_FONT_SUPPORTED:
font_arguments = ['replacement_font', replacement_font]
else:
logmessage("Warning: the rendering font feature requires system version 1.4.73 or later")
font_arguments = []
else:
font_arguments = DEFAULT_FONT_ARGUMENTS
subprocess_arguments = [PDFTK_PATH, template, 'fill_form', fdf_file.name, 'output', pdf_file.name] + font_arguments
# logmessage("Arguments are " + str(subprocess_arguments))
if len(images) > 0:
subprocess_arguments.append('need_appearances')
else:
subprocess_arguments.append('flatten')
completed_process = None
try:
completed_process = subprocess.run(subprocess_arguments, timeout=600, check=False, capture_output=True)
result = completed_process.returncode
except subprocess.TimeoutExpired:
result = 1
logmessage("fill_template: call to pdftk fill_form took too long")
if result != 0:
logmessage("Failed to fill PDF form " + str(template))
pdftk_error_msg = (f": {completed_process.stderr}") if completed_process else ""
raise DAError("Call to pdftk failed for template " + str(template) + " where arguments were " + " ".join(subprocess_arguments) + pdftk_error_msg)
if len(images) > 0:
temp_pdf_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
shutil.copyfile(pdf_file.name, temp_pdf_file.name)
pdf = Pdf.open(temp_pdf_file.name)
else:
if template_password:
pdf = Pdf.open(template, password=template_password)
else:
pdf = Pdf.open(template)
pdf.Root.AcroForm.NeedAppearances = True
for page in pdf.pages:
if not hasattr(page, 'Annots'):
continue
for the_annot in page.Annots:
for field, value in data_dict.items():
annot = the_annot
annot_kid = None
while not (hasattr(annot, "FT") and hasattr(annot, "T")) and hasattr(annot, 'Parent'):
annot_kid = annot
annot = annot.Parent
if not (hasattr(annot, "T") and hasattr(annot, "FT")):
continue
if field != str(annot.T):
continue
field_type = str(annot.FT)
if field_type == "/Tx":
the_string = pikepdf.String(value)
annot.V = the_string
annot.DV = the_string
elif field_type == "/Btn":
if hasattr(annot, "A"):
continue
the_name = pikepdf.Name('/' + value)
annot.V = the_name
annot.DV = the_name
# Could be radio button: if it is, set the appearance stream of the
# correct child annot
if (annot_kid is not None and hasattr(annot_kid, "AP")
and hasattr(annot_kid.AP, "N")):
annot.AS = the_name
if the_name in annot_kid.AP.N.keys():
annot_kid.AS = the_name
else:
for off in ["/Off", "/off"]:
if off in annot_kid.AP.N.keys():
annot_kid.AS = off
elif (hasattr(annot, "AP") and hasattr(annot.AP, "N")):
if the_name in annot.AP.N.keys():
annot.AS = the_name
elif field_type == "/Ch":
opt_list = [str(item) for item in annot.Opt]
if value not in opt_list:
opt_list.append(value)
annot.Opt = pikepdf.Array(opt_list)
the_string = pikepdf.String(value)
annot.V = the_string
annot.DV = the_string
if len(images) == 0:
pdf.save(pdf_file.name)
pdf.close()
if len(images) > 0:
fields = {}
for field, default, pageno, rect, field_type, export_value in the_fields:
if str(field_type) in ('/Sig', "/'Sig'"):
fields[field] = {'pageno': pageno, 'rect': rect}
image_todo = []
for field, file_info in images:
if field not in fields:
logmessage("field name " + str(field) + " not found in PDF file")
continue
temp_png = tempfile.NamedTemporaryFile(mode="wb", suffix=".png")
args = [daconfig.get('imagemagick', 'convert'), file_info['fullpath'], "-trim", "+repage", "+profile", '*', '-density', '0', temp_png.name]
try:
result = subprocess.run(args, timeout=60, check=False).returncode
except subprocess.TimeoutExpired:
logmessage("fill_template: convert took too long")
result = 1
if result == 1:
logmessage("failed to trim file: " + " ".join(args))
continue
im = Image.open(temp_png.name)
width, height = im.size
xone, yone, xtwo, ytwo = fields[field]['rect']
dppx = width/(xtwo-xone)
dppy = height/(ytwo-yone)
if dppx > dppy:
dpp = dppx
x_offset = 0
y_offset = int(0.5 * ((ytwo - yone) * dpp - height))
else:
dpp = dppy
x_offset = int(0.5 * ((xtwo - xone) * dpp - width))
y_offset = 0
new_im = Image.new('RGBA', (int((xtwo - xone) * dpp), int((ytwo - yone) * dpp)), (255, 0, 0, 0))
new_im.paste(im, (x_offset, y_offset))
overlay_pdf_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
with BytesIO() as output:
new_im.save(output, 'PNG')
overlay_pdf_file.write(img2pdf.convert(output.getvalue()))
overlay_pdf_file.close()
image_todo.append({'overlay_file': overlay_pdf_file.name, 'pageno': fields[field]['pageno'], 'field': field})
if len(image_todo) > 0:
for item in image_todo:
xone, yone, xtwo, ytwo = fields[item['field']]['rect']
# logmessage("Trying to save to page " + repr(item['pageno'] - 1))
with Pdf.open(item['overlay_file']) as overlay_file:
overlay_page = overlay_file.pages[0]
pdf.pages[item['pageno'] - 1].add_overlay(overlay_page, rect=pikepdf.Rectangle(xone, yone, xtwo, ytwo))
pdf.save(pdf_file.name)
pdf.close()
if (pdfa or not editable) and len(images) > 0:
flatten_pdf(pdf_file.name)
if pdfa:
pdf_to_pdfa(pdf_file.name)
if password or owner_password:
pdf_encrypt(pdf_file.name, password, owner_password)
return pdf_file.name
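# pdf_encrypt: apply user and/or owner passwords to a PDF with pdftk (printing remains allowed)
# and replace the original file in place.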
def pdf_encrypt(filename, user_password, owner_password):
# logmessage("pdf_encrypt: running; password is " + repr(password))
outfile = tempfile.NamedTemporaryFile(prefix="datemp", suffix=".pdf", delete=False)
if owner_password is None:
commands = ['pdftk', filename, 'output', outfile.name, 'user_pw', user_password, 'allow', 'printing']
elif user_password is None:
commands = ['pdftk', filename, 'output', outfile.name, 'owner_pw', owner_password, 'allow', 'printing']
else:
commands = ['pdftk', filename, 'output', outfile.name, 'owner_pw', owner_password, 'user_pw', user_password, 'allow', 'printing']
try:
output = subprocess.check_output(commands, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
output = err.output
raise DAError("pdf_encrypt: error running pdftk. " + output)
# logmessage(' '.join(commands))
# logmessage(output)
shutil.move(outfile.name, filename)
def remove_nonprintable(text):
final = str()
for char in text:
if char in string.printable:
final += char
return final
def remove_nonprintable_bytes(byte_list):
if isinstance(byte_list, str):
return bytearray(remove_nonprintable(byte_list), 'utf-8')
final = str()
for the_int in byte_list:
if chr(the_int) in string.printable:
final += chr(the_int)
return bytearray(final, 'utf-8')
def remove_nonprintable_bytes_limited(byte_list):
final = bytes()
if len(byte_list) >= 2 and byte_list[0] == 254 and byte_list[1] == 255:
byte_list = byte_list[2:]
for the_int in byte_list:
if the_int > 0:
final += bytes([the_int])
return codecs.decode(final, 'latin1')
def remove_nonprintable_limited(text):
text = re.sub(r'^\xfe\xff', '', text)
text = re.sub(r'\x00', '', text)
return codecs.decode(text, 'latin1')
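# flatten_pdf: flatten form fields in place by running pdftk with the 'flatten' operation.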
def flatten_pdf(filename):
# logmessage("flatten_pdf: running")
outfile = tempfile.NamedTemporaryFile(prefix="datemp", suffix=".pdf", delete=False)
subprocess_arguments = [PDFTK_PATH, filename, 'output', outfile.name, 'flatten']
# logmessage("Arguments are " + str(subprocess_arguments))
completed_process = None
try:
completed_process = subprocess.run(subprocess_arguments, timeout=60, check=False, capture_output=True)
result = completed_process.returncode
except subprocess.TimeoutExpired:
result = 1
logmessage("flatten_pdf: call to pdftk took too long")
if result != 0:
logmessage("Failed to flatten PDF form")
pdftk_error_msg = (f": {completed_process.stderr}") if completed_process else ""
raise DAError("Call to pdftk failed for template where arguments were " + " ".join(subprocess_arguments) + pdftk_error_msg)
shutil.move(outfile.name, filename)
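# overlay_pdf_multi: stamp the pages of logo_file onto the corresponding pages of main_file using
# pdftk's multistamp operation.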
def overlay_pdf_multi(main_file, logo_file, out_file):
subprocess_arguments = [PDFTK_PATH, main_file, 'multistamp', logo_file, 'output', out_file]
try:
result = subprocess.run(subprocess_arguments, timeout=60, check=False).returncode
except subprocess.TimeoutExpired:
result = 1
logmessage("overlay_pdf_multi: call to pdftk took too long")
if result != 0:
logmessage("Failed to overlay PDF")
raise DAError("Call to pdftk failed for overlay where arguments were " + " ".join(subprocess_arguments))
def overlay_pdf(main_file, logo_file, out_file, first_page=None, last_page=None, logo_page=None, only=None):
main_pdf = Pdf.open(main_file)
logo_pdf = Pdf.open(logo_file)
if first_page is None or first_page < 1:
first_page = 1
if last_page is None or last_page < 1:
last_page = len(main_pdf.pages)
if first_page > len(main_pdf.pages):
first_page = len(main_pdf.pages)
last_page = max(last_page, first_page)
if logo_page is None or logo_page < 1:
logo_page = 1
if logo_page > len(logo_pdf.pages):
logo_page = len(logo_pdf.pages)
for page_no in range(first_page - 1, last_page):
if only == 'even':
if page_no % 2 == 0:
continue
elif only == 'odd':
if page_no % 2 != 0:
continue
main_pdf.pages[page_no].add_overlay(logo_pdf.pages[logo_page - 1])
main_pdf.save(out_file)
logo_pdf.close()
main_pdf.close()
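# apply_qpdf: rewrite a possibly damaged PDF in place using pikepdf's qpdf-based Job interface.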
def apply_qpdf(filename):
new_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
try:
pikepdf.Job(['pikepdf', filename, new_file.name]).run()
except Exception as err:
raise DAError("Could not fix PDF: " + err.__class__.__name__ + ": " + str(err))
shutil.copyfile(new_file.name, filename)
os.remove(new_file.name)
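# extract_pages: copy pages first through last of input_path into output_path using pdftk's
# cat operation.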
def extract_pages(input_path, output_path, first, last):
subprocess_arguments = [PDFTK_PATH, input_path, 'cat', str(first) + '-' + str(last), 'output', output_path]
try:
result = subprocess.run(subprocess_arguments, timeout=60, check=False).returncode
except subprocess.TimeoutExpired:
raise DAException("call to pdftk took too long where arguments were " + " ".join(subprocess_arguments))
if result != 0:
raise DAException("call to pdftk failed where arguments were " + " ".join(subprocess_arguments))
|
GHSA-pcfx-g2j2-f6f6
|
docassemble_webapp/docassemble/webapp/develop.py
|
@@ -2,6 +2,7 @@
from flask_wtf import FlaskForm
from docassemble.base.functions import LazyWord as word
from wtforms import validators, ValidationError, StringField, SubmitField, TextAreaField, SelectMultipleField, SelectField, FileField, HiddenField, RadioField, BooleanField
+from docassemble.webapp.validators import html_validator
import packaging
@@ -37,21 +38,21 @@ def validate_package_name(form, field): # pylint: disable=unused-argument
class CreatePackageForm(FlaskForm):
name = StringField(word('Package name'), validators=[
- validators.DataRequired(word('Package name is required')), validate_name])
+ validators.DataRequired(word('Package name is required')), validate_name, html_validator])
submit = SubmitField(word('Get template'))
class CreatePlaygroundPackageForm(FlaskForm):
name = SelectField(word('Package'), validators=[
- validators.DataRequired(word('Package name is required')), validate_name])
+ validators.DataRequired(word('Package name is required')), validate_name, html_validator])
submit = SubmitField(word('Get package'))
class UpdatePackageForm(FlaskForm):
- giturl = StringField(word('GitHub URL'))
- gitbranch = SelectField(word('GitHub Branch'))
+ giturl = StringField(word('GitHub URL'), validators=[html_validator])
+ gitbranch = SelectField(word('GitHub Branch'), validators=[html_validator])
zipfile = FileField(word('Zip File'))
- pippackage = StringField(word('Package on PyPI'))
+ pippackage = StringField(word('Package on PyPI'), validators=[html_validator])
submit = SubmitField(word('Update'))
@@ -64,7 +65,7 @@ class ConfigForm(FlaskForm):
class PlaygroundForm(FlaskForm):
status = StringField('Status')
original_playground_name = StringField(word('Original Name'))
- playground_name = StringField(word('Name'), [validators.Length(min=1, max=255)])
+ playground_name = StringField(word('Name'), [validators.Length(min=1, max=255), html_validator])
playground_content = TextAreaField(word('Playground YAML'))
search_term = StringField(word('Search'))
submit = SubmitField(word('Save'))
@@ -108,7 +109,7 @@ class PlaygroundFilesEditForm(FlaskForm):
purpose = StringField('Purpose')
section = StringField(word('Section'))
original_file_name = StringField(word('Original Name'))
- file_name = StringField(word('Name'), [validators.Length(min=1, max=255)])
+ file_name = StringField(word('Name'), [validators.Length(min=1, max=255), html_validator])
search_term = StringField(word('Search'))
file_content = TextAreaField(word('File Text'))
active_file = StringField(word('Active File'))
@@ -118,7 +119,7 @@ class PlaygroundFilesEditForm(FlaskForm):
class RenameProject(FlaskForm):
name = StringField(word('New Name'), validators=[
- validators.DataRequired(word('Project name is required')), validate_project_name])
+ validators.DataRequired(word('Project name is required')), validate_project_name, html_validator])
submit = SubmitField(word('Rename'))
@@ -128,29 +129,29 @@ class DeleteProject(FlaskForm):
class NewProject(FlaskForm):
name = StringField(word('Name'), validators=[
- validators.DataRequired(word('Project name is required')), validate_project_name])
+ validators.DataRequired(word('Project name is required')), validate_project_name, html_validator])
submit = SubmitField(word('Save'))
class PullPlaygroundPackage(FlaskForm):
- github_url = StringField(word('GitHub URL'))
- github_branch = SelectField(word('GitHub Branch'))
- pypi = StringField(word('PyPI package'))
+ github_url = StringField(word('GitHub URL'), validators=[html_validator])
+ github_branch = SelectField(word('GitHub Branch'), validators=[html_validator])
+ pypi = StringField(word('PyPI package'), validators=[html_validator])
pull = SubmitField(word('Pull'))
cancel = SubmitField(word('Cancel'))
class PlaygroundPackagesForm(FlaskForm):
- original_file_name = StringField(word('Original Name'))
+ original_file_name = StringField(word('Original Name'), validators=[html_validator])
file_name = StringField(word('Package Name'), validators=[validators.Length(min=1, max=50),
validators.DataRequired(word('Package Name is required')),
- validate_package_name])
- license = StringField(word('License'), default='The MIT License (MIT)', validators=[validators.Length(min=0, max=255)])
- author_name = StringField(word('Author Name'), validators=[validators.Length(min=0, max=255)])
- author_email = StringField(word('Author E-mail'), validators=[validators.Length(min=0, max=255)])
- description = StringField(word('Description'), validators=[validators.Length(min=0, max=255)], default="A docassemble extension.")
- version = StringField(word('Version'), validators=[validators.Length(min=0, max=255), validate_package_version], default="0.0.1")
- url = StringField(word('URL'), validators=[validators.Length(min=0, max=255)], default="")
+ validate_package_name, html_validator])
+ license = StringField(word('License'), default='The MIT License (MIT)', validators=[validators.Length(min=0, max=255), html_validator])
+ author_name = StringField(word('Author Name'), validators=[validators.Length(min=0, max=255), html_validator])
+ author_email = StringField(word('Author E-mail'), validators=[validators.Length(min=0, max=255), html_validator])
+ description = StringField(word('Description'), validators=[validators.Length(min=0, max=255), html_validator], default="A docassemble extension.")
+ version = StringField(word('Version'), validators=[validators.Length(min=0, max=255), validate_package_version, html_validator], default="0.0.1")
+ url = StringField(word('URL'), validators=[validators.Length(min=0, max=255), html_validator], default="")
dependencies = SelectMultipleField(word('Dependencies'))
interview_files = SelectMultipleField(word('Interview files'))
template_files = SelectMultipleField(word('Template files'))
@@ -159,7 +160,7 @@ class PlaygroundPackagesForm(FlaskForm):
sources_files = SelectMultipleField(word('Source files'))
readme = TextAreaField(word('README file'), default='')
github_branch = NonValidatingSelectField(word('Branch'))
- github_branch_new = StringField(word('Name of new branch'))
+ github_branch_new = StringField(word('Name of new branch'), validators=[html_validator])
commit_message = StringField(word('Commit message'), default="")
pypi_also = BooleanField(word('Publish on PyPI also'))
install_also = BooleanField(word('Install package on this server also'))
@@ -222,7 +223,7 @@ class APIKey(FlaskForm):
action = HiddenField()
key = HiddenField()
security = HiddenField()
- name = StringField(word('Name'), validators=[validators.Length(min=1, max=255)])
+ name = StringField(word('Name'), validators=[validators.Length(min=1, max=255), html_validator])
method = SelectField(word('Security Method'))
permissions = SelectMultipleField(word('Limited Permissions'))
submit = SubmitField(word('Create'))
|
import re
from flask_wtf import FlaskForm
from docassemble.base.functions import LazyWord as word
from wtforms import validators, ValidationError, StringField, SubmitField, TextAreaField, SelectMultipleField, SelectField, FileField, HiddenField, RadioField, BooleanField
import packaging
class NonValidatingSelectField(SelectField):
def pre_validate(self, form):
pass
def validate_project_name(form, field): # pylint: disable=unused-argument
if re.search(r'^[0-9]', field.data):
raise ValidationError(word('Project name cannot begin with a number'))
if re.search(r'[^A-Za-z0-9]', field.data):
raise ValidationError(word('Valid characters are: A-Z, a-z, 0-9'))
def validate_name(form, field): # pylint: disable=unused-argument
if re.search(r'[^A-Za-z0-9\-]', field.data):
raise ValidationError(word('Valid characters are: A-Z, a-z, 0-9, hyphen'))
def validate_package_version(form, field): # pylint: disable=unused-argument
try:
packaging.version.Version(field.data)
except packaging.version.InvalidVersion:
raise ValidationError(word('Version number does not conform to PEP 440'))
def validate_package_name(form, field): # pylint: disable=unused-argument
if re.search(r'[^A-Za-z0-9]', field.data):
raise ValidationError(word('Valid characters are: A-Z, a-z, 0-9'))
class CreatePackageForm(FlaskForm):
name = StringField(word('Package name'), validators=[
validators.DataRequired(word('Package name is required')), validate_name])
submit = SubmitField(word('Get template'))
class CreatePlaygroundPackageForm(FlaskForm):
name = SelectField(word('Package'), validators=[
validators.DataRequired(word('Package name is required')), validate_name])
submit = SubmitField(word('Get package'))
class UpdatePackageForm(FlaskForm):
giturl = StringField(word('GitHub URL'))
gitbranch = SelectField(word('GitHub Branch'))
zipfile = FileField(word('Zip File'))
pippackage = StringField(word('Package on PyPI'))
submit = SubmitField(word('Update'))
class ConfigForm(FlaskForm):
config_content = TextAreaField(word('Configuration YAML'))
submit = SubmitField(word('Save'))
cancel = SubmitField(word('Cancel'))
class PlaygroundForm(FlaskForm):
status = StringField('Status')
original_playground_name = StringField(word('Original Name'))
playground_name = StringField(word('Name'), [validators.Length(min=1, max=255)])
playground_content = TextAreaField(word('Playground YAML'))
search_term = StringField(word('Search'))
submit = SubmitField(word('Save'))
run = SubmitField(word('Save and Run'))
delete = SubmitField(word('Delete'))
class PlaygroundUploadForm(FlaskForm):
uploadfile = FileField(word('File to upload'))
class LogForm(FlaskForm):
filter_string = StringField(word('Filter For'))
file_name = StringField(word('File Name'))
submit = SubmitField(word('Apply'))
clear = SubmitField(word('Clear'))
class Utilities(FlaskForm):
pdfdocxfile = FileField(word('PDF/DOCX File'))
scan = SubmitField(word('Scan'))
interview = StringField(word('Interview'))
interview_submit = SubmitField(word('Download'))
language = StringField(word('Language'))
tr_language = StringField(word('Language'))
systemfiletype = SelectField(word('Output Format'))
filetype = SelectField(word('Output Format'))
language_submit = SubmitField(word('Translate'))
officeaddin_version = StringField(word('Version'), default='0.0.0.1')
officeaddin_submit = SubmitField(word('Download'))
class PlaygroundFilesForm(FlaskForm):
purpose = StringField('Purpose')
section = StringField(word('Section'))
uploadfile = FileField(word('File to upload'))
submit = SubmitField(word('Upload'))
class PlaygroundFilesEditForm(FlaskForm):
purpose = StringField('Purpose')
section = StringField(word('Section'))
original_file_name = StringField(word('Original Name'))
file_name = StringField(word('Name'), [validators.Length(min=1, max=255)])
search_term = StringField(word('Search'))
file_content = TextAreaField(word('File Text'))
active_file = StringField(word('Active File'))
submit = SubmitField(word('Save'))
delete = SubmitField(word('Delete'))
class RenameProject(FlaskForm):
name = StringField(word('New Name'), validators=[
validators.DataRequired(word('Project name is required')), validate_project_name])
submit = SubmitField(word('Rename'))
class DeleteProject(FlaskForm):
submit = SubmitField(word('Delete'))
class NewProject(FlaskForm):
name = StringField(word('Name'), validators=[
validators.DataRequired(word('Project name is required')), validate_project_name])
submit = SubmitField(word('Save'))
class PullPlaygroundPackage(FlaskForm):
github_url = StringField(word('GitHub URL'))
github_branch = SelectField(word('GitHub Branch'))
pypi = StringField(word('PyPI package'))
pull = SubmitField(word('Pull'))
cancel = SubmitField(word('Cancel'))
class PlaygroundPackagesForm(FlaskForm):
original_file_name = StringField(word('Original Name'))
file_name = StringField(word('Package Name'), validators=[validators.Length(min=1, max=50),
validators.DataRequired(word('Package Name is required')),
validate_package_name])
license = StringField(word('License'), default='The MIT License (MIT)', validators=[validators.Length(min=0, max=255)])
author_name = StringField(word('Author Name'), validators=[validators.Length(min=0, max=255)])
author_email = StringField(word('Author E-mail'), validators=[validators.Length(min=0, max=255)])
description = StringField(word('Description'), validators=[validators.Length(min=0, max=255)], default="A docassemble extension.")
version = StringField(word('Version'), validators=[validators.Length(min=0, max=255), validate_package_version], default="0.0.1")
url = StringField(word('URL'), validators=[validators.Length(min=0, max=255)], default="")
dependencies = SelectMultipleField(word('Dependencies'))
interview_files = SelectMultipleField(word('Interview files'))
template_files = SelectMultipleField(word('Template files'))
module_files = SelectMultipleField(word('Modules'))
static_files = SelectMultipleField(word('Static files'))
sources_files = SelectMultipleField(word('Source files'))
readme = TextAreaField(word('README file'), default='')
github_branch = NonValidatingSelectField(word('Branch'))
github_branch_new = StringField(word('Name of new branch'))
commit_message = StringField(word('Commit message'), default="")
pypi_also = BooleanField(word('Publish on PyPI also'))
install_also = BooleanField(word('Install package on this server also'))
submit = SubmitField(word('Save'))
download = SubmitField(word('Download'))
install = SubmitField(word('Install'))
pypi = SubmitField(word('PyPI'))
github = SubmitField(word('GitHub'))
cancel = SubmitField(word('Cancel'))
delete = SubmitField(word('Delete'))
class GoogleDriveForm(FlaskForm):
folder = SelectField(word('Folder'))
submit = SubmitField(word('Save'))
cancel = SubmitField(word('Cancel'))
class OneDriveForm(FlaskForm):
folder = SelectField(word('Folder'))
submit = SubmitField(word('Save'))
cancel = SubmitField(word('Cancel'))
class GitHubForm(FlaskForm):
shared = BooleanField(word('Access shared repositories'))
orgs = BooleanField(word('Access organizational repositories'))
save = SubmitField(word('Save changes'))
configure = SubmitField(word('Configure'))
unconfigure = SubmitField(word('Disable'))
cancel = SubmitField(word('Back to profile'))
class TrainingForm(FlaskForm):
the_package = HiddenField()
the_file = HiddenField()
the_group_id = HiddenField()
show_all = HiddenField()
submit = SubmitField(word('Save'))
cancel = SubmitField(word('Cancel'))
class TrainingUploadForm(FlaskForm):
usepackage = RadioField(word('Use Package'))
jsonfile = FileField(word('JSON file'))
importtype = RadioField(word('Import method'))
submit = SubmitField(word('Import'))
class AddinUploadForm(FlaskForm):
content = HiddenField()
filename = HiddenField()
class FunctionFileForm(FlaskForm):
pass
class APIKey(FlaskForm):
action = HiddenField()
key = HiddenField()
security = HiddenField()
name = StringField(word('Name'), validators=[validators.Length(min=1, max=255)])
method = SelectField(word('Security Method'))
permissions = SelectMultipleField(word('Limited Permissions'))
submit = SubmitField(word('Create'))
delete = SubmitField(word('Delete'))
def validate(self, extra_validators=None):
rv = FlaskForm.validate(self, extra_validators=extra_validators)
if not rv:
return False
if self.action.data not in ('edit', 'new'):
return False
has_error = False
if self.action.data in ('edit', 'new'):
if (not isinstance(self.name.data, str)) or not re.search(r'[A-Za-z0-9]', self.name.data):
self.name.errors.append(word("The name must be filled in."))
has_error = True
if (not isinstance(self.method.data, str)) or self.method.data not in ('referer', 'ip', 'none'):
self.name.errors.append(word("You must select an option."))
has_error = True
if has_error:
return False
return True
|
GHSA-pcfx-g2j2-f6f6
|
docassemble_webapp/docassemble/webapp/server.py
|
@@ -49,7 +49,7 @@
from docassemble.webapp.setup import da_version
import docassemble.base.astparser
from docassemble.webapp.api_key import encrypt_api_key
-from docassemble.base.error import DAError, DAErrorNoEndpoint, DAErrorMissingVariable, DAErrorCompileError, DAValidationError, DAException, DANotFoundError, DAInvalidFilename
+from docassemble.base.error import DAError, DAErrorNoEndpoint, DAErrorMissingVariable, DAErrorCompileError, DAValidationError, DAException, DANotFoundError, DAInvalidFilename, DASourceError
import docassemble.base.functions
from docassemble.base.functions import get_default_timezone, ReturnValue, word
import docassemble.base.DA
@@ -159,7 +159,6 @@
docassemble.base.util.set_svm_machine_learner(docassemble.webapp.machinelearning.SVMMachineLearner)
-
min_system_version = '1.2.0'
re._MAXCACHE = 10000
@@ -1178,12 +1177,17 @@ def my_default_url(error, endpoint, values): # pylint: disable=unused-argument
def make_safe_url(url):
+ if url in ('help', 'login', 'signin', 'restart', 'new_session', 'exit', 'interview', 'logout', 'exit_logout', 'leave', 'register', 'profile', 'change_password', 'interviews', 'dispatch', 'manage', 'config', 'playground', 'playgroundtemplate', 'playgroundstatic', 'playgroundsources', 'playgroundmodules', 'playgroundpackages', 'configuration', 'root', 'temp_url', 'login_url', 'exit_endpoint', 'interview_start', 'interview_list', 'playgroundfiles', 'create_playground_package', 'run', 'run_interview_in_package', 'run_dispatch', 'run_new', 'run_new_dispatch'):
+ return url
parts = urlsplit(url)
safe_url = parts.path
if parts.query != '':
safe_url += '?' + parts.query
if parts.fragment != '':
safe_url += '#' + parts.fragment
+ if len(safe_url) > 0 and safe_url[0] not in ('?', '#', '/'):
+ safe_url = '/' + safe_url
+ safe_url = re.sub(r'^//+', '/', safe_url)
return safe_url
@@ -1238,6 +1242,7 @@ def password_validator(form, field): # pylint: disable=unused-argument
if DEBUG_BOOT:
boot_log("server: finished setting up Flask")
+
def url_for_interview(**args):
for k, v in daconfig.get('dispatch').items():
if v == args['i']:
@@ -5840,7 +5845,7 @@ def github_oauth_callback():
return ('File not found', 404)
setup_translation()
failed = False
- do_redirect = False
+ do_a_redirect = False
if not app.config['USE_GITHUB']:
logmessage('github_oauth_callback: server does not use github')
failed = True
@@ -5853,14 +5858,14 @@ def github_oauth_callback():
if 'code' not in request.args or 'state' not in request.args:
logmessage('github_oauth_callback: code and state not in args')
failed = True
- do_redirect = True
+ do_a_redirect = True
elif request.args['state'] != github_next['state']:
logmessage('github_oauth_callback: state did not match')
failed = True
if failed:
r.delete('da:github:userid:' + str(current_user.id))
r.delete('da:using_github:userid:' + str(current_user.id))
- if do_redirect:
+ if do_a_redirect:
flash(word("There was a problem connecting to GitHub. Please check your GitHub configuration and try again."), 'danger')
return redirect(url_for('github_menu'))
return ('File not found', 404)
@@ -8265,7 +8270,7 @@ def index(action_argument=None, refer=None):
the_field = validation_error.field
logmessage("field is " + the_field)
if the_field not in key_to_orig_key:
- for item in key_to_orig_key.keys():
+ for item in key_to_orig_key:
if item.startswith(the_field + '['):
the_field = item
break
@@ -10915,7 +10920,7 @@ def index(action_argument=None, refer=None):
$(query).each(function(){
var showIfParent = $(this).parents('.dashowif,.dajsshowif');
if (!(showIfParent.length && ($(showIfParent[0]).data('isVisible') == '0' || !$(showIfParent[0]).is(":visible")))){
- if ($(this).hasClass('combobox')){
+ if ($(this).prop('tagName') == 'INPUT' && $(this).hasClass('combobox')){
if (value){
daComboBoxes[$(this).attr('id')].disable();
}
@@ -14001,7 +14006,7 @@ def observer():
$(query).each(function(){
var showIfParent = $(this).parents('.dashowif, .dajsshowif');
if (!(showIfParent.length && ($(showIfParent[0]).data('isVisible') == '0' || !$(showIfParent[0]).is(":visible")))){
- if ($(this).hasClass('combobox')){
+ if ($(this).prop('tagName') == 'INPUT' && $(this).hasClass('combobox')){
if (value){
daComboBoxes[$(this).attr('id')].disable();
}
@@ -23043,7 +23048,14 @@ def server_error(the_error):
else:
the_history = None
the_vars = None
- if isinstance(the_error, (DAError, DANotFoundError, DAInvalidFilename)):
+ if isinstance(the_error, DASourceError):
+ if (DEBUG and daconfig.get('development site is protected', False)) or (current_user.is_authenticated and current_user.has_role('admin', 'developer')):
+ errmess = str(the_error)
+ else:
+ errmess = word("There was an error. Please contact the system administrator.")
+ the_trace = None
+ logmessage(str(the_error))
+ elif isinstance(the_error, (DAError, DANotFoundError, DAInvalidFilename)):
errmess = str(the_error)
the_trace = None
logmessage(errmess)
@@ -23073,7 +23085,10 @@ def server_error(the_error):
errmess += "\nIn field index number " + str(docassemble.base.functions.this_thread.misc['current_field'])
if hasattr(the_error, 'da_line_with_error'):
errmess += "\nIn line: " + str(the_error.da_line_with_error)
-
+ try:
+ logmessage(errmess)
+ except:
+ logmessage("Could not log the error message")
logmessage(the_trace)
if isinstance(the_error, DAError):
error_code = the_error.error_code
@@ -23296,7 +23311,7 @@ def server_error(the_error):
if 'in error' not in session and docassemble.base.functions.this_thread.interview is not None and 'error action' in docassemble.base.functions.this_thread.interview.consolidated_metadata:
session['in error'] = True
return index(action_argument={'action': docassemble.base.functions.this_thread.interview.consolidated_metadata['error action'], 'arguments': {'error_message': orig_errmess, 'error_history': the_history, 'error_trace': the_trace}}, refer=['error'])
- show_debug = not bool((not DEBUG) and isinstance(the_error, (DAError, DAInvalidFilename)))
+ show_debug = not bool((not (DEBUG and daconfig.get('development site is protected', False))) and isinstance(the_error, (DAError, DAInvalidFilename)))
if int(int(error_code)/100) == 4:
show_debug = False
if error_code == 404:
@@ -27000,6 +27015,7 @@ def invite_user(email_address, privilege=None, send=True):
return None
return accept_invite_link
+
@app.route('/api/user_invite', methods=['POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['POST', 'HEAD'], automatic_options=True)
|
import ast
import base64
import codecs
import copy
import datetime
import email as emailpackage
import filecmp
import fnmatch
import hashlib
from pathlib import Path
import importlib
import importlib.resources
import inspect
from io import BytesIO, TextIOWrapper
import json
import logging
import math
import mimetypes
import operator
import os
import pickle
import re
import shutil
import stat
import subprocess
from subprocess import Popen, PIPE
import sys
import tarfile
import tempfile
import time
import traceback
import types
import unicodedata
import urllib
from urllib.parse import quote as urllibquote, quote_plus as urllibquoteplus
from urllib.parse import unquote as urllibunquote
from urllib.parse import urlparse, urlunparse, urlencode, urlsplit, parse_qsl
from urllib.request import urlretrieve
import uuid
import xml.etree.ElementTree as ET
import zipfile
import docassemble.base.config
if not docassemble.base.config.loaded:
docassemble.base.config.load()
from docassemble.base.config import daconfig, hostname, in_celery, in_cron, DEBUG_BOOT, START_TIME, boot_log
import docassemble.webapp.setup
from docassemble.webapp.setup import da_version
import docassemble.base.astparser
from docassemble.webapp.api_key import encrypt_api_key
from docassemble.base.error import DAError, DAErrorNoEndpoint, DAErrorMissingVariable, DAErrorCompileError, DAValidationError, DAException, DANotFoundError, DAInvalidFilename
import docassemble.base.functions
from docassemble.base.functions import get_default_timezone, ReturnValue, word
import docassemble.base.DA
from docassemble.base.generate_key import random_string, random_lower_string, random_alphanumeric, random_digits
import docassemble.base.interview_cache
from docassemble.base.logger import logmessage
from docassemble.base.pandoc import word_to_markdown, convertible_mimetypes, convertible_extensions
import docassemble.base.parse
import docassemble.base.pdftk
from docassemble.base.standardformatter import as_html, as_sms, get_choices_with_abb
import docassemble.base.util
from docassemble.base.util import DAEmail, DAEmailRecipientList, DAEmailRecipient, DAFileList, DAFile, DAObject, DAFileCollection, DAStaticFile, DADict, DAList
import docassemble.base.core # for backward-compatibility with data pickled in earlier versions
from docassemble.webapp.app_object import app, csrf
import docassemble.webapp.backend
from docassemble.webapp.backend import cloud, initial_dict, can_access_file_number, get_info_from_file_number, get_info_from_file_number_with_uids, da_send_mail, get_new_file_number, encrypt_phrase, pack_phrase, decrypt_phrase, unpack_phrase, encrypt_dictionary, pack_dictionary, decrypt_dictionary, unpack_dictionary, nice_date_from_utc, fetch_user_dict, fetch_previous_user_dict, advance_progress, reset_user_dict, get_chat_log, save_numbered_file, generate_csrf, get_info_from_file_reference, write_ml_source, fix_ml_files, is_package_ml, file_set_attributes, file_user_access, file_privilege_access, url_if_exists, get_person, Message, url_for, encrypt_object, decrypt_object, delete_user_data, delete_temp_user_data, clear_session, clear_specific_session, guess_yaml_filename, get_session, update_session, get_session_uids, project_name, directory_for, add_project
import docassemble.webapp.clicksend
import docassemble.webapp.telnyx
from docassemble.webapp.core.models import Uploads, UploadsUserAuth, SpeakList, Supervisors, Shortener, Email, EmailAttachment, MachineLearning, GlobalObjectStorage
from docassemble.webapp.daredis import r, r_user, r_store
from docassemble.webapp.db_object import db
from docassemble.webapp.develop import CreatePackageForm, CreatePlaygroundPackageForm, UpdatePackageForm, ConfigForm, PlaygroundForm, PlaygroundUploadForm, LogForm, Utilities, PlaygroundFilesForm, PlaygroundFilesEditForm, PlaygroundPackagesForm, GoogleDriveForm, OneDriveForm, GitHubForm, PullPlaygroundPackage, TrainingForm, TrainingUploadForm, APIKey, AddinUploadForm, FunctionFileForm, RenameProject, DeleteProject, NewProject
from docassemble.webapp.files import SavedFile, get_ext_and_mimetype
from docassemble.webapp.fixpickle import fix_pickle_obj
from docassemble.webapp.info import system_packages
from docassemble.webapp.jsonstore import read_answer_json, write_answer_json, delete_answer_json, variables_snapshot_connection
import docassemble.webapp.machinelearning
from docassemble.webapp.packages.models import Package, PackageAuth
from docassemble.webapp.playground import PlaygroundSection
from docassemble.webapp.screenreader import to_text
from docassemble.webapp.translations import setup_translation
from docassemble.webapp.users.forms import MyRegisterForm, MySignInForm, PhoneLoginForm, PhoneLoginVerifyForm, MFASetupForm, MFAReconfigureForm, MFALoginForm, MFAChooseForm, MFASMSSetupForm, MFAVerifySMSSetupForm, MyResendConfirmEmailForm, ManageAccountForm, RequestDeveloperForm, InterviewsListForm
from docassemble.webapp.users.models import UserAuthModel, UserModel, UserDict, UserDictKeys, TempUser, ChatLog, MyUserInvitation, Role, UserRoles, AnonymousUserModel
from docassemble.webapp.users.views import user_profile_page
if not in_celery:
import docassemble.webapp.worker
import celery.exceptions
import packaging
import apiclient
from bs4 import BeautifulSoup
from Crypto.Hash import MD5
from Crypto.PublicKey import RSA
import dateutil
import dateutil.parser
from dateutil import tz
import docassemble_flask_user.emails
import docassemble_flask_user.forms
import docassemble_flask_user.signals
import docassemble_flask_user.views
from docassemble_flask_user import UserManager, SQLAlchemyAdapter
from docassemble_flask_user import login_required, roles_required, user_logged_in, user_changed_password, user_registered
from docassemblekvsession import KVSessionExtension
from docassemble_textstat.textstat import textstat
from flask import make_response, abort, render_template, render_template_string, request, session, send_file, redirect, current_app, get_flashed_messages, flash, jsonify, Response, g
from markupsafe import Markup
from flask_cors import cross_origin
from flask_login import LoginManager
from flask_login import login_user, logout_user, current_user
from flask_wtf.csrf import CSRFError
import googleapiclient.discovery
import httplib2
import humanize
from jinja2.exceptions import TemplateError
import links_from_header
import oauth2client.client
import pandas
from PIL import Image
import pyotp
from pygments import highlight
from pygments.formatters import HtmlFormatter # pylint: disable=no-name-in-module
from pygments.lexers import YamlLexer # pylint: disable=no-name-in-module
# pytz should be loaded into memory because pickled objects might use it
import pytz # noqa: F401 # pylint: disable=unused-import
try:
import zoneinfo
except ImportError:
from backports import zoneinfo
import qrcode
import qrcode.image.svg
from rauth import OAuth1Service, OAuth2Service
from google.oauth2 import id_token
from google.auth.transport import requests as google_requests
import requests
import ruamel.yaml
from simplekv.memory.redisstore import RedisStore
from sqlalchemy import or_, and_, not_, select, delete as sqldelete, update
import tailer
import twilio.twiml
import twilio.twiml.messaging_response
import twilio.twiml.voice_response
from twilio.rest import Client as TwilioRestClient
import werkzeug.exceptions
import werkzeug.utils
import wtforms
import xlsxwriter
from user_agents import parse as ua_parse
import yaml as standardyaml
if DEBUG_BOOT:
boot_log("server: done importing modules")
docassemble.base.util.set_knn_machine_learner(docassemble.webapp.machinelearning.SimpleTextMachineLearner)
docassemble.base.util.set_machine_learning_entry(docassemble.webapp.machinelearning.MachineLearningEntry)
docassemble.base.util.set_random_forest_machine_learner(docassemble.webapp.machinelearning.RandomForestMachineLearner)
docassemble.base.util.set_svm_machine_learner(docassemble.webapp.machinelearning.SVMMachineLearner)
min_system_version = '1.2.0'
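# Raise the limit on the re module's internal cache of compiled patterns (the default is much smaller), so frequently used patterns are not evicted.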
re._MAXCACHE = 10000
the_method_type = types.FunctionType
equals_byte = bytes('=', 'utf-8')
TypeType = type(type(None))
NoneType = type(None)
STATS = daconfig.get('collect statistics', False)
DEBUG = daconfig.get('debug', False)
ERROR_TYPES_NO_EMAIL = daconfig.get('suppress error notificiations', [])
COOKIELESS_SESSIONS = daconfig.get('cookieless sessions', False)
BAN_IP_ADDRESSES = daconfig.get('ip address ban enabled', True)
CONCURRENCY_LOCK_TIMEOUT = daconfig.get('concurrency lock timeout', 4)
if DEBUG:
PREVENT_DEMO = False
elif daconfig.get('allow demo', False):
PREVENT_DEMO = False
else:
PREVENT_DEMO = True
REQUIRE_IDEMPOTENT = not daconfig.get('allow non-idempotent questions', True)
STRICT_MODE = daconfig.get('restrict input variables', False)
PACKAGE_PROTECTION = daconfig.get('package protection', True)
PERMISSIONS_LIST = [
'access_privileges',
'access_sessions',
'access_user_info',
'access_user_api_info',
'create_user',
'delete_user',
'demo_interviews',
'edit_privileges',
'edit_sessions',
'edit_user_active_status',
'edit_user_info',
'edit_user_api_info',
'edit_user_password',
'edit_user_privileges',
'interview_data',
'log_user_in',
'playground_control',
'template_parse'
]
HTTP_TO_HTTPS = daconfig.get('behind https load balancer', False)
GITHUB_BRANCH = daconfig.get('github default branch name', 'main')
request_active = True
global_css = ''
global_js = ''
default_playground_yaml = """metadata:
title: Default playground interview
short title: Test
comment: This is a learning tool. Feel free to write over it.
---
objects:
- client: Individual
---
question: |
What is your name?
fields:
- First Name: client.name.first
- Middle Name: client.name.middle
required: False
- Last Name: client.name.last
- Suffix: client.name.suffix
required: False
code: name_suffix()
---
question: |
What is your date of birth?
fields:
- Date of Birth: client.birthdate
datatype: date
---
mandatory: True
question: |
Here is your document, ${ client }.
subquestion: |
In order ${ quest }, you will need this.
attachments:
- name: Information Sheet
filename: info_sheet
content: |
Your name is ${ client }.
% if client.age_in_years() > 60:
You are a senior.
% endif
Your quest is ${ quest }. You
are eligible for ${ benefits }.
---
question: |
What is your quest?
fields:
- Your quest: quest
hint: to find the Loch Ness Monster
---
code: |
if client.age_in_years() < 18:
benefits = "CHIP"
else:
benefits = "Medicaid"
"""
ok_mimetypes = {
"application/javascript": "javascript",
"application/json": "javascript",
"text/css": "css",
"text/html": "htmlmixed",
"text/x-python": "python"
}
ok_extensions = {
"4th": "forth",
"apl": "apl",
"asc": "asciiarmor",
"asn": "asn.1",
"asn1": "asn.1",
"aspx": "htmlembedded",
"b": "brainfuck",
"bash": "shell",
"bf": "brainfuck",
"c": "clike",
"c++": "clike",
"cc": "clike",
"cl": "commonlisp",
"clj": "clojure",
"cljc": "clojure",
"cljs": "clojure",
"cljx": "clojure",
"cob": "cobol",
"coffee": "coffeescript",
"cpp": "clike",
"cpy": "cobol",
"cql": "sql",
"cr": "crystal",
"cs": "clike",
"csharp": "clike",
"css": "css",
"cxx": "clike",
"cyp": "cypher",
"cypher": "cypher",
"d": "d",
"dart": "dart",
"diff": "diff",
"dtd": "dtd",
"dyalog": "apl",
"dyl": "dylan",
"dylan": "dylan",
"e": "eiffel",
"ecl": "ecl",
"ecmascript": "javascript",
"edn": "clojure",
"ejs": "htmlembedded",
"el": "commonlisp",
"elm": "elm",
"erb": "htmlembedded",
"erl": "erlang",
"f": "fortran",
"f77": "fortran",
"f90": "fortran",
"f95": "fortran",
"factor": "factor",
"feature": "gherkin",
"for": "fortran",
"forth": "forth",
"fs": "mllike",
"fth": "forth",
"fun": "mllike",
"go": "go",
"gradle": "groovy",
"groovy": "groovy",
"gss": "css",
"h": "clike",
"h++": "clike",
"haml": "haml",
"handlebars": "htmlmixed",
"hbs": "htmlmixed",
"hh": "clike",
"hpp": "clike",
"hs": "haskell",
"html": "htmlmixed",
"hx": "haxe",
"hxml": "haxe",
"hxx": "clike",
"in": "properties",
"ini": "properties",
"ino": "clike",
"intr": "dylan",
"j2": "jinja2",
"jade": "pug",
"java": "clike",
"jinja": "jinja2",
"jinja2": "jinja2",
"jl": "julia",
"json": "json",
"jsonld": "javascript",
"jsp": "htmlembedded",
"jsx": "jsx",
"ksh": "shell",
"kt": "clike",
"less": "css",
"lhs": "haskell-literate",
"lisp": "commonlisp",
"ls": "livescript",
"ltx": "stex",
"lua": "lua",
"m": "octave",
"markdown": "markdown",
"mbox": "mbox",
"md": "markdown",
"mkd": "markdown",
"mo": "modelica",
"mps": "mumps",
"msc": "mscgen",
"mscgen": "mscgen",
"mscin": "mscgen",
"msgenny": "mscgen",
"node": "javascript",
"nq": "ntriples",
"nsh": "nsis",
"nsi": "nsis",
"nt": "ntriples",
"nut": "clike",
"oz": "oz",
"p": "pascal",
"pas": "pascal",
"patch": "diff",
"pgp": "asciiarmor",
"php": "php",
"php3": "php",
"php4": "php",
"php5": "php",
"php7": "php",
"phtml": "php",
"pig": "pig",
"pl": "perl",
"pls": "sql",
"pm": "perl",
"pp": "puppet",
"pro": "idl",
"properties": "properties",
"proto": "protobuf",
"ps1": "powershell",
"psd1": "powershell",
"psm1": "powershell",
"pug": "pug",
"pxd": "python",
"pxi": "python",
"py": "python",
"pyx": "python",
"q": "q",
"r": "r",
"rb": "ruby",
"rq": "sparql",
"rs": "rust",
"rst": "rst",
"s": "gas",
"sas": "sas",
"sass": "sass",
"scala": "clike",
"scm": "scheme",
"scss": "css",
"sh": "shell",
"sieve": "sieve",
"sig": "asciiarmor",
"siv": "sieve",
"slim": "slim",
"smackspec": "mllike",
"sml": "mllike",
"soy": "soy",
"sparql": "sparql",
"sql": "sql",
"ss": "scheme",
"st": "smalltalk",
"styl": "stylus",
"swift": "swift",
"tcl": "tcl",
"tex": "stex",
"textile": "textile",
"toml": "toml",
"tpl": "smarty",
"ts": "javascript",
"tsx": "javascript",
"ttcn": "ttcn",
"ttcn3": "ttcn",
"ttcnpp": "ttcn",
"ttl": "turtle",
"vb": "vb",
"vbs": "vbscript",
"vhd": "vhdl",
"vhdl": "vhdl",
"vtl": "velocity",
"vue": "vue",
"wast": "wast",
"wat": "wast",
"webidl": "webidl",
"xml": "xml",
"xquery": "xquery",
"xsd": "xml",
"xsl": "xml",
"xu": "mscgen",
"xy": "xquery",
"yaml": "yaml",
"yml": "yaml",
"ys": "yacas",
"z80": "z80"
}
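# Merge any 'editable mimetypes' and 'editable extensions' lists from the configuration into the whitelists above, ignoring malformed entries.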
def update_editable():
try:
if 'editable mimetypes' in daconfig and isinstance(daconfig['editable mimetypes'], list):
for item in daconfig['editable mimetypes']:
if isinstance(item, str):
ok_mimetypes[item] = 'null'
except:
pass
try:
if 'editable extensions' in daconfig and isinstance(daconfig['editable extensions'], list):
for item in daconfig['editable extensions']:
if isinstance(item, str):
ok_extensions[item] = 'null'
except:
pass
update_editable()
default_yaml_filename = daconfig.get('default interview', None)
final_default_yaml_filename = daconfig.get('default interview', 'docassemble.base:data/questions/default-interview.yml')
keymap = daconfig.get('keymap', None)
google_config = daconfig['google']
contains_volatile = re.compile(r'^(x\.|x\[|.*\[[ijklmn]\])')
is_integer = re.compile(r'^[0-9]+$')
detect_mobile = re.compile(r'Mobile|iP(hone|od|ad)|Android|BlackBerry|IEMobile|Kindle|NetFront|Silk-Accelerated|(hpw|web)OS|Fennec|Minimo|Opera M(obi|ini)|Blazer|Dolfin|Dolphin|Skyfire|Zune')
alphanumeric_only = re.compile(r'[\W_]+')
phone_pattern = re.compile(r"^[\d\+\-\(\) ]+$")
document_match = re.compile(r'^--- *$', flags=re.MULTILINE)
fix_tabs = re.compile(r'\t')
fix_initial = re.compile(r'^---\n')
noquote_match = re.compile(r'"')
lt_match = re.compile(r'<')
gt_match = re.compile(r'>')
amp_match = re.compile(r'&')
extraneous_var = re.compile(r'^x\.|^x\[')
key_requires_preassembly = re.compile(r'^(session_local\.|device_local\.|user_local\.|x\.|x\[|_multiple_choice|.*\[[ijklmn]\])')
# match_invalid = re.compile('[^A-Za-z0-9_\[\].\'\%\-=]')
# match_invalid_key = re.compile('[^A-Za-z0-9_\[\].\'\%\- =]')
match_brackets = re.compile(r'\[[BRO]?\'[^\]]*\'\]$')
match_inside_and_outside_brackets = re.compile(r'(.*)(\[[BRO]?\'[^\]]*\'\])$')
match_inside_brackets = re.compile(r'\[([BRO]?)\'([^\]]*)\'\]')
valid_python_var = re.compile(r'^[A-Za-z][A-Za-z0-9\_]*$')
valid_python_exp = re.compile(r'^[A-Za-z][A-Za-z0-9\_\.]*$')
default_title = daconfig.get('default title', daconfig.get('brandname', 'docassemble'))
default_short_title = daconfig.get('default short title', default_title)
os.environ['PYTHON_EGG_CACHE'] = tempfile.gettempdir()
PNG_RESOLUTION = daconfig.get('png resolution', 300)
PNG_SCREEN_RESOLUTION = daconfig.get('png screen resolution', 72)
PDFTOPPM_COMMAND = daconfig.get('pdftoppm', 'pdftoppm')
DEFAULT_LANGUAGE = daconfig.get('language', 'en')
DEFAULT_LOCALE = daconfig.get('locale', 'en_US.utf8')
DEFAULT_DIALECT = daconfig.get('dialect', 'us')
DEFAULT_VOICE = daconfig.get('voice', None)
LOGSERVER = daconfig.get('log server', None)
CHECKIN_INTERVAL = int(daconfig.get('checkin interval', 6000))
# message_sequence = dbtableprefix + 'message_id_seq'
NOTIFICATION_CONTAINER = daconfig.get('alert container html', '<div class="datopcenter col-sm-7 col-md-6 col-lg-5" id="daflash">%s</div>')
NOTIFICATION_MESSAGE = daconfig.get('alert html', '<div class="da-alert alert alert-%s alert-dismissible fade show" role="alert">%s<button type="button" class="btn-close" data-bs-dismiss="alert" aria-label="Close"></button></div>')
USING_SUPERVISOR = bool(os.environ.get('SUPERVISOR_SERVER_URL', None))
SINGLE_SERVER = USING_SUPERVISOR and bool(':all:' in ':' + os.environ.get('CONTAINERROLE', 'all') + ':')
audio_mimetype_table = {'mp3': 'audio/mpeg', 'ogg': 'audio/ogg'}
valid_voicerss_dialects = {
'ar': ['eg', 'sa'],
'bg': ['bg'],
'ca': ['es'],
'cs': ['cz'],
'da': ['dk'],
'de': ['de', 'at', 'ch'],
'el': ['gr'],
'en': ['au', 'ca', 'gb', 'in', 'ie', 'us'],
'es': ['mx', 'es'],
'fi': ['fi'],
'fr': ['ca', 'fr', 'ch'],
'he': ['il'],
'hi': ['in'],
'hr': ['hr'],
'hu': ['hu'],
'id': ['id'],
'it': ['it'],
'ja': ['jp'],
'ko': ['kr'],
'ms': ['my'],
'nb': ['no'],
'nl': ['be', 'nl'],
'pl': ['pl'],
'pt': ['br', 'pt'],
'ro': ['ro'],
'ru': ['ru'],
'sk': ['sk'],
'sl': ['si'],
'sv': ['se'],
'ta': ['in'],
'th': ['th'],
'tr': ['tr'],
'vi': ['vn'],
'zh': ['cn', 'hk', 'tw']
}
voicerss_config = daconfig.get('voicerss', None)
VOICERSS_ENABLED = not bool(not voicerss_config or ('enable' in voicerss_config and not voicerss_config['enable']) or not ('key' in voicerss_config and voicerss_config['key']))
ROOT = daconfig.get('root', '/')
# app.logger.warning("default sender is " + current_app.config['MAIL_DEFAULT_SENDER'] + "\n")
exit_page = daconfig.get('exitpage', 'https://docassemble.org')
SUPERVISORCTL = [daconfig.get('supervisorctl', 'supervisorctl')]
if daconfig['supervisor'].get('username', None):
SUPERVISORCTL.extend(['--username', daconfig['supervisor']['username'], '--password', daconfig['supervisor']['password']])
# PACKAGE_CACHE = daconfig.get('packagecache', '/var/www/.cache')
WEBAPP_PATH = daconfig.get('webapp', '/usr/share/docassemble/webapp/docassemble.wsgi')
if packaging.version.parse(daconfig.get('system version', '0.1.12')) < packaging.version.parse('1.4.0'):
READY_FILE = daconfig.get('ready file', '/usr/share/docassemble/webapp/ready')
else:
READY_FILE = daconfig.get('ready file', '/var/run/docassemble/ready')
UPLOAD_DIRECTORY = daconfig.get('uploads', '/usr/share/docassemble/files')
PACKAGE_DIRECTORY = daconfig.get('packages', '/usr/share/docassemble/local' + str(sys.version_info.major) + '.' + str(sys.version_info.minor))
FULL_PACKAGE_DIRECTORY = os.path.join(PACKAGE_DIRECTORY, 'lib', 'python' + str(sys.version_info.major) + '.' + str(sys.version_info.minor), 'site-packages')
LOG_DIRECTORY = daconfig.get('log', '/usr/share/docassemble/log')
PAGINATION_LIMIT = daconfig.get('pagination limit', 100)
PAGINATION_LIMIT_PLUS_ONE = PAGINATION_LIMIT + 1
# PLAYGROUND_MODULES_DIRECTORY = daconfig.get('playground_modules', )
init_py_file = """
__import__('pkg_resources').declare_namespace(__name__)
"""
# if not os.path.isfile(os.path.join(PLAYGROUND_MODULES_DIRECTORY, 'docassemble', '__init__.py')):
# with open(os.path.join(PLAYGROUND_MODULES_DIRECTORY, 'docassemble', '__init__.py'), 'a') as the_file:
# the_file.write(init_py_file)
# USE_PROGRESS_BAR = daconfig.get('use_progress_bar', True)
SHOW_LOGIN = daconfig.get('show login', True)
ALLOW_REGISTRATION = daconfig.get('allow registration', True)
# USER_PACKAGES = daconfig.get('user_packages', '/var/lib/docassemble/dist-packages')
# sys.path.append(USER_PACKAGES)
# if USE_PROGRESS_BAR:
if in_celery:
LOGFILE = daconfig.get('celery flask log', '/tmp/celery-flask.log')
else:
LOGFILE = daconfig.get('flask log', '/tmp/flask.log')
# APACHE_LOGFILE = daconfig.get('apache_log', '/var/log/apache2/error.log')
# connect_string = docassemble.webapp.database.connection_string()
# alchemy_connect_string = docassemble.webapp.database.alchemy_connection_string()
mimetypes.add_type('application/x-yaml', '.yml')
mimetypes.add_type('application/x-yaml', '.yaml')
if DEBUG_BOOT:
boot_log("server: creating session store")
safeyaml = ruamel.yaml.YAML(typ='safe')
altyaml = ruamel.yaml.YAML(typ=['safe', 'bytes'])
altyaml.default_flow_style = False
altyaml.default_style = '"'
altyaml.allow_unicode = True
altyaml.width = 10000
altyamlstring = ruamel.yaml.YAML(typ=['safe', 'string'])
altyamlstring.default_flow_style = False
altyamlstring.default_style = '"'
altyamlstring.allow_unicode = True
altyamlstring.width = 10000
store = RedisStore(r_store)
kv_session = KVSessionExtension(store, app)
def _call_or_get(function_or_property):
return function_or_property() if callable(function_or_property) else function_or_property
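# Read a redirect target (e.g. 'next' or 'reg_next') from the query string, run it through the user manager's make_safe_url function, and fall back to the given endpoint's URL.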
def _get_safe_next_param(param_name, default_endpoint):
if param_name in request.args:
safe_next = current_app.user_manager.make_safe_url_function(urllibunquote(request.args[param_name]))
# safe_next = request.args[param_name]
else:
safe_next = _endpoint_url(default_endpoint)
return safe_next
# def _do_login_user(user, safe_next, remember_me=False):
# if not user: return unauthenticated()
# if not _call_or_get(user.is_active):
# flash(word('Your account has not been enabled.'), 'error')
# return redirect(url_for('user.login'))
# user_manager = current_app.user_manager
# if user_manager.enable_email and user_manager.enable_confirm_email \
# and not current_app.user_manager.enable_login_without_confirm_email \
# and not user.has_confirmed_email():
# url = url_for('user.resend_confirm_email')
# flash(docassemble_flask_user.translations.gettext('Your email address has not yet been confirmed. Check your email Inbox and Spam folders for the confirmation email or <a href="%(url)s">Re-send confirmation email</a>.', url=url), 'error')
# return redirect(url_for('user.login'))
# login_user(user, remember=remember_me)
# signals.user_logged_in.send(current_app._get_current_object(), user=user)
# flash(word('You have signed in successfully.'), 'success')
# return redirect(safe_next)
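# Replacement for the Flask-User resend-confirmation view: pre-fills the email address from the query string on GET, re-sends the confirmation email on POST, and disables caching of the response.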
def custom_resend_confirm_email():
user_manager = current_app.user_manager
form = user_manager.resend_confirm_email_form(request.form)
if request.method == 'GET' and 'email' in request.args:
form.email.data = request.args['email']
if request.method == 'POST' and form.validate():
email = form.email.data
user, user_email = user_manager.find_user_by_email(email)
if user:
docassemble_flask_user.views._send_confirm_email(user, user_email)
return redirect(docassemble_flask_user.views._endpoint_url(user_manager.after_resend_confirm_email_endpoint))
response = make_response(user_manager.render_function(user_manager.resend_confirm_email_template, form=form), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
def as_int(val):
try:
return int(val)
except:
return 0
def custom_register():
"""Display registration form and create new User."""
is_json = bool(('json' in request.form and as_int(request.form['json'])) or ('json' in request.args and as_int(request.args['json'])))
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
safe_next = _get_safe_next_param('next', user_manager.after_login_endpoint)
safe_reg_next = _get_safe_next_param('reg_next', user_manager.after_register_endpoint)
if _call_or_get(current_user.is_authenticated) and user_manager.auto_login_at_login:
if safe_next == url_for(user_manager.after_login_endpoint):
url_parts = list(urlparse(safe_next))
query = dict(parse_qsl(url_parts[4]))
query.update({'from_login': 1})
url_parts[4] = urlencode(query)
safe_next = urlunparse(url_parts)
return add_secret_to(redirect(safe_next))
setup_translation()
# Initialize form
login_form = user_manager.login_form() # for login_or_register.html
register_form = user_manager.register_form(request.form) # for register.html
    # the invite token is used to verify that the registrant was actually invited
invite_token = request.values.get("token")
the_tz = get_default_timezone()
    # when registration requires an invitation, a missing token means the user may not register
if user_manager.require_invitation and not invite_token:
flash(word("Registration is invite only"), "error")
return redirect(url_for('user.login'))
user_invite = None
if invite_token and db_adapter.UserInvitationClass:
user_invite = db_adapter.find_first_object(db_adapter.UserInvitationClass, token=invite_token)
if user_invite:
register_form.invite_token.data = invite_token
else:
flash(word("Invalid invitation token"), "error")
return redirect(url_for('user.login'))
if request.method != 'POST':
login_form.next.data = register_form.next.data = safe_next
login_form.reg_next.data = register_form.reg_next.data = safe_reg_next
if user_invite:
register_form.email.data = user_invite.email
register_form.timezone.choices = [(x, x) for x in sorted(list(zoneinfo.available_timezones()))]
register_form.timezone.default = the_tz
if str(register_form.timezone.data) == 'None' or str(register_form.timezone.data) == '':
register_form.timezone.data = the_tz
if request.method == 'POST':
if 'timezone' not in app.config['USER_PROFILE_FIELDS']:
register_form.timezone.data = the_tz
for reg_field in ('first_name', 'last_name', 'country', 'subdivisionfirst', 'subdivisionsecond', 'subdivisionthird', 'organization', 'language'):
if reg_field not in app.config['USER_PROFILE_FIELDS']:
getattr(register_form, reg_field).data = ""
# Process valid POST
if request.method == 'POST' and register_form.validate():
email_taken = False
if daconfig.get('confirm registration', False):
try:
docassemble_flask_user.forms.unique_email_validator(register_form, register_form.email)
except wtforms.ValidationError:
email_taken = True
if email_taken:
flash(word('A confirmation email has been sent to %(email)s with instructions to complete your registration.' % {'email': register_form.email.data}), 'success')
subject, html_message, text_message = docassemble_flask_user.emails._render_email(
'flask_user/emails/reregistered',
app_name=app.config['APP_NAME'],
sign_in_link=url_for('user.login', _external=True))
# Send email message using Flask-Mail
user_manager.send_email_function(register_form.email.data, subject, html_message, text_message)
return redirect(url_for('user.login'))
# Create a User object using Form fields that have a corresponding User field
User = db_adapter.UserClass
user_class_fields = User.__dict__
user_fields = {}
# Create a UserEmail object using Form fields that have a corresponding UserEmail field
if db_adapter.UserEmailClass:
UserEmail = db_adapter.UserEmailClass
user_email_class_fields = UserEmail.__dict__
user_email_fields = {}
# Create a UserAuth object using Form fields that have a corresponding UserAuth field
if db_adapter.UserAuthClass:
UserAuth = db_adapter.UserAuthClass
user_auth_class_fields = UserAuth.__dict__
user_auth_fields = {}
# Enable user account
if db_adapter.UserProfileClass:
if hasattr(db_adapter.UserProfileClass, 'active'):
user_auth_fields['active'] = True
elif hasattr(db_adapter.UserProfileClass, 'is_enabled'):
user_auth_fields['is_enabled'] = True
else:
user_auth_fields['is_active'] = True
else:
if hasattr(db_adapter.UserClass, 'active'):
user_fields['active'] = True
elif hasattr(db_adapter.UserClass, 'is_enabled'):
user_fields['is_enabled'] = True
else:
user_fields['is_active'] = True
# For all form fields
for field_name, field_value in register_form.data.items():
# Hash password field
if field_name == 'password':
hashed_password = user_manager.hash_password(field_value)
if db_adapter.UserAuthClass:
user_auth_fields['password'] = hashed_password
else:
user_fields['password'] = hashed_password
# Store corresponding Form fields into the User object and/or UserProfile object
else:
if field_name in user_class_fields:
user_fields[field_name] = field_value
if db_adapter.UserEmailClass:
if field_name in user_email_class_fields:
user_email_fields[field_name] = field_value
if db_adapter.UserAuthClass:
if field_name in user_auth_class_fields:
user_auth_fields[field_name] = field_value
while True:
new_social = 'local$' + random_alphanumeric(32)
existing_user = db.session.execute(select(UserModel).filter_by(social_id=new_social)).first()
if existing_user:
continue
break
user_fields['social_id'] = new_social
# Add User record using named arguments 'user_fields'
user = db_adapter.add_object(User, **user_fields)
# Add UserEmail record using named arguments 'user_email_fields'
if db_adapter.UserEmailClass:
user_email = db_adapter.add_object(UserEmail,
user=user,
is_primary=True,
**user_email_fields)
else:
user_email = None
# Add UserAuth record using named arguments 'user_auth_fields'
if db_adapter.UserAuthClass:
user_auth = db_adapter.add_object(UserAuth, **user_auth_fields)
if db_adapter.UserProfileClass:
user = user_auth
else:
user.user_auth = user_auth
require_email_confirmation = True
if user_invite:
if user_invite.email == register_form.email.data:
require_email_confirmation = False
db_adapter.update_object(user, confirmed_at=datetime.datetime.utcnow())
db_adapter.commit()
# Send 'registered' email and delete new User object if send fails
if user_manager.send_registered_email:
try:
# Send 'registered' email
docassemble_flask_user.views._send_registered_email(user, user_email, require_email_confirmation)
except:
# delete new User object if send fails
db_adapter.delete_object(user)
db_adapter.commit()
raise
# Send user_registered signal
docassemble_flask_user.signals.user_registered.send(current_app._get_current_object(),
user=user,
user_invite=user_invite)
# Redirect if USER_ENABLE_CONFIRM_EMAIL is set
if user_manager.enable_confirm_email and require_email_confirmation:
safe_reg_next = user_manager.make_safe_url_function(register_form.reg_next.data)
return redirect(safe_reg_next)
# Auto-login after register or redirect to login page
if 'reg_next' in request.args:
safe_reg_next = user_manager.make_safe_url_function(register_form.reg_next.data)
else:
safe_reg_next = _endpoint_url(user_manager.after_confirm_endpoint)
if user_manager.auto_login_after_register:
if app.config['USE_MFA']:
if user.otp_secret is None and len(app.config['MFA_REQUIRED_FOR_ROLE']) and user.has_role(*app.config['MFA_REQUIRED_FOR_ROLE']):
session['validated_user'] = user.id
session['next'] = safe_reg_next
if app.config['MFA_ALLOW_APP'] and (twilio_config is None or not app.config['MFA_ALLOW_SMS']):
return redirect(url_for('mfa_setup'))
if not app.config['MFA_ALLOW_APP']:
return redirect(url_for('mfa_sms_setup'))
return redirect(url_for('mfa_choose'))
return docassemble_flask_user.views._do_login_user(user, safe_reg_next)
return redirect(url_for('user.login') + '?next=' + urllibquote(safe_reg_next))
# Process GET or invalid POST
if is_json:
return jsonify(action='register', csrf_token=generate_csrf())
response = make_response(user_manager.render_function(user_manager.register_template,
form=register_form,
login_form=login_form,
register_form=register_form), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
def custom_login():
""" Prompt for username/email and password and sign the user in."""
# logmessage("In custom_login\n")
is_json = bool(('json' in request.form and as_int(request.form['json'])) or ('json' in request.args and as_int(request.args['json'])))
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
safe_next = _get_safe_next_param('next', user_manager.after_login_endpoint)
safe_reg_next = _get_safe_next_param('reg_next', user_manager.after_register_endpoint)
if safe_next and '/officeaddin' in safe_next:
g.embed = True
if _call_or_get(current_user.is_authenticated) and user_manager.auto_login_at_login:
if safe_next == url_for(user_manager.after_login_endpoint):
url_parts = list(urlparse(safe_next))
query = dict(parse_qsl(url_parts[4]))
query.update({'from_login': 1})
url_parts[4] = urlencode(query)
safe_next = urlunparse(url_parts)
return add_secret_to(redirect(safe_next))
setup_translation()
login_form = user_manager.login_form(request.form)
register_form = user_manager.register_form()
if request.method != 'POST':
login_form.next.data = register_form.next.data = safe_next
login_form.reg_next.data = register_form.reg_next.data = safe_reg_next
if request.method == 'GET' and 'validated_user' in session:
del session['validated_user']
if request.method == 'POST' and login_form.validate():
user = None
if user_manager.enable_username:
user = user_manager.find_user_by_username(login_form.username.data)
user_email = None # pylint: disable=unused-variable
if user and db_adapter.UserEmailClass:
user_email = db_adapter.find_first_object(db_adapter.UserEmailClass,
user_id=int(user.get_id()),
is_primary=True,
)
if not user and user_manager.enable_email:
user, user_email = user_manager.find_user_by_email(login_form.username.data)
else:
user, user_email = user_manager.find_user_by_email(login_form.email.data)
# if not user and daconfig['ldap login'].get('enabled', False):
if user:
safe_next = user_manager.make_safe_url_function(login_form.next.data)
# safe_next = login_form.next.data
# safe_next = url_for('post_login', next=login_form.next.data)
if app.config['USE_MFA']:
if user.otp_secret is None and len(app.config['MFA_REQUIRED_FOR_ROLE']) and user.has_role(*app.config['MFA_REQUIRED_FOR_ROLE']):
session['validated_user'] = user.id
session['next'] = safe_next
if app.config['MFA_ALLOW_APP'] and (twilio_config is None or not app.config['MFA_ALLOW_SMS']):
return redirect(url_for('mfa_setup'))
if not app.config['MFA_ALLOW_APP']:
return redirect(url_for('mfa_sms_setup'))
return redirect(url_for('mfa_choose'))
if user.otp_secret is not None:
session['validated_user'] = user.id
session['next'] = safe_next
if user.otp_secret.startswith(':phone:'):
phone_number = re.sub(r'^:phone:', '', user.otp_secret)
verification_code = random_digits(daconfig['verification code digits'])
message = word("Your verification code is") + " " + str(verification_code) + "."
key = 'da:mfa:phone:' + str(phone_number) + ':code'
pipe = r.pipeline()
pipe.set(key, verification_code)
pipe.expire(key, daconfig['verification code timeout'])
pipe.execute()
success = docassemble.base.util.send_sms(to=phone_number, body=message)
if not success:
flash(word("Unable to send verification code."), 'error')
return redirect(url_for('user.login'))
return add_secret_to(redirect(url_for('mfa_login')))
if user_manager.enable_email and user_manager.enable_confirm_email \
and len(daconfig['email confirmation privileges']) \
and user.has_role(*daconfig['email confirmation privileges']) \
and not user.has_confirmed_email():
url = url_for('user.resend_confirm_email', email=user.email)
flash(word('You cannot log in until your e-mail address has been confirmed.') + '<br><a href="' + url + '">' + word('Click here to confirm your e-mail') + '</a>.', 'error')
return redirect(url_for('user.login'))
return add_secret_to(docassemble_flask_user.views._do_login_user(user, safe_next, login_form.remember_me.data))
if is_json:
return jsonify(action='login', csrf_token=generate_csrf())
# if 'officeaddin' in safe_next:
# extra_css = """
# <script type="text/javascript" src="https://appsforoffice.microsoft.com/lib/1.1/hosted/office.debug.js"></script>"""
# extra_js = """
# <script type="text/javascript" src=""" + '"' + url_for('static', filename='office/fabric.js') + '"' + """></script>
# <script type="text/javascript" src=""" + '"' + url_for('static', filename='office/polyfill.js') + '"' + """></script>
# <script type="text/javascript" src=""" + '"' + url_for('static', filename='office/app.js') + '"' + """></script>"""
# return render_template(user_manager.login_template,
# form=login_form,
# login_form=login_form,
# register_form=register_form,
# extra_css=Markup(extra_css),
# extra_js=Markup(extra_js))
# else:
if app.config['AUTO_LOGIN'] and not (app.config['USE_PASSWORD_LOGIN'] or ('admin' in request.args and request.args['admin'] == '1') or ('from_logout' in request.args and request.args['from_logout'] == '1')):
if app.config['AUTO_LOGIN'] is True:
number_of_methods = 0
the_method = None
for login_method in ('USE_PHONE_LOGIN', 'USE_GOOGLE_LOGIN', 'USE_FACEBOOK_LOGIN', 'USE_ZITADEL_LOGIN', 'USE_TWITTER_LOGIN', 'USE_AUTH0_LOGIN', 'USE_KEYCLOAK_LOGIN', 'USE_AZURE_LOGIN'):
if app.config[login_method]:
number_of_methods += 1
the_method = re.sub(r'USE_(.*)_LOGIN', r'\1', login_method).lower()
if number_of_methods > 1:
the_method = None
else:
the_method = app.config['AUTO_LOGIN']
if the_method == 'phone':
return redirect(url_for('phone_login'))
if the_method == 'google':
return redirect(url_for('google_page', next=request.args.get('next', '')))
if the_method in ('facebook', 'twitter', 'auth0', 'keycloak', 'azure'):
return redirect(url_for('oauth_authorize', provider=the_method, next=request.args.get('next', '')))
response = make_response(user_manager.render_function(user_manager.login_template,
form=login_form,
login_form=login_form,
register_form=register_form), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
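# If a new per-session encryption secret was generated during login, move it from the Flask session into the 'secret' cookie on the outgoing response (using SameSite=None when embedded).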
def add_secret_to(response):
if 'newsecret' in session:
if 'embed' in g:
response.set_cookie('secret', session['newsecret'], httponly=True, secure=app.config['SESSION_COOKIE_SECURE'], samesite='None')
else:
response.set_cookie('secret', session['newsecret'], httponly=True, secure=app.config['SESSION_COOKIE_SECURE'], samesite=app.config['SESSION_COOKIE_SAMESITE'])
del session['newsecret']
return response
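# Sign the user out: clear session data and authentication cookies, and, for Auth0, ZITADEL, or Keycloak accounts, redirect through the provider's end-session endpoint.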
def logout():
setup_translation()
# secret = request.cookies.get('secret', None)
# if secret is None:
# secret = random_string(16)
# set_cookie = True
# else:
# secret = str(secret)
# set_cookie = False
user_manager = current_app.user_manager
next_url = None
if 'next' in request.args and request.args['next'] != '':
try:
next_url = decrypt_phrase(repad(bytearray(request.args['next'], encoding='utf-8')).decode(), app.secret_key)
except:
pass
if next_url is None:
next_url = daconfig.get('logoutpage', None)
if next_url is None:
if session.get('language', None) and session['language'] != DEFAULT_LANGUAGE:
next_url = _endpoint_url(user_manager.after_logout_endpoint, lang=session['language'], from_logout='1')
else:
next_url = _endpoint_url(user_manager.after_logout_endpoint, from_logout='1')
if current_user.is_authenticated:
if current_user.social_id.startswith('auth0$') and 'oauth' in daconfig and 'auth0' in daconfig['oauth'] and 'domain' in daconfig['oauth']['auth0']:
if next_url.startswith('/'):
next_url = get_base_url() + next_url
next_url = 'https://' + daconfig['oauth']['auth0']['domain'] + '/v2/logout?' + urlencode({'returnTo': next_url, 'client_id': daconfig['oauth']['auth0']['id']})
if current_user.social_id.startswith('zitadel$') and 'oauth' in daconfig and 'zitadel' in daconfig['oauth'] and 'domain' in daconfig['oauth']['zitadel'] and 'id' in daconfig['oauth']['zitadel']:
next_url = 'https://' + daconfig['oauth']['zitadel']['domain'] + '/oidc/v1/end_session?' + urlencode({'post_logout_redirect_uri': url_for('user.login', _external=True), 'client_id': daconfig['oauth']['zitadel']['id']})
if current_user.social_id.startswith('keycloak$') and 'oauth' in daconfig and 'keycloak' in daconfig['oauth'] and 'domain' in daconfig['oauth']['keycloak']:
if next_url.startswith('/'):
next_url = get_base_url() + next_url
protocol = daconfig['oauth']['keycloak'].get('protocol', 'https://')
if not protocol.endswith('://'):
protocol = protocol + '://'
next_url = protocol + daconfig['oauth']['keycloak']['domain'] + '/realms/' + daconfig['oauth']['keycloak']['realm'] + '/protocol/openid-connect/logout?' + urlencode({'post_logout_redirect_uri': next_url, 'client_id': daconfig['oauth']['keycloak']['id']})
docassemble_flask_user.signals.user_logged_out.send(current_app._get_current_object(), user=current_user)
logout_user()
delete_session_info()
session.clear()
if next_url.startswith('/') and app.config['FLASH_LOGIN_MESSAGES']:
flash(word('You have signed out successfully.'), 'success')
response = redirect(next_url)
response.set_cookie('remember_token', '', expires=0)
response.set_cookie('visitor_secret', '', expires=0)
response.set_cookie('secret', '', expires=0)
response.set_cookie('session', '', expires=0)
return response
# def custom_login():
# logmessage("custom_login")
# user_manager = current_app.user_manager
# db_adapter = user_manager.db_adapter
# secret = request.cookies.get('secret', None)
# if secret is not None:
# secret = str(secret)
# next_url = request.args.get('next', _endpoint_url(user_manager.after_login_endpoint))
# reg_next = request.args.get('reg_next', _endpoint_url(user_manager.after_register_endpoint))
# if _call_or_get(current_user.is_authenticated) and user_manager.auto_login_at_login:
# return redirect(next_url)
# login_form = user_manager.login_form(request.form)
# register_form = user_manager.register_form()
# if request.method != 'POST':
# login_form.next.data = register_form.next.data = next_url
# login_form.reg_next.data = register_form.reg_next.data = reg_next
# if request.method == 'POST':
# try:
# login_form.validate()
# except:
# logmessage("custom_login: got an error when validating login")
# pass
# if request.method == 'POST' and login_form.validate():
# user = None
# user_email = None
# if user_manager.enable_username:
# user = user_manager.find_user_by_username(login_form.username.data)
# user_email = None
# if user and db_adapter.UserEmailClass:
# user_email = db_adapter.find_first_object(db_adapter.UserEmailClass,
# user_id=int(user.get_id()),
# is_primary=True,
# )
# if not user and user_manager.enable_email:
# user, user_email = user_manager.find_user_by_email(login_form.username.data)
# else:
# user, user_email = user_manager.find_user_by_email(login_form.email.data)
# if user:
# return _do_login_user(user, login_form.password.data, secret, login_form.next.data, login_form.remember_me.data)
# return render_template(user_manager.login_template, page_title=word('Sign In'), tab_title=word('Sign In'), form=login_form, login_form=login_form, register_form=register_form)
def unauthenticated():
if not request.args.get('nm', False):
flash(word("You need to log in before you can access") + " " + word(request.path), 'error')
the_url = url_for('user.login', next=fix_http(request.url))
return redirect(the_url)
def unauthorized():
flash(word("You are not authorized to access") + " " + word(request.path), 'error')
return redirect(url_for('interview_list', next=fix_http(request.url)))
def my_default_url(error, endpoint, values): # pylint: disable=unused-argument
return url_for('index')
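# Strip the scheme and host from a redirect target, keeping only the path, query string, and fragment, so redirects cannot leave the site.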
def make_safe_url(url):
parts = urlsplit(url)
safe_url = parts.path
if parts.query != '':
safe_url += '?' + parts.query
if parts.fragment != '':
safe_url += '#' + parts.fragment
return safe_url
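# Enforce the 'password complexity' rules from the configuration: minimum length plus minimum counts of lowercase, uppercase, digit, and punctuation characters.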
def password_validator(form, field): # pylint: disable=unused-argument
password = list(field.data)
password_length = len(password)
lowers = uppers = digits = punct = 0
for ch in password:
if ch.islower():
lowers += 1
if ch.isupper():
uppers += 1
if ch.isdigit():
digits += 1
if not (ch.islower() or ch.isupper() or ch.isdigit()):
punct += 1
rules = daconfig.get('password complexity', {})
is_valid = password_length >= rules.get('length', 6) and lowers >= rules.get('lowercase', 1) and uppers >= rules.get('uppercase', 1) and digits >= rules.get('digits', 1) and punct >= rules.get('punctuation', 0)
if not is_valid:
if 'error message' in rules:
error_message = str(rules['error message'])
else:
# word("Password must be at least six characters long with at least one lowercase letter, at least one uppercase letter, and at least one number.")
error_message = 'Password must be at least ' + docassemble.base.functions.quantity_noun(rules.get('length', 6), 'character', language='en') + ' long'
standards = []
if rules.get('lowercase', 1) > 0:
standards.append('at least ' + docassemble.base.functions.quantity_noun(rules.get('lowercase', 1), 'lowercase letter', language='en'))
if rules.get('uppercase', 1) > 0:
standards.append('at least ' + docassemble.base.functions.quantity_noun(rules.get('uppercase', 1), 'uppercase letter', language='en'))
if rules.get('digits', 1) > 0:
standards.append('at least ' + docassemble.base.functions.quantity_noun(rules.get('digits', 1), 'number', language='en'))
if rules.get('punctuation', 0) > 0:
standards.append('at least ' + docassemble.base.functions.quantity_noun(rules.get('punctuation', 1), 'punctuation character', language='en'))
if len(standards) > 0:
error_message += ' with ' + docassemble.base.functions.comma_and_list_en(standards)
error_message += '.'
raise wtforms.ValidationError(word(error_message))
if DEBUG_BOOT:
boot_log("server: setting up Flask")
the_db_adapter = SQLAlchemyAdapter(db, UserModel, UserAuthClass=UserAuthModel, UserInvitationClass=MyUserInvitation)
the_user_manager = UserManager()
the_user_manager.init_app(app, db_adapter=the_db_adapter, login_form=MySignInForm, register_form=MyRegisterForm, user_profile_view_function=user_profile_page, logout_view_function=logout, unauthorized_view_function=unauthorized, unauthenticated_view_function=unauthenticated, login_view_function=custom_login, register_view_function=custom_register, resend_confirm_email_view_function=custom_resend_confirm_email, resend_confirm_email_form=MyResendConfirmEmailForm, password_validator=password_validator, make_safe_url_function=make_safe_url)
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'custom_login'
lm.anonymous_user = AnonymousUserModel
if DEBUG_BOOT:
boot_log("server: finished setting up Flask")
def url_for_interview(**args):
for k, v in daconfig.get('dispatch').items():
if v == args['i']:
args['dispatch'] = k
del args['i']
is_new = False
try:
if true_or_false(args['new_session']):
is_new = True
del args['new_session']
except:
is_new = False
if is_new:
return docassemble.base.functions.url_of('run_new_dispatch', **args)
return docassemble.base.functions.url_of('run_dispatch', **args)
return url_for('index', **args)
app.jinja_env.globals.update(url_for=url_for, url_for_interview=url_for_interview)
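# Write a one-line entry through the 'docassemble' logger using LOGFORMAT, filling in the client IP, interview file, session, and user when a request is active.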
def syslog_message(message):
message = re.sub(r'\n', ' ', message)
if current_user and current_user.is_authenticated:
the_user = current_user.email
else:
the_user = "anonymous"
if request_active:
try:
sys_logger.debug('%s', LOGFORMAT % {'message': message, 'clientip': get_requester_ip(request), 'yamlfile': docassemble.base.functions.this_thread.current_info.get('yaml_filename', 'na'), 'user': the_user, 'session': docassemble.base.functions.this_thread.current_info.get('session', 'na')})
except Exception as err:
sys.stderr.write("Error writing log message " + str(message) + "\n")
try:
sys.stderr.write("Error was " + err.__class__.__name__ + ": " + str(err) + "\n")
except:
pass
else:
try:
sys_logger.debug('%s', LOGFORMAT % {'message': message, 'clientip': 'localhost', 'yamlfile': 'na', 'user': 'na', 'session': 'na'})
except Exception as err:
sys.stderr.write("Error writing log message " + str(message) + "\n")
try:
sys.stderr.write("Error was " + err.__class__.__name__ + ": " + str(err) + "\n")
except:
pass
def syslog_message_with_timestamp(message):
syslog_message(time.strftime("%Y-%m-%d %H:%M:%S") + " " + message)
if DEBUG_BOOT:
boot_log("server: setting up logging")
sys_logger = logging.getLogger('docassemble')
sys_logger.setLevel(logging.DEBUG)
LOGFORMAT = daconfig.get('log format', 'docassemble: ip=%(clientip)s i=%(yamlfile)s uid=%(session)s user=%(user)s %(message)s')
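# Attach a file handler for docassemble.log, retrying a few times if the file cannot yet be opened (e.g. a PermissionError while the log directory is still being set up).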
def add_log_handler():
tries = 0
while tries < 5:
try:
docassemble_log_handler = logging.FileHandler(filename=os.path.join(LOG_DIRECTORY, 'docassemble.log'))
        except PermissionError:
            tries += 1
            time.sleep(1)
            continue
sys_logger.addHandler(docassemble_log_handler)
if os.environ.get('SUPERVISORLOGLEVEL', 'info') == 'debug':
stderr_log_handler = logging.StreamHandler(stream=sys.stderr)
sys_logger.addHandler(stderr_log_handler)
break
add_log_handler()
if not (in_celery or in_cron):
if LOGSERVER is None:
docassemble.base.logger.set_logmessage(syslog_message_with_timestamp)
else:
docassemble.base.logger.set_logmessage(syslog_message)
if DEBUG_BOOT:
boot_log("server: finished setting up logging")
def login_as_admin(url, url_root):
found = False
for admin_user in db.session.execute(select(UserModel).filter_by(nickname='admin').order_by(UserModel.id)).scalars():
if not found:
found = True
current_app.login_manager._update_request_context_with_user(admin_user)
docassemble.base.functions.this_thread.current_info = {'user': {'is_anonymous': False, 'is_authenticated': True, 'email': admin_user.email, 'theid': admin_user.id, 'the_user_id': admin_user.id, 'roles': ['admin'], 'firstname': admin_user.first_name, 'lastname': admin_user.last_name, 'nickname': admin_user.nickname, 'country': admin_user.country, 'subdivisionfirst': admin_user.subdivisionfirst, 'subdivisionsecond': admin_user.subdivisionsecond, 'subdivisionthird': admin_user.subdivisionthird, 'organization': admin_user.organization, 'location': None, 'session_uid': 'admin', 'device_id': 'admin'}, 'session': None, 'secret': None, 'yaml_filename': final_default_yaml_filename, 'url': url, 'url_root': url_root, 'encrypted': False, 'action': None, 'interface': 'initialization', 'arguments': {}}
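# Pre-import add-on modules at startup: import anything listed under 'preloaded modules', then scan installed docassemble packages (except base, demo, and webapp) for modules that define classes or are marked '# pre-load', honoring 'module whitelist' and 'module blacklist'.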
def import_necessary(url, url_root):
login_as_admin(url, url_root)
modules_to_import = daconfig.get('preloaded modules', None)
if isinstance(modules_to_import, list):
for module_name in daconfig['preloaded modules']:
try:
importlib.import_module(module_name)
except:
pass
start_dir = len(FULL_PACKAGE_DIRECTORY.split(os.sep))
avoid_dirs = [os.path.join(FULL_PACKAGE_DIRECTORY, 'docassemble', 'base'),
os.path.join(FULL_PACKAGE_DIRECTORY, 'docassemble', 'demo'),
os.path.join(FULL_PACKAGE_DIRECTORY, 'docassemble', 'webapp')]
modules = ['docassemble.base.legal']
use_whitelist = 'module whitelist' in daconfig
for root, dirs, files in os.walk(os.path.join(FULL_PACKAGE_DIRECTORY, 'docassemble')): # pylint: disable=unused-variable
ok = True
for avoid in avoid_dirs:
if root.startswith(avoid):
ok = False
break
if not ok:
continue
for the_file in files:
if not the_file.endswith('.py'):
continue
thefilename = os.path.join(root, the_file)
if use_whitelist:
parts = thefilename.split(os.sep)[start_dir:]
parts[-1] = parts[-1][0:-3]
module_name = '.'.join(parts)
module_name = re.sub(r'\.__init__$', '', module_name)
if any(fnmatch.fnmatchcase(module_name, whitelist_item) for whitelist_item in daconfig['module whitelist']):
modules.append(module_name)
continue
with open(thefilename, 'r', encoding='utf-8') as fp:
for line in fp:
if line.startswith('# do not pre-load'):
break
if line.startswith('class ') or line.startswith('# pre-load') or 'docassemble.base.util.update' in line:
parts = thefilename.split(os.sep)[start_dir:]
parts[-1] = parts[-1][0:-3]
module_name = '.'.join(parts)
module_name = re.sub(r'\.__init__$', '', module_name)
modules.append(module_name)
break
for module_name in modules:
if any(fnmatch.fnmatchcase(module_name, blacklist_item) for blacklist_item in daconfig['module blacklist']):
continue
current_package = re.sub(r'\.[^\.]+$', '', module_name)
docassemble.base.functions.this_thread.current_package = current_package
docassemble.base.functions.this_thread.current_info.update({'yaml_filename': current_package + ':data/questions/test.yml'})
try:
importlib.import_module(module_name)
except Exception as err:
logmessage("Import of " + module_name + " failed. " + err.__class__.__name__ + ": " + str(err))
current_app.login_manager._update_request_context_with_user()
fax_provider = daconfig.get('fax provider', None) or 'clicksend'
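# Normalize the 'clicksend' configuration (a dict or a list of dicts) into lookup tables keyed by name and by number; returns None when no usable default entry exists.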
def get_clicksend_config():
if 'clicksend' in daconfig and isinstance(daconfig['clicksend'], (list, dict)):
the_clicksend_config = {'name': {}, 'number': {}}
if isinstance(daconfig['clicksend'], dict):
config_list = [daconfig['clicksend']]
else:
config_list = daconfig['clicksend']
for the_config in config_list:
if isinstance(the_config, dict) and 'api username' in the_config and 'api key' in the_config and 'number' in the_config:
if 'country' not in the_config:
the_config['country'] = docassemble.webapp.backend.DEFAULT_COUNTRY or 'US'
if 'from email' not in the_config:
the_config['from email'] = app.config['MAIL_DEFAULT_SENDER']
the_clicksend_config['number'][str(the_config['number'])] = the_config
if 'default' not in the_clicksend_config['name']:
the_clicksend_config['name']['default'] = the_config
if 'name' in the_config:
the_clicksend_config['name'][the_config['name']] = the_config
else:
logmessage("improper setup in clicksend configuration")
if 'default' not in the_clicksend_config['name']:
the_clicksend_config = None
else:
the_clicksend_config = None
# if fax_provider == 'clicksend' and the_clicksend_config is None:
# logmessage("improper clicksend configuration; faxing will not be functional")
return the_clicksend_config
clicksend_config = get_clicksend_config()
def get_telnyx_config():
if 'telnyx' in daconfig and isinstance(daconfig['telnyx'], (list, dict)):
the_telnyx_config = {'name': {}, 'number': {}}
if isinstance(daconfig['telnyx'], dict):
config_list = [daconfig['telnyx']]
else:
config_list = daconfig['telnyx']
for the_config in config_list:
if isinstance(the_config, dict) and 'app id' in the_config and 'api key' in the_config and 'number' in the_config:
if 'country' not in the_config:
the_config['country'] = docassemble.webapp.backend.DEFAULT_COUNTRY or 'US'
if 'from email' not in the_config:
the_config['from email'] = app.config['MAIL_DEFAULT_SENDER']
the_telnyx_config['number'][str(the_config['number'])] = the_config
if 'default' not in the_telnyx_config['name']:
the_telnyx_config['name']['default'] = the_config
if 'name' in the_config:
the_telnyx_config['name'][the_config['name']] = the_config
else:
logmessage("improper setup in twilio configuration")
if 'default' not in the_telnyx_config['name']:
the_telnyx_config = None
else:
the_telnyx_config = None
if fax_provider == 'telnyx' and the_telnyx_config is None:
logmessage("improper telnyx configuration; faxing will not be functional")
return the_telnyx_config
telnyx_config = get_telnyx_config()
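# Normalize the 'twilio' configuration into lookup tables keyed by account SID, phone number, WhatsApp number, and name; returns None when no usable default entry exists.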
def get_twilio_config():
if 'twilio' in daconfig:
the_twilio_config = {}
the_twilio_config['account sid'] = {}
the_twilio_config['number'] = {}
the_twilio_config['whatsapp number'] = {}
the_twilio_config['name'] = {}
if not isinstance(daconfig['twilio'], list):
config_list = [daconfig['twilio']]
else:
config_list = daconfig['twilio']
for tconfig in config_list:
if isinstance(tconfig, dict) and 'account sid' in tconfig and ('number' in tconfig or 'whatsapp number' in tconfig):
the_twilio_config['account sid'][str(tconfig['account sid'])] = 1
if tconfig.get('number'):
the_twilio_config['number'][str(tconfig['number'])] = tconfig
if tconfig.get('whatsapp number'):
the_twilio_config['whatsapp number'][str(tconfig['whatsapp number'])] = tconfig
if 'default' not in the_twilio_config['name']:
the_twilio_config['name']['default'] = tconfig
if 'name' in tconfig:
the_twilio_config['name'][tconfig['name']] = tconfig
else:
logmessage("improper setup in twilio configuration")
if 'default' not in the_twilio_config['name']:
the_twilio_config = None
else:
the_twilio_config = None
return the_twilio_config
twilio_config = get_twilio_config()
app.debug = False
app.handle_url_build_error = my_default_url
app.config['CONTAINER_CLASS'] = 'container-fluid' if daconfig.get('admin full width', False) else 'container'
app.config['USE_GOOGLE_LOGIN'] = False
app.config['USE_FACEBOOK_LOGIN'] = False
app.config['USE_ZITADEL_LOGIN'] = False
app.config['USE_TWITTER_LOGIN'] = False
app.config['USE_AUTH0_LOGIN'] = False
app.config['USE_KEYCLOAK_LOGIN'] = False
app.config['USE_AZURE_LOGIN'] = False
app.config['USE_GOOGLE_DRIVE'] = False
app.config['USE_ONEDRIVE'] = False
app.config['USE_PHONE_LOGIN'] = False
app.config['USE_GITHUB'] = False
app.config['USE_PASSWORD_LOGIN'] = not bool(daconfig.get('password login', True) is False)
app.config['AUTO_LOGIN'] = daconfig.get('auto login', False)
if twilio_config is not None and daconfig.get('phone login', False) is True:
app.config['USE_PHONE_LOGIN'] = True
if 'oauth' in daconfig:
app.config['OAUTH_CREDENTIALS'] = daconfig['oauth']
app.config['USE_GOOGLE_LOGIN'] = bool('google' in daconfig['oauth'] and not ('enable' in daconfig['oauth']['google'] and daconfig['oauth']['google']['enable'] is False))
app.config['USE_FACEBOOK_LOGIN'] = bool('facebook' in daconfig['oauth'] and not ('enable' in daconfig['oauth']['facebook'] and daconfig['oauth']['facebook']['enable'] is False))
app.config['USE_ZITADEL_LOGIN'] = bool('zitadel' in daconfig['oauth'] and not ('enable' in daconfig['oauth']['zitadel'] and daconfig['oauth']['zitadel']['enable'] is False))
app.config['USE_TWITTER_LOGIN'] = bool('twitter' in daconfig['oauth'] and not ('enable' in daconfig['oauth']['twitter'] and daconfig['oauth']['twitter']['enable'] is False))
app.config['USE_AUTH0_LOGIN'] = bool('auth0' in daconfig['oauth'] and not ('enable' in daconfig['oauth']['auth0'] and daconfig['oauth']['auth0']['enable'] is False))
app.config['USE_KEYCLOAK_LOGIN'] = bool('keycloak' in daconfig['oauth'] and not ('enable' in daconfig['oauth']['keycloak'] and daconfig['oauth']['keycloak']['enable'] is False))
app.config['USE_AZURE_LOGIN'] = bool('azure' in daconfig['oauth'] and not ('enable' in daconfig['oauth']['azure'] and daconfig['oauth']['azure']['enable'] is False))
app.config['USE_GOOGLE_DRIVE'] = bool('googledrive' in daconfig['oauth'] and not ('enable' in daconfig['oauth']['googledrive'] and daconfig['oauth']['googledrive']['enable'] is False))
app.config['USE_ONEDRIVE'] = bool('onedrive' in daconfig['oauth'] and not ('enable' in daconfig['oauth']['onedrive'] and daconfig['oauth']['onedrive']['enable'] is False))
app.config['USE_GITHUB'] = bool('github' in daconfig['oauth'] and not ('enable' in daconfig['oauth']['github'] and daconfig['oauth']['github']['enable'] is False))
else:
app.config['OAUTH_CREDENTIALS'] = {}
app.config['USE_PYPI'] = daconfig.get('pypi', False)
if daconfig.get('button size', 'medium') == 'medium':
app.config['BUTTON_CLASS'] = 'btn-da'
elif daconfig['button size'] == 'large':
app.config['BUTTON_CLASS'] = 'btn-lg btn-da'
elif daconfig['button size'] == 'small':
app.config['BUTTON_CLASS'] = 'btn-sm btn-da'
else:
app.config['BUTTON_CLASS'] = 'btn-da'
if daconfig.get('button style', 'normal') == 'normal':
app.config['BUTTON_STYLE'] = 'btn-'
elif daconfig['button style'] == 'outline':
app.config['BUTTON_STYLE'] = 'btn-outline-'
else:
app.config['BUTTON_STYLE'] = 'btn-'
BUTTON_COLOR_NAV_LOGIN = daconfig['button colors'].get('navigation bar login', 'primary')
app.config['FOOTER_CLASS'] = str(daconfig.get('footer css class', 'bg-light')).strip() + ' dafooter'
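# Gather per-language HTML fragments for the login, register, interview, and other pages,
# along with the language-specific "main page" text parts, from the configuration.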
def get_page_parts():
the_page_parts = {}
if 'global footer' in daconfig:
if isinstance(daconfig['global footer'], dict):
the_page_parts['global footer'] = {}
for lang, val in daconfig['global footer'].items():
the_page_parts['global footer'][lang] = Markup(val)
else:
the_page_parts['global footer'] = {'*': Markup(str(daconfig['global footer']))}
for page_key in ('login page', 'register page', 'interview page', 'start page', 'profile page', 'reset password page', 'forgot password page', 'change password page', '404 page'):
for part_key in ('title', 'tab title', 'extra css', 'extra javascript', 'heading', 'pre', 'submit', 'post', 'footer', 'navigation bar html'):
key = page_key + ' ' + part_key
if key in daconfig:
if isinstance(daconfig[key], dict):
the_page_parts[key] = {}
for lang, val in daconfig[key].items():
the_page_parts[key][lang] = Markup(val)
else:
the_page_parts[key] = {'*': Markup(str(daconfig[key]))}
the_main_page_parts = {}
lang_list = set()
main_page_parts_list = (
'main page back button label',
'main page continue button label',
'main page corner back button label',
'main page exit label',
'main page exit link',
'main page exit url',
'main page footer',
'main page help label',
'main page logo',
'main page navigation bar html',
'main page post',
'main page pre',
'main page resume button label',
'main page right',
'main page short logo',
'main page short title',
'main page submit',
'main page subtitle',
'main page title url opens in other window',
'main page title url',
'main page title',
'main page under')
for key in main_page_parts_list:
if key in daconfig and isinstance(daconfig[key], dict):
for lang in daconfig[key]:
lang_list.add(lang)
lang_list.add(DEFAULT_LANGUAGE)
lang_list.add('*')
for lang in lang_list:
the_main_page_parts[lang] = {}
for key in main_page_parts_list:
for lang in lang_list:
if key in daconfig:
if isinstance(daconfig[key], dict):
the_main_page_parts[lang][key] = daconfig[key].get(lang, daconfig[key].get('*', ''))
else:
the_main_page_parts[lang][key] = daconfig[key]
else:
the_main_page_parts[lang][key] = ''
if the_main_page_parts[DEFAULT_LANGUAGE][key] == '' and the_main_page_parts['*'][key] != '':
the_main_page_parts[DEFAULT_LANGUAGE][key] = the_main_page_parts['*'][key]
return (the_page_parts, the_main_page_parts)
if DEBUG_BOOT:
boot_log("server: getting page parts from configuration")
(page_parts, main_page_parts) = get_page_parts()
if DEBUG_BOOT:
boot_log("server: finished getting page parts from configuration")
ga_configured = bool(google_config.get('analytics id', None) is not None)
if google_config.get('analytics id', None) is not None or daconfig.get('segment id', None) is not None:
analytics_configured = True
reserved_argnames = ('i', 'json', 'js_target', 'from_list', 'session', 'cache', 'reset', 'new_session', 'action', 'utm_source', 'utm_medium', 'utm_campaign', 'utm_term', 'utm_content')
else:
analytics_configured = False
reserved_argnames = ('i', 'json', 'js_target', 'from_list', 'session', 'cache', 'reset', 'new_session', 'action')
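# Retrieve the stored SMS interview session (if any) for a phone number from Redis.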
def get_sms_session(phone_number, config='default'):
sess_info = None
if twilio_config is None:
raise DAError("get_sms_session: Twilio not enabled")
if config not in twilio_config['name']:
raise DAError("get_sms_session: Invalid twilio configuration")
tconfig = twilio_config['name'][config]
phone_number = docassemble.base.functions.phone_number_in_e164(phone_number)
if phone_number is None:
raise DAError("terminate_sms_session: phone_number " + str(phone_number) + " is invalid")
sess_contents = r.get('da:sms:client:' + phone_number + ':server:' + tconfig['number'])
if sess_contents is not None:
try:
sess_info = fix_pickle_obj(sess_contents)
except:
    logmessage("get_sms_session: unable to decode session information")
    return None
sess_info['email'] = None
if 'user_id' in sess_info and sess_info['user_id'] is not None:
user = load_user(sess_info['user_id'])
if user is not None:
sess_info['email'] = user.email
return sess_info
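# Create an SMS interview session in Redis for a phone number, filling in the interview
# filename, session ID, secret, and user from the current request context when not supplied.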
def initiate_sms_session(phone_number, yaml_filename=None, uid=None, secret=None, encrypted=None, user_id=None, email=None, new=False, config='default'):
phone_number = docassemble.base.functions.phone_number_in_e164(phone_number)
if phone_number is None:
raise DAError("initiate_sms_session: phone_number " + str(phone_number) + " is invalid")
if config not in twilio_config['name']:
raise DAError("get_sms_session: Invalid twilio configuration")
tconfig = twilio_config['name'][config]
the_current_info = docassemble.base.functions.get_current_info()
if yaml_filename is None:
yaml_filename = the_current_info.get('yaml_filename', None)
if yaml_filename is None:
yaml_filename = default_yaml_filename
temp_user_id = None
if user_id is None and email is not None:
user = db.session.execute(select(UserModel).where(and_(UserModel.email.ilike(email), UserModel.active == True))).scalar() # noqa: E712 # pylint: disable=singleton-comparison
if user is not None:
user_id = user.id
if user_id is None:
if not new:
if 'user' in the_current_info:
if 'theid' in the_current_info['user']:
if the_current_info['user'].get('is_authenticated', False):
user_id = the_current_info['user']['theid']
else:
temp_user_id = the_current_info['user']['theid']
if user_id is None and temp_user_id is None:
new_temp_user = TempUser()
db.session.add(new_temp_user)
db.session.commit()
temp_user_id = new_temp_user.id
if secret is None:
if not new:
secret = the_current_info['secret']
if secret is None:
secret = random_string(16)
if uid is None:
if new:
uid = get_unique_name(yaml_filename, secret)
else:
uid = the_current_info.get('session', None)
if uid is None:
uid = get_unique_name(yaml_filename, secret)
if encrypted is None:
if new:
encrypted = True
else:
encrypted = the_current_info['encrypted']
sess_info = {'yaml_filename': yaml_filename, 'uid': uid, 'secret': secret, 'number': phone_number, 'encrypted': encrypted, 'tempuser': temp_user_id, 'user_id': user_id}
# logmessage("initiate_sms_session: setting da:sms:client:" + phone_number + ':server:' + tconfig['number'] + " to " + str(sess_info))
r.set('da:sms:client:' + phone_number + ':server:' + tconfig['number'], pickle.dumps(sess_info))
return True
def terminate_sms_session(phone_number, config='default'):
if config not in twilio_config['name']:
raise DAError("get_sms_session: Invalid twilio configuration")
tconfig = twilio_config['name'][config]
phone_number = docassemble.base.functions.phone_number_in_e164(phone_number)
r.delete('da:sms:client:' + phone_number + ':server:' + tconfig['number'])
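# Force https:// in a URL when the server is configured to upgrade http to https.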
def fix_http(url):
if HTTP_TO_HTTPS:
return re.sub(r'^http:', 'https:', url)
return url
def safe_quote_func(string, safe='', encoding=None, errors=None): # pylint: disable=unused-argument
return urllibquote(string, safe='', encoding=encoding, errors=errors)
def remove_question_package(args):
if '_question' in args:
del args['_question']
if '_package' in args:
del args['_package']
def encrypt_next(args):
if 'next' not in args:
return
args['next'] = re.sub(r'\s', '', encrypt_phrase(args['next'], app.secret_key)).rstrip('=')
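# Resolve a file reference (DAFile-type object, numeric file ID, package path, literal URL,
# or special keyword such as 'login' or 'interview') to a URL.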
def get_url_from_file_reference(file_reference, **kwargs):
if 'jsembed' in docassemble.base.functions.this_thread.misc or COOKIELESS_SESSIONS:
kwargs['_external'] = True
privileged = kwargs.get('privileged', False)
if isinstance(file_reference, DAFileList) and len(file_reference.elements) > 0:
file_reference = file_reference.elements[0]
elif isinstance(file_reference, DAFileCollection):
file_reference = file_reference._first_file()
elif isinstance(file_reference, DAStaticFile):
return file_reference.url_for(**kwargs)
if isinstance(file_reference, DAFile) and hasattr(file_reference, 'number'):
file_number = file_reference.number
if privileged or can_access_file_number(file_number, uids=get_session_uids()):
url_properties = {}
if hasattr(file_reference, 'filename') and len(file_reference.filename) and file_reference.has_specific_filename:
url_properties['display_filename'] = file_reference.filename
if hasattr(file_reference, 'extension'):
url_properties['ext'] = file_reference.extension
for key, val in kwargs.items():
url_properties[key] = val
the_file = SavedFile(file_number)
if kwargs.get('temporary', False):
return the_file.temp_url_for(**url_properties)
return the_file.url_for(**url_properties)
file_reference = str(file_reference)
if re.search(r'^https?://', file_reference) or re.search(r'^mailto:', file_reference) or file_reference.startswith('/') or file_reference.startswith('?'):
if '?' not in file_reference:
args = {}
for key, val in kwargs.items():
if key in ('_package', '_question', '_external'):
continue
args[key] = val
if len(args) > 0:
if file_reference.startswith('mailto:') and 'body' in args:
args['body'] = re.sub(r'(?<!\r)\n', '\r\n', args['body'], flags=re.MULTILINE)  # pass MULTILINE as flags, not as the count argument
return file_reference + '?' + urlencode(args, quote_via=safe_quote_func)
return file_reference
kwargs_with_i = copy.copy(kwargs)
if 'i' not in kwargs_with_i:
yaml_filename = docassemble.base.functions.this_thread.current_info.get('yaml_filename', None)
if yaml_filename is not None:
kwargs_with_i['i'] = yaml_filename
if file_reference in ('login', 'signin'):
remove_question_package(kwargs)
return url_for('user.login', **kwargs)
if file_reference == 'profile':
remove_question_package(kwargs)
return url_for('user_profile_page', **kwargs)
if file_reference == 'change_password':
remove_question_package(kwargs)
return url_for('user.change_password', **kwargs)
if file_reference == 'register':
remove_question_package(kwargs)
return url_for('user.register', **kwargs)
if file_reference == 'config':
remove_question_package(kwargs)
return url_for('config_page', **kwargs)
if file_reference == 'leave':
remove_question_package(kwargs)
encrypt_next(kwargs)
return url_for('leave', **kwargs)
if file_reference == 'logout':
remove_question_package(kwargs)
encrypt_next(kwargs)
return url_for('user.logout', **kwargs)
if file_reference == 'restart':
remove_question_package(kwargs_with_i)
return url_for('restart_session', **kwargs_with_i)
if file_reference == 'new_session':
remove_question_package(kwargs_with_i)
return url_for('new_session_endpoint', **kwargs_with_i)
if file_reference == 'help':
return 'javascript:daShowHelpTab()'
if file_reference == 'interview':
remove_question_package(kwargs)
docassemble.base.functions.modify_i_argument(kwargs)
return url_for('index', **kwargs)
if file_reference == 'flex_interview':
remove_question_package(kwargs)
how_called = docassemble.base.functions.this_thread.misc.get('call', None)
if how_called is None:
return url_for('index', **kwargs)
try:
if int(kwargs.get('new_session')):
is_new = True
del kwargs['new_session']
else:
is_new = False
except:
is_new = False
if how_called[0] in ('start', 'run'):
del kwargs['i']
kwargs['package'] = how_called[1]
kwargs['filename'] = how_called[2]
if is_new:
return url_for('redirect_to_interview_in_package', **kwargs)
return url_for('run_interview_in_package', **kwargs)
if how_called[0] in ('start_dispatch', 'run_dispatch'):
del kwargs['i']
kwargs['dispatch'] = how_called[1]
if is_new:
return url_for('redirect_to_interview', **kwargs)
return url_for('run_interview', **kwargs)
if how_called[0] in ('start_directory', 'run_directory'):
del kwargs['i']
kwargs['package'] = how_called[1]
kwargs['directory'] = how_called[2]
kwargs['filename'] = how_called[3]
if is_new:
return url_for('redirect_to_interview_in_package_directory', **kwargs)
return url_for('run_interview_in_package_directory', **kwargs)
if is_new:
kwargs['new_session'] = 1
return url_for('index', **kwargs)
if file_reference == 'interviews':
remove_question_package(kwargs)
return url_for('interview_list', **kwargs)
if file_reference == 'exit':
remove_question_package(kwargs_with_i)
encrypt_next(kwargs)
return url_for('exit_endpoint', **kwargs_with_i)
if file_reference == 'exit_logout':
remove_question_package(kwargs_with_i)
encrypt_next(kwargs)
return url_for('exit_logout', **kwargs_with_i)
if file_reference == 'dispatch':
remove_question_package(kwargs)
return url_for('interview_start', **kwargs)
if file_reference == 'manage':
remove_question_package(kwargs)
return url_for('manage_account', **kwargs)
if file_reference == 'interview_list':
remove_question_package(kwargs)
return url_for('interview_list', **kwargs)
if file_reference == 'playground':
remove_question_package(kwargs)
return url_for('playground_page', **kwargs)
if file_reference == 'playgroundtemplate':
kwargs['section'] = 'template'
remove_question_package(kwargs)
return url_for('playground_files', **kwargs)
if file_reference == 'playgroundstatic':
kwargs['section'] = 'static'
remove_question_package(kwargs)
return url_for('playground_files', **kwargs)
if file_reference == 'playgroundsources':
kwargs['section'] = 'sources'
remove_question_package(kwargs)
return url_for('playground_files', **kwargs)
if file_reference == 'playgroundmodules':
kwargs['section'] = 'modules'
remove_question_package(kwargs)
return url_for('playground_files', **kwargs)
if file_reference == 'playgroundpackages':
remove_question_package(kwargs)
return url_for('playground_packages', **kwargs)
if file_reference == 'playgroundfiles':
remove_question_package(kwargs)
return url_for('playground_files', **kwargs)
if file_reference == 'create_playground_package':
remove_question_package(kwargs)
return url_for('create_playground_package', **kwargs)
if file_reference == 'configuration':
remove_question_package(kwargs)
return url_for('config_page', **kwargs)
if file_reference == 'root':
remove_question_package(kwargs)
return url_for('rootindex', **kwargs)
if file_reference == 'run':
remove_question_package(kwargs)
return url_for('run_interview_in_package', **kwargs)
if file_reference == 'run_dispatch':
remove_question_package(kwargs)
return url_for('run_interview', **kwargs)
if file_reference == 'run_new':
remove_question_package(kwargs)
return url_for('redirect_to_interview_in_package', **kwargs)
if file_reference == 'run_new_dispatch':
remove_question_package(kwargs)
return url_for('redirect_to_interview', **kwargs)
if re.search('^[0-9]+$', file_reference):
remove_question_package(kwargs)
file_number = file_reference
if kwargs.get('temporary', False):
url = SavedFile(file_number).temp_url_for(**kwargs)
elif can_access_file_number(file_number, uids=get_session_uids()):
url = SavedFile(file_number).url_for(**kwargs)
else:
logmessage("Problem accessing " + str(file_number))
url = 'about:blank'
else:
question = kwargs.get('_question', None)
package_arg = kwargs.get('_package', None)
if 'ext' in kwargs and kwargs['ext'] is not None:
extn = kwargs['ext']
extn = re.sub(r'^\.', '', extn)
extn = '.' + extn
else:
extn = ''
parts = file_reference.split(':')
if len(parts) < 2:
file_reference = re.sub(r'^data/static/', '', file_reference)
the_package = None
if question is not None and question.from_source is not None and hasattr(question.from_source, 'package'):
the_package = question.from_source.package
if the_package is None and package_arg is not None:
the_package = package_arg
if the_package is None:
the_package = 'docassemble.base'
parts = [the_package, file_reference]
parts[1] = re.sub(r'^data/[^/]+/', '', parts[1])
url = url_if_exists(parts[0] + ':data/static/' + parts[1] + extn, **kwargs)
return url
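# Map user IDs to user objects, adding a fake anonymous user under the key -1.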
def user_id_dict():
output = {}
for user in db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles))).unique().scalars():
output[user.id] = user
anon = FakeUser()
anon_role = FakeRole()
anon_role.name = 'anonymous'
anon.roles = [anon_role]
anon.id = -1
anon.firstname = 'Anonymous'
anon.lastname = 'User'
output[-1] = anon
return output
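# The helpers below load YAML data files shipped with docassemble.base (base words,
# code examples cache, documentation, and docstrings) into dictionaries.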
def get_base_words():
documentation = get_info_from_file_reference('docassemble.base:data/sources/base-words.yml')
if 'fullpath' in documentation and documentation['fullpath'] is not None:
with open(documentation['fullpath'], 'r', encoding='utf-8') as fp:
content = fp.read()
content = fix_tabs.sub(' ', content)
return safeyaml.load(content)
return None
def get_pg_code_cache():
documentation = get_info_from_file_reference('docassemble.base:data/questions/pgcodecache.yml')
if 'fullpath' in documentation and documentation['fullpath'] is not None:
with open(documentation['fullpath'], 'r', encoding='utf-8') as fp:
content = fp.read()
content = fix_tabs.sub(' ', content)
return safeyaml.load(content)
return None
def get_documentation_dict():
documentation = get_info_from_file_reference('docassemble.base:data/questions/documentation.yml')
if 'fullpath' in documentation and documentation['fullpath'] is not None:
with open(documentation['fullpath'], 'r', encoding='utf-8') as fp:
content = fp.read()
content = fix_tabs.sub(' ', content)
return safeyaml.load(content)
return None
def get_name_info():
docstring = get_info_from_file_reference('docassemble.base:data/questions/docstring.yml')
if 'fullpath' in docstring and docstring['fullpath'] is not None:
with open(docstring['fullpath'], 'r', encoding='utf-8') as fp:
content = fp.read()
content = fix_tabs.sub(' ', content)
info = safeyaml.load(content)
for val in info:
info[val]['name'] = val
if 'insert' not in info[val]:
info[val]['insert'] = val
if 'show' not in info[val]:
info[val]['show'] = False
if 'exclude' not in info[val]:
info[val]['exclude'] = False
return info
return None
def get_title_documentation():
documentation = get_info_from_file_reference('docassemble.base:data/questions/title_documentation.yml')
if 'fullpath' in documentation and documentation['fullpath'] is not None:
with open(documentation['fullpath'], 'r', encoding='utf-8') as fp:
content = fp.read()
content = fix_tabs.sub(' ', content)
return safeyaml.load(content)
return None
def pad_to_16(the_string):
if len(the_string) >= 16:
return the_string[:16]
return str(the_string) + (16 - len(the_string)) * '0'
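# Decrypt a session's SpeakList, UserDict, and ChatLog rows so they are stored unencrypted.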
def decrypt_session(secret, user_code=None, filename=None):
# logmessage("decrypt_session: user_code is " + str(user_code) + " and filename is " + str(filename))
nowtime = datetime.datetime.utcnow()
if user_code is None or filename is None or secret is None:
return
for record in db.session.execute(select(SpeakList).filter_by(key=user_code, filename=filename, encrypted=True).with_for_update()).scalars():
phrase = decrypt_phrase(record.phrase, secret)
record.phrase = pack_phrase(phrase)
record.encrypted = False
db.session.commit()
for record in db.session.execute(select(UserDict).filter_by(key=user_code, filename=filename, encrypted=True).order_by(UserDict.indexno).with_for_update()).scalars():
the_dict = decrypt_dictionary(record.dictionary, secret)
record.dictionary = pack_dictionary(the_dict)
record.encrypted = False
record.modtime = nowtime
db.session.commit()
for record in db.session.execute(select(ChatLog).filter_by(key=user_code, filename=filename, encrypted=True).with_for_update()).scalars():
phrase = decrypt_phrase(record.message, secret)
record.message = pack_phrase(phrase)
record.encrypted = False
db.session.commit()
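# Encrypt a session's SpeakList, UserDict, and ChatLog rows using the given secret.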
def encrypt_session(secret, user_code=None, filename=None):
# logmessage("encrypt_session: user_code is " + str(user_code) + " and filename is " + str(filename))
nowtime = datetime.datetime.utcnow()
if user_code is None or filename is None or secret is None:
return
for record in db.session.execute(select(SpeakList).filter_by(key=user_code, filename=filename, encrypted=False).with_for_update()).scalars():
phrase = unpack_phrase(record.phrase)
record.phrase = encrypt_phrase(phrase, secret)
record.encrypted = True
db.session.commit()
for record in db.session.execute(select(UserDict).filter_by(key=user_code, filename=filename, encrypted=False).order_by(UserDict.indexno).with_for_update()).scalars():
the_dict = unpack_dictionary(record.dictionary)
record.dictionary = encrypt_dictionary(the_dict, secret)
record.encrypted = True
record.modtime = nowtime
db.session.commit()
for record in db.session.execute(select(ChatLog).filter_by(key=user_code, filename=filename, encrypted=False).with_for_update()).scalars():
phrase = unpack_phrase(record.message)
record.message = encrypt_phrase(phrase, secret)
record.encrypted = True
db.session.commit()
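# Re-encrypt a user's stored sessions and global objects under a new secret, migrating
# any temporary-user data to the authenticated user along the way.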
def substitute_secret(oldsecret, newsecret, user=None, to_convert=None):
if user is None:
user = current_user
device_id = request.cookies.get('ds', None)
if device_id is None:
device_id = random_string(16)
the_current_info = current_info(yaml=None, req=request, action=None, session_info=None, secret=oldsecret, device_id=device_id)
docassemble.base.functions.this_thread.current_info = the_current_info
temp_user = session.get('tempuser', None)
# logmessage("substitute_secret: " + repr(oldsecret) + " and " + repr(newsecret) + " and temp_user is " + repr(temp_user))
if oldsecret in ('None', newsecret):
# logmessage("substitute_secret: returning new secret without doing anything")
return newsecret
# logmessage("substitute_secret: continuing")
if temp_user is not None:
temp_user_info = {'email': None, 'the_user_id': 't' + str(temp_user), 'theid': temp_user, 'roles': []}
the_current_info['user'] = temp_user_info
for object_entry in db.session.execute(select(GlobalObjectStorage).filter_by(user_id=user.id, encrypted=True).with_for_update()).scalars():
try:
object_entry.value = encrypt_object(decrypt_object(object_entry.value, oldsecret), newsecret)
except Exception as err:
logmessage("Failure to change encryption of object " + object_entry.key + ": " + str(err))
db.session.commit()
if to_convert is None:
to_do = set()
if 'i' in session and 'uid' in session: # TEMPORARY
get_session(session['i'])
if 'sessions' in session:
for filename, info in session['sessions'].items():
to_do.add((filename, info['uid']))
for the_record in db.session.execute(select(UserDict.filename, UserDict.key).filter_by(user_id=user.id).group_by(UserDict.filename, UserDict.key)):
to_do.add((the_record.filename, the_record.key))
for the_record in db.session.execute(select(UserDictKeys.filename, UserDictKeys.key).join(UserDict, and_(UserDictKeys.filename == UserDict.filename, UserDictKeys.key == UserDict.key)).where(and_(UserDictKeys.user_id == user.id)).group_by(UserDictKeys.filename, UserDictKeys.key)):
to_do.add((the_record.filename, the_record.key))
else:
to_do = set(to_convert)
for (filename, user_code) in to_do:
the_current_info['yaml_filename'] = filename
the_current_info['session'] = user_code
the_current_info['encrypted'] = True
# obtain_lock(user_code, filename)
# logmessage("substitute_secret: filename is " + str(filename) + " and key is " + str(user_code))
for record in db.session.execute(select(SpeakList).filter_by(key=user_code, filename=filename, encrypted=True).with_for_update()).scalars():
try:
phrase = decrypt_phrase(record.phrase, oldsecret)
record.phrase = encrypt_phrase(phrase, newsecret)
except:
pass
db.session.commit()
for object_entry in db.session.execute(select(GlobalObjectStorage).where(and_(GlobalObjectStorage.key.like('da:uid:' + user_code + ':i:' + filename + ':%'), GlobalObjectStorage.encrypted == True)).with_for_update()).scalars(): # noqa: E712 # pylint: disable=singleton-comparison
try:
object_entry.value = encrypt_object(decrypt_object(object_entry.value, oldsecret), newsecret)
except:
pass
db.session.commit()
for record in db.session.execute(select(UserDict).filter_by(key=user_code, filename=filename, encrypted=True).order_by(UserDict.indexno).with_for_update()).scalars():
# logmessage("substitute_secret: record was encrypted")
try:
the_dict = decrypt_dictionary(record.dictionary, oldsecret)
except:
logmessage("substitute_secret: error decrypting dictionary for filename " + filename + " and uid " + user_code)
continue
if not isinstance(the_dict, dict):
logmessage("substitute_secret: dictionary was not a dict for filename " + filename + " and uid " + user_code)
continue
if temp_user:
try:
old_entry = the_dict['_internal']['user_local']['t' + str(temp_user)]
del the_dict['_internal']['user_local']['t' + str(temp_user)]
the_dict['_internal']['user_local'][str(user.id)] = old_entry
except:
pass
record.dictionary = encrypt_dictionary(the_dict, newsecret)
db.session.commit()
if temp_user:
for record in db.session.execute(select(UserDict).filter_by(key=user_code, filename=filename, encrypted=False).order_by(UserDict.indexno).with_for_update()).scalars():
try:
the_dict = unpack_dictionary(record.dictionary)
except:
logmessage("substitute_secret: error unpacking dictionary for filename " + filename + " and uid " + user_code)
continue
if not isinstance(the_dict, dict):
logmessage("substitute_secret: dictionary was not a dict for filename " + filename + " and uid " + user_code)
continue
try:
old_entry = the_dict['_internal']['user_local']['t' + str(temp_user)]
del the_dict['_internal']['user_local']['t' + str(temp_user)]
the_dict['_internal']['user_local'][str(user.id)] = old_entry
except:
pass
record.dictionary = pack_dictionary(the_dict)
db.session.commit()
for record in db.session.execute(select(ChatLog).filter_by(key=user_code, filename=filename, encrypted=True).with_for_update()).scalars():
try:
phrase = decrypt_phrase(record.message, oldsecret)
except:
logmessage("substitute_secret: error decrypting phrase for filename " + filename + " and uid " + user_code)
continue
record.message = encrypt_phrase(phrase, newsecret)
db.session.commit()
# release_lock(user_code, filename)
for object_entry in db.session.execute(select(GlobalObjectStorage).where(and_(GlobalObjectStorage.user_id == user.id, GlobalObjectStorage.encrypted == True)).with_for_update()).scalars(): # noqa: E712 # pylint: disable=singleton-comparison
try:
object_entry.value = encrypt_object(decrypt_object(object_entry.value, oldsecret), newsecret)
except:
pass
db.session.commit()
return newsecret
def MD5Hash(data=None):
if data is None:
data = ''
h = MD5.new()
h.update(bytearray(data, encoding='utf-8'))
return h
def set_request_active(value):
global request_active
request_active = value
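# Copy each developer's Playground modules into the docassemble namespace package
# directory as playground<user_id> packages so they are importable.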
def copy_playground_modules():
root_dir = os.path.join(FULL_PACKAGE_DIRECTORY, 'docassemble')
for d in os.listdir(root_dir):
if re.search(r'^playground[0-9]', d) and os.path.isdir(os.path.join(root_dir, d)):
try:
shutil.rmtree(os.path.join(root_dir, d))
except:
logmessage("copy_playground_modules: error deleting " + os.path.join(root_dir, d))
devs = set()
for user in db.session.execute(select(UserModel.id).join(UserRoles, UserModel.id == UserRoles.user_id).join(Role, UserRoles.role_id == Role.id).where(and_(UserModel.active == True, or_(Role.name == 'admin', Role.name == 'developer')))): # noqa: E712 # pylint: disable=singleton-comparison
devs.add(user.id)
for user_id in devs:
mod_dir = SavedFile(user_id, fix=True, section='playgroundmodules')
local_dirs = [(os.path.join(FULL_PACKAGE_DIRECTORY, 'docassemble', 'playground' + str(user_id)), mod_dir.directory)]
for dirname in mod_dir.list_of_dirs():
local_dirs.append((os.path.join(FULL_PACKAGE_DIRECTORY, 'docassemble', 'playground' + str(user_id) + dirname), os.path.join(mod_dir.directory, dirname)))
for local_dir, mod_directory in local_dirs:
if os.path.isdir(local_dir):
try:
shutil.rmtree(local_dir)
except:
logmessage("copy_playground_modules: error deleting " + local_dir + " before replacing it")
os.makedirs(local_dir, exist_ok=True)
# logmessage("Copying " + str(mod_directory) + " to " + str(local_dir))
for f in [f for f in os.listdir(mod_directory) if re.search(r'^[A-Za-z].*\.py$', f)]:
shutil.copyfile(os.path.join(mod_directory, f), os.path.join(local_dir, f))
# shutil.copytree(mod_dir.directory, local_dir)
with open(os.path.join(local_dir, '__init__.py'), 'w', encoding='utf-8') as the_file:
the_file.write(init_py_file)
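# Recursively process a Playground example list, collecting each example's title,
# documentation link, and syntax-highlighted YAML blocks.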
def proc_example_list(example_list, package, directory, examples):
for example in example_list:
if isinstance(example, dict):
for key, value in example.items():
sublist = []
proc_example_list(value, package, directory, sublist)
examples.append({'title': str(key), 'list': sublist})
break
continue
result = {}
result['id'] = example
result['interview'] = url_for('index', reset=1, i=package + ":data/questions/" + directory + example + ".yml")
example_file = package + ":data/questions/" + directory + example + '.yml'
if package == 'docassemble.base':
result['image'] = url_for('static', filename=directory + example + ".png", v=da_version)
else:
result['image'] = url_for('package_static', package=package, filename=example + ".png")
# logmessage("Giving it " + example_file)
file_info = get_info_from_file_reference(example_file)
# logmessage("Got back " + file_info['fullpath'])
start_block = 1
end_block = 2
if 'fullpath' not in file_info or file_info['fullpath'] is None:
logmessage("proc_example_list: could not find " + example_file)
continue
with open(file_info['fullpath'], 'r', encoding='utf-8') as fp:
content = fp.read()
content = fix_tabs.sub(' ', content)
content = fix_initial.sub('', content)
blocks = list(map(lambda x: x.strip(), document_match.split(content)))
if len(blocks) > 0:
has_context = False
for block in blocks:
if re.search(r'metadata:', block):
try:
the_block = safeyaml.load(block)
if isinstance(the_block, dict) and 'metadata' in the_block:
the_metadata = the_block['metadata']
result['title'] = the_metadata.get('title', the_metadata.get('short title', word('Untitled')))
if isinstance(result['title'], dict):
result['title'] = result['title'].get('en', word('Untitled'))
result['title'] = result['title'].rstrip()
result['documentation'] = the_metadata.get('documentation', None)
start_block = int(the_metadata.get('example start', 1))
end_block = int(the_metadata.get('example end', start_block)) + 1
break
except Exception as err:
logmessage("proc_example_list: error processing " + example_file + ": " + str(err))
continue
if 'title' not in result:
logmessage("proc_example_list: no title in " + example_file)
continue
if re.search(r'metadata:', blocks[0]) and start_block > 0:
initial_block = 1
else:
initial_block = 0
if start_block > initial_block:
result['before_html'] = highlight("\n---\n".join(blocks[initial_block:start_block]) + "\n---", YamlLexer(), HtmlFormatter(cssclass='bg-light highlight dahighlight'))
has_context = True
else:
result['before_html'] = ''
if len(blocks) > end_block:
result['after_html'] = highlight("---\n" + "\n---\n".join(blocks[end_block:len(blocks)]), YamlLexer(), HtmlFormatter(cssclass='bg-light highlight dahighlight'))
has_context = True
else:
result['after_html'] = ''
result['source'] = "\n---\n".join(blocks[start_block:end_block])
result['html'] = highlight(result['source'], YamlLexer(), HtmlFormatter(cssclass='bg-light highlight dahighlight'))
result['has_context'] = has_context
else:
logmessage("proc_example_list: no blocks in " + example_file)
continue
examples.append(result)
def get_examples():
examples = []
file_list = daconfig.get('playground examples', ['docassemble.base:data/questions/example-list.yml'])
if not isinstance(file_list, list):
file_list = [file_list]
for the_file in file_list:
if not isinstance(the_file, str):
continue
example_list_file = get_info_from_file_reference(the_file)
the_package = ''
if 'fullpath' in example_list_file and example_list_file['fullpath'] is not None:
if 'package' in example_list_file:
the_package = example_list_file['package']
else:
continue
if the_package == 'docassemble.base':
the_directory = 'examples/'
else:
the_directory = ''
if os.path.exists(example_list_file['fullpath']):
try:
with open(example_list_file['fullpath'], 'r', encoding='utf-8') as fp:
content = fp.read()
content = fix_tabs.sub(' ', content)
proc_example_list(safeyaml.load(content), the_package, the_directory, examples)
except Exception as the_err:
logmessage("There was an error loading the Playground examples:" + str(the_err))
# logmessage("Examples: " + str(examples))
return examples
def add_timestamps(the_dict, manual_user_id=None):
nowtime = datetime.datetime.utcnow()
the_dict['_internal']['starttime'] = nowtime
the_dict['_internal']['modtime'] = nowtime
if manual_user_id is not None or (current_user and current_user.is_authenticated):
if manual_user_id is not None:
the_user_id = manual_user_id
else:
the_user_id = current_user.id
the_dict['_internal']['accesstime'][the_user_id] = nowtime
else:
the_dict['_internal']['accesstime'][-1] = nowtime
def fresh_dictionary():
the_dict = copy.deepcopy(initial_dict)
add_timestamps(the_dict)
return the_dict
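# Expire a session's presence keys in Redis (and optionally delete the session),
# marking the user as checked out of the interview.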
def manual_checkout(manual_session_id=None, manual_filename=None, user_id=None, delete_session=False, temp_user_id=None):
if manual_filename is not None:
yaml_filename = manual_filename
else:
yaml_filename = docassemble.base.functions.this_thread.current_info.get('yaml_filename', None)
if yaml_filename is None:
return
if manual_session_id is not None:
session_id = manual_session_id
else:
session_info = get_session(yaml_filename)
if session_info is not None:
session_id = session_info['uid']
else:
session_id = None
if session_id is None:
return
if user_id is None:
if temp_user_id is not None:
the_user_id = 't' + str(temp_user_id)
else:
if current_user.is_anonymous:
the_user_id = 't' + str(session.get('tempuser', None))
else:
the_user_id = current_user.id
else:
the_user_id = user_id
if delete_session:
if not (not current_user.is_anonymous and user_id != current_user.id):
clear_specific_session(yaml_filename, session_id)
endpart = ':uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)
pipe = r.pipeline()
pipe.expire('da:session' + endpart, 12)
pipe.expire('da:html' + endpart, 12)
pipe.expire('da:interviewsession' + endpart, 12)
pipe.expire('da:ready' + endpart, 12)
pipe.expire('da:block' + endpart, 12)
pipe.execute()
# r.publish('da:monitor', json.dumps({'messagetype': 'refreshsessions'}))
# logmessage("Done checking out from " + endpart)
def chat_partners_available(session_id, yaml_filename, the_user_id, mode, partner_roles):
key = 'da:session:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)
peer_ok = bool(mode in ('peer', 'peerhelp'))
help_ok = bool(mode in ('help', 'peerhelp'))
potential_partners = set()
if help_ok and len(partner_roles) and not r.exists('da:block:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)):
chat_session_key = 'da:interviewsession:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)
for role in partner_roles:
for the_key in r.keys('da:monitor:role:' + role + ':userid:*'):
user_id = re.sub(r'^.*:userid:', '', the_key.decode())
potential_partners.add(user_id)
for the_key in r.keys('da:monitor:chatpartners:*'):
the_key = the_key.decode()
user_id = re.sub(r'^.*chatpartners:', '', the_key)
if user_id not in potential_partners:
for chat_key in r.hgetall(the_key):
if chat_key.decode() == chat_session_key:
potential_partners.add(user_id)
num_peer = 0
if peer_ok:
for sess_key in r.keys('da:session:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:*'):
if sess_key.decode() != key:
num_peer += 1
result = ChatPartners()
result.peer = num_peer
result.help = len(potential_partners)
return result
def do_redirect(url, is_ajax, is_json, js_target):
if is_ajax:
return jsonify(action='redirect', url=url, csrf_token=generate_csrf())
if is_json:
if re.search(r'\?', url):
url = url + '&json=1'
else:
url = url + '?json=1'
if js_target and 'js_target=' not in url:
if re.search(r'\?', url):
url = url + '&js_target=' + js_target
else:
url = url + '?js_target=' + js_target
return redirect(url)
def do_refresh(is_ajax, yaml_filename):
if is_ajax:
return jsonify(action='refresh', csrf_token=generate_csrf())
return redirect(url_for('index', i=yaml_filename))
def standard_scripts(interview_language=DEFAULT_LANGUAGE, external=False):
if interview_language in ('ar', 'cs', 'et', 'he', 'ka', 'nl', 'ro', 'th', 'zh', 'az', 'da', 'fa', 'hu', 'kr', 'no', 'ru', 'tr', 'bg', 'de', 'fi', 'id', 'kz', 'pl', 'sk', 'uk', 'ca', 'el', 'fr', 'it', 'sl', 'uz', 'cr', 'es', 'gl', 'ja', 'lt', 'pt', 'sv', 'vi'):
fileinput_locale = '\n <script src="' + url_for('static', filename='bootstrap-fileinput/js/locales/' + interview_language + '.js', v=da_version, _external=external) + '"></script>'
else:
fileinput_locale = ''
return '\n <script src="' + url_for('static', filename='app/bundle.js', v=da_version, _external=external) + '"></script>' + fileinput_locale
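# Build the Google Maps and Google Analytics script tags (or equivalent raw JavaScript) for an interview page.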
def additional_scripts(interview_status, yaml_filename, as_javascript=False):
scripts = ''
interview_package = re.sub(r'^docassemble\.', '', re.sub(r':.*', '', yaml_filename))
interview_filename = re.sub(r'\.ya?ml$', '', re.sub(r'.*[:\/]', '', yaml_filename), flags=re.IGNORECASE)
if 'google maps api key' in google_config:
api_key = google_config.get('google maps api key')
elif 'api key' in google_config:
api_key = google_config.get('api key')
else:
api_key = None
if ga_configured and interview_status.question.interview.options.get('analytics on', True):
ga_ids = google_config.get('analytics id')
else:
ga_ids = None
output_js = ''
if api_key is not None:
region = google_config.get('region', None)
if region is None:
region = ''
else:
region = '&region=' + region
scripts += "\n" + ' <script src="https://maps.googleapis.com/maps/api/js?key=' + api_key + region + '&libraries=places&callback=dagoogleapicallback"></script>'
if as_javascript:
output_js += """\
var daScript = document.createElement('script');
daScript.src = "https://maps.googleapis.com/maps/api/js?key=""" + api_key + """&libraries=places&callback=dagoogleapicallback";
document.head.appendChild(daScript);
"""
if ga_ids is not None:
the_js = """\
var dataLayer = window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
function daPageview(){
var idToUse = daQuestionID['id'];
if (daQuestionID['ga'] != undefined && daQuestionID['ga'] != null){
idToUse = daQuestionID['ga'];
}
if (idToUse != null){
if (!daGAConfigured){"""
for ga_id in ga_ids:
the_js += """
gtag('config', """ + json.dumps(ga_id) + """, {'send_page_view': false""" + (", 'cookie_flags': 'SameSite=None;Secure'" if app.config['SESSION_COOKIE_SECURE'] else '') + """});"""
the_js += """
daGAConfigured = true;
}
gtag('set', 'page_path', """ + json.dumps(interview_package + "/" + interview_filename + "/") + """ + idToUse.replace(/[^A-Za-z0-9]+/g, '_'));
gtag('event', 'page_view', {'page_path': """ + json.dumps(interview_package + "/" + interview_filename + "/") + """ + idToUse.replace(/[^A-Za-z0-9]+/g, '_')});
}
}
"""
scripts += """
<script async src="https://www.googletagmanager.com/gtag/js?id=""" + ga_ids[0] + """"></script>
<script>
""" + the_js + """
</script>
"""
if as_javascript:
# Not good to enable this, since most web sites would have Google Analytics already.
# output_js += """
# var daScript = document.createElement('script');
# daScript.src = "https://www.googletagmanager.com/gtag/js?id=""" + ga_id + """";
# document.head.appendChild(daScript);
# """
output_js += the_js
if as_javascript:
return output_js
return scripts
def additional_css(interview_status, js_only=False):
if 'segment id' in daconfig and interview_status.question.interview.options.get('analytics on', True):
segment_id = daconfig['segment id']
else:
segment_id = None
start_output = ''
the_js = ''
if segment_id is not None:
segment_js = """\
!function(){var analytics=window.analytics=window.analytics||[];if(!analytics.initialize)if(analytics.invoked)window.console&&console.error&&console.error("Segment snippet included twice.");else{analytics.invoked=!0;analytics.methods=["trackSubmit","trackClick","trackLink","trackForm","pageview","identify","reset","group","track","ready","alias","debug","page","once","off","on"];analytics.factory=function(t){return function(){var e=Array.prototype.slice.call(arguments);e.unshift(t);analytics.push(e);return analytics}};for(var t=0;t<analytics.methods.length;t++){var e=analytics.methods[t];analytics[e]=analytics.factory(e)}analytics.load=function(t,e){var n=document.createElement("script");n.type="text/javascript";n.async=!0;n.src="https://cdn.segment.com/analytics.js/v1/"+t+"/analytics.min.js";var a=document.getElementsByTagName("script")[0];a.parentNode.insertBefore(n,a);analytics._loadOptions=e};analytics.SNIPPET_VERSION="4.1.0";
analytics.load(""" + json.dumps(segment_id) + """);
analytics.page();
}}();
function daSegmentEvent(){
var idToUse = daQuestionID['id'];
useArguments = false;
if (daQuestionID['segment'] && daQuestionID['segment']['id']){
idToUse = daQuestionID['segment']['id'];
if (daQuestionID['segment']['arguments']){
for (var keyToUse in daQuestionID['segment']['arguments']){
if (daQuestionID['segment']['arguments'].hasOwnProperty(keyToUse)){
useArguments = true;
break;
}
}
}
}
if (idToUse != null){
if (useArguments){
analytics.track(idToUse.replace(/[^A-Za-z0-9]+/g, '_'), daQuestionID['segment']['arguments']);
}
else{
analytics.track(idToUse.replace(/[^A-Za-z0-9]+/g, '_'));
}
}
}
"""
start_output += """
<script>
""" + segment_js + """\
</script>"""
the_js += segment_js
if len(interview_status.extra_css) > 0:
start_output += '\n' + indent_by("".join(interview_status.extra_css).strip(), 4).rstrip()
if js_only:
return the_js
return start_output
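# Generate the opening HTML of a page: favicon links, Bootstrap and bundle stylesheets,
# and social-media meta tags.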
def standard_html_start(interview_language=DEFAULT_LANGUAGE, debug=False, bootstrap_theme=None, external=False, page_title=None, social=None, yaml_filename=None):
if social is None:
social = {}
if page_title is None:
page_title = app.config['BRAND_NAME']
if bootstrap_theme is None and app.config['BOOTSTRAP_THEME'] is not None:
bootstrap_theme = app.config['BOOTSTRAP_THEME']
if bootstrap_theme is None:
bootstrap_part = '\n <link href="' + url_for('static', filename='bootstrap/css/bootstrap.min.css', v=da_version, _external=external) + '" rel="stylesheet">'
else:
bootstrap_part = '\n <link href="' + bootstrap_theme + '" rel="stylesheet">'
output = '<!DOCTYPE html>\n<html lang="' + interview_language + '" itemscope itemtype="http://schema.org/WebPage">\n <head>\n <meta charset="utf-8">\n <meta name="mobile-web-app-capable" content="yes">\n <meta name="apple-mobile-web-app-capable" content="yes">\n <meta http-equiv="X-UA-Compatible" content="IE=edge">\n <meta name="viewport" content="width=device-width, initial-scale=1">\n ' + ('<link rel="shortcut icon" href="' + url_for('favicon', _external=external, **app.config['FAVICON_PARAMS']) + '">\n ' if app.config['USE_FAVICON'] else '') + ('<link rel="apple-touch-icon" sizes="180x180" href="' + url_for('apple_touch_icon', _external=external, **app.config['FAVICON_PARAMS']) + '">\n ' if app.config['USE_APPLE_TOUCH_ICON'] else '') + ('<link rel="icon" type="image/png" href="' + url_for('favicon_md', _external=external, **app.config['FAVICON_PARAMS']) + '" sizes="32x32">\n ' if app.config['USE_FAVICON_MD'] else '') + ('<link rel="icon" type="image/png" href="' + url_for('favicon_sm', _external=external, **app.config['FAVICON_PARAMS']) + '" sizes="16x16">\n ' if app.config['USE_FAVICON_SM'] else '') + ('<link rel="manifest" href="' + url_for('favicon_site_webmanifest', _external=external, **app.config['FAVICON_PARAMS']) + '">\n ' if app.config['USE_SITE_WEBMANIFEST'] else '') + ('<link rel="mask-icon" href="' + url_for('favicon_safari_pinned_tab', _external=external, **app.config['FAVICON_PARAMS']) + '" color="' + app.config['FAVICON_MASK_COLOR'] + '">\n ' if app.config['USE_SAFARI_PINNED_TAB'] else '') + '<meta name="msapplication-TileColor" content="' + app.config['FAVICON_TILE_COLOR'] + '">\n <meta name="theme-color" content="' + app.config['FAVICON_THEME_COLOR'] + '">\n <script defer src="' + url_for('static', filename='fontawesome/js/all.min.js', v=da_version, _external=external) + '"></script>' + bootstrap_part + '\n <link href="' + url_for('static', filename='app/bundle.css', v=da_version, _external=external) + '" rel="stylesheet">'
if debug:
output += '\n <link href="' + url_for('static', filename='app/pygments.min.css', v=da_version, _external=external) + '" rel="stylesheet">'
page_title = page_title.replace('\n', ' ').replace('"', '&quot;').strip()
for key, val in social.items():
if key not in ('twitter', 'og', 'fb'):
output += '\n <meta name="' + key + '" content="' + social[key] + '">'
if 'description' in social:
output += '\n <meta itemprop="description" content="' + social['description'] + '">'
if 'image' in social:
output += '\n <meta itemprop="image" content="' + social['image'] + '">'
if 'name' in social:
output += '\n <meta itemprop="name" content="' + social['name'] + '">'
else:
output += '\n <meta itemprop="name" content="' + page_title + '">'
if 'twitter' in social:
if 'card' not in social['twitter']:
output += '\n <meta name="twitter:card" content="summary">'
for key, val in social['twitter'].items():
output += '\n <meta name="twitter:' + key + '" content="' + val + '">'
if 'title' not in social['twitter']:
output += '\n <meta name="twitter:title" content="' + page_title + '">'
if 'fb' in social:
for key, val in social['fb'].items():
output += '\n <meta name="fb:' + key + '" content="' + val + '">'
if 'og' in social and 'image' in social['og']:
for key, val in social['og'].items():
output += '\n <meta name="og:' + key + '" content="' + val + '">'
if 'title' not in social['og']:
output += '\n <meta name="og:title" content="' + page_title + '">'
if yaml_filename and 'url' not in social['og']:
output += '\n <meta name="og:url" content="' + url_for('index', i=yaml_filename, _external=True) + '">'
if 'site_name' not in social['og']:
output += '\n    <meta name="og:site_name" content="' + app.config['BRAND_NAME'].replace('\n', ' ').replace('"', '&quot;').strip() + '">'
if 'locale' not in social['og']:
output += '\n <meta name="og:locale" content="' + app.config['OG_LOCALE'] + '">'
if 'type' not in social['og']:
output += '\n <meta name="og:type" content="website">'
return output
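# Post-process an uploaded file: convert GIFs to PNG, auto-orient JPEGs, and generate
# alternate audio formats via ImageMagick, pacpl, and ffmpeg where configured.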
def process_file(saved_file, orig_file, mimetype, extension, initial=True):
if extension == "gif" and daconfig.get('imagemagick', 'convert') is not None:
unconverted = tempfile.NamedTemporaryFile(prefix="datemp", suffix=".gif", delete=False)
converted = tempfile.NamedTemporaryFile(prefix="datemp", suffix=".png", delete=False)
shutil.move(orig_file, unconverted.name)
call_array = [daconfig.get('imagemagick', 'convert'), str(unconverted.name), 'png:' + converted.name]
try:
result = subprocess.run(call_array, timeout=60, check=False).returncode
except subprocess.TimeoutExpired:
logmessage("process_file: convert from gif took too long")
result = 1
if result == 0:
saved_file.copy_from(converted.name, filename=re.sub(r'\.[^\.]+$', '', saved_file.filename) + '.png')
else:
logmessage("process_file: error converting from gif to png")
shutil.move(unconverted.name, saved_file.path)
saved_file.save()
elif extension == "jpg" and daconfig.get('imagemagick', 'convert') is not None:
unrotated = tempfile.NamedTemporaryFile(prefix="datemp", suffix=".jpg", delete=False)
rotated = tempfile.NamedTemporaryFile(prefix="datemp", suffix=".jpg", delete=False)
shutil.move(orig_file, unrotated.name)
call_array = [daconfig.get('imagemagick', 'convert'), str(unrotated.name), '-auto-orient', '-density', '300', 'jpeg:' + rotated.name]
try:
result = subprocess.run(call_array, timeout=60, check=False).returncode
except subprocess.TimeoutExpired:
logmessage("process_file: convert from jpeg took too long")
result = 1
if result == 0:
saved_file.copy_from(rotated.name)
else:
saved_file.copy_from(unrotated.name)
elif initial:
shutil.move(orig_file, saved_file.path)
saved_file.save()
# if mimetype == 'video/quicktime' and daconfig.get('ffmpeg', 'ffmpeg') is not None:
# call_array = [daconfig.get('ffmpeg', 'ffmpeg'), '-i', saved_file.path + '.' + extension, '-vcodec', 'libtheora', '-acodec', 'libvorbis', saved_file.path + '.ogv']
# try:
# result = subprocess.run(call_array, timeout=120).returncode
# except subprocess.TimeoutExpired:
# result = 1
# call_array = [daconfig.get('ffmpeg', 'ffmpeg'), '-i', saved_file.path + '.' + extension, '-vcodec', 'copy', '-acodec', 'copy', saved_file.path + '.mp4']
# try:
# result = subprocess.run(call_array, timeout=120).returncode
# except subprocess.TimeoutExpired:
# result = 1
# if mimetype == 'video/mp4' and daconfig.get('ffmpeg', 'ffmpeg') is not None:
# call_array = [daconfig.get('ffmpeg', 'ffmpeg'), '-i', saved_file.path + '.' + extension, '-vcodec', 'libtheora', '-acodec', 'libvorbis', saved_file.path + '.ogv']
# try:
# result = subprocess.run(call_array, timeout=120).returncode
# except subprocess.TimeoutExpired:
# result = 1
# if mimetype == 'video/ogg' and daconfig.get('ffmpeg', 'ffmpeg') is not None:
# call_array = [daconfig.get('ffmpeg', 'ffmpeg'), '-i', saved_file.path + '.' + extension, '-c:v', 'libx264', '-preset', 'veryslow', '-crf', '22', '-c:a', 'libmp3lame', '-qscale:a', '2', '-ac', '2', '-ar', '44100', saved_file.path + '.mp4']
# try:
# result = subprocess.run(call_array, timeout=120).returncode
# except subprocess.TimeoutExpired:
# result = 1
# if mimetype == 'audio/mpeg' and daconfig.get('pacpl', 'pacpl') is not None:
# call_array = [daconfig.get('pacpl', 'pacpl'), '-t', 'ogg', saved_file.path + '.' + extension]
# try:
# result = subprocess.run(call_array, timeout=120).returncode
# except subprocess.TimeoutExpired:
# result = 1
if mimetype == 'audio/ogg' and daconfig.get('pacpl', 'pacpl') is not None:
call_array = [daconfig.get('pacpl', 'pacpl'), '-t', 'mp3', saved_file.path + '.' + extension]
try:
result = subprocess.run(call_array, timeout=120, check=False).returncode
except subprocess.TimeoutExpired:
result = 1
if mimetype == 'audio/3gpp' and daconfig.get('ffmpeg', 'ffmpeg') is not None:
call_array = [daconfig.get('ffmpeg', 'ffmpeg'), '-i', saved_file.path + '.' + extension, saved_file.path + '.ogg']
try:
result = subprocess.run(call_array, timeout=120, check=False).returncode
except subprocess.TimeoutExpired:
result = 1
call_array = [daconfig.get('ffmpeg', 'ffmpeg'), '-i', saved_file.path + '.' + extension, saved_file.path + '.mp3']
try:
result = subprocess.run(call_array, timeout=120, check=False).returncode
except subprocess.TimeoutExpired:
result = 1
if mimetype in ('audio/x-wav', 'audio/wav') and daconfig.get('pacpl', 'pacpl') is not None:
call_array = [daconfig.get('pacpl', 'pacpl'), '-t', 'mp3', saved_file.path + '.' + extension]
try:
result = subprocess.run(call_array, timeout=120, check=False).returncode
except subprocess.TimeoutExpired:
result = 1
call_array = [daconfig.get('pacpl', 'pacpl'), '-t', 'ogg', saved_file.path + '.' + extension]
try:
result = subprocess.run(call_array, timeout=120, check=False).returncode
except subprocess.TimeoutExpired:
result = 1
# if extension == "pdf":
# make_image_files(saved_file.path)
saved_file.finalize()
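# Move a temporary user's UserDictKeys records over to a registered user, returning the affected sessions.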
def sub_temp_user_dict_key(temp_user_id, user_id):
temp_interviews = []
for record in db.session.execute(select(UserDictKeys).filter_by(temp_user_id=temp_user_id).with_for_update()).scalars():
record.temp_user_id = None
record.user_id = user_id
temp_interviews.append((record.filename, record.key))
db.session.commit()
return temp_interviews
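# Transfer a temporary user's chat log entries, stored objects, and upload permissions
# to the newly registered user, re-encrypting objects with the new secret when possible.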
def sub_temp_other(user):
if 'tempuser' in session:
device_id = request.cookies.get('ds', None)
if device_id is None:
device_id = random_string(16)
url_root = daconfig.get('url root', 'http://localhost') + daconfig.get('root', '/')
url = url_root + 'interview'
role_list = [role.name for role in user.roles]
if len(role_list) == 0:
role_list = ['user']
the_current_info = {'user': {'email': user.email, 'roles': role_list, 'the_user_id': user.id, 'theid': user.id, 'firstname': user.first_name, 'lastname': user.last_name, 'nickname': user.nickname, 'country': user.country, 'subdivisionfirst': user.subdivisionfirst, 'subdivisionsecond': user.subdivisionsecond, 'subdivisionthird': user.subdivisionthird, 'organization': user.organization, 'timezone': user.timezone, 'language': user.language, 'location': None, 'session_uid': 'admin', 'device_id': device_id}, 'session': None, 'secret': None, 'yaml_filename': None, 'url': url, 'url_root': url_root, 'encrypted': False, 'action': None, 'interface': 'web', 'arguments': {}}
docassemble.base.functions.this_thread.current_info = the_current_info
for chat_entry in db.session.execute(select(ChatLog).filter_by(temp_user_id=int(session['tempuser'])).with_for_update()).scalars():
chat_entry.user_id = user.id
chat_entry.temp_user_id = None
db.session.commit()
for chat_entry in db.session.execute(select(ChatLog).filter_by(temp_owner_id=int(session['tempuser'])).with_for_update()).scalars():
chat_entry.owner_id = user.id
chat_entry.temp_owner_id = None
db.session.commit()
keys_in_use = {}
for object_entry in db.session.execute(select(GlobalObjectStorage.id, GlobalObjectStorage.key).filter(or_(GlobalObjectStorage.key.like('da:userid:{:d}:%'.format(user.id)), GlobalObjectStorage.key.like('da:daglobal:userid:{:d}:%'.format(user.id))))).all():
if object_entry.key not in keys_in_use:
keys_in_use[object_entry.key] = []
keys_in_use[object_entry.key].append(object_entry.id)
ids_to_delete = []
for object_entry in db.session.execute(select(GlobalObjectStorage).filter_by(temp_user_id=int(session['tempuser'])).with_for_update()).scalars():
object_entry.user_id = user.id
object_entry.temp_user_id = None
if object_entry.key.startswith('da:userid:t{:d}:'.format(session['tempuser'])):
new_key = re.sub(r'^da:userid:t{:d}:'.format(session['tempuser']), 'da:userid:{:d}:'.format(user.id), object_entry.key)
object_entry.key = new_key
if new_key in keys_in_use:
ids_to_delete.extend(keys_in_use[new_key])
if object_entry.encrypted and 'newsecret' in session:
try:
object_entry.value = encrypt_object(decrypt_object(object_entry.value, str(request.cookies.get('secret', None))), session['newsecret'])
except Exception as err:
logmessage("Failure to change encryption of object " + object_entry.key + ": " + str(err))
for object_entry in db.session.execute(select(GlobalObjectStorage).filter(and_(GlobalObjectStorage.temp_user_id == None, GlobalObjectStorage.user_id == None, GlobalObjectStorage.key.like('da:daglobal:userid:t{:d}:%'.format(session['tempuser'])))).with_for_update()).scalars(): # noqa: E711 # pylint: disable=singleton-comparison
new_key = re.sub(r'^da:daglobal:userid:t{:d}:'.format(session['tempuser']), 'da:daglobal:userid:{:d}:'.format(user.id), object_entry.key)
object_entry.key = new_key
if new_key in keys_in_use:
ids_to_delete.extend(keys_in_use[new_key])
for the_id in ids_to_delete:
db.session.execute(sqldelete(GlobalObjectStorage).filter_by(id=the_id))
db.session.commit()
db.session.execute(update(UploadsUserAuth).where(UploadsUserAuth.temp_user_id == int(session['tempuser'])).values(user_id=user.id, temp_user_id=None))
db.session.commit()
del session['tempuser']
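# Record ownership of an interview session by creating UserDictKeys rows for the current (or given) user, optionally covering prior interview filenames that share the same session key.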
def save_user_dict_key(session_id, filename, priors=False, user=None):
if user is not None:
user_id = user.id
is_auth = True
else:
if current_user.is_authenticated:
is_auth = True
user_id = current_user.id
else:
is_auth = False
user_id = session.get('tempuser', None)
if user_id is None:
logmessage("save_user_dict_key: no user ID available for saving")
return
# logmessage("save_user_dict_key: called")
the_interview_list = set([filename])
found = set()
if priors:
for the_record in db.session.execute(select(UserDict.filename).filter_by(key=session_id).group_by(UserDict.filename)):
the_interview_list.add(the_record.filename)
for filename_to_search in the_interview_list:
if is_auth:
for the_record in db.session.execute(select(UserDictKeys).filter_by(key=session_id, filename=filename_to_search, user_id=user_id)):
found.add(filename_to_search)
else:
for the_record in db.session.execute(select(UserDictKeys).filter_by(key=session_id, filename=filename_to_search, temp_user_id=user_id)):
found.add(filename_to_search)
for filename_to_save in (the_interview_list - found):
if is_auth:
new_record = UserDictKeys(key=session_id, filename=filename_to_save, user_id=user_id)
else:
new_record = UserDictKeys(key=session_id, filename=filename_to_save, temp_user_id=user_id)
db.session.add(new_record)
db.session.commit()
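# Write the interview answers to the UserDict table, adding a new row when the answers have changed (or none exists yet) and otherwise updating the most recent row in place; the dictionary is encrypted when encrypt is True.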
def save_user_dict(user_code, user_dict, filename, secret=None, changed=False, encrypt=True, manual_user_id=None, steps=None, max_indexno=None):
# logmessage("save_user_dict: called with encrypt " + str(encrypt))
if REQUIRE_IDEMPOTENT:
for var_name in ('x', 'i', 'j', 'k', 'l', 'm', 'n'):
if var_name in user_dict:
del user_dict[var_name]
user_dict['_internal']['objselections'] = {}
if 'session_local' in user_dict:
del user_dict['session_local']
if 'device_local' in user_dict:
del user_dict['device_local']
if 'user_local' in user_dict:
del user_dict['user_local']
nowtime = datetime.datetime.utcnow()
if steps is not None:
user_dict['_internal']['steps'] = steps
user_dict['_internal']['modtime'] = nowtime
if manual_user_id is not None or (current_user and current_user.is_authenticated):
if manual_user_id is not None:
the_user_id = manual_user_id
else:
the_user_id = current_user.id
user_dict['_internal']['accesstime'][the_user_id] = nowtime
else:
user_dict['_internal']['accesstime'][-1] = nowtime
the_user_id = None
if changed is True:
if encrypt:
new_record = UserDict(modtime=nowtime, key=user_code, dictionary=encrypt_dictionary(user_dict, secret), filename=filename, user_id=the_user_id, encrypted=True)
else:
new_record = UserDict(modtime=nowtime, key=user_code, dictionary=pack_dictionary(user_dict), filename=filename, user_id=the_user_id, encrypted=False)
db.session.add(new_record)
db.session.commit()
else:
if max_indexno is None:
max_indexno = db.session.execute(select(db.func.max(UserDict.indexno)).where(and_(UserDict.key == user_code, UserDict.filename == filename))).scalar()
if max_indexno is None:
if encrypt:
new_record = UserDict(modtime=nowtime, key=user_code, dictionary=encrypt_dictionary(user_dict, secret), filename=filename, user_id=the_user_id, encrypted=True)
else:
new_record = UserDict(modtime=nowtime, key=user_code, dictionary=pack_dictionary(user_dict), filename=filename, user_id=the_user_id, encrypted=False)
db.session.add(new_record)
db.session.commit()
else:
for record in db.session.execute(select(UserDict).filter_by(key=user_code, filename=filename, indexno=max_indexno).with_for_update()).scalars():
if encrypt:
record.dictionary = encrypt_dictionary(user_dict, secret)
record.modtime = nowtime
record.encrypted = True
else:
record.dictionary = pack_dictionary(user_dict)
record.modtime = nowtime
record.encrypted = False
db.session.commit()
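# re.sub() callback that decodes a base64-encoded bracket index (types B, R, and O) back into a [repr()] expression.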
def process_bracket_expression(match):
if match.group(1) in ('B', 'R', 'O'):
try:
inner = codecs.decode(repad(bytearray(match.group(2), encoding='utf-8')), 'base64').decode('utf-8')
except:
inner = match.group(2)
else:
inner = match.group(2)
return "[" + repr(inner) + "]"
def myb64unquote(the_string):
return codecs.decode(repad(bytearray(the_string, encoding='utf-8')), 'base64').decode('utf-8')
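# safeid() and from_safeid() convert strings to and from a base64 form that is safe to use in HTML attributes and URLs; repad() restores the '=' padding that safeid() strips.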
def safeid(text):
return re.sub(r'[\n=]', '', codecs.encode(text.encode('utf-8'), 'base64').decode())
def from_safeid(text):
return codecs.decode(repad(bytearray(text, encoding='utf-8')), 'base64').decode('utf-8')
def repad(text):
return text + (equals_byte * ((4 - len(text) % 4) % 4))
def test_for_valid_var(varname):
if not valid_python_var.match(varname):
raise DAError(varname + " is not a valid name. A valid name consists only of letters, numbers, and underscores, and begins with a letter.")
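# Build the HTML for the vertical section navigation bar, marking the current section as active and generating clickable links only for sections the user is allowed to jump to.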
def navigation_bar(nav, interview, wrapper=True, inner_div_class=None, inner_div_extra=None, show_links=None, hide_inactive_subs=True, a_class=None, show_nesting=True, include_arrows=False, always_open=False, return_dict=None):
if show_links is None:
show_links = not bool(hasattr(nav, 'disabled') and nav.disabled)
if inner_div_class is None:
inner_div_class = 'nav flex-column nav-pills danav danavlinks danav-vertical danavnested'
if inner_div_extra is None:
inner_div_extra = ''
if a_class is None:
a_class = 'nav-link danavlink'
muted_class = ' text-body-secondary'
else:
muted_class = ''
# logmessage("navigation_bar: starting: " + str(section))
the_language = docassemble.base.functions.get_language()
non_progressive = bool(hasattr(nav, 'progressive') and not nav.progressive)
auto_open = bool(always_open or (hasattr(nav, 'auto_open') and nav.auto_open))
if the_language not in nav.sections:
the_language = DEFAULT_LANGUAGE
if the_language not in nav.sections:
the_language = '*'
if the_language not in nav.sections:
return ''
# raise DAError("Could not find a navigation bar to display. " + str(nav.sections))
the_sections = nav.sections[the_language]
if len(the_sections) == 0:
return ''
if docassemble.base.functions.this_thread.current_question.section is not None and docassemble.base.functions.this_thread.current_section:
the_section = docassemble.base.functions.this_thread.current_section
else:
the_section = nav.current
# logmessage("Current section is " + repr(the_section))
# logmessage("Past sections are: " + str(nav.past))
if the_section is None:
if isinstance(the_sections[0], dict):
the_section = list(the_sections[0])[0]
else:
the_section = the_sections[0]
if wrapper:
output = '<div role="navigation" class="' + daconfig['grid classes']['vertical navigation']['bar'] + ' d-none d-md-block danavdiv">\n <div class="nav flex-column nav-pills danav danav-vertical danavlinks">\n'
else:
output = ''
section_reached = False
indexno = 0
seen = set()
on_first = True
# logmessage("Sections is " + repr(the_sections))
for x in the_sections:
if include_arrows and not on_first:
output += '<span class="dainlinearrow"><i class="fas fa-chevron-right"></i></span>'
on_first = False
indexno += 1
the_key = None
subitems = None
currently_active = False
if isinstance(x, dict):
# logmessage("It is a dict")
if len(x) == 2 and 'subsections' in x:
for key, val in x.items():
if key == 'subsections':
subitems = val
else:
the_key = key
test_for_valid_var(the_key)
the_title = val
elif len(x) == 1:
# logmessage("The len is one")
the_key = list(x)[0]
value = x[the_key]
if isinstance(value, list):
subitems = value
the_title = the_key
else:
test_for_valid_var(the_key)
the_title = value
else:
raise DAError("navigation_bar: too many keys in dict. " + str(the_sections))
else:
# logmessage("It is not a dict")
the_key = None
the_title = str(x)
if (the_key is not None and the_section == the_key) or the_section == the_title:
# output += '<li role="presentation" class="' + li_class + ' active">'
section_reached = True
currently_active = True
active_class = ' active'
if return_dict is not None:
return_dict['parent_key'] = the_key
return_dict['parent_title'] = the_title
return_dict['key'] = the_key
return_dict['title'] = the_title
else:
active_class = ''
# output += '<li class="' + li_class + '" role="presentation">'
new_key = the_title if the_key is None else the_key
seen.add(new_key)
# logmessage("new_key is: " + str(new_key))
# logmessage("seen sections are: " + str(seen))
# logmessage("nav past sections are: " + repr(nav.past))
relevant_past = nav.past.intersection(set(nav.section_ids()))
seen_more = bool(len(relevant_past.difference(seen)) > 0 or new_key in nav.past or the_title in nav.past)
if non_progressive:
seen_more = True
section_reached = False
# logmessage("the title is " + str(the_title) + " and non_progressive is " + str(non_progressive) + " and show links is " + str(show_links) + " and seen_more is " + str(seen_more) + " and active_class is " + repr(active_class) + " and currently_active is " + str(currently_active) + " and section_reached is " + str(section_reached) + " and the_key is " + str(the_key) + " and interview is " + str(interview) + " and in q is " + ('in q' if the_key in interview.questions else 'not in q'))
if show_links and (seen_more or currently_active or not section_reached) and the_key is not None and interview is not None and the_key in interview.questions:
# url = docassemble.base.functions.interview_url_action(the_key)
if section_reached and not currently_active and not seen_more:
output += '<span tabindex="-1" data-index="' + str(indexno) + '" class="' + a_class + ' danotavailableyet' + muted_class + '">' + str(the_title) + '</span>'
else:
if active_class == '' and not section_reached and not seen_more:
output += '<span tabindex="-1" data-index="' + str(indexno) + '" class="' + a_class + ' inactive' + muted_class + '">' + str(the_title) + '</span>'
else:
output += '<a href="#" data-key="' + the_key + '" data-index="' + str(indexno) + '" class="daclickable ' + a_class + active_class + '">' + str(the_title) + '</a>'
else:
if section_reached and not currently_active and not seen_more:
output += '<span tabindex="-1" data-index="' + str(indexno) + '" class="' + a_class + ' danotavailableyet' + muted_class + '">' + str(the_title) + '</span>'
else:
if active_class == '' and not section_reached and not seen_more:
output += '<span tabindex="-1" data-index="' + str(indexno) + '" class="' + a_class + ' inactive' + muted_class + '">' + str(the_title) + '</span>'
else:
output += '<a tabindex="-1" data-index="' + str(indexno) + '" class="' + a_class + active_class + '">' + str(the_title) + '</a>'
suboutput = ''
if subitems:
current_is_within = False
oldindexno = indexno
for y in subitems:
if include_arrows:
suboutput += '<span class="dainlinearrow"><i class="fas fa-chevron-right"></i></span>'
indexno += 1
sub_currently_active = False
if isinstance(y, dict):
if len(y) == 1:
sub_key = list(y)[0]
test_for_valid_var(sub_key)
sub_title = y[sub_key]
else:
raise DAError("navigation_bar: too many keys in dict. " + str(the_sections))
else:
sub_key = None
sub_title = str(y)
if (sub_key is not None and the_section == sub_key) or the_section == sub_title:
# suboutput += '<li class="' + li_class + ' active" role="presentation">'
section_reached = True
current_is_within = True
sub_currently_active = True
sub_active_class = ' active'
if return_dict is not None:
return_dict['key'] = sub_key
return_dict['title'] = sub_title
else:
sub_active_class = ''
# suboutput += '<li class="' + li_class + '" role="presentation">'
new_sub_key = sub_title if sub_key is None else sub_key
seen.add(new_sub_key)
# logmessage("sub: seen sections are: " + str(seen))
relevant_past = nav.past.intersection(set(nav.section_ids()))
seen_more = bool(len(relevant_past.difference(seen)) > 0 or new_sub_key in nav.past or sub_title in nav.past)
if non_progressive:
# logmessage("Setting seen_more to True bc non-progressive")
seen_more = True
section_reached = False
# logmessage("First sub is %s, indexno is %d, sub_currently_active is %s, sub_key is %s, sub_title is %s, section_reached is %s, current_is_within is %s, sub_active_class is %s, new_sub_key is %s, seen_more is %s, section_reached is %s, show_links is %s" % (str(first_sub), indexno, str(sub_currently_active), sub_key, sub_title, section_reached, current_is_within, sub_active_class, new_sub_key, str(seen_more), str(section_reached), str(show_links)))
if show_links and (seen_more or sub_currently_active or not section_reached) and sub_key is not None and interview is not None and sub_key in interview.questions:
# url = docassemble.base.functions.interview_url_action(sub_key)
suboutput += '<a href="#" data-key="' + sub_key + '" data-index="' + str(indexno) + '" class="daclickable ' + a_class + sub_active_class + '">' + str(sub_title) + '</a>'
else:
if section_reached and not sub_currently_active and not seen_more:
suboutput += '<span tabindex="-1" data-index="' + str(indexno) + '" class="' + a_class + ' danotavailableyet' + muted_class + '">' + str(sub_title) + '</span>'
else:
suboutput += '<a tabindex="-1" data-index="' + str(indexno) + '" class="' + a_class + sub_active_class + ' inactive">' + str(sub_title) + '</a>'
# suboutput += "</li>"
if currently_active or current_is_within or hide_inactive_subs is False or show_nesting:
if currently_active or current_is_within or auto_open:
suboutput = '<div class="' + inner_div_class + '"' + inner_div_extra + '>' + suboutput
else:
suboutput = '<div style="display: none;" class="danotshowing ' + inner_div_class + '"' + inner_div_extra + '>' + suboutput
suboutput += "</div>"
output += suboutput
else:
indexno = oldindexno
# output += "</li>"
if wrapper:
output += "\n</div>\n</div>\n"
if (not non_progressive) and (not section_reached):
logmessage("Section \"" + str(the_section) + "\" did not exist.")
return output
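# Return the HTML for the progress bar shown above the question, optionally including the percentage when the interview enables it.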
def progress_bar(progress, interview):
if progress is None:
return ''
progress = float(progress)
if progress <= 0:
return ''
progress = min(progress, 100)
if hasattr(interview, 'show_progress_bar_percentage') and interview.show_progress_bar_percentage:
percentage = str(int(progress)) + '%'
else:
percentage = ''
return '<div class="progress mt-2" role="progressbar" aria-label="' + noquote(word('Interview Progress')) + '" aria-valuenow="' + str(progress) + '" aria-valuemin="0" aria-valuemax="100"><div class="progress-bar" style="width: ' + str(progress) + '%;">' + percentage + '</div></div>\n'
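# Generate a random session key that is not already in use, locking it and seeding the UserDict table with a fresh encrypted dictionary.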
def get_unique_name(filename, secret):
nowtime = datetime.datetime.utcnow()
while True:
newname = random_alphanumeric(32)
obtain_lock(newname, filename)
existing_key = db.session.execute(select(UserDict).filter_by(key=newname)).first()
if existing_key:
release_lock(newname, filename)
continue
new_user_dict = UserDict(modtime=nowtime, key=newname, filename=filename, dictionary=encrypt_dictionary(fresh_dictionary(), secret))
db.session.add(new_user_dict)
db.session.commit()
return newname
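# Acquire the Redis lock for a session, waiting up to CONCURRENCY_LOCK_TIMEOUT * 3 seconds and forcibly releasing a deadlocked lock before setting a new one.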
def obtain_lock(user_code, filename):
key = 'da:lock:' + user_code + ':' + filename
# logmessage("obtain_lock: getting " + key)
found = False
count = CONCURRENCY_LOCK_TIMEOUT * 3
while count > 0:
record = r.get(key)
if record:
logmessage("obtain_lock: waiting for " + key)
time.sleep(1.0)
else:
found = False
break
found = True
count -= 1
if found:
logmessage("Request for " + key + " deadlocked")
release_lock(user_code, filename)
pipe = r.pipeline()
pipe.set(key, 1)
pipe.expire(key, CONCURRENCY_LOCK_TIMEOUT)
pipe.execute()
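# Like obtain_lock(), but waits longer (200 tries, 3 seconds apart) and raises DAException instead of breaking the lock when it cannot be obtained.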
def obtain_lock_patiently(user_code, filename):
key = 'da:lock:' + user_code + ':' + filename
# logmessage("obtain_lock: getting " + key)
found = False
count = 200
while count > 0:
record = r.get(key)
if record:
logmessage("obtain_lock: waiting for " + key)
time.sleep(3.0)
else:
found = False
break
found = True
count -= 1
if found:
# logmessage("Request for " + key + " deadlocked")
# release_lock(user_code, filename)
raise DAException("obtain_lock_patiently: aborting attempt to obtain lock on " + user_code + " for " + filename + " due to deadlock")
pipe = r.pipeline()
pipe.set(key, 1)
pipe.expire(key, CONCURRENCY_LOCK_TIMEOUT)
pipe.execute()
def release_lock(user_code, filename):
key = 'da:lock:' + user_code + ':' + filename
# logmessage("release_lock: releasing " + key)
r.delete(key)
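# Assemble the HTML for the top navigation bar, including the back button, title, help/phone/chat tabs, and the sign-in link or user menu.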
def make_navbar(status, steps, show_login, chat_info, debug_mode, index_params, extra_class=None): # pylint: disable=unused-argument
if 'inverse navbar' in status.question.interview.options:
if status.question.interview.options['inverse navbar']:
inverse = 'bg-dark'
theme = 'dark'
else:
inverse = 'bg-body-tertiary'
theme = 'light'
elif daconfig.get('inverse navbar', True):
inverse = 'bg-dark'
theme = 'dark'
else:
inverse = 'bg-body-tertiary'
theme = 'light'
if 'jsembed' in docassemble.base.functions.this_thread.misc:
fixed_top = ''
else:
fixed_top = ' fixed-top'
if extra_class is not None:
fixed_top += ' ' + extra_class
navbar = """\
<div class="danavbarcontainer" data-bs-theme=""" + '"' + theme + '"' + """>
<div class="navbar""" + fixed_top + """ navbar-expand-md """ + inverse + '"' + """ role="banner">
<div class="container danavcontainer justify-content-start">
"""
if status.question.can_go_back and steps > 1:
if status.question.interview.navigation_back_button:
navbar += """\
<form style="display: inline-block" id="dabackbutton" method="POST" action=""" + json.dumps(url_for('index', **index_params)) + """><input type="hidden" name="csrf_token" value=""" + '"' + generate_csrf() + '"' + """/><input type="hidden" name="_back_one" value="1"/><button class="navbar-brand navbar-nav dabackicon dabackbuttoncolor me-3" type="submit" title=""" + json.dumps(word("Go back to the previous question")) + """><span class="nav-link"><i class="fas fa-chevron-left"></i><span class="daback">""" + status.cornerback + """</span></span></button></form>
"""
else:
navbar += """\
<form hidden style="display: inline-block" id="dabackbutton" method="POST" action=""" + json.dumps(url_for('index', **index_params)) + """><input type="hidden" name="csrf_token" value=""" + '"' + generate_csrf() + '"' + """/><input type="hidden" name="_back_one" value="1"/></form>
"""
if status.title_url:
if str(status.title_url_opens_in_other_window) == 'False':
target = ''
else:
target = ' target="_blank"'
navbar += """\
<a id="dapagetitle" class="navbar-brand danavbar-title dapointer" href=""" + '"' + status.title_url + '"' + target + """><span class="d-none d-lg-block">""" + status.display_title + """</span><span class="d-block d-lg-none">""" + status.display_short_title + """</span></a>
"""
else:
navbar += """\
<span id="dapagetitle" class="navbar-brand danavbar-title"><span class="d-none d-lg-block">""" + status.display_title + """</span><span class="d-block d-lg-none">""" + status.display_short_title + """</span></span>
"""
help_message = word("Help is available")
help_label = None
if status.question.interview.question_help_button:
the_sections = status.interviewHelpText
else:
the_sections = status.helpText + status.interviewHelpText
for help_section in the_sections:
if help_section['label']:
help_label = help_section['label']
break
if help_label is None:
help_label = status.extras.get('help label text', None)
if help_label is None:
help_label = status.question.help()
extra_help_message = word("Help is available for this question")
phone_sr = word("Phone help")
phone_message = word("Phone help is available")
chat_sr = word("Live chat")
source_message = word("Information for the developer")
if debug_mode:
source_button = '<div class="nav-item navbar-nav d-none d-md-block"><button class="btn btn-link nav-link da-no-outline" title=' + json.dumps(source_message) + ' id="dasourcetoggle" data-bs-toggle="collapse" data-bs-target="#dasource"><i class="fas fa-code"></i></button></div>'
source_menu_item = '<a class="dropdown-item d-block d-lg-none" title=' + json.dumps(source_message) + ' href="#dasource" data-bs-toggle="collapse" aria-expanded="false" aria-controls="source">' + word('Source') + '</a>'
else:
source_button = ''
source_menu_item = ''
hidden_question_button = '<li class="nav-item visually-hidden-focusable"><button class="btn btn-link nav-link active da-no-outline" id="daquestionlabel" data-bs-toggle="tab" data-bs-target="#daquestion">' + word('Question') + '</button></li>'
navbar += ' ' + source_button + '<ul id="nav-bar-tab-list" class="nav navbar-nav damynavbar-right" role="tablist">' + hidden_question_button
if len(status.interviewHelpText) > 0 or (len(status.helpText) > 0 and not status.question.interview.question_help_button):
if status.question.helptext is None or status.question.interview.question_help_button:
navbar += '<li class="nav-item" role="presentation"><button class="btn btn-link nav-link dahelptrigger da-no-outline" data-bs-target="#dahelp" data-bs-toggle="tab" role="tab" id="dahelptoggle" title=' + json.dumps(help_message) + '>' + help_label + '</button></li>'
else:
navbar += '<li class="nav-item" role="presentation"><button class="btn btn-link nav-link dahelptrigger da-no-outline daactivetext" data-bs-target="#dahelp" data-bs-toggle="tab" role="tab" id="dahelptoggle" title=' + json.dumps(extra_help_message) + '>' + help_label + ' <i class="fas fa-star"></i></button></li>'
else:
navbar += '<li hidden class="nav-item dainvisible" role="presentation"><button class="btn btn-link nav-link dahelptrigger da-no-outline" id="dahelptoggle" data-bs-target="#dahelp" data-bs-toggle="tab" role="tab">' + word('Help') + '</button></li>'
navbar += '<li hidden class="nav-item dainvisible" id="daPhoneAvailable"><button data-bs-target="#dahelp" data-bs-toggle="tab" role="tab" title=' + json.dumps(phone_message) + ' class="btn btn-link nav-link dapointer dahelptrigger da-no-outline"><i class="fas fa-phone da-chat-active"></i><span class="visually-hidden">' + phone_sr + '</span></button></li>' + \
'<li class="nav-item dainvisible" id="daChatAvailable"><button data-bs-target="#dahelp" data-bs-toggle="tab" class="btn btn-link nav-link dapointer dahelptrigger da-no-outline"><i class="fas fa-comment-alt"></i><span class="visually-hidden">' + chat_sr + '</span></button></li></ul>'
if not status.question.interview.options.get('hide corner interface', False):
navbar += """
<button id="damobile-toggler" type="button" class="navbar-toggler ms-auto" data-bs-toggle="collapse" data-bs-target="#danavbar-collapse">
<span class="navbar-toggler-icon"></span><span class="visually-hidden">""" + word("Display the menu") + """</span>
</button>
<div class="collapse navbar-collapse" id="danavbar-collapse">
<ul class="navbar-nav ms-auto">
"""
navbar += status.nav_item
if 'menu_items' in status.extras:
if not isinstance(status.extras['menu_items'], list):
custom_menu = '<a tabindex="-1" class="dropdown-item">' + word("Error: menu_items is not a Python list") + '</a>'
elif len(status.extras['menu_items']) > 0:
custom_menu = ""
for menu_item in status.extras['menu_items']:
if not (isinstance(menu_item, dict) and 'url' in menu_item and 'label' in menu_item):
custom_menu += '<a tabindex="-1" class="dropdown-item">' + word("Error: menu item is not a Python dict with keys of url and label") + '</a>'
else:
screen_size = menu_item.get('screen_size', '')
if screen_size == 'small':
menu_item_classes = ' d-block d-md-none'
elif screen_size == 'large':
menu_item_classes = ' d-none d-md-block'
else:
menu_item_classes = ''
match_action = re.search(r'^\?action=([^\&]+)', menu_item['url'])
if match_action:
custom_menu += '<a class="dropdown-item' + menu_item_classes + '" data-embaction="' + match_action.group(1) + '" href="' + menu_item['url'] + '">' + menu_item['label'] + '</a>'
else:
custom_menu += '<a class="dropdown-item' + menu_item_classes + '" href="' + menu_item['url'] + '">' + menu_item['label'] + '</a>'
else:
custom_menu = ""
else:
custom_menu = ""
if ALLOW_REGISTRATION:
sign_in_text = word('Sign in or sign up to save answers')
else:
sign_in_text = word('Sign in to save answers')
if daconfig.get('resume interview after login', False):
login_url = url_for('user.login', next=url_for('index', **index_params))
else:
login_url = url_for('user.login')
admin_menu = ''
if not status.question.interview.options.get('hide standard menu', False):
for item in app.config['ADMIN_INTERVIEWS']:
if item.can_use() and item.is_not(docassemble.base.functions.this_thread.current_info.get('yaml_filename', '')):
admin_menu += '<a class="dropdown-item" href="' + item.get_url() + '">' + item.get_title(docassemble.base.functions.get_language()) + '</a>'
if show_login:
if current_user.is_anonymous:
if custom_menu or admin_menu:
navbar += ' <li class="nav-item dropdown"><a href="#" class="nav-link dropdown-toggle d-none d-md-block" data-bs-toggle="dropdown" role="button" id="damenuLabel" aria-haspopup="true" aria-expanded="false">' + word("Menu") + '</a><div class="dropdown-menu dropdown-menu-end" aria-labelledby="damenuLabel">' + custom_menu + admin_menu + '<a class="dropdown-item" href="' + login_url + '">' + sign_in_text + '</a></div></li>'
else:
if daconfig.get('login link style', 'normal') == 'button':
if ALLOW_REGISTRATION:
if daconfig.get('resume interview after login', False):
register_url = url_for('user.register', next=url_for('index', **index_params))
else:
register_url = url_for('user.register')
navbar += ' <li class="nav-item"><a class="nav-link" href="' + register_url + '">' + word('Sign up') + '</a></li>'
navbar += ' <li class="nav-item"><a class="nav-link d-block d-md-none" href="' + login_url + '">' + word('Sign in') + '</a></li>'
else:
navbar += ' <li class="nav-item"><a class="nav-link" href="' + login_url + '">' + sign_in_text + '</a></li>'
elif current_user.is_authenticated:
if custom_menu == '' and status.question.interview.options.get('hide standard menu', False):
navbar += ' <li class="nav-item"><a class="nav-link" tabindex="-1">' + (current_user.email if current_user.email else re.sub(r'.*\$', '', current_user.social_id)) + '</a></li>'
else:
navbar += ' <li class="nav-item dropdown"><a class="nav-link dropdown-toggle d-none d-md-block" href="#" data-bs-toggle="dropdown" role="button" id="damenuLabel" aria-haspopup="true" aria-expanded="false">' + (current_user.email if current_user.email else re.sub(r'.*\$', '', current_user.social_id)) + '</a><div class="dropdown-menu dropdown-menu-end" aria-labelledby="damenuLabel">'
if custom_menu:
navbar += custom_menu
if not status.question.interview.options.get('hide standard menu', False):
if current_user.has_role('admin', 'developer'):
navbar += source_menu_item
if current_user.has_role('admin', 'advocate') and app.config['ENABLE_MONITOR']:
navbar += '<a class="dropdown-item" href="' + url_for('monitor') + '">' + word('Monitor') + '</a>'
if current_user.has_role('admin', 'developer', 'trainer'):
navbar += '<a class="dropdown-item" href="' + url_for('train') + '">' + word('Train') + '</a>'
if current_user.has_role('admin', 'developer'):
if app.config['ALLOW_UPDATES'] and (app.config['DEVELOPER_CAN_INSTALL'] or current_user.has_role('admin')):
navbar += '<a class="dropdown-item" href="' + url_for('update_package') + '">' + word('Package Management') + '</a>'
if app.config['ALLOW_LOG_VIEWING']:
navbar += '<a class="dropdown-item" href="' + url_for('logs') + '">' + word('Logs') + '</a>'
if app.config['ENABLE_PLAYGROUND']:
navbar += '<a class="dropdown-item" href="' + url_for('playground_page') + '">' + word('Playground') + '</a>'
navbar += '<a class="dropdown-item" href="' + url_for('utilities') + '">' + word('Utilities') + '</a>'
if current_user.has_role('admin', 'advocate') or current_user.can_do('access_user_info'):
navbar += '<a class="dropdown-item" href="' + url_for('user_list') + '">' + word('User List') + '</a>'
if current_user.has_role('admin') and app.config['ALLOW_CONFIGURATION_EDITING']:
navbar += '<a class="dropdown-item" href="' + url_for('config_page') + '">' + word('Configuration') + '</a>'
if app.config['SHOW_DISPATCH']:
navbar += '<a class="dropdown-item" href="' + url_for('interview_start') + '">' + word('Available Interviews') + '</a>'
navbar += admin_menu
if app.config['SHOW_MY_INTERVIEWS'] or current_user.has_role('admin'):
navbar += '<a class="dropdown-item" href="' + url_for('interview_list') + '">' + word('My Interviews') + '</a>'
if current_user.has_role('admin', 'developer'):
navbar += '<a class="dropdown-item" href="' + url_for('user_profile_page') + '">' + word('Profile') + '</a>'
else:
if app.config['SHOW_PROFILE'] or current_user.has_role('admin', 'developer'):
navbar += '<a class="dropdown-item" href="' + url_for('user_profile_page') + '">' + word('Profile') + '</a>'
elif current_user.social_id.startswith('local') and app.config['ALLOW_CHANGING_PASSWORD']:
navbar += '<a class="dropdown-item" href="' + url_for('user.change_password') + '">' + word('Change Password') + '</a>'
navbar += '<a class="dropdown-item" href="' + url_for('user.logout') + '">' + word('Sign Out') + '</a>'
navbar += '</div></li>'
else:
if custom_menu or admin_menu:
navbar += ' <li class="nav-item dropdown"><a class="nav-link dropdown-toggle d-none d-md-block" href="#" data-bs-toggle="dropdown" role="button" aria-haspopup="true" aria-expanded="false">' + word("Menu") + '</a><div class="dropdown-menu dropdown-menu-end">' + custom_menu + admin_menu
if not status.question.interview.options.get('hide standard menu', False):
navbar += '<a class="dropdown-item" href="' + exit_href() + '">' + status.exit_label + '</a>'
navbar += '</div></li>'
else:
navbar += ' <li class="nav-item"><a class="nav-link" href="' + exit_href() + '">' + status.exit_label + '</a></li>'
navbar += """
</ul>"""
if daconfig.get('login link style', 'normal') == 'button' and show_login and current_user.is_anonymous and not custom_menu:
navbar += '\n <a class="btn btn-' + BUTTON_COLOR_NAV_LOGIN + ' btn-sm mb-0 ms-3 d-none d-md-block" href="' + login_url + '">' + word('Sign in') + '</a>'
navbar += """
</div>"""
navbar += """
</div>
</div>
</div>
"""
return navbar
def exit_href(data=False):
url = docassemble.base.functions.url_action('_da_exit')
if not data:
action_search = re.search(r'[\?\&]action=([^\&]+)', url)
if action_search:
return url + '" data-embaction="' + action_search.group(1)
return url
def delete_session_for_interview(i=None):
if i is not None:
clear_session(i)
for key in ('i', 'uid', 'key_logged', 'encrypted', 'chatstatus', 'observer', 'monitor', 'doing_sms', 'alt_session'):
if key in session:
del session[key]
def delete_session_sessions():
if 'sessions' in session:
del session['sessions']
def delete_session_info():
for key in ('i', 'uid', 'key_logged', 'tempuser', 'user_id', 'encrypted', 'chatstatus', 'observer', 'monitor', 'variablefile', 'doing_sms', 'playgroundfile', 'playgroundtemplate', 'playgroundstatic', 'playgroundsources', 'playgroundmodules', 'playgroundpackages', 'taskwait', 'phone_number', 'otp_secret', 'validated_user', 'github_next', 'next', 'sessions', 'alt_session', 'zitadel_verifier'):
if key in session:
del session[key]
def backup_session():
backup = {}
for key in ('i', 'uid', 'key_logged', 'tempuser', 'user_id', 'encrypted', 'chatstatus', 'observer', 'monitor', 'variablefile', 'doing_sms', 'taskwait', 'phone_number', 'otp_secret', 'validated_user', 'github_next', 'next', 'sessions', 'alt_session'):
if key in session:
backup[key] = session[key]
return backup
def restore_session(backup):
for key in ('i', 'uid', 'key_logged', 'tempuser', 'user_id', 'encrypted', 'google_id', 'google_email', 'chatstatus', 'observer', 'monitor', 'variablefile', 'doing_sms', 'taskwait', 'phone_number', 'otp_secret', 'validated_user', 'github_next', 'next', 'sessions', 'alt_session'):
if key in backup:
session[key] = backup[key]
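# Find an existing session of the given interview belonging to the current user that can be decrypted with the given secret and resume it; returns (None, True) if no usable session is found.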
def get_existing_session(yaml_filename, secret):
keys = [result.key for result in db.session.execute(select(UserDictKeys.filename, UserDictKeys.key).where(and_(UserDictKeys.user_id == current_user.id, UserDictKeys.filename == yaml_filename)).order_by(UserDictKeys.indexno))]
for key in keys:
try:
steps, user_dict, is_encrypted = fetch_user_dict(key, yaml_filename, secret=secret) # pylint: disable=unused-variable
except:
logmessage("get_existing_session: unable to decrypt existing interview session " + key)
continue
update_session(yaml_filename, uid=key, key_logged=True, encrypted=is_encrypted)
return key, is_encrypted
return None, True
def reset_session(yaml_filename, secret):
user_dict = fresh_dictionary()
user_code = get_unique_name(yaml_filename, secret)
if STATS:
r.incr('da:stats:sessions')
update_session(yaml_filename, uid=user_code)
return (user_code, user_dict)
def _endpoint_url(endpoint, **kwargs):
url = url_for('index')
if endpoint:
url = url_for(endpoint, **kwargs)
return url
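# Return True if the current user may modify the package identified by name or git URL, based on the admin role and the PackageAuth ownership records.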
def user_can_edit_package(pkgname=None, giturl=None):
if current_user.has_role('admin'):
return True
if not PACKAGE_PROTECTION:
if pkgname in ('docassemble.base', 'docassemble.demo', 'docassemble.webapp'):
return False
return True
if pkgname is not None:
pkgname = pkgname.strip()
if pkgname == '' or re.search(r'\s', pkgname):
return False
results = db.session.execute(select(Package.id, PackageAuth.user_id, PackageAuth.authtype).outerjoin(PackageAuth, Package.id == PackageAuth.package_id).where(and_(Package.name == pkgname, Package.active == True))).all() # noqa: E712 # pylint: disable=singleton-comparison
the_count = 0
the_count += len(results)
if the_count == 0:
return True
for d in results:
if d.user_id == current_user.id:
return True
if giturl is not None:
giturl = giturl.strip()
if giturl == '' or re.search(r'\s', giturl):
return False
results = db.session.execute(select(Package.id, PackageAuth.user_id, PackageAuth.authtype).outerjoin(PackageAuth, Package.id == PackageAuth.package_id).where(and_(or_(Package.giturl == giturl + '/', Package.giturl == giturl), Package.active == True))).all() # noqa: E712 # pylint: disable=singleton-comparison
the_count = len(results)
if the_count == 0:
return True
for d in results:
if d.user_id == current_user.id:
return True
return False
def uninstall_package(packagename):
# logmessage("server uninstall_package: " + packagename)
existing_package = db.session.execute(select(Package).filter_by(name=packagename, active=True).order_by(Package.id.desc())).first()
if existing_package is None:
flash(word("Package did not exist"), 'error')
return
db.session.execute(update(Package).where(Package.name == packagename, Package.active == True).values(active=False)) # noqa: E712 # pylint: disable=singleton-comparison
db.session.commit()
def summarize_results(results, logmessages, html=True):
if html:
output = '<br>'.join([x + ': ' + results[x] for x in sorted(results.keys())])
if len(logmessages) > 0:
if len(output) > 0:
output += '<br><br><strong>' + word("pip log") + ':</strong><br>'
else:
output = ''
output += re.sub(r'\n', r'<br>', logmessages)
return Markup(output)
output = '\n'.join([x + ': ' + results[x] for x in sorted(results.keys())])
if len(logmessages) > 0:
if len(output) > 0:
output += "\n" + word("pip log") + ':\n'
else:
output = ''
output += logmessages
return output
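# The install_*_package() functions below record a package in the Package table (bumping its version number) from a ZIP upload, a git repository, or PyPI; the actual installation is handled elsewhere.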
def install_zip_package(packagename, file_number):
# logmessage("install_zip_package: " + packagename + " " + str(file_number))
existing_package = db.session.execute(select(Package).filter_by(name=packagename).order_by(Package.id.desc()).with_for_update()).scalar()
if existing_package is None:
package_auth = PackageAuth(user_id=current_user.id)
package_entry = Package(name=packagename, package_auth=package_auth, upload=file_number, active=True, type='zip', version=1)
db.session.add(package_auth)
db.session.add(package_entry)
else:
if existing_package.type == 'zip' and existing_package.upload is not None and existing_package.upload != file_number:
SavedFile(existing_package.upload).delete()
existing_package.package_auth.user_id = current_user.id
existing_package.package_auth.authtype = 'owner'
existing_package.upload = file_number
existing_package.active = True
existing_package.limitation = None
existing_package.giturl = None
existing_package.gitbranch = None
existing_package.type = 'zip'
existing_package.version += 1
db.session.commit()
def install_git_package(packagename, giturl, branch):
# logmessage("install_git_package: " + packagename + " " + str(giturl))
giturl = str(giturl).rstrip('/')
if branch is None or str(branch).lower().strip() in ('none', ''):
branch = GITHUB_BRANCH
if db.session.execute(select(Package).filter_by(name=packagename)).first() is None and db.session.execute(select(Package).where(or_(Package.giturl == giturl, Package.giturl == giturl + '/')).with_for_update()).scalar() is None:
package_auth = PackageAuth(user_id=current_user.id)
package_entry = Package(name=packagename, giturl=giturl, package_auth=package_auth, version=1, active=True, type='git', upload=None, limitation=None, gitbranch=branch)
db.session.add(package_auth)
db.session.add(package_entry)
else:
existing_package = db.session.execute(select(Package).filter_by(name=packagename).order_by(Package.id.desc()).with_for_update()).scalar()
if existing_package is None:
existing_package = db.session.execute(select(Package).where(or_(Package.giturl == giturl, Package.giturl == giturl + '/')).order_by(Package.id.desc()).with_for_update()).scalar()
if existing_package is not None:
if existing_package.type == 'zip' and existing_package.upload is not None:
SavedFile(existing_package.upload).delete()
existing_package.package_auth.user_id = current_user.id
existing_package.package_auth.authtype = 'owner'
existing_package.name = packagename
existing_package.giturl = giturl
existing_package.upload = None
existing_package.version += 1
existing_package.limitation = None
existing_package.active = True
if branch:
existing_package.gitbranch = branch
existing_package.type = 'git'
else:
logmessage("install_git_package: package " + str(giturl) + " appeared to exist but could not be found")
db.session.commit()
def install_pip_package(packagename, limitation):
# logmessage("install_pip_package: " + packagename + " " + str(limitation))
existing_package = db.session.execute(select(Package).filter_by(name=packagename).order_by(Package.id.desc()).with_for_update()).scalar()
if existing_package is None:
package_auth = PackageAuth(user_id=current_user.id)
package_entry = Package(name=packagename, package_auth=package_auth, limitation=limitation, version=1, active=True, type='pip')
db.session.add(package_auth)
db.session.add(package_entry)
else:
if existing_package.type == 'zip' and existing_package.upload is not None:
SavedFile(existing_package.upload).delete()
existing_package.package_auth.user_id = current_user.id
existing_package.package_auth.authtype = 'owner'
existing_package.version += 1
existing_package.type = 'pip'
existing_package.limitation = limitation
existing_package.giturl = None
existing_package.gitbranch = None
existing_package.upload = None
existing_package.active = True
db.session.commit()
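# Return the list of active packages, with flags indicating whether the current user can update or uninstall each one, along with the package authorization map.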
def get_package_info():
is_admin = current_user.has_role('admin')
package_list = []
package_auth = {}
seen = {}
for auth in db.session.execute(select(PackageAuth)).scalars():
if auth.package_id not in package_auth:
package_auth[auth.package_id] = {}
package_auth[auth.package_id][auth.user_id] = auth.authtype
for package in db.session.execute(select(Package).filter_by(active=True).order_by(Package.name, Package.id.desc())).scalars():
# if exclude_core and package.name in ('docassemble', 'docassemble.base', 'docassemble.webapp'):
# continue
if package.name in seen:
continue
seen[package.name] = 1
if package.type is not None:
can_update = not bool(package.type == 'zip')
can_uninstall = bool(is_admin or (package.id in package_auth and current_user.id in package_auth[package.id]))
if package.name in system_packages:
can_uninstall = False
can_update = False
if package.name == 'docassemble.webapp':
can_uninstall = False
can_update = is_admin
package_list.append(Object(package=package, can_update=can_update, can_uninstall=can_uninstall))
return package_list, package_auth
def name_of_user(user, include_email=False):
output = ''
if user.first_name:
output += user.first_name
if user.last_name:
output += ' '
if user.last_name:
output += user.last_name
if include_email and user.email:
if output:
output += ', '
output += user.email
return output
def flash_as_html(message, message_type="info", is_ajax=True):
if message_type == 'error':
message_type = 'danger'
output = "\n " + (NOTIFICATION_MESSAGE % (message_type, str(message))) + "\n"
if not is_ajax:
flash(message, message_type)
return output
def make_example_html(examples, first_id, example_html, data_dict):
example_html.append(' <ul class="nav flex-column nav-pills da-example-list da-example-hidden">\n')
for example in examples:
if 'list' in example:
example_html.append(' <li class="nav-item"><a tabindex="0" class="nav-link da-example-heading">' + example['title'] + '</a>')
make_example_html(example['list'], first_id, example_html, data_dict)
example_html.append(' </li>')
continue
if len(first_id) == 0:
first_id.append(example['id'])
example_html.append(' <li class="nav-item"><a tabindex="0" class="nav-link da-example-link" data-example="' + example['id'] + '">' + example['title'] + '</a></li>')
data_dict[example['id']] = example
example_html.append(' </ul>')
def public_method(method, the_class):
if isinstance(method, the_method_type) and method.__name__ != 'init' and not method.__name__.startswith('_') and method.__name__ in the_class.__dict__:
return True
return False
def noquotetrunc(string):
string = noquote(string)
if string is not None:
try:
str('') + string
except:
string = ''
if len(string) > 163:
string = string[:160] + '...'
return string
def noquote(string):
if string is None:
return string
string = amp_match.sub('&', string)
string = noquote_match.sub('"', string)
string = lt_match.sub('<', string)
string = gt_match.sub('>', string)
return string
def infobutton(title):
docstring = ''
if 'doc' in title_documentation[title]:
docstring += noquote(title_documentation[title]['doc'])
if 'url' in title_documentation[title]:
docstring += "<br><a target='_blank' href='" + title_documentation[title]['url'] + "'>" + word("View documentation") + "</a>"
return ' <a tabindex="0" role="button" class="daquestionsign" data-bs-container="body" data-bs-toggle="popover" data-bs-placement="auto" data-bs-content="' + docstring + '" title="' + noquote(title_documentation[title].get('title', title)) + '"><i class="fas fa-question-circle"></i></a>'
# title=' + json.dumps(word("Help"))
# data-bs-selector="true"
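# Build the magnifying-glass icon shown next to a variable name in the Playground, indicating whether the name is defined or used in the current file or in other files included by reference.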
def search_button(var, field_origins, name_origins, interview_source, all_sources):
in_this_file = False
usage = {}
if var in field_origins:
for x in sorted(field_origins[var]):
if x is interview_source:
in_this_file = True
else:
if x.path not in usage:
usage[x.path] = set()
usage[x.path].add('defined')
all_sources.add(x)
if var in name_origins:
for x in sorted(name_origins[var]):
if x is interview_source:
in_this_file = True
else:
if x.path not in usage:
usage[x.path] = set()
usage[x.path].add('used')
all_sources.add(x)
usage_type = [set(), set(), set()]
for path, the_set in usage.items():
if 'defined' in the_set and 'used' in the_set:
usage_type[2].add(path)
elif 'used' in the_set:
usage_type[1].add(path)
elif 'defined' in the_set:
usage_type[0].add(path)
else:
continue
messages = []
if len(usage_type[2]) > 0:
messages.append(word("Defined and used in " + docassemble.base.functions.comma_and_list(sorted(usage_type[2]))))
elif len(usage_type[0]) > 0:
messages.append(word("Defined in") + ' ' + docassemble.base.functions.comma_and_list(sorted(usage_type[0])))
elif len(usage_type[1]) > 0:
messages.append(word("Used in") + ' ' + docassemble.base.functions.comma_and_list(sorted(usage_type[1])))
if len(messages) > 0:
title = 'title="' + '; '.join(messages) + '" '
else:
title = ''
if in_this_file:
classname = 'dasearchthis'
else:
classname = 'dasearchother'
return '<a tabindex="0" class="dasearchicon ' + classname + '" ' + title + 'data-name="' + noquote(var) + '"><i class="fas fa-search"></i></a>'
search_key = """
<tr><td><h4>""" + word("Note") + """</h4></td></tr>
<tr><td><a tabindex="0" class="dasearchicon dasearchthis"><i class="fas fa-search"></i></a> """ + word("means the name is located in this file") + """</td></tr>
<tr><td><a tabindex="0" class="dasearchicon dasearchother"><i class="fas fa-search"></i></a> """ + word("means the name may be located in a file included by reference, such as:") + """</td></tr>"""
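# Recursively collect the variable names reachable from the interview's questions, starting from a name, a question, or the full question list.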
def find_needed_names(interview, needed_names, the_name=None, the_question=None):
if the_name is not None:
needed_names.add(the_name)
if the_name in interview.questions:
for lang in interview.questions[the_name]:
for question in interview.questions[the_name][lang]:
find_needed_names(interview, needed_names, the_question=question)
elif the_question is not None:
for the_set in (the_question.mako_names, the_question.names_used):
for name in the_set:
if name in needed_names:
continue
find_needed_names(interview, needed_names, the_name=name)
else:
for question in interview.questions_list:
# if not (question.is_mandatory or question.is_initial):
# continue
find_needed_names(interview, needed_names, the_question=question)
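# Split a machine-learning variable specifier of the form package:file:variable into its components, falling back to the given defaults or to '_global'.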
def get_ml_info(varname, default_package, default_file):
parts = varname.split(':')
if len(parts) == 3 and parts[0].startswith('docassemble.') and re.match(r'data/sources/.*\.json', parts[1]):
the_package = parts[0]
the_file = parts[1]
the_varname = parts[2]
elif len(parts) == 2 and parts[0] == 'global':
the_package = '_global'
the_file = '_global'
the_varname = parts[1]
elif len(parts) == 2 and (re.match(r'data/sources/.*\.json', parts[0]) or re.match(r'[^/]+\.json', parts[0])):
the_package = default_package
the_file = re.sub(r'^data/sources/', '', parts[0])
the_varname = parts[1]
elif len(parts) != 1:
the_package = '_global'
the_file = '_global'
the_varname = varname
else:
the_package = default_package
the_file = default_file
the_varname = varname
return (the_package, the_file, the_varname)
pg_code_cache = get_pg_code_cache()
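# Return a GitHub URL for the source of a function, class, or module in the core docassemble packages, or None if no source location can be determined.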
def source_code_url(the_name, datatype=None):
if datatype == 'module':
try:
if (not hasattr(the_name, '__path__')) or (not the_name.__path__):
# logmessage("Nothing for module " + the_name)
return None
source_file = re.sub(r'\.pyc$', r'.py', the_name.__path__[0])
line_number = 1
except:
return None
elif datatype == 'class':
try:
source_file = inspect.getsourcefile(the_name)
line_number = inspect.findsource(the_name)[1]
except:
# logmessage("Nothing for class " + the_name)
return None
elif hasattr(the_name, '__code__'):
source_file = the_name.__code__.co_filename
line_number = the_name.__code__.co_firstlineno
else:
# logmessage("Nothing for " + the_name)
return None
source_file = re.sub(r'.*/site-packages/', '', source_file)
m = re.search(r'^docassemble/(base|webapp|demo)/', source_file)
if m:
output = 'https://github.com/jhpyle/docassemble/blob/master/docassemble_' + m.group(1) + '/' + source_file
if line_number == 1:
return output
return output + '#L' + str(line_number)
# logmessage("no match for " + str(source_file))
return None
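# Assemble the interview against a fresh dictionary and compile the variables, functions, classes, modules, and files in use, for display in the Playground sidebar (as HTML or, when return_json is True, as JSON-ready data).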
def get_vars_in_use(interview, interview_status, debug_mode=False, return_json=False, show_messages=True, show_jinja_help=False, current_project='default', use_playground=True):
user_dict = fresh_dictionary()
# if 'uid' not in session:
# session['uid'] = random_alphanumeric(32)
if debug_mode:
has_error = True
error_message = "Not checking variables because in debug mode."
error_type = Exception
else:
if not interview.success:
has_error = True
error_type = DAErrorCompileError
else:
old_language = docassemble.base.functions.get_language()
try:
interview.assemble(user_dict, interview_status)
has_error = False
except Exception as errmess:
has_error = True
error_message = str(errmess)
error_type = type(errmess)
logmessage("get_vars_in_use: failed assembly with error type " + str(error_type) + " and message: " + error_message)
docassemble.base.functions.set_language(old_language)
fields_used = set()
names_used = set()
field_origins = {}
name_origins = {}
all_sources = set()
names_used.update(interview.names_used)
for question in interview.questions_list:
for the_set in (question.mako_names, question.names_used, question.fields_used):
names_used.update(the_set)
for key in the_set:
if key not in name_origins:
name_origins[key] = set()
name_origins[key].add(question.from_source)
fields_used.update(question.fields_used)
for key in question.fields_used:
if key not in field_origins:
field_origins[key] = set()
field_origins[key].add(question.from_source)
for val in interview.questions:
names_used.add(val)
if val not in name_origins:
name_origins[val] = set()
for lang in interview.questions[val]:
for q in interview.questions[val][lang]:
name_origins[val].add(q.from_source)
fields_used.add(val)
if val not in field_origins:
field_origins[val] = set()
for lang in interview.questions[val]:
for q in interview.questions[val][lang]:
field_origins[val].add(q.from_source)
needed_names = set()
find_needed_names(interview, needed_names)
functions = set()
modules = set()
classes = set()
name_info = copy.deepcopy(base_name_info)
if use_playground:
playground_user = get_playground_user()
area = SavedFile(playground_user.id, fix=True, section='playgroundtemplate')
the_directory = directory_for(area, current_project)
templates = sorted([f for f in os.listdir(the_directory) if os.path.isfile(os.path.join(the_directory, f)) and re.search(r'^[A-Za-z0-9]', f)])
area = SavedFile(playground_user.id, fix=True, section='playgroundstatic')
the_directory = directory_for(area, current_project)
static = sorted([f for f in os.listdir(the_directory) if os.path.isfile(os.path.join(the_directory, f)) and re.search(r'^[A-Za-z0-9]', f)])
area = SavedFile(playground_user.id, fix=True, section='playgroundsources')
the_directory = directory_for(area, current_project)
sources = sorted([f for f in os.listdir(the_directory) if os.path.isfile(os.path.join(the_directory, f)) and re.search(r'^[A-Za-z0-9]', f)])
area = SavedFile(playground_user.id, fix=True, section='playgroundmodules')
the_directory = directory_for(area, current_project)
avail_modules = sorted([re.sub(r'.py$', '', f) for f in os.listdir(the_directory) if os.path.isfile(os.path.join(the_directory, f)) and re.search(r'^[A-Za-z0-9]', f)])
else:
templates = []
static = []
sources = []
avail_modules = []
for val in user_dict:
if isinstance(user_dict[val], types.FunctionType):
if val not in pg_code_cache:
try:
pg_code_cache[val] = {'doc': noquotetrunc(inspect.getdoc(user_dict[val])), 'name': str(val), 'insert': str(val) + '()', 'tag': str(val) + str(inspect.signature(user_dict[val])), 'git': source_code_url(user_dict[val])}
except:
pg_code_cache[val] = {'doc': '', 'name': str(val), 'insert': str(val) + '()', 'tag': str(val) + '()', 'git': source_code_url(user_dict[val])}
name_info[val] = copy.copy(pg_code_cache[val])
if 'tag' in name_info[val]:
functions.add(val)
elif isinstance(user_dict[val], types.ModuleType):
if val not in pg_code_cache:
try:
pg_code_cache[val] = {'doc': noquotetrunc(inspect.getdoc(user_dict[val])), 'name': str(val), 'insert': str(val), 'git': source_code_url(user_dict[val], datatype='module')}
except:
pg_code_cache[val] = {'doc': '', 'name': str(val), 'insert': str(val), 'git': source_code_url(user_dict[val], datatype='module')}
name_info[val] = copy.copy(pg_code_cache[val])
modules.add(val)
elif isinstance(user_dict[val], TypeType):
if val not in pg_code_cache:
bases = []
for x in list(user_dict[val].__bases__):
if x.__name__ != 'DAObject':
bases.append(x.__name__)
try:
methods = inspect.getmembers(user_dict[val], predicate=lambda x, the_val=val: public_method(x, user_dict[the_val]))
except:
methods = []
method_list = []
for name, value in methods:
try:
method_list.append({'insert': '.' + str(name) + '()', 'name': str(name), 'doc': noquotetrunc(inspect.getdoc(value)), 'tag': '.' + str(name) + str(inspect.signature(value)), 'git': source_code_url(value)})
except:
method_list.append({'insert': '.' + str(name) + '()', 'name': str(name), 'doc': '', 'tag': '.' + str(name) + '()', 'git': source_code_url(value)})
try:
pg_code_cache[val] = {'doc': noquotetrunc(inspect.getdoc(user_dict[val])), 'name': str(val), 'insert': str(val), 'bases': bases, 'methods': method_list, 'git': source_code_url(user_dict[val], datatype='class')}
except:
pg_code_cache[val] = {'doc': '', 'name': str(val), 'insert': str(val), 'bases': bases, 'methods': method_list, 'git': source_code_url(user_dict[val], datatype='class')}
name_info[val] = copy.copy(pg_code_cache[val])
if 'methods' in name_info[val]:
classes.add(val)
for val in docassemble.base.functions.pickleable_objects(user_dict):
names_used.add(val)
if val not in name_info:
name_info[val] = {}
name_info[val]['type'] = user_dict[val].__class__.__name__
name_info[val]['iterable'] = bool(hasattr(user_dict[val], '__iter__') and not isinstance(user_dict[val], str))
for var in base_name_info:
if base_name_info[var]['show']:
names_used.add(var)
names_used = set(i for i in names_used if not extraneous_var.search(i))
for var in ('_internal', '__object_type', '_DAOBJECTDEFAULTDA'):
names_used.discard(var)
for var in interview.mlfields:
names_used.discard(var + '.text')
if len(interview.mlfields) > 0:
classes.add('DAModel')
method_list = [{'insert': '.predict()', 'name': 'predict', 'doc': "Generates a prediction based on the 'text' attribute and sets the attributes 'entry_id,' 'predictions,' 'prediction,' and 'probability.' Called automatically.", 'tag': '.predict(self)'}]
name_info['DAModel'] = {'doc': 'Applies natural language processing to user input and returns a prediction.', 'name': 'DAModel', 'insert': 'DAModel', 'bases': [], 'methods': method_list}
view_doc_text = word("View documentation")
word_documentation = word("Documentation")
attr_documentation = word("Show attributes")
ml_parts = interview.get_ml_store().split(':')
if len(ml_parts) == 2:
ml_parts[1] = re.sub(r'^data/sources/ml-|\.json$', '', ml_parts[1])
else:
ml_parts = ['_global', '_global']
for var in documentation_dict:
if var not in name_info:
name_info[var] = {}
if 'doc' in name_info[var] and name_info[var]['doc'] is not None:
name_info[var]['doc'] += '<br>'
else:
name_info[var]['doc'] = ''
name_info[var]['doc'] += "<a target='_blank' href='" + DOCUMENTATION_BASE + documentation_dict[var] + "'>" + view_doc_text + "</a>"
for var in name_info:
if 'methods' in name_info[var]:
for method in name_info[var]['methods']:
if var + '.' + method['name'] in documentation_dict:
if method['doc'] is None:
method['doc'] = ''
else:
method['doc'] += '<br>'
if view_doc_text not in method['doc']:
method['doc'] += "<a target='_blank' href='" + DOCUMENTATION_BASE + documentation_dict[var + '.' + method['name']] + "'>" + view_doc_text + "</a>"
content = ''
if has_error and show_messages:
error_style = 'danger'
if error_type is DAErrorNoEndpoint:
error_style = 'warning'
message_to_use = title_documentation['incomplete']['doc']
elif error_type is DAErrorCompileError:
message_to_use = title_documentation['compilefail']['doc']
elif error_type is DAErrorMissingVariable:
message_to_use = error_message
else:
message_to_use = title_documentation['generic error']['doc']
content += '\n <tr><td class="playground-warning-box"><div class="alert alert-' + error_style + '">' + message_to_use + '</div></td></tr>'
vocab_dict = {}
vocab_set = (names_used | functions | classes | modules | fields_used | set(key for key in base_name_info if not re.search(r'\.', key)) | set(key for key in name_info if not re.search(r'\.', key)) | set(templates) | set(static) | set(sources) | set(avail_modules) | set(interview.images.keys()))
vocab_set = set(i for i in vocab_set if not extraneous_var.search(i))
names_used = names_used.difference(functions | classes | modules | set(avail_modules))
undefined_names = names_used.difference(fields_used | set(base_name_info.keys()) | set(x for x in names_used if '.' in x))
implicitly_defined = set()
for var in fields_used:
the_var = var
while '.' in the_var:
the_var = re.sub(r'(.*)\..*$', r'\1', the_var, flags=re.DOTALL)
implicitly_defined.add(the_var)
for var in ('_internal', '__object_type', '_DAOBJECTDEFAULTDA'):
undefined_names.discard(var)
vocab_set.discard(var)
for var in [x for x in undefined_names if x.endswith(']')]:
undefined_names.discard(var)
for var in (functions | classes | modules):
undefined_names.discard(var)
for var in user_dict:
undefined_names.discard(var)
names_used = names_used.difference(undefined_names)
if return_json:
if len(names_used) > 0:
has_parent = {}
has_children = set()
for var in names_used:
parent = re.sub(r'[\.\[].*', '', var)
if parent != var:
has_parent[var] = parent
has_children.add(parent)
var_list = []
for var in sorted(names_used):
var_trans = re.sub(r'\[[0-9]+\]', '[i]', var)
# var_trans = re.sub(r'\[i\](.*)\[i\](.*)\[i\](.*)\[i\](.*)\[i\](.*)\[i\]', r'[i]\1[j]\2[k]\3[l]\4[m]\5[n]', var_trans)
# var_trans = re.sub(r'\[i\](.*)\[i\](.*)\[i\](.*)\[i\](.*)\[i\]', r'[i]\1[j]\2[k]\3[l]\4[m]', var_trans)
# var_trans = re.sub(r'\[i\](.*)\[i\](.*)\[i\](.*)\[i\]', r'[i]\1[j]\2[k]\3[l]', var_trans)
var_trans = re.sub(r'\[i\](.*)\[i\](.*)\[i\]', r'[i]\1[j]\2[k]', var_trans)
var_trans = re.sub(r'\[i\](.*)\[i\]', r'[i]\1[j]', var_trans)
info = {'var': var, 'to_insert': var}
if var_trans != var:
info['var_base'] = var_trans
info['hide'] = bool(var in has_parent)
if var in base_name_info:
if not base_name_info[var]['show']:
continue
if var in documentation_dict or var in base_name_info:
info['var_type'] = 'builtin'
elif var not in fields_used and var not in implicitly_defined and var_trans not in fields_used and var_trans not in implicitly_defined:
info['var_type'] = 'not_used'
elif var not in needed_names:
info['var_type'] = 'possibly_not_used'
else:
info['var_type'] = 'default'
if var in name_info and 'type' in name_info[var] and name_info[var]['type']:
info['class_name'] = name_info[var]['type']
elif var in interview.mlfields:
info['class_name'] = 'DAModel'
if var in name_info and 'iterable' in name_info[var]:
info['iterable'] = name_info[var]['iterable']
if var in name_info and 'doc' in name_info[var] and name_info[var]['doc']:
info['doc_content'] = name_info[var]['doc']
info['doc_title'] = word_documentation
if var in interview.mlfields:
if 'ml_group' in interview.mlfields[var] and not interview.mlfields[var]['ml_group'].uses_mako:
(ml_package, ml_file, ml_group_id) = get_ml_info(interview.mlfields[var]['ml_group'].original_text, ml_parts[0], ml_parts[1])
info['train_link'] = url_for('train', package=ml_package, file=ml_file, group_id=ml_group_id)
else:
info['train_link'] = url_for('train', package=ml_parts[0], file=ml_parts[1], group_id=var)
var_list.append(info)
functions_list = []
if len(functions) > 0:
for var in sorted(functions):
info = {'var': var, 'to_insert': name_info[var]['insert'], 'name': name_info[var]['tag']}
if 'doc' in name_info[var] and name_info[var]['doc']:
info['doc_content'] = name_info[var]['doc']
info['doc_title'] = word_documentation
functions_list.append(info)
classes_list = []
if len(classes) > 0:
for var in sorted(classes):
info = {'var': var, 'to_insert': name_info[var]['insert'], 'name': name_info[var]['name']}
if name_info[var]['bases']:
info['bases'] = name_info[var]['bases']
if 'doc' in name_info[var] and name_info[var]['doc']:
info['doc_content'] = name_info[var]['doc']
info['doc_title'] = word_documentation
if 'methods' in name_info[var] and len(name_info[var]['methods']):
info['methods'] = []
for method_item in name_info[var]['methods']:
method_info = {'name': method_item['name'], 'to_insert': method_item['insert'], 'tag': method_item['tag']}
if 'git' in method_item:
method_info['git'] = method_item['git']
if method_item['doc']:
method_info['doc_content'] = method_item['doc']
method_info['doc_title'] = word_documentation
info['methods'].append(method_info)
classes_list.append(info)
modules_list = []
if len(modules) > 0:
for var in sorted(modules):
info = {'var': var, 'to_insert': name_info[var]['insert']}
if name_info[var]['doc']:
info['doc_content'] = name_info[var]['doc']
info['doc_title'] = word_documentation
modules_list.append(info)
if use_playground:
modules_available_list = []
if len(avail_modules) > 0:
for var in sorted(avail_modules):
info = {'var': var, 'to_insert': "." + var}
modules_available_list.append(info)
templates_list = []
if len(templates) > 0:
for var in sorted(templates):
info = {'var': var, 'to_insert': var}
templates_list.append(info)
sources_list = []
if len(sources) > 0:
for var in sorted(sources):
info = {'var': var, 'to_insert': var}
sources_list.append(info)
static_list = []
if len(static) > 0:
for var in sorted(static):
info = {'var': var, 'to_insert': var}
static_list.append(info)
images_list = []
if len(interview.images) > 0:
for var in sorted(interview.images):
info = {'var': var, 'to_insert': var}
the_ref = get_url_from_file_reference(interview.images[var].get_reference())
if the_ref:
info['url'] = the_ref
images_list.append(info)
if use_playground:
return {'undefined_names': list(sorted(undefined_names)), 'var_list': var_list, 'functions_list': functions_list, 'classes_list': classes_list, 'modules_list': modules_list, 'modules_available_list': modules_available_list, 'templates_list': templates_list, 'sources_list': sources_list, 'images_list': images_list, 'static_list': static_list}, sorted(vocab_set), vocab_dict
return {'undefined_names': list(sorted(undefined_names)), 'var_list': var_list, 'functions_list': functions_list, 'classes_list': classes_list, 'modules_list': modules_list, 'images_list': images_list}, sorted(vocab_set), vocab_dict
if len(undefined_names) > 0:
content += '\n <tr><td><h4>' + word('Undefined names') + infobutton('undefined') + '</h4></td></tr>'
for var in sorted(undefined_names):
content += '\n <tr><td>' + search_button(var, field_origins, name_origins, interview.source, all_sources) + '<a role="button" tabindex="0" data-name="' + noquote(var) + '" data-insert="' + noquote(var) + '" class="btn btn-danger btn-sm playground-variable">' + var + '</a></td></tr>'
vocab_dict[var] = var
if len(names_used) > 0:
content += '\n <tr><td><h4>' + word('Variables') + infobutton('variables') + '</h4></td></tr>'
has_parent = {}
has_children = set()
for var in names_used:
parent = re.sub(r'[\.\[].*', '', var)
if parent != var:
has_parent[var] = parent
has_children.add(parent)
for var in sorted(names_used):
var_trans = re.sub(r'\[[0-9]+\]', '[i]', var)
var_trans = re.sub(r'\[i\](.*)\[i\](.*)\[i\]', r'[i]\1[j]\2[k]', var_trans)
var_trans = re.sub(r'\[i\](.*)\[i\]', r'[i]\1[j]', var_trans)
if var in has_parent:
hide_it = ' style="display: none" data-parent="' + noquote(has_parent[var]) + '"'
else:
hide_it = ''
if var in base_name_info:
if not base_name_info[var]['show']:
continue
if var in documentation_dict or var in base_name_info:
class_type = 'btn-info'
title = 'title=' + json.dumps(word("Special variable")) + ' '
elif var not in fields_used and var not in implicitly_defined and var_trans not in fields_used and var_trans not in implicitly_defined:
class_type = 'btn-secondary'
title = 'title=' + json.dumps(word("Possibly not defined")) + ' '
elif var not in needed_names:
class_type = 'btn-warning'
title = 'title=' + json.dumps(word("Possibly not used")) + ' '
else:
class_type = 'btn-primary'
title = ''
content += '\n <tr' + hide_it + '><td>' + search_button(var, field_origins, name_origins, interview.source, all_sources) + '<a role="button" tabindex="0" data-name="' + noquote(var) + '" data-insert="' + noquote(var) + '" ' + title + 'class="btn btn-sm ' + class_type + ' playground-variable">' + var + '</a>'
vocab_dict[var] = var
if var in has_children:
content += ' <a tabindex="0" class="dashowattributes" role="button" data-name="' + noquote(var) + '" title=' + json.dumps(attr_documentation) + '><i class="fas fa-ellipsis-h"></i></a>'
if var in name_info and 'type' in name_info[var] and name_info[var]['type']:
content += ' <span data-ref="' + noquote(name_info[var]['type']) + '" class="daparenthetical">(' + name_info[var]['type'] + ')</span>'
elif var in interview.mlfields:
content += ' <span data-ref="DAModel" class="daparenthetical">(DAModel)</span>'
if var in name_info and 'doc' in name_info[var] and name_info[var]['doc']:
if 'git' in name_info[var] and name_info[var]['git']:
git_link = noquote("<a class='float-end' target='_blank' href='" + name_info[var]['git'] + "'><i class='fas fa-code'></i></a>")
else:
git_link = ''
content += ' <a tabindex="0" class="dainfosign" role="button" data-bs-container="body" data-bs-toggle="popover" data-bs-placement="auto" data-bs-content="' + name_info[var]['doc'] + '" title="' + var + git_link + '"><i class="fas fa-info-circle"></i></a>' # data-bs-selector="true" title=' + json.dumps(word_documentation) + '
if var in interview.mlfields:
if 'ml_group' in interview.mlfields[var] and not interview.mlfields[var]['ml_group'].uses_mako:
(ml_package, ml_file, ml_group_id) = get_ml_info(interview.mlfields[var]['ml_group'].original_text, ml_parts[0], ml_parts[1])
content += ' <a class="datrain" target="_blank" href="' + url_for('train', package=ml_package, file=ml_file, group_id=ml_group_id) + '" title=' + json.dumps(word("Train")) + '><i class="fas fa-graduation-cap"></i></a>'
else:
content += ' <a class="datrain" target="_blank" href="' + url_for('train', package=ml_parts[0], file=ml_parts[1], group_id=var) + '" title=' + json.dumps(word("Train")) + '><i class="fas fa-graduation-cap"></i></a>'
content += '</td></tr>'
if len(all_sources) > 0 and show_messages:
content += search_key
content += '\n <tr><td>'
content += '\n <ul>'
for path in sorted([x.path for x in all_sources]):
content += '\n <li><a target="_blank" href="' + url_for('view_source', i=path, project=current_project) + '">' + path + '</a></li>'
content += '\n </ul>'
content += '\n </td></tr>'
if len(functions) > 0:
content += '\n <tr><td><h4>' + word('Functions') + infobutton('functions') + '</h4></td></tr>'
for var in sorted(functions):
if var in name_info:
content += '\n <tr><td><a role="button" tabindex="0" data-name="' + noquote(var) + '" data-insert="' + noquote(name_info[var]['insert']) + '" class="btn btn-sm btn-warning playground-variable">' + name_info[var]['tag'] + '</a>'
vocab_dict[var] = name_info[var]['insert']
if var in name_info and 'doc' in name_info[var] and name_info[var]['doc']:
if 'git' in name_info[var] and name_info[var]['git']:
git_link = noquote("<a class='float-end' target='_blank' href='" + name_info[var]['git'] + "'><i class='fas fa-code'></i></a>")
else:
git_link = ''
content += ' <a tabindex="0" class="dainfosign" role="button" data-bs-container="body" data-bs-toggle="popover" data-bs-placement="auto" data-bs-content="' + name_info[var]['doc'] + '" title="' + var + git_link + '"><i class="fas fa-info-circle"></i></a>' # data-bs-selector="true" title=' + json.dumps(word_documentation) + '
content += '</td></tr>'
if len(classes) > 0:
content += '\n <tr><td><h4>' + word('Classes') + infobutton('classes') + '</h4></td></tr>'
for var in sorted(classes):
content += '\n <tr><td><a role="button" tabindex="0" data-name="' + noquote(var) + '" data-insert="' + noquote(name_info[var]['insert']) + '" class="btn btn-sm btn-info playground-variable">' + name_info[var]['name'] + '</a>'
vocab_dict[var] = name_info[var]['insert']
if name_info[var]['bases']:
content += ' <span data-ref="' + noquote(name_info[var]['bases'][0]) + '" class="daparenthetical">(' + name_info[var]['bases'][0] + ')</span>'
if name_info[var]['doc']:
if 'git' in name_info[var] and name_info[var]['git']:
git_link = noquote("<a class='float-end' target='_blank' href='" + name_info[var]['git'] + "'><i class='fas fa-code'></i></a>")
else:
git_link = ''
content += ' <a tabindex="0" class="dainfosign" role="button" data-bs-container="body" data-bs-toggle="popover" data-bs-placement="auto" data-bs-content="' + name_info[var]['doc'] + '" title="' + var + git_link + '"><i class="fas fa-info-circle"></i></a>' # data-bs-selector="true" title=' + json.dumps(word_documentation) + '
if len(name_info[var]['methods']) > 0:
content += ' <a tabindex="0" class="dashowmethods" role="button" data-showhide="XMETHODX' + var + '" title=' + json.dumps(word('Methods')) + '><i class="fas fa-cog"></i></a>'
content += '<div style="display: none;" id="XMETHODX' + var + '"><table><tbody>'
for method_info in name_info[var]['methods']:
if 'git' in method_info and method_info['git']:
git_link = noquote("<a class='float-end' target='_blank' href='" + method_info['git'] + "'><i class='fas fa-code'></i></a>")
else:
git_link = ''
content += '<tr><td><a tabindex="0" role="button" data-name="' + noquote(method_info['name']) + '" data-insert="' + noquote(method_info['insert']) + '" class="btn btn-sm btn-warning playground-variable">' + method_info['tag'] + '</a>'
# vocab_dict[method_info['name']] = method_info['insert']
if method_info['doc']:
content += ' <a tabindex="0" class="dainfosign" role="button" data-bs-container="body" data-bs-toggle="popover" data-bs-placement="auto" data-bs-content="' + method_info['doc'] + '" data-bs-title="' + noquote(method_info['name']) + git_link + '"><i class="fas fa-info-circle"></i></a>' # data-bs-selector="true" title=' + json.dumps(word_documentation) + '
content += '</td></tr>'
content += '</tbody></table></div>'
content += '</td></tr>'
if len(modules) > 0:
content += '\n <tr><td><h4>' + word('Modules defined') + infobutton('modules') + '</h4></td></tr>'
for var in sorted(modules):
content += '\n <tr><td><a tabindex="0" data-name="' + noquote(var) + '" data-insert="' + noquote(name_info[var]['insert']) + '" role="button" class="btn btn-sm btn-success playground-variable">' + name_info[var]['name'] + '</a>'
vocab_dict[var] = name_info[var]['insert']
if name_info[var]['doc']:
if 'git' in name_info[var] and name_info[var]['git']:
git_link = noquote("<a class='float-end' target='_blank' href='" + name_info[var]['git'] + "'><i class='fas fa-code'></i></a>")
else:
git_link = ''
content += ' <a tabindex="0" class="dainfosign" role="button" data-bs-container="body" data-bs-toggle="popover" data-bs-placement="auto" data-bs-content="' + name_info[var]['doc'] + '" data-bs-title="' + noquote(var) + git_link + '"><i class="fas fa-info-circle"></i></a>' # data-bs-selector="true" title=' + json.dumps(word_documentation) + '
content += '</td></tr>'
if len(avail_modules) > 0:
content += '\n <tr><td><h4>' + word('Modules available in Playground') + infobutton('playground_modules') + '</h4></td></tr>'
for var in avail_modules:
content += '\n <tr><td><a role="button" tabindex="0" data-name="' + noquote(var) + '" data-insert=".' + noquote(var) + '" class="btn btn-sm btn-success playground-variable">.' + noquote(var) + '</a>'
vocab_dict[var] = var
content += '</td></tr>'
if len(templates) > 0:
content += '\n <tr><td><h4>' + word('Templates') + infobutton('templates') + '</h4></td></tr>'
for var in templates:
content += '\n <tr><td><a role="button" tabindex="0" data-name="' + noquote(var) + '" data-insert="' + noquote(var) + '" class="btn btn-sm btn-secondary playground-variable">' + noquote(var) + '</a>'
vocab_dict[var] = var
content += '</td></tr>'
if len(static) > 0:
content += '\n <tr><td><h4>' + word('Static files') + infobutton('static') + '</h4></td></tr>'
for var in static:
content += '\n <tr><td><a role="button" tabindex="0" data-name="' + noquote(var) + '" data-insert="' + noquote(var) + '" class="btn btn-sm btn-secondary playground-variable">' + noquote(var) + '</a>'
vocab_dict[var] = var
content += '</td></tr>'
if len(sources) > 0:
content += '\n <tr><td><h4>' + word('Source files') + infobutton('sources') + '</h4></td></tr>'
for var in sources:
content += '\n <tr><td><a role="button" tabindex="0" data-name="' + noquote(var) + '" data-insert="' + noquote(var) + '" class="btn btn-sm btn-secondary playground-variable">' + noquote(var) + '</a>'
vocab_dict[var] = var
content += '</td></tr>'
if len(interview.images) > 0:
content += '\n <tr><td><h4>' + word('Decorations') + infobutton('decorations') + '</h4></td></tr>'
show_images = not bool(cloud and len(interview.images) > 10)
for var in sorted(interview.images):
content += '\n <tr><td>'
the_ref = get_url_from_file_reference(interview.images[var].get_reference())
if the_ref is None:
content += '<a role="button" tabindex="0" title=' + json.dumps(word("This image file does not exist")) + ' data-name="' + noquote(var) + '" data-insert="' + noquote(var) + '" class="btn btn-sm btn-danger playground-variable">' + noquote(var) + '</a>'
else:
if show_images:
content += '<img class="daimageicon" src="' + the_ref + '"> '
content += '<a role="button" tabindex="0" data-name="' + noquote(var) + '" data-insert="' + noquote(var) + '" class="btn btn-sm btn-primary playground-variable">' + noquote(var) + '</a>'
vocab_dict[var] = var
content += '</td></tr>'
if show_messages:
content += "\n <tr><td><br><em>" + word("Type Ctrl-space to autocomplete.") + "</em></td><tr>"
if show_jinja_help:
content += "\n <tr><td><h4 class=\"mt-2\">" + word("Using Jinja2") + infobutton('jinja2') + "</h4>\n " + re.sub("table-striped", "table-bordered", docassemble.base.util.markdown_to_html(word("Jinja2 help template"), trim=False, do_terms=False)) + "</td><tr>"
for item in base_name_info:
if item not in vocab_dict and not base_name_info[item].get('exclude', False):
vocab_dict[item] = base_name_info[item].get('insert', item)
return content, sorted(vocab_set), vocab_dict
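# Queue the ocr_google task on the Celery worker and return its AsyncResult so the caller can poll for the OCR result.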
def ocr_google_in_background(image_file, raw_result, user_code):
return docassemble.webapp.worker.ocr_google.delay(image_file, raw_result, user_code)
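# Queue a background task that renders a PDF to PNG images at page or screen resolution (depending on the prefix) and return the Celery task id.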
def make_png_for_pdf(doc, prefix, page=None):
if prefix == 'page':
resolution = PNG_RESOLUTION
else:
resolution = PNG_SCREEN_RESOLUTION
session_id = docassemble.base.functions.get_uid()
task = docassemble.webapp.worker.make_png_for_pdf.delay(doc, prefix, resolution, session_id, PDFTOPPM_COMMAND, page=page)
return task.id
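# Foreground (blocking) variants of the PDF-to-PNG conversion, operating on a document object or a file path, respectively.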
def fg_make_png_for_pdf(doc, prefix, page=None):
if prefix == 'page':
resolution = PNG_RESOLUTION
else:
resolution = PNG_SCREEN_RESOLUTION
docassemble.base.util.make_png_for_pdf(doc, prefix, resolution, PDFTOPPM_COMMAND, page=page)
def fg_make_png_for_pdf_path(path, prefix, page=None):
if prefix == 'page':
resolution = PNG_RESOLUTION
else:
resolution = PNG_SCREEN_RESOLUTION
docassemble.base.util.make_png_for_pdf_path(path, prefix, resolution, PDFTOPPM_COMMAND, page=page)
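# Convert a word processing file to PDF in the foreground using pandoc; raise DAError if the conversion fails.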
def fg_make_pdf_for_word_path(path, extension):
success = docassemble.base.pandoc.word_to_pdf(path, extension, path + ".pdf")
if not success:
raise DAError("fg_make_pdf_for_word_path: unable to make PDF from " + path + " using extension " + extension + " and writing to " + path + ".pdf")
def task_ready(task_id):
result = docassemble.webapp.worker.workerapp.AsyncResult(id=task_id)
if result.ready():
return True
return False
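# Wait up to timeout seconds (three by default) for a Celery task to finish; return True on completion, False on timeout or error.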
def wait_for_task(task_id, timeout=None):
if timeout is None:
timeout = 3
# logmessage("wait_for_task: starting")
try:
result = docassemble.webapp.worker.workerapp.AsyncResult(id=task_id)
if result.ready():
# logmessage("wait_for_task: was ready")
return True
# logmessage("wait_for_task: waiting for task to complete")
result.get(timeout=timeout)
# logmessage("wait_for_task: returning true")
return True
except celery.exceptions.TimeoutError:
logmessage("wait_for_task: timed out")
return False
except Exception as the_error:
logmessage("wait_for_task: got error: " + str(the_error))
return False
# def make_image_files(path):
# if PDFTOPPM_COMMAND is not None:
# args = [PDFTOPPM_COMMAND, '-r', str(PNG_RESOLUTION), '-png', path, path + 'page']
# result = call(args)
# if result > 0:
# raise DAError("Call to pdftoppm failed")
# args = [PDFTOPPM_COMMAND, '-r', str(PNG_SCREEN_RESOLUTION), '-png', path, path + 'screen']
# result = call(args)
# if result > 0:
# raise DAError("Call to pdftoppm failed")
def trigger_update(except_for=None):
logmessage("trigger_update: except_for is " + str(except_for) + " and hostname is " + hostname)
if USING_SUPERVISOR:
to_delete = set()
for host in db.session.execute(select(Supervisors)).scalars():
if host.url and not (except_for and host.hostname == except_for):
if host.hostname == hostname:
the_url = 'http://localhost:9001'
logmessage("trigger_update: using http://localhost:9001")
else:
the_url = host.url
args = SUPERVISORCTL + ['-s', the_url, 'start', 'update']
result = subprocess.run(args, check=False).returncode
if result == 0:
logmessage("trigger_update: sent update to " + str(host.hostname) + " using " + the_url)
else:
logmessage("trigger_update: call to supervisorctl on " + str(host.hostname) + " was not successful")
to_delete.add(host.id)
for id_to_delete in to_delete:
db.session.execute(sqldelete(Supervisors).filter_by(id=id_to_delete))
db.session.commit()
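# Ask supervisord on a single server to start the 'reset' process; return False if the supervisorctl call fails.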
def restart_on(host):
logmessage("restart_on: " + str(host.hostname))
if host.hostname == hostname:
the_url = 'http://localhost:9001'
else:
the_url = host.url
args = SUPERVISORCTL + ['-s', the_url, 'start', 'reset']
result = subprocess.run(args, check=False).returncode
if result == 0:
logmessage("restart_on: sent reset to " + str(host.hostname))
else:
logmessage("restart_on: call to supervisorctl with reset on " + str(host.hostname) + " was not successful")
return False
return True
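# Clear cached interview sources from Redis and restart the application on every server, other servers first and this one last.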
def restart_all():
logmessage("restarting all")
for interview_path in [x.decode() for x in r.keys('da:interviewsource:*')]:
r.delete(interview_path)
if not SINGLE_SERVER:
restart_others()
restart_this()
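# Restart the application on this server, through supervisorctl when supervisor is in use, or by touching the WSGI file otherwise.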
def restart_this():
logmessage("restart_this: hostname is " + str(hostname))
if SINGLE_SERVER:
args = SUPERVISORCTL + ['-s', 'http://localhost:9001', 'start', 'reset']
result = subprocess.run(args, check=False).returncode
if result == 0:
logmessage("restart_this: sent reset")
else:
logmessage("restart_this: call to supervisorctl with reset was not successful")
return
if USING_SUPERVISOR:
to_delete = set()
for host in db.session.execute(select(Supervisors)).scalars():
if host.url:
logmessage("restart_this: considering " + str(host.hostname) + " against " + str(hostname))
if host.hostname == hostname:
result = restart_on(host)
if not result:
to_delete.add(host.id)
for id_to_delete in to_delete:
db.session.execute(sqldelete(Supervisors).filter_by(id=id_to_delete))
db.session.commit()
else:
logmessage("restart_this: touching wsgi file")
wsgi_file = WEBAPP_PATH
if os.path.isfile(wsgi_file):
with open(wsgi_file, 'a', encoding='utf-8'):
os.utime(wsgi_file, None)
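# Restart the application on all other servers, restarting any cron server first and waiting for the da:cron_restart flag in Redis to clear before continuing.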
def restart_others():
logmessage("restart_others: starting")
if USING_SUPERVISOR:
cron_key = 'da:cron_restart'
cron_url = None
to_delete = set()
for host in db.session.execute(select(Supervisors)).scalars():
if host.url and host.hostname != hostname and ':cron:' in str(host.role):
pipe = r.pipeline()
pipe.set(cron_key, 1)
pipe.expire(cron_key, 10)
pipe.execute()
result = restart_on(host)
if not result:
to_delete.add(host.id)
while r.get(cron_key) is not None:
time.sleep(1)
cron_url = host.url
for host in db.session.execute(select(Supervisors)).scalars():
if host.url and host.url != cron_url and host.hostname != hostname and host.id not in to_delete:
result = restart_on(host)
if not result:
to_delete.add(host.id)
for id_to_delete in to_delete:
db.session.execute(sqldelete(Supervisors).filter_by(id=id_to_delete))
db.session.commit()
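# Return the client's IP address, preferring the X-Real-Ip and X-Forwarded-For headers when running behind an HTTPS-terminating proxy.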
def get_requester_ip(req):
if not req:
return '127.0.0.1'
if HTTP_TO_HTTPS:
if 'X-Real-Ip' in req.headers:
return req.headers['X-Real-Ip']
if 'X-Forwarded-For' in req.headers:
return req.headers['X-Forwarded-For']
return req.remote_addr
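# Assemble the dictionary of request, session, and user context that interview code reads through docassemble.base.functions.this_thread.current_info.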
def current_info(yaml=None, req=None, action=None, location=None, interface='web', session_info=None, secret=None, device_id=None, session_uid=None): # pylint: disable=redefined-outer-name
# logmessage("interface is " + str(interface))
if current_user.is_authenticated:
role_list = [str(role.name) for role in current_user.roles]
if len(role_list) == 0:
role_list = ['user']
ext = {'email': current_user.email, 'roles': role_list, 'the_user_id': current_user.id, 'theid': current_user.id, 'firstname': current_user.first_name, 'lastname': current_user.last_name, 'nickname': current_user.nickname, 'country': current_user.country, 'subdivisionfirst': current_user.subdivisionfirst, 'subdivisionsecond': current_user.subdivisionsecond, 'subdivisionthird': current_user.subdivisionthird, 'organization': current_user.organization, 'timezone': current_user.timezone, 'language': current_user.language}
else:
ext = {'email': None, 'the_user_id': 't' + str(session.get('tempuser', None)), 'theid': session.get('tempuser', None), 'roles': []}
headers = {}
if req is None:
url_root = daconfig.get('url root', 'http://localhost') + ROOT
url = url_root + 'interview'
clientip = None
method = None
session_uid = '0'
else:
url_root = url_for('rootindex', _external=True)
url = url_root + 'interview'
if secret is None:
secret = req.cookies.get('secret', None)
for key, value in req.headers.items():
headers[key] = value
clientip = get_requester_ip(req)
method = req.method
if session_uid is None:
if 'session' in req.cookies:
session_uid = str(req.cookies.get('session'))[5:15]
else:
session_uid = ''
if session_uid == '':
session_uid = app.session_interface.manual_save_session(app, session).decode()[5:15]
# logmessage("unique id is " + session_uid)
if device_id is None:
device_id = random_string(16)
if secret is not None:
secret = str(secret)
if session_info is None and yaml is not None:
session_info = get_session(yaml)
if session_info is not None:
user_code = session_info['uid']
encrypted = session_info['encrypted']
else:
user_code = None
encrypted = True
return_val = {'session': user_code, 'secret': secret, 'yaml_filename': yaml, 'interface': interface, 'url': url, 'url_root': url_root, 'encrypted': encrypted, 'user': {'is_anonymous': bool(current_user.is_anonymous), 'is_authenticated': bool(current_user.is_authenticated), 'session_uid': session_uid, 'device_id': device_id}, 'headers': headers, 'clientip': clientip, 'method': method}
if action is not None:
# logmessage("current_info: setting an action " + repr(action))
return_val.update(action)
# return_val['orig_action'] = action['action']
# return_val['orig_arguments'] = action['arguments']
if location is not None:
ext['location'] = location
else:
ext['location'] = None
return_val['user'].update(ext)
return return_val
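# Escape &, <, and > so that text can be safely embedded in HTML.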
def html_escape(text):
text = re.sub('&', '&amp;', text)
text = re.sub('<', '&lt;', text)
text = re.sub('>', '&gt;', text)
return text
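# Indent every line of text by num spaces and ensure the result ends with a newline; return the empty string for empty input.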
def indent_by(text, num):
if not text:
return ""
return (" " * num) + re.sub(r'\n', "\n" + (" " * num), text).rstrip() + "\n"
def call_sync():
if not USING_SUPERVISOR:
return
args = SUPERVISORCTL + ['-s', 'http://localhost:9001', 'start', 'sync']
result = subprocess.run(args, check=False).returncode
if result == 0:
pass
# logmessage("call_sync: sent message to " + hostname)
else:
logmessage("call_sync: call to supervisorctl on " + hostname + " was not successful")
abort(404)
in_process = 1
counter = 10
check_args = SUPERVISORCTL + ['-s', 'http://localhost:9001', 'status', 'sync']
while in_process == 1 and counter > 0:
output, err = Popen(check_args, stdout=PIPE, stderr=PIPE).communicate() # pylint: disable=unused-variable
if not re.search(r'RUNNING', output.decode()):
in_process = 0
else:
time.sleep(1)
counter -= 1
def formatted_current_time():
if current_user.timezone:
the_timezone = zoneinfo.ZoneInfo(current_user.timezone)
else:
the_timezone = zoneinfo.ZoneInfo(get_default_timezone())
return datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(the_timezone).strftime('%H:%M:%S %Z')
def formatted_current_date():
if current_user.timezone:
the_timezone = zoneinfo.ZoneInfo(current_user.timezone)
else:
the_timezone = zoneinfo.ZoneInfo(get_default_timezone())
return datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(the_timezone).strftime("%Y-%m-%d")
class Object:
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class FakeUser:
pass
class FakeRole:
pass
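# Return True if the e-mail address is allowed to register, based on the 'authorized registration domains' configuration; an empty list allows any domain.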
def verify_email(email):
if len(daconfig['authorized registration domains']) != 0:
ok = False
email = str(email).lower().strip()
for domain in daconfig['authorized registration domains']:
if email.endswith(domain):
ok = True
break
if not ok:
return False
return True
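# Base class for the OAuth sign-in providers; get_provider() lazily instantiates one object per subclass and caches it.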
class OAuthSignIn:
providers = {}
providers_obtained = False
def __init__(self, provider_name):
self.provider_name = provider_name
credentials = current_app.config['OAUTH_CREDENTIALS'].get(provider_name, {})
self.consumer_id = credentials.get('id', None)
self.consumer_secret = credentials.get('secret', None)
self.consumer_domain = credentials.get('domain', None)
def authorize(self):
pass
def callback(self):
pass
def get_callback_url(self):
return url_for('oauth_callback', provider=self.provider_name,
_external=True)
@classmethod
def get_provider(cls, provider_name):
if not cls.providers_obtained:
for provider_class in cls.__subclasses__():
provider = provider_class()
cls.providers[provider.provider_name] = provider
cls.providers_obtained = True
return cls.providers[provider_name]
class GoogleSignIn(OAuthSignIn):
def __init__(self):
super().__init__('google')
self.service = OAuth2Service(
name='google',
client_id=self.consumer_id,
client_secret=self.consumer_secret,
authorize_url=None,
access_token_url=None,
base_url=None
)
def authorize(self):
pass
def callback(self):
# logmessage("GoogleCallback, args: " + str([str(arg) + ": " + str(request.args[arg]) for arg in request.args]))
# logmessage("GoogleCallback, request: " + str(request.data))
csrf_cookie = request.cookies.get('g_csrf_token', None)
post_data = request.form.copy()
csrf_body = post_data.get('g_csrf_token', None)
token = post_data.get('credential', None)
if token is None or csrf_cookie is None or csrf_cookie != csrf_body or not app.config['USE_GOOGLE_LOGIN']:
logmessage("Google authentication problem")
return (None, None, None, None)
try:
idinfo = id_token.verify_oauth2_token(token, google_requests.Request(), app.config['OAUTH_CREDENTIALS']['google']['id'])
except ValueError:
logmessage("Google ID did not verify")
return (None, None, None, None)
google_id = idinfo.get('sub', None)
email = idinfo.get('email', None)
google_name = idinfo.get('name', None)
first_name = idinfo.get('given_name', None)
last_name = idinfo.get('family_name', None)
if email is not None and google_id is not None:
return (
'google$' + str(google_id),
email.split('@')[0],
email,
{'name': google_name, 'first_name': first_name, 'last_name': last_name}
)
raise DAException("Could not get Google authorization information")
class FacebookSignIn(OAuthSignIn):
def __init__(self):
super().__init__('facebook')
self.service = OAuth2Service(
name='facebook',
client_id=self.consumer_id,
client_secret=self.consumer_secret,
authorize_url='https://www.facebook.com/v3.0/dialog/oauth',
access_token_url='https://graph.facebook.com/v3.0/oauth/access_token',
base_url='https://graph.facebook.com/v3.0'
)
def authorize(self):
return redirect(self.service.get_authorize_url(
scope='public_profile,email',
response_type='code',
redirect_uri=self.get_callback_url())
)
def callback(self):
if 'code' not in request.args:
return None, None, None, None
oauth_session = self.service.get_auth_session(
decoder=safe_json_loads,
data={'code': request.args['code'],
'redirect_uri': self.get_callback_url()}
)
me = oauth_session.get('me', params={'fields': 'id,name,first_name,middle_name,last_name,name_format,email'}).json()
# logmessage("Facebook: returned " + json.dumps(me))
return (
'facebook$' + str(me['id']),
me.get('email').split('@')[0],
me.get('email'),
{'first_name': me.get('first_name', None),
'last_name': me.get('last_name', None),
'name': me.get('name', None)}
)
class ZitadelSignIn(OAuthSignIn):
def __init__(self):
super().__init__('zitadel')
self.service = OAuth2Service(
name='zitadel',
client_id=self.consumer_id,
client_secret=None,
authorize_url='https://' + str(self.consumer_domain) + '/oauth/v2/authorize',
access_token_url='https://' + str(self.consumer_domain) + '/oauth/v2/token',
base_url='https://' + str(self.consumer_domain)
)
def authorize(self):
session['zitadel_verifier'] = random_alphanumeric(43)
code_challenge = base64.b64encode(hashlib.sha256(session['zitadel_verifier'].encode()).digest()).decode()
code_challenge = re.sub(r'\+', '-', code_challenge)
code_challenge = re.sub(r'/', '_', code_challenge)
code_challenge = re.sub(r'=', '', code_challenge)
the_url = self.service.get_authorize_url(
scope='openid email profile',
response_type='code',
redirect_uri=self.get_callback_url(),
code_challenge=code_challenge,
code_challenge_method='S256')
return redirect(the_url)
def callback(self):
if 'code' not in request.args or 'zitadel_verifier' not in session:
return None, None, None, None
the_data = {'code': request.args['code'],
'grant_type': 'authorization_code',
'code_verifier': session['zitadel_verifier'],
'redirect_uri': self.get_callback_url()}
oauth_session = self.service.get_auth_session(
decoder=safe_json_loads,
data=the_data
)
me = oauth_session.get('oidc/v1/userinfo').json()
del session['zitadel_verifier']
return (
'zitadel$' + str(me['sub']),
me.get('email').split('@')[0],
me.get('email'),
{'first_name': me.get('given_name', None),
'last_name': me.get('family_name', None),
'name': me.get('name', None),
'language': me.get('locale', None)}
)
class AzureSignIn(OAuthSignIn):
def __init__(self):
super().__init__('azure')
self.service = OAuth2Service(
name='azure',
client_id=self.consumer_id,
client_secret=self.consumer_secret,
authorize_url='https://login.microsoftonline.com/common/oauth2/authorize',
access_token_url='https://login.microsoftonline.com/common/oauth2/token',
base_url='https://graph.microsoft.com/v1.0/'
)
def authorize(self):
return redirect(self.service.get_authorize_url(
response_type='code',
client_id=self.consumer_id,
redirect_uri=self.get_callback_url())
)
def callback(self):
if 'code' not in request.args:
return None, None, None, None
oauth_session = self.service.get_auth_session(
decoder=safe_json_loads,
data={'code': request.args['code'],
'client_id': self.consumer_id,
'client_secret': self.consumer_secret,
'resource': 'https://graph.microsoft.com/',
'grant_type': 'authorization_code',
'redirect_uri': self.get_callback_url()}
)
me = oauth_session.get('me').json()
return (
'azure$' + str(me['id']),
me.get('mail').split('@')[0],
me.get('mail'),
{'first_name': me.get('givenName', None),
'last_name': me.get('surname', None),
'name': me.get('displayName', me.get('userPrincipalName', None))}
)
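# Decode a JSON response that an OAuth token endpoint returns as UTF-8 bytes.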
def safe_json_loads(data):
return json.loads(data.decode("utf-8", "strict"))
class Auth0SignIn(OAuthSignIn):
def __init__(self):
super().__init__('auth0')
self.service = OAuth2Service(
name='auth0',
client_id=self.consumer_id,
client_secret=self.consumer_secret,
authorize_url='https://' + str(self.consumer_domain) + '/authorize',
access_token_url='https://' + str(self.consumer_domain) + '/oauth/token',
base_url='https://' + str(self.consumer_domain)
)
def authorize(self):
if 'oauth' in daconfig and 'auth0' in daconfig['oauth'] and daconfig['oauth']['auth0'].get('enable', True) and self.consumer_domain is None:
raise DAException("To use Auth0, you need to set your domain in the configuration.")
return redirect(self.service.get_authorize_url(
response_type='code',
scope='openid profile email',
audience='https://' + str(self.consumer_domain) + '/userinfo',
redirect_uri=self.get_callback_url())
)
def callback(self):
if 'code' not in request.args:
return None, None, None, None
oauth_session = self.service.get_auth_session(
decoder=safe_json_loads,
data={'code': request.args['code'],
'grant_type': 'authorization_code',
'redirect_uri': self.get_callback_url()}
)
me = oauth_session.get('userinfo').json()
# logmessage("Auth0 returned " + json.dumps(me))
user_id = me.get('sub', me.get('user_id'))
social_id = 'auth0$' + str(user_id)
username = me.get('name')
email = me.get('email')
if user_id is None or username is None or email is None:
raise DAException("Error: could not get necessary information from Auth0")
return social_id, username, email, {'name': me.get('name', None)}
class KeycloakSignIn(OAuthSignIn):
def __init__(self):
super().__init__('keycloak')
try:
realm = daconfig['oauth']['keycloak']['realm']
except:
realm = None
try:
protocol = daconfig['oauth']['keycloak']['protocol']
except KeyError:
protocol = 'https://'
if not protocol.endswith('://'):
protocol = protocol + '://'
self.service = OAuth2Service(
name='keycloak',
client_id=self.consumer_id,
client_secret=self.consumer_secret,
authorize_url=protocol + str(self.consumer_domain) + '/realms/' + str(realm) + '/protocol/openid-connect/auth',
access_token_url=protocol + str(self.consumer_domain) + '/realms/' + str(realm) + '/protocol/openid-connect/token',
base_url=protocol + str(self.consumer_domain)
)
def authorize(self):
if 'oauth' in daconfig and 'keycloak' in daconfig['oauth'] and daconfig['oauth']['keycloak'].get('enable', True) and self.consumer_domain is None:
raise DAException("To use keycloak, you need to set your domain in the configuration.")
return redirect(self.service.get_authorize_url(
response_type='code',
scope='openid profile email',
redirect_uri=self.get_callback_url())
)
def callback(self):
if 'code' not in request.args:
return None, None, None, None
oauth_session = self.service.get_auth_session(
decoder=safe_json_loads,
data={'code': request.args['code'],
'grant_type': 'authorization_code',
'redirect_uri': self.get_callback_url()}
)
me = oauth_session.get('realms/' + daconfig['oauth']['keycloak']['realm'] + '/protocol/openid-connect/userinfo').json()
# logmessage("keycloak returned " + json.dumps(me))
user_id = me.get('sub')
social_id = 'keycloak$' + str(user_id)
username = me.get('preferred_username')
email = me.get('email')
if email is None and '@' in username:
email = username
if user_id is None or username is None or email is None:
raise DAException("Error: could not get necessary information from keycloak")
info_dict = {'name': me.get('name', None)}
if 'given_name' in me:
info_dict['first_name'] = me.get('given_name')
if 'family_name' in me:
info_dict['last_name'] = me.get('family_name')
return social_id, username, email, info_dict
class TwitterSignIn(OAuthSignIn):
def __init__(self):
super().__init__('twitter')
self.service = OAuth1Service(
name='twitter',
consumer_key=self.consumer_id,
consumer_secret=self.consumer_secret,
request_token_url='https://api.twitter.com/oauth/request_token',
authorize_url='https://api.twitter.com/oauth/authorize',
access_token_url='https://api.twitter.com/oauth/access_token',
base_url='https://api.twitter.com/1.1/'
)
def authorize(self):
request_token = self.service.get_request_token(
params={'oauth_callback': self.get_callback_url()}
)
session['request_token'] = request_token
return redirect(self.service.get_authorize_url(request_token[0]))
def callback(self):
request_token = session.pop('request_token')
if 'oauth_verifier' not in request.args:
return None, None, None, None
oauth_session = self.service.get_auth_session(
request_token[0],
request_token[1],
data={'oauth_verifier': request.args['oauth_verifier']}
)
me = oauth_session.get('account/verify_credentials.json', params={'skip_status': 'true', 'include_email': 'true', 'include_entities': 'false'}).json()
# logmessage("Twitter returned " + json.dumps(me))
social_id = 'twitter$' + str(me.get('id_str'))
username = me.get('screen_name')
email = me.get('email')
return social_id, username, email, {'name': me.get('name', None)}
# @flaskbabel.localeselector
# def get_locale():
# translations = [str(translation) for translation in flaskbabel.list_translations()]
# return request.accept_languages.best_match(translations)
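# Fetch a UserModel by id with its roles eagerly loaded.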
def get_user_object(user_id):
the_user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).where(UserModel.id == user_id)).scalar()
return the_user
@lm.user_loader
def load_user(the_id):
return UserModel.query.options(db.joinedload(UserModel.roles)).get(int(the_id))
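# Resolve a temporary URL: look up the code in Redis, delete single-use entries, and redirect to the stored URL.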
@app.route('/goto', methods=['GET'])
def run_temp():
code = request.args.get('c', None)
if code is None:
abort(403)
ua_string = request.headers.get('User-Agent', None)
if ua_string is not None:
response = ua_parse(ua_string)
if response.device.brand == 'Spider':
return render_template_string('')
the_key = 'da:temporary_url:' + str(code)
data = r.get(the_key)
if data is None:
raise DAError(word("The link has expired."), code=403)
try:
data = json.loads(data.decode())
if data.get('once', False):
r.delete(the_key)
url = data.get('url')
except:
r.delete(the_key)
url = data.decode()
return redirect(url)
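# Log a user in from a one-time link; the 40-character key combines a 16-character decryption key with a Redis code pointing to encrypted login information.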
@app.route('/user/autologin', methods=['GET'])
def auto_login():
ua_string = request.headers.get('User-Agent', None)
if ua_string is not None:
response = ua_parse(ua_string)
if response.device.brand == 'Spider':
return render_template_string('')
if 'key' not in request.args or len(request.args['key']) != 40:
abort(403)
code = str(request.args['key'][16:40])
decryption_key = str(request.args['key'][0:16])
the_key = 'da:auto_login:' + code
info_text = r.get(the_key)
if info_text is None:
abort(403)
r.delete(the_key)
info_text = info_text.decode()
try:
info = decrypt_dictionary(info_text, decryption_key)
except:
abort(403)
user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).where(UserModel.id == info['user_id'])).scalar()
if (not user) or user.social_id.startswith('disabled$') or not user.active:
abort(403)
login_user(user, remember=False)
update_last_login(user)
if 'i' in info:
url_info = {'i': info['i']}
if 'url_args' in info:
url_info.update(info['url_args'])
next_url = url_for('index', **url_info)
if 'session' in info:
update_session(info['i'], uid=info['session'], encrypted=info['encrypted'])
elif 'next' in info:
url_info = info.get('url_args', {})
next_url = get_url_from_file_reference(info['next'], **url_info)
else:
next_url = url_for('interview_list', from_login='1')
response = redirect(next_url)
response.set_cookie('secret', info['secret'], httponly=True, secure=app.config['SESSION_COOKIE_SECURE'], samesite=app.config['SESSION_COOKIE_SAMESITE'])
return response
@app.route('/headers', methods=['POST', 'GET'])
@csrf.exempt
def show_headers():
return jsonify(headers=dict(request.headers), ipaddress=request.remote_addr)
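# Begin the OAuth login flow with the chosen provider, saving a sanitized 'next' URL in the session.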
@app.route('/authorize/<provider>', methods=['POST', 'GET'])
@csrf.exempt
def oauth_authorize(provider):
if not current_user.is_anonymous:
return redirect(url_for('interview_list', from_login='1'))
oauth = OAuthSignIn.get_provider(provider)
next_url = app.user_manager.make_safe_url_function(request.args.get('next', ''))
if next_url:
session['next'] = next_url
return oauth.authorize()
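# Handle the OAuth provider's callback: look up or create the user, merge any temporary-session data, and set the 'secret' cookie before redirecting.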
@app.route('/callback/<provider>', methods=['POST', 'GET'])
@csrf.exempt
def oauth_callback(provider):
if not current_user.is_anonymous:
return redirect(url_for('interview_list', from_login='1'))
if request.method == 'POST' and provider != 'google':
return ('The method is not allowed for the requested URL.', 405)
# for argument in request.args:
# logmessage("argument " + str(argument) + " is " + str(request.args[argument]))
oauth = OAuthSignIn.get_provider(provider)
social_id, username, email, name_data = oauth.callback()
if not verify_email(email):
flash(word('E-mail addresses with this domain are not authorized to register for accounts on this system.'), 'error')
return redirect(url_for('user.login'))
if social_id is None:
flash(word('Authentication failed.'), 'error')
return redirect(url_for('interview_list', from_login='1'))
user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).filter_by(social_id=social_id)).scalar()
if not user:
user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).filter_by(email=email)).scalar()
if user and user.social_id is not None and user.social_id.startswith('local'):
flash(word('There is already a username and password on this system with the e-mail address') + " " + str(email) + ". " + word("Please log in."), 'error')
return redirect(url_for('user.login'))
if not user:
user = UserModel(social_id=social_id, nickname=username, email=email, active=True)
if 'first_name' in name_data and 'last_name' in name_data and name_data['first_name'] is not None and name_data['last_name'] is not None:
user.first_name = name_data['first_name']
user.last_name = name_data['last_name']
elif 'name' in name_data and name_data['name'] is not None and ' ' in name_data['name']:
user.first_name = re.sub(r' .*', '', name_data['name'])
user.last_name = re.sub(r'.* ', '', name_data['name'])
if 'language' in name_data and name_data['language']:
user.language = name_data['language']
db.session.add(user)
db.session.commit()
session["_flashes"] = []
login_user(user, remember=False)
update_last_login(user)
if 'i' in session: # TEMPORARY
get_session(session['i'])
to_convert = []
if 'tempuser' in session:
to_convert.extend(sub_temp_user_dict_key(session['tempuser'], user.id))
if 'sessions' in session:
for filename, info in session['sessions'].items():
if (filename, info['uid']) not in to_convert:
to_convert.append((filename, info['uid']))
save_user_dict_key(info['uid'], filename, priors=True, user=user)
update_session(filename, key_logged=True)
# logmessage("oauth_callback: calling substitute_secret")
secret = substitute_secret(str(request.cookies.get('secret', None)), pad_to_16(MD5Hash(data=social_id).hexdigest()), to_convert=to_convert)
sub_temp_other(user)
if 'next' in session:
the_url = session['next']
del session['next']
response = redirect(the_url)
else:
response = redirect(url_for('interview_list', from_login='1'))
response.set_cookie('secret', secret, httponly=True, secure=app.config['SESSION_COOKIE_SECURE'], samesite=app.config['SESSION_COOKIE_SAMESITE'])
return response
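# Send an SMS verification code for phone-based login, enforcing per-IP and per-number attempt limits stored in Redis.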
@app.route('/phone_login', methods=['POST', 'GET'])
def phone_login():
if not app.config['USE_PHONE_LOGIN']:
return ('File not found', 404)
form = PhoneLoginForm(request.form)
# next = request.args.get('next', url_for('interview_list'))
if request.method == 'POST' and form.submit.data:
ok = True
if form.validate():
phone_number = form.phone_number.data
if docassemble.base.functions.phone_number_is_valid(phone_number):
phone_number = docassemble.base.functions.phone_number_in_e164(phone_number)
else:
ok = False
else:
ok = False
if ok:
social_id = 'phone$' + str(phone_number)
user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).filter_by(social_id=social_id)).scalar()
if user and user.active is False:
flash(word("Your account has been disabled."), 'error')
return redirect(url_for('phone_login'))
verification_code = random_digits(daconfig['verification code digits'])
message = word("Your verification code is") + " " + str(verification_code) + "."
user_agent = request.headers.get('User-Agent', '')
if detect_mobile.search(user_agent):
message += ' ' + word("You can also follow this link: ") + url_for('phone_login_verify', _external=True, p=phone_number, c=verification_code)
tracker_prefix = 'da:phonelogin:ip:' + str(get_requester_ip(request)) + ':phone:'
tracker_key = tracker_prefix + str(phone_number)
pipe = r.pipeline()
pipe.incr(tracker_key)
pipe.expire(tracker_key, daconfig['ban period'])
pipe.execute()
total_attempts = 0
for key in r.keys(tracker_prefix + '*'):
val = r.get(key.decode())
total_attempts += int(val)
if total_attempts > daconfig['attempt limit']:
logmessage("IP address " + str(get_requester_ip(request)) + " attempted to log in too many times.")
flash(word("You have made too many login attempts."), 'error')
return redirect(url_for('user.login'))
total_attempts = 0
for key in r.keys('da:phonelogin:ip:*:phone:' + phone_number):
val = r.get(key.decode())
total_attempts += int(val)
if total_attempts > daconfig['attempt limit']:
logmessage("Too many attempts were made to log in to phone number " + str(phone_number))
flash(word("You have made too many login attempts."), 'error')
return redirect(url_for('user.login'))
key = 'da:phonelogin:' + str(phone_number) + ':code'
pipe = r.pipeline()
pipe.set(key, verification_code)
pipe.expire(key, daconfig['verification code timeout'])
pipe.execute()
# logmessage("Writing code " + str(verification_code) + " to " + key)
docassemble.base.functions.this_thread.current_info = current_info(req=request)
success = docassemble.base.util.send_sms(to=phone_number, body=message)
if success:
session['phone_number'] = phone_number
return redirect(url_for('phone_login_verify'))
flash(word("There was a problem sending you a text message. Please log in another way."), 'error')
return redirect(url_for('user.login'))
flash(word("Please enter a valid phone number"), 'error')
return render_template('flask_user/phone_login.html', form=form, version_warning=None, title=word("Sign in with your mobile phone"), tab_title=word("Sign In"), page_title=word("Sign in"))
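# Verify the SMS login code; on success, create the phone-based account if necessary, convert temporary-session data, and log the user in.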
@app.route('/pv', methods=['POST', 'GET'])
def phone_login_verify():
if not app.config['USE_PHONE_LOGIN']:
return ('File not found', 404)
phone_number = session.get('phone_number', request.args.get('p', None))
if phone_number is None:
return ('File not found', 404)
form = PhoneLoginVerifyForm(request.form)
form.phone_number.data = phone_number
if 'c' in request.args and 'p' in request.args:
submitted = True
form.verification_code.data = request.args.get('c', None)
else:
submitted = False
if submitted or (request.method == 'POST' and form.submit.data):
if form.validate():
social_id = 'phone$' + str(phone_number)
user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).filter_by(social_id=social_id)).scalar()
if user and user.active is False:
flash(word("Your account has been disabled."), 'error')
return redirect(url_for('phone_login'))
if not user:
user = UserModel(social_id=social_id, nickname=phone_number, active=True)
db.session.add(user)
db.session.commit()
login_user(user, remember=False)
update_last_login(user)
r.delete('da:phonelogin:ip:' + str(get_requester_ip(request)) + ':phone:' + phone_number)
to_convert = []
if 'i' in session: # TEMPORARY
get_session(session['i'])
if 'tempuser' in session:
to_convert.extend(sub_temp_user_dict_key(session['tempuser'], user.id))
if 'sessions' in session:
for filename, info in session['sessions'].items():
if (filename, info['uid']) not in to_convert:
to_convert.append((filename, info['uid']))
save_user_dict_key(info['uid'], filename, priors=True, user=user)
update_session(filename, key_logged=True)
secret = substitute_secret(str(request.cookies.get('secret', None)), pad_to_16(MD5Hash(data=social_id).hexdigest()), user=user, to_convert=to_convert)
response = redirect(url_for('interview_list', from_login='1'))
response.set_cookie('secret', secret, httponly=True, secure=app.config['SESSION_COOKIE_SECURE'], samesite=app.config['SESSION_COOKIE_SAMESITE'])
return response
logmessage("IP address " + str(get_requester_ip(request)) + " made a failed login attempt using phone number " + str(phone_number) + ".")
flash(word("Your verification code is invalid or expired. Please try again."), 'error')
return redirect(url_for('user.login'))
return render_template('flask_user/phone_login_verify.html', form=form, version_warning=None, title=word("Verify your phone"), tab_title=word("Enter code"), page_title=word("Enter code"), description=word("We just sent you a text message with a verification code. Enter the verification code to proceed."))
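# Set up app-based (TOTP) two-factor authentication by displaying a QR code and verifying a code from the user's authenticator app.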
@app.route('/mfa_setup', methods=['POST', 'GET'])
def mfa_setup():
in_login = False
if current_user.is_authenticated:
user = current_user
elif 'validated_user' in session:
in_login = True
user = load_user(session['validated_user'])
else:
return ('File not found', 404)
if not app.config['USE_MFA'] or not user.has_role(*app.config['MFA_ROLES']) or not user.social_id.startswith('local'):
return ('File not found', 404)
form = MFASetupForm(request.form)
if request.method == 'POST' and form.submit.data:
if 'otp_secret' not in session:
return ('File not found', 404)
otp_secret = session['otp_secret']
del session['otp_secret']
supplied_verification_code = re.sub(r'[^0-9]', '', form.verification_code.data)
totp = pyotp.TOTP(otp_secret)
if not totp.verify(supplied_verification_code):
flash(word("Your verification code was invalid."), 'error')
if in_login:
del session['validated_user']
if 'next' in session:
del session['next']
return redirect(url_for('user.login'))
return redirect(url_for('user_profile_page'))
user = load_user(user.id)
user.otp_secret = otp_secret
db.session.commit()
if in_login:
if 'next' in session:
next_url = session['next']
del session['next']
else:
next_url = url_for('interview_list', from_login='1')
return docassemble_flask_user.views._do_login_user(user, next_url, False)
flash(word("You are now set up with two factor authentication."), 'success')
return redirect(url_for('user_profile_page'))
otp_secret = pyotp.random_base32()
if user.email:
the_name = user.email
else:
the_name = re.sub(r'.*\$', '', user.social_id)
the_url = pyotp.totp.TOTP(otp_secret).provisioning_uri(the_name, issuer_name=app.config['APP_NAME'])
im = qrcode.make(the_url, image_factory=qrcode.image.svg.SvgPathImage)
output = BytesIO()
im.save(output)
the_qrcode = output.getvalue().decode()
the_qrcode = re.sub(r"<\?xml version='1.0' encoding='UTF-8'\?>\n", '', the_qrcode)
the_qrcode = re.sub(r'height="[0-9]+mm" ', '', the_qrcode)
the_qrcode = re.sub(r'width="[0-9]+mm" ', '', the_qrcode)
m = re.search(r'(viewBox="[^"]+")', the_qrcode)
if m:
viewbox = ' ' + m.group(1)
else:
viewbox = ''
the_qrcode = '<svg class="damfasvg"' + viewbox + '><g transform="scale(1.0)">' + the_qrcode + '</g></svg>'
session['otp_secret'] = otp_secret
return render_template('flask_user/mfa_setup.html', form=form, version_warning=None, title=word("Two-factor authentication"), tab_title=word("Authentication"), page_title=word("Authentication"), description=word("Scan the barcode with your phone's authenticator app and enter the verification code."), the_qrcode=Markup(the_qrcode), manual_code=otp_secret)
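# Allow a logged-in user to reconfigure, or (when not required by role) disable, two-factor authentication.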
@login_required
@app.route('/mfa_reconfigure', methods=['POST', 'GET'])
def mfa_reconfigure():
setup_translation()
if not app.config['USE_MFA'] or not current_user.has_role(*app.config['MFA_ROLES']) or not current_user.social_id.startswith('local'):
return ('File not found', 404)
user = load_user(current_user.id)
if user.otp_secret is None:
if app.config['MFA_ALLOW_APP'] and (twilio_config is None or not app.config['MFA_ALLOW_SMS']):
return redirect(url_for('mfa_setup'))
if not app.config['MFA_ALLOW_APP']:
return redirect(url_for('mfa_sms_setup'))
return redirect(url_for('mfa_choose'))
form = MFAReconfigureForm(request.form)
if request.method == 'POST':
if form.reconfigure.data:
if app.config['MFA_ALLOW_APP'] and (twilio_config is None or not app.config['MFA_ALLOW_SMS']):
return redirect(url_for('mfa_setup'))
if not app.config['MFA_ALLOW_APP']:
return redirect(url_for('mfa_sms_setup'))
return redirect(url_for('mfa_choose'))
if form.disable.data and not (len(app.config['MFA_REQUIRED_FOR_ROLE']) and current_user.has_role(*app.config['MFA_REQUIRED_FOR_ROLE'])):
user.otp_secret = None
db.session.commit()
flash(word("Your account no longer uses two-factor authentication."), 'success')
return redirect(url_for('user_profile_page'))
if form.cancel.data:
return redirect(url_for('user_profile_page'))
if len(app.config['MFA_REQUIRED_FOR_ROLE']) > 0 and current_user.has_role(*app.config['MFA_REQUIRED_FOR_ROLE']):
return render_template('flask_user/mfa_reconfigure.html', form=form, version_warning=None, title=word("Two-factor authentication"), tab_title=word("Authentication"), page_title=word("Authentication"), allow_disable=False, description=word("Would you like to reconfigure two-factor authentication?"))
return render_template('flask_user/mfa_reconfigure.html', form=form, version_warning=None, title=word("Two-factor authentication"), tab_title=word("Authentication"), page_title=word("Authentication"), allow_disable=True, description=word("Your account already has two-factor authentication enabled. Would you like to reconfigure or disable two-factor authentication?"))
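# Let the user choose between app-based and SMS-based two-factor authentication when both are available.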
@app.route('/mfa_choose', methods=['POST', 'GET'])
def mfa_choose():
in_login = False
if current_user.is_authenticated:
user = current_user
elif 'validated_user' in session:
in_login = True
user = load_user(session['validated_user'])
else:
return ('File not found', 404)
if not app.config['USE_MFA'] or user.is_anonymous or not user.has_role(*app.config['MFA_ROLES']) or not user.social_id.startswith('local'):
return ('File not found', 404)
if app.config['MFA_ALLOW_APP'] and (twilio_config is None or not app.config['MFA_ALLOW_SMS']):
return redirect(url_for('mfa_setup'))
if not app.config['MFA_ALLOW_APP']:
return redirect(url_for('mfa_sms_setup'))
user = load_user(user.id)
form = MFAChooseForm(request.form)
if request.method == 'POST':
if form.sms.data:
return redirect(url_for('mfa_sms_setup'))
if form.auth.data:
return redirect(url_for('mfa_setup'))
if in_login:
del session['validated_user']
if 'next' in session:
del session['next']
return redirect(url_for('user.login'))
return redirect(url_for('user_profile_page'))
return render_template('flask_user/mfa_choose.html', form=form, version_warning=None, title=word("Two-factor authentication"), tab_title=word("Authentication"), page_title=word("Authentication"), description=Markup(word("""Which type of two-factor authentication would you like to use? The first option is to use an authentication app like <a target="_blank" href="https://en.wikipedia.org/wiki/Google_Authenticator">Google Authenticator</a> or <a target="_blank" href="https://authy.com/">Authy</a>. The second option is to receive a text (SMS) message containing a verification code.""")))
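# mfa_sms_setup: collect and validate a phone number, send a verification code
# by SMS, and store the code in Redis with an expiration before verification.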
@app.route('/mfa_sms_setup', methods=['POST', 'GET'])
def mfa_sms_setup():
in_login = False
if current_user.is_authenticated:
user = current_user
elif 'validated_user' in session:
in_login = True
user = load_user(session['validated_user'])
else:
return ('File not found', 404)
if twilio_config is None or not app.config['USE_MFA'] or not user.has_role(*app.config['MFA_ROLES']) or not user.social_id.startswith('local'):
return ('File not found', 404)
form = MFASMSSetupForm(request.form)
user = load_user(user.id)
if request.method == 'GET' and user.otp_secret is not None and user.otp_secret.startswith(':phone:'):
form.phone_number.data = re.sub(r'^:phone:', '', user.otp_secret)
if request.method == 'POST' and form.submit.data:
phone_number = form.phone_number.data
if docassemble.base.functions.phone_number_is_valid(phone_number):
phone_number = docassemble.base.functions.phone_number_in_e164(phone_number)
verification_code = random_digits(daconfig['verification code digits'])
message = word("Your verification code is") + " " + str(verification_code) + "."
success = docassemble.base.util.send_sms(to=phone_number, body=message)
if success:
session['phone_number'] = phone_number
key = 'da:mfa:phone:' + str(phone_number) + ':code'
pipe = r.pipeline()
pipe.set(key, verification_code)
pipe.expire(key, daconfig['verification code timeout'])
pipe.execute()
return redirect(url_for('mfa_verify_sms_setup'))
flash(word("There was a problem sending the text message."), 'error')
if in_login:
del session['validated_user']
if 'next' in session:
del session['next']
return redirect(url_for('user.login'))
return redirect(url_for('user_profile_page'))
flash(word("Invalid phone number."), 'error')
return render_template('flask_user/mfa_sms_setup.html', form=form, version_warning=None, title=word("Two-factor authentication"), tab_title=word("Authentication"), page_title=word("Authentication"), description=word("""Enter your phone number. A confirmation code will be sent to you."""))
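# mfa_verify_sms_setup: compare the code stored in Redis with the user's input;
# on success, record the phone number in otp_secret with the ':phone:' prefix.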
@app.route('/mfa_verify_sms_setup', methods=['POST', 'GET'])
def mfa_verify_sms_setup():
in_login = False
if current_user.is_authenticated:
user = current_user
elif 'validated_user' in session:
in_login = True
user = load_user(session['validated_user'])
else:
return ('File not found', 404)
if 'phone_number' not in session or twilio_config is None or not app.config['USE_MFA'] or not user.has_role(*app.config['MFA_ROLES']) or not user.social_id.startswith('local'):
return ('File not found', 404)
form = MFAVerifySMSSetupForm(request.form)
if request.method == 'POST' and form.submit.data:
phone_number = session['phone_number']
del session['phone_number']
key = 'da:mfa:phone:' + str(phone_number) + ':code'
verification_code = r.get(key)
r.delete(key)
supplied_verification_code = re.sub(r'[^0-9]', '', form.verification_code.data)
if verification_code is None:
flash(word('Your verification code was missing or expired'), 'error')
return redirect(url_for('user_profile_page'))
if verification_code.decode() == supplied_verification_code:
user = load_user(user.id)
user.otp_secret = ':phone:' + phone_number
db.session.commit()
if in_login:
if 'next' in session:
next_url = session['next']
del session['next']
else:
next_url = url_for('interview_list', from_login='1')
return docassemble_flask_user.views._do_login_user(user, next_url, False)
flash(word("You are now set up with two factor authentication."), 'success')
return redirect(url_for('user_profile_page'))
return render_template('flask_user/mfa_verify_sms_setup.html', form=form, version_warning=None, title=word("Two-factor authentication"), tab_title=word("Authentication"), page_title=word("Authentication"), description=word('We just sent you a text message with a verification code. Enter the verification code to proceed.'))
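# mfa_login: second step of the login flow for MFA-enabled accounts; verifies
# an SMS code or a TOTP code from an authenticator app and counts failed
# attempts per IP address when IP banning is enabled.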
@app.route('/mfa_login', methods=['POST', 'GET'])
def mfa_login():
if not app.config['USE_MFA']:
logmessage("mfa_login: two factor authentication not configured")
return ('File not found', 404)
if 'validated_user' not in session:
logmessage("mfa_login: validated_user not in session")
return ('File not found', 404)
user = load_user(session['validated_user'])
if current_user.is_authenticated and current_user.id != user.id:
del session['validated_user']
return ('File not found', 404)
if user is None or user.otp_secret is None or not user.social_id.startswith('local'):
logmessage("mfa_login: user not setup for MFA where validated_user was " + str(session['validated_user']))
return ('File not found', 404)
form = MFALoginForm(request.form)
if not form.next.data:
form.next.data = _get_safe_next_param('next', url_for('interview_list', from_login='1'))
if request.method == 'POST' and form.submit.data:
del session['validated_user']
if 'next' in session:
safe_next = session['next']
del session['next']
else:
safe_next = form.next.data
        # Track failed logins by IP address. Define the key and counter
        # unconditionally so the failure handling below does not raise a
        # NameError when IP banning is disabled.
        fail_key = 'da:failedlogin:ip:' + str(get_requester_ip(request))
        failed_attempts = None
        if BAN_IP_ADDRESSES:
            failed_attempts = r.get(fail_key)
            if failed_attempts is not None and int(failed_attempts) > daconfig['attempt limit']:
                return ('File not found', 404)
supplied_verification_code = re.sub(r'[^0-9]', '', form.verification_code.data)
if user.otp_secret.startswith(':phone:'):
phone_number = re.sub(r'^:phone:', '', user.otp_secret)
key = 'da:mfa:phone:' + str(phone_number) + ':code'
verification_code = r.get(key)
r.delete(key)
if verification_code is None or supplied_verification_code != verification_code.decode():
r.incr(fail_key)
r.expire(fail_key, 86400)
flash(word("Your verification code was invalid or expired."), 'error')
return redirect(url_for('user.login'))
if failed_attempts is not None:
r.delete(fail_key)
else:
totp = pyotp.TOTP(user.otp_secret)
if not totp.verify(supplied_verification_code):
r.incr(fail_key)
r.expire(fail_key, 86400)
flash(word("Your verification code was invalid."), 'error')
if 'validated_user' in session:
del session['validated_user']
if 'next' in session:
return redirect(url_for('user.login', next=session['next']))
return redirect(url_for('user.login'))
if failed_attempts is not None:
r.delete(fail_key)
return docassemble_flask_user.views._do_login_user(user, safe_next, False)
description = word("This account uses two-factor authentication.")
if user.otp_secret.startswith(':phone:'):
description += " " + word("Please enter the verification code from the text message we just sent you.")
else:
description += " " + word("Please enter the verification code from your authentication app.")
return render_template('flask_user/mfa_login.html', form=form, version_warning=None, title=word("Two-factor authentication"), tab_title=word("Authentication"), page_title=word("Authentication"), description=description)
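# manage_account: let a non-admin (or anonymous/temporary) user delete their
# account and interview sessions, then clear session data and cookies.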
@app.route('/user/manage', methods=['POST', 'GET'])
def manage_account():
if (current_user.is_authenticated and current_user.has_roles(['admin'])) or not daconfig.get('user can delete account', True):
abort(403)
if current_user.is_anonymous and not daconfig.get('allow anonymous access', True):
return redirect(url_for('user.login'))
secret = request.cookies.get('secret', None)
if current_user.is_anonymous:
logged_in = False
if 'tempuser' not in session:
return ('File not found', 404)
temp_user_id = int(session['tempuser'])
else:
logged_in = True
delete_shared = daconfig.get('delete account deletes shared', False)
form = ManageAccountForm(request.form)
if request.method == 'POST' and form.validate():
if current_user.is_authenticated:
user_interviews(user_id=current_user.id, secret=secret, exclude_invalid=False, action='delete_all', delete_shared=delete_shared)
the_user_id = current_user.id
logout_user()
delete_user_data(the_user_id, r, r_user)
else:
sessions_to_delete = set()
interview_query = db.session.execute(select(UserDictKeys.filename, UserDictKeys.key).where(UserDictKeys.temp_user_id == temp_user_id).group_by(UserDictKeys.filename, UserDictKeys.key))
for interview_info in interview_query:
sessions_to_delete.add((interview_info.key, interview_info.filename))
for session_id, yaml_filename in sessions_to_delete:
manual_checkout(manual_session_id=session_id, manual_filename=yaml_filename)
reset_user_dict(session_id, yaml_filename, temp_user_id=temp_user_id, force=delete_shared)
delete_temp_user_data(temp_user_id, r)
delete_session_info()
session.clear()
response = redirect(exit_page)
response.set_cookie('remember_token', '', expires=0)
response.set_cookie('visitor_secret', '', expires=0)
response.set_cookie('secret', '', expires=0)
response.set_cookie('session', '', expires=0)
return response
if logged_in:
description = word("""You can delete your account on this page. Type "delete my account" (in lowercase, without the quotes) into the box below and then press the "Delete account" button. This will erase your interview sessions and your user profile. To go back to your user profile page, press the "Cancel" button.""")
else:
description = word("""You can delete your account on this page. Type "delete my account" (in lowercase, without the quotes) into the box below and then press the "Delete account" button. This will erase your interview sessions.""")
return render_template('pages/manage_account.html', form=form, version_warning=None, title=word("Manage account"), tab_title=word("Manage account"), page_title=word("Manage account"), description=description, logged_in=logged_in)
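# get_github_flow: build the OAuth2 web server flow for GitHub integration
# from the client ID and secret in the OAUTH_CREDENTIALS configuration.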
def get_github_flow():
app_credentials = current_app.config['OAUTH_CREDENTIALS'].get('github', {})
client_id = app_credentials.get('id', None)
client_secret = app_credentials.get('secret', None)
if client_id is None or client_secret is None:
raise DAError('GitHub integration is not configured')
flow = oauth2client.client.OAuth2WebServerFlow(
client_id=client_id,
client_secret=client_secret,
scope='repo admin:public_key read:user user:email read:org',
redirect_uri=url_for('github_oauth_callback', _external=True),
auth_uri='https://github.com/login/oauth/authorize',
token_uri='https://github.com/login/oauth/access_token',
access_type='offline',
prompt='consent')
return flow
def delete_ssh_keys():
area = SavedFile(current_user.id, fix=True, section='playgroundpackages')
area.delete_file('.ssh-private')
area.delete_file('.ssh-public')
# area.delete_file('.ssh_command.sh')
area.finalize()
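# get_ssh_keys: return the paths of the user's SSH key pair in the playground
# packages area, generating a 4096-bit RSA key pair if the files do not exist.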
def get_ssh_keys(email):
area = SavedFile(current_user.id, fix=True, section='playgroundpackages')
private_key_file = os.path.join(area.directory, '.ssh-private')
public_key_file = os.path.join(area.directory, '.ssh-public')
    if not (os.path.isfile(private_key_file) and os.path.isfile(public_key_file)):
key = RSA.generate(4096)
pubkey = key.publickey()
area.write_content(key.exportKey('PEM').decode(), filename=private_key_file, save=False)
pubkey_text = pubkey.exportKey('OpenSSH').decode() + " " + str(email) + "\n"
area.write_content(pubkey_text, filename=public_key_file, save=False)
area.finalize()
return (private_key_file, public_key_file)
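# get_next_link: extract the 'next' URL from a GitHub API Link header so that
# paginated responses can be followed.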
def get_next_link(resp):
if 'link' in resp and resp['link']:
link_info = links_from_header.extract(resp['link'])
if 'next' in link_info:
return link_info['next']
return None
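# github_menu: settings page for GitHub integration; lets developers enable or
# disable the integration and choose whether shared and organization
# repositories are available.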
@app.route('/github_menu', methods=['POST', 'GET'])
@login_required
@roles_required(['admin', 'developer'])
def github_menu():
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
if not app.config['USE_GITHUB']:
return ('File not found', 404)
setup_translation()
form = GitHubForm(request.form)
if request.method == 'POST':
if form.configure.data:
r.delete('da:github:userid:' + str(current_user.id))
return redirect(url_for('github_configure'))
if form.unconfigure.data:
return redirect(url_for('github_unconfigure'))
if form.cancel.data:
return redirect(url_for('user_profile_page'))
if form.save.data:
info = {}
info['shared'] = bool(form.shared.data)
info['orgs'] = bool(form.orgs.data)
r.set('da:using_github:userid:' + str(current_user.id), json.dumps(info))
flash(word("Your GitHub settings were saved."), 'info')
uses_github = r.get('da:using_github:userid:' + str(current_user.id))
if uses_github is not None:
uses_github = uses_github.decode()
if uses_github == '1':
form.shared.data = True
form.orgs.data = True
else:
info = json.loads(uses_github)
form.shared.data = info['shared']
form.orgs.data = info['orgs']
description = word("Your GitHub integration is currently turned on. Below, you can change which repositories docassemble can access. You can disable GitHub integration if you no longer wish to use it.")
else:
description = word("If you have a GitHub account, you can turn on GitHub integration. This will allow you to use GitHub as a version control system for packages from inside the Playground.")
return render_template('pages/github.html', form=form, version_warning=None, title=word("GitHub Integration"), tab_title=word("GitHub"), page_title=word("GitHub"), description=description, uses_github=uses_github, bodyclass='daadminbody')
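# github_configure: complete the OAuth flow if needed, find the user's public
# GitHub email address, and install this server's SSH public key on the GitHub
# account unless a matching key is already present.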
@app.route('/github_configure', methods=['POST', 'GET'])
@login_required
@roles_required(['admin', 'developer'])
def github_configure():
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
if not app.config['USE_GITHUB']:
return ('File not found', 404)
setup_translation()
storage = RedisCredStorage(oauth_app='github')
credentials = storage.get()
if not credentials or credentials.invalid:
state_string = random_string(16)
session['github_next'] = json.dumps({'state': state_string, 'path': 'github_configure', 'arguments': request.args})
flow = get_github_flow()
uri = flow.step1_get_authorize_url(state=state_string)
return redirect(uri)
http = credentials.authorize(httplib2.Http())
found = False
try:
resp, content = http.request("https://api.github.com/user/emails", "GET")
assert int(resp['status']) == 200
except:
r.delete('da:github:userid:' + str(current_user.id))
r.delete('da:using_github:userid:' + str(current_user.id))
flash(word("There was a problem connecting to GitHub. Please check your GitHub configuration and try again."), 'danger')
return redirect(url_for('github_menu'))
user_info_list = json.loads(content.decode())
user_info = None
for item in user_info_list:
if item.get('email', None) and item.get('visibility', None) != 'private':
user_info = item
if user_info is None:
logmessage("github_configure: could not get information about user")
r.delete('da:github:userid:' + str(current_user.id))
r.delete('da:using_github:userid:' + str(current_user.id))
flash(word("There was a problem connecting to GitHub. Please check your GitHub configuration and try again."), 'danger')
return redirect(url_for('github_menu'))
try:
resp, content = http.request("https://api.github.com/user/keys", "GET")
assert int(resp['status']) == 200
for key in json.loads(content.decode()):
if key['title'] == app.config['APP_NAME'] or key['title'] == app.config['APP_NAME'] + '_user_' + str(current_user.id):
found = True
except:
logmessage("github_configure: could not get information about ssh keys")
r.delete('da:github:userid:' + str(current_user.id))
r.delete('da:using_github:userid:' + str(current_user.id))
flash(word("There was a problem connecting to GitHub. Please check your GitHub configuration and try again."), 'danger')
return redirect(url_for('github_menu'))
while found is False:
next_link = get_next_link(resp)
if next_link:
resp, content = http.request(next_link, "GET")
if int(resp['status']) == 200:
for key in json.loads(content.decode()):
if key['title'] == app.config['APP_NAME'] or key['title'] == app.config['APP_NAME'] + '_user_' + str(current_user.id):
found = True
else:
r.delete('da:github:userid:' + str(current_user.id))
r.delete('da:using_github:userid:' + str(current_user.id))
flash(word("There was a problem connecting to GitHub. Please check your GitHub configuration and try again."), 'danger')
return redirect(url_for('github_menu'))
else:
break
if found:
flash(word("An SSH key is already installed on your GitHub account. The existing SSH key will not be replaced. Note that if you are connecting to GitHub from multiple docassemble servers, each server needs to have a different appname in the Configuration. If you have problems using GitHub, disable the integration and configure it again."), 'info')
if not found:
(private_key_file, public_key_file) = get_ssh_keys(user_info['email']) # pylint: disable=unused-variable
with open(public_key_file, 'r', encoding='utf-8') as fp:
public_key = fp.read()
headers = {'Content-Type': 'application/json'}
body = json.dumps({'title': app.config['APP_NAME'] + '_user_' + str(current_user.id), 'key': public_key})
resp, content = http.request("https://api.github.com/user/keys", "POST", headers=headers, body=body)
if int(resp['status']) == 201:
flash(word("GitHub integration was successfully configured."), 'info')
else:
logmessage("github_configure: error setting public key")
r.delete('da:github:userid:' + str(current_user.id))
r.delete('da:using_github:userid:' + str(current_user.id))
flash(word("There was a problem connecting to GitHub. Please check your GitHub configuration and try again."), 'danger')
return redirect(url_for('github_menu'))
r.set('da:using_github:userid:' + str(current_user.id), json.dumps({'shared': True, 'orgs': True}))
return redirect(url_for('github_menu'))
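# github_unconfigure: remove this server's SSH keys from the GitHub account and
# clear the Redis keys that record the integration for the current user.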
@app.route('/github_unconfigure', methods=['POST', 'GET'])
@login_required
@roles_required(['admin', 'developer'])
def github_unconfigure():
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
if not app.config['USE_GITHUB']:
return ('File not found', 404)
setup_translation()
storage = RedisCredStorage(oauth_app='github')
credentials = storage.get()
if not credentials or credentials.invalid:
state_string = random_string(16)
session['github_next'] = json.dumps({'state': state_string, 'path': 'github_unconfigure', 'arguments': request.args})
flow = get_github_flow()
uri = flow.step1_get_authorize_url(state=state_string)
return redirect(uri)
http = credentials.authorize(httplib2.Http())
ids_to_remove = []
try:
resp, content = http.request("https://api.github.com/user/keys", "GET")
if int(resp['status']) == 200:
for key in json.loads(content.decode()):
if key['title'] == app.config['APP_NAME'] or key['title'] == app.config['APP_NAME'] + '_user_' + str(current_user.id):
ids_to_remove.append(key['id'])
else:
raise DAError("github_configure: could not get information about ssh keys")
while True:
next_link = get_next_link(resp)
if next_link:
resp, content = http.request(next_link, "GET")
if int(resp['status']) == 200:
for key in json.loads(content.decode()):
if key['title'] == app.config['APP_NAME'] or key['title'] == app.config['APP_NAME'] + '_user_' + str(current_user.id):
ids_to_remove.append(key['id'])
else:
raise DAError("github_unconfigure: could not get additional information about ssh keys")
else:
break
for id_to_remove in ids_to_remove:
resp, content = http.request("https://api.github.com/user/keys/" + str(id_to_remove), "DELETE")
if int(resp['status']) != 204:
raise DAError("github_unconfigure: error deleting public key " + str(id_to_remove) + ": " + str(resp['status']) + " content: " + content.decode())
except:
logmessage("Error deleting SSH keys on GitHub")
delete_ssh_keys()
r.delete('da:github:userid:' + str(current_user.id))
r.delete('da:using_github:userid:' + str(current_user.id))
flash(word("GitHub integration was successfully disconnected."), 'info')
return redirect(url_for('user_profile_page'))
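# github_oauth_callback: OAuth2 redirect target; checks the state value saved
# in the session, exchanges the authorization code for credentials, stores them
# in Redis, and returns to the page that started the flow.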
@app.route('/github_oauth_callback', methods=['POST', 'GET'])
@login_required
@roles_required(['admin', 'developer'])
def github_oauth_callback():
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
setup_translation()
failed = False
do_redirect = False
if not app.config['USE_GITHUB']:
logmessage('github_oauth_callback: server does not use github')
failed = True
elif 'github_next' not in session:
logmessage('github_oauth_callback: next not in session')
failed = True
if failed is False:
github_next = json.loads(session['github_next'])
del session['github_next']
if 'code' not in request.args or 'state' not in request.args:
logmessage('github_oauth_callback: code and state not in args')
failed = True
do_redirect = True
elif request.args['state'] != github_next['state']:
logmessage('github_oauth_callback: state did not match')
failed = True
if failed:
r.delete('da:github:userid:' + str(current_user.id))
r.delete('da:using_github:userid:' + str(current_user.id))
if do_redirect:
flash(word("There was a problem connecting to GitHub. Please check your GitHub configuration and try again."), 'danger')
return redirect(url_for('github_menu'))
return ('File not found', 404)
flow = get_github_flow()
credentials = flow.step2_exchange(request.args['code'])
storage = RedisCredStorage(oauth_app='github')
storage.put(credentials)
    return redirect(url_for(github_next['path'], **github_next['arguments']))
@app.route('/user/google-sign-in')
def google_page():
return render_template('flask_user/google_login.html', version_warning=None, title=word("Sign In"), tab_title=word("Sign In"), page_title=word("Sign in"))
@app.route("/user/post-sign-in", methods=['GET'])
def post_sign_in():
return redirect(url_for('interview_list', from_login='1'))
@app.route("/leave", methods=['GET'])
def leave():
the_exit_page = None
if 'next' in request.args and request.args['next'] != '':
try:
the_exit_page = decrypt_phrase(repad(bytearray(request.args['next'], encoding='utf-8')).decode(), app.secret_key)
except:
pass
if the_exit_page is None:
the_exit_page = exit_page
# if current_user.is_authenticated:
# flask_user.signals.user_logged_out.send(current_app._get_current_object(), user=current_user)
# logout_user()
# delete_session_for_interview(i=request.args.get('i', None))
# delete_session_info()
# response = redirect(exit_page)
# response.set_cookie('remember_token', '', expires=0)
# response.set_cookie('visitor_secret', '', expires=0)
# response.set_cookie('secret', '', expires=0)
# response.set_cookie('session', '', expires=0)
# return response
return redirect(the_exit_page)
@app.route("/restart_session", methods=['GET'])
def restart_session():
yaml_filename = request.args.get('i', None)
if yaml_filename is None:
return redirect(url_for('index'))
session_info = get_session(yaml_filename)
if session_info is None:
return redirect(url_for('index'))
session_id = session_info['uid']
manual_checkout(manual_filename=yaml_filename)
if 'visitor_secret' in request.cookies:
secret = request.cookies['visitor_secret']
else:
secret = request.cookies.get('secret', None)
if secret is not None:
secret = str(secret)
if current_user.is_authenticated:
temp_session_uid = current_user.email
elif 'tempuser' in session:
temp_session_uid = 't' + str(session['tempuser'])
else:
temp_session_uid = random_string(16)
docassemble.base.functions.this_thread.current_info = current_info(yaml=yaml_filename, req=request, interface='vars', device_id=request.cookies.get('ds', None), session_uid=temp_session_uid)
try:
steps, user_dict, is_encrypted = fetch_user_dict(session_id, yaml_filename, secret=secret) # pylint: disable=unused-variable
except:
return redirect(url_for('index', i=yaml_filename))
url_args = user_dict['url_args']
url_args['reset'] = '1'
url_args['i'] = yaml_filename
return redirect(url_for('index', **url_args))
@app.route("/new_session", methods=['GET'])
def new_session_endpoint():
yaml_filename = request.args.get('i', None)
if yaml_filename is None:
return redirect(url_for('index'))
manual_checkout(manual_filename=yaml_filename)
url_args = {'i': yaml_filename, 'new_session': '1'}
return redirect(url_for('index', **url_args))
@app.route("/exit", methods=['GET'])
def exit_endpoint():
the_exit_page = None
if 'next' in request.args and request.args['next'] != '':
try:
the_exit_page = decrypt_phrase(repad(bytearray(request.args['next'], encoding='utf-8')).decode(), app.secret_key)
except:
pass
if the_exit_page is None:
the_exit_page = exit_page
yaml_filename = request.args.get('i', None)
if yaml_filename is not None:
session_info = get_session(yaml_filename)
if session_info is not None:
manual_checkout(manual_filename=yaml_filename)
reset_user_dict(session_info['uid'], yaml_filename)
delete_session_for_interview(i=yaml_filename)
return redirect(the_exit_page)
@app.route("/exit_logout", methods=['GET'])
def exit_logout():
the_exit_page = None
if 'next' in request.args and request.args['next'] != '':
try:
the_exit_page = decrypt_phrase(repad(bytearray(request.args['next'], encoding='utf-8')).decode(), app.secret_key)
except:
pass
if the_exit_page is None:
the_exit_page = exit_page
yaml_filename = request.args.get('i', guess_yaml_filename())
if yaml_filename is not None:
session_info = get_session(yaml_filename)
if session_info is not None:
manual_checkout(manual_filename=yaml_filename)
reset_user_dict(session_info['uid'], yaml_filename)
if current_user.is_authenticated:
docassemble_flask_user.signals.user_logged_out.send(current_app._get_current_object(), user=current_user)
logout_user()
session.clear()
response = redirect(the_exit_page)
response.set_cookie('remember_token', '', expires=0)
response.set_cookie('visitor_secret', '', expires=0)
response.set_cookie('secret', '', expires=0)
response.set_cookie('session', '', expires=0)
return response
@app.route("/cleanup_sessions", methods=['GET'])
def cleanup_sessions():
kv_session.cleanup_sessions()
return render_template('base_templates/blank.html')
@app.route("/health_status", methods=['GET'])
def health_status():
ok = True
if request.args.get('ready', False):
if not os.path.isfile(READY_FILE):
ok = False
return jsonify({'ok': ok, 'server_start_time': START_TIME, 'version': da_version})
@app.route("/health_check", methods=['GET'])
def health_check():
if request.args.get('ready', False):
if not os.path.isfile(READY_FILE):
return ('', 400)
response = make_response(render_template('pages/health_check.html', content="OK"), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
@app.route("/checkout", methods=['POST'])
def checkout():
try:
manual_checkout(manual_filename=request.args['i'])
except:
return jsonify(success=False)
return jsonify(success=True)
@app.route("/restart_ajax", methods=['POST'])
@login_required
@roles_required(['admin', 'developer'])
def restart_ajax():
if not app.config['ALLOW_RESTARTING']:
return ('File not found', 404)
# logmessage("restart_ajax: action is " + str(request.form.get('action', None)))
# if current_user.has_role('admin', 'developer'):
# logmessage("restart_ajax: user has permission")
# else:
# logmessage("restart_ajax: user has no permission")
if request.form.get('action', None) == 'restart' and current_user.has_role('admin', 'developer'):
logmessage("restart_ajax: restarting")
restart_all()
return jsonify(success=True)
return jsonify(success=False)
class ChatPartners:
pass
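# get_current_chat_log: load the live help chat log for a session from SQL,
# decrypting encrypted messages and caching user records to avoid repeated
# database lookups.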
def get_current_chat_log(yaml_filename, session_id, secret, utc=True, timezone=None):
if timezone is None:
timezone = get_default_timezone()
timezone = zoneinfo.ZoneInfo(timezone)
output = []
if yaml_filename is None or session_id is None:
return output
user_cache = {}
for record in db.session.execute(select(ChatLog).where(and_(ChatLog.filename == yaml_filename, ChatLog.key == session_id)).order_by(ChatLog.id)).scalars():
if record.encrypted:
try:
message = decrypt_phrase(record.message, secret)
except:
logmessage("get_current_chat_log: Could not decrypt phrase with secret " + secret)
continue
else:
message = unpack_phrase(record.message)
# if record.temp_owner_id:
# owner_first_name = None
# owner_last_name = None
# owner_email = None
# elif record.owner_id in user_cache:
# owner_first_name = user_cache[record.owner_id].first_name
# owner_last_name = user_cache[record.owner_id].last_name
# owner_email = user_cache[record.owner_id].email
# else:
# logmessage("get_current_chat_log: Invalid owner ID in chat log")
# continue
if record.temp_user_id:
user_first_name = None
user_last_name = None
user_email = None
elif record.user_id in user_cache:
user_first_name = user_cache[record.user_id].first_name
user_last_name = user_cache[record.user_id].last_name
user_email = user_cache[record.user_id].email
else:
new_user = get_user_object(record.user_id)
if new_user is None:
logmessage("get_current_chat_log: Invalid user ID in chat log")
continue
user_cache[record.user_id] = new_user
user_first_name = user_cache[record.user_id].first_name
user_last_name = user_cache[record.user_id].last_name
user_email = user_cache[record.user_id].email
if utc:
the_datetime = record.modtime.replace(tzinfo=tz.tzutc())
else:
the_datetime = record.modtime.replace(tzinfo=tz.tzutc()).astimezone(timezone)
output.append({'message': message, 'datetime': the_datetime, 'user_email': user_email, 'user_first_name': user_first_name, 'user_last_name': user_last_name})
return output
def jsonify_with_cache(*pargs, **kwargs):
response = jsonify(*pargs, **kwargs)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
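# checkin: AJAX endpoint polled by the browser during an interview; it runs
# background actions, updates live help chat status and partner matching in
# Redis, reports call forwarding codes, and returns results of finished
# background tasks.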
@app.route("/checkin", methods=['POST', 'GET'])
def checkin():
yaml_filename = request.args.get('i', None)
if yaml_filename is None:
return jsonify_with_cache(success=False)
session_info = get_session(yaml_filename)
if session_info is None:
return jsonify_with_cache(success=False)
session_id = session_info['uid']
if 'visitor_secret' in request.cookies:
secret = request.cookies['visitor_secret']
else:
secret = request.cookies.get('secret', None)
if secret is not None:
secret = str(secret)
if current_user.is_anonymous:
if 'tempuser' not in session:
return jsonify_with_cache(success=False)
the_user_id = 't' + str(session['tempuser'])
auth_user_id = None
temp_user_id = int(session['tempuser'])
elif current_user.is_authenticated:
auth_user_id = current_user.id
the_user_id = current_user.id
temp_user_id = None
else:
return jsonify_with_cache(success=True, action='reload')
the_current_info = current_info(yaml=yaml_filename, req=request, action=None, session_info=session_info, secret=secret, device_id=request.cookies.get('ds', None))
docassemble.base.functions.this_thread.current_info = the_current_info
if request.form.get('action', None) == 'chat_log':
# logmessage("checkin: fetch_user_dict1")
steps, user_dict, is_encrypted = fetch_user_dict(session_id, yaml_filename, secret=secret)
if user_dict is None or user_dict['_internal']['livehelp']['availability'] != 'available':
return jsonify_with_cache(success=False)
the_current_info['encrypted'] = is_encrypted
messages = get_chat_log(user_dict['_internal']['livehelp']['mode'], yaml_filename, session_id, auth_user_id, temp_user_id, secret, auth_user_id, temp_user_id)
return jsonify_with_cache(success=True, messages=messages)
if request.form.get('action', None) == 'checkin':
commands = []
checkin_code = request.form.get('checkinCode', None)
do_action = request.form.get('do_action', None)
# logmessage("in checkin")
if do_action is not None:
parameters = {}
form_parameters = request.form.get('parameters', None)
if form_parameters is not None:
parameters = json.loads(form_parameters)
# logmessage("Action was " + str(do_action) + " and parameters were " + repr(parameters))
obtain_lock(session_id, yaml_filename)
# logmessage("checkin: fetch_user_dict2")
steps, user_dict, is_encrypted = fetch_user_dict(session_id, yaml_filename, secret=secret)
the_current_info['encrypted'] = is_encrypted
interview = docassemble.base.interview_cache.get_interview(yaml_filename)
interview_status = docassemble.base.parse.InterviewStatus(current_info=the_current_info)
interview_status.checkin = True
interview.assemble(user_dict, interview_status=interview_status)
interview_status.current_info.update({'action': do_action, 'arguments': parameters})
interview.assemble(user_dict, interview_status=interview_status)
if interview_status.question.question_type == "backgroundresponse":
the_response = interview_status.question.backgroundresponse
if isinstance(the_response, dict) and 'pargs' in the_response and isinstance(the_response['pargs'], list) and len(the_response['pargs']) == 2 and the_response['pargs'][1] in ('javascript', 'flash', 'refresh', 'fields'):
if the_response['pargs'][1] == 'refresh':
commands.append({'action': do_action, 'value': None, 'extra': the_response['pargs'][1]})
else:
commands.append({'action': do_action, 'value': docassemble.base.functions.safe_json(the_response['pargs'][0]), 'extra': the_response['pargs'][1]})
elif isinstance(the_response, list) and len(the_response) == 2 and the_response[1] in ('javascript', 'flash', 'refresh', 'fields'):
commands.append({'action': do_action, 'value': docassemble.base.functions.safe_json(the_response[0]), 'extra': the_response[1]})
elif isinstance(the_response, str) and the_response == 'refresh':
commands.append({'action': do_action, 'value': docassemble.base.functions.safe_json(None), 'extra': 'refresh'})
else:
commands.append({'action': do_action, 'value': docassemble.base.functions.safe_json(the_response), 'extra': 'backgroundresponse'})
elif interview_status.question.question_type == "template" and interview_status.question.target is not None:
commands.append({'action': do_action, 'value': {'target': interview_status.question.target, 'content': docassemble.base.util.markdown_to_html(interview_status.questionText, trim=True)}, 'extra': 'backgroundresponse'})
save_user_dict(session_id, user_dict, yaml_filename, secret=secret, encrypt=is_encrypted, steps=steps)
release_lock(session_id, yaml_filename)
peer_ok = False
help_ok = False
num_peers = 0
help_available = 0
session_info = get_session(yaml_filename)
old_chatstatus = session_info['chatstatus']
chatstatus = request.form.get('chatstatus', 'off')
if old_chatstatus != chatstatus:
update_session(yaml_filename, chatstatus=chatstatus)
obj = {'chatstatus': chatstatus, 'i': yaml_filename, 'uid': session_id, 'userid': the_user_id}
key = 'da:session:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)
call_forwarding_on = False
forwarding_phone_number = None
if twilio_config is not None:
forwarding_phone_number = twilio_config['name']['default'].get('number', None)
if forwarding_phone_number is not None:
call_forwarding_on = True
call_forwarding_code = None
call_forwarding_message = None
if call_forwarding_on:
for call_key in r.keys(re.sub(r'^da:session:uid:', 'da:phonecode:monitor:*:uid:', key)):
call_key = call_key.decode()
call_forwarding_code = r.get(call_key)
if call_forwarding_code is not None:
call_forwarding_code = call_forwarding_code.decode()
other_value = r.get('da:callforward:' + call_forwarding_code)
if other_value is None:
r.delete(call_key)
continue
other_value = other_value.decode()
remaining_seconds = r.ttl(call_key)
if remaining_seconds > 30:
call_forwarding_message = '<span class="daphone-message"><i class="fas fa-phone"></i> ' + word('To reach an advocate who can assist you, call') + ' <a class="daphone-number" href="tel:' + str(forwarding_phone_number) + '">' + str(forwarding_phone_number) + '</a> ' + word("and enter the code") + ' <span class="daphone-code">' + str(call_forwarding_code) + '</span>.</span>'
break
chat_session_key = 'da:interviewsession:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)
potential_partners = []
if str(chatstatus) != 'off': # in ('waiting', 'standby', 'ringing', 'ready', 'on', 'hangup', 'observeonly'):
# logmessage("checkin: fetch_user_dict3")
steps, user_dict, is_encrypted = fetch_user_dict(session_id, yaml_filename, secret=secret)
the_current_info['encrypted'] = is_encrypted
if user_dict is None:
logmessage("checkin: error accessing dictionary for %s and %s" % (session_id, yaml_filename))
return jsonify_with_cache(success=False)
obj['chatstatus'] = chatstatus
obj['secret'] = secret
obj['encrypted'] = is_encrypted
obj['mode'] = user_dict['_internal']['livehelp']['mode']
if obj['mode'] in ('peer', 'peerhelp'):
peer_ok = True
if obj['mode'] in ('help', 'peerhelp'):
help_ok = True
obj['partner_roles'] = user_dict['_internal']['livehelp']['partner_roles']
if current_user.is_authenticated:
for attribute in ('email', 'confirmed_at', 'first_name', 'last_name', 'country', 'subdivisionfirst', 'subdivisionsecond', 'subdivisionthird', 'organization', 'timezone', 'language'):
obj[attribute] = str(getattr(current_user, attribute, None))
else:
obj['temp_user_id'] = temp_user_id
if help_ok and len(obj['partner_roles']) and not r.exists('da:block:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)):
pipe = r.pipeline()
for role in obj['partner_roles']:
role_key = 'da:chat:roletype:' + str(role)
pipe.set(role_key, 1)
pipe.expire(role_key, 2592000)
pipe.execute()
for role in obj['partner_roles']:
for the_key in r.keys('da:monitor:role:' + role + ':userid:*'):
user_id = re.sub(r'^.*:userid:', '', the_key.decode())
if user_id not in potential_partners:
potential_partners.append(user_id)
for the_key in r.keys('da:monitor:chatpartners:*'):
user_id = re.sub(r'^.*chatpartners:', '', the_key.decode())
if user_id not in potential_partners:
for chat_key in r.hgetall(the_key):
if chat_key.decode() == chat_session_key:
potential_partners.append(user_id)
if len(potential_partners) > 0:
if chatstatus == 'ringing':
lkey = 'da:ready:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)
# logmessage("Writing to " + str(lkey))
pipe = r.pipeline()
failure = True
for user_id in potential_partners:
for the_key in r.keys('da:monitor:available:' + str(user_id)):
pipe.rpush(lkey, the_key.decode())
failure = False
if peer_ok:
for the_key in r.keys('da:interviewsession:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:*'):
the_key = the_key.decode()
if the_key != chat_session_key:
pipe.rpush(lkey, the_key)
failure = False
if failure:
if peer_ok:
chatstatus = 'ready'
else:
chatstatus = 'waiting'
update_session(yaml_filename, chatstatus=chatstatus)
obj['chatstatus'] = chatstatus
else:
pipe.expire(lkey, 60)
pipe.execute()
chatstatus = 'ready'
update_session(yaml_filename, chatstatus=chatstatus)
obj['chatstatus'] = chatstatus
elif chatstatus == 'on':
if len(potential_partners) > 0:
already_connected_to_help = False
for user_id in potential_partners:
for the_key in r.hgetall('da:monitor:chatpartners:' + str(user_id)):
if the_key.decode() == chat_session_key:
already_connected_to_help = True
if not already_connected_to_help:
for user_id in potential_partners:
mon_sid = r.get('da:monitor:available:' + str(user_id))
if mon_sid is None:
continue
mon_sid = mon_sid.decode()
int_sid = r.get('da:interviewsession:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id))
if int_sid is None:
continue
int_sid = int_sid.decode()
r.publish(mon_sid, json.dumps({'messagetype': 'chatready', 'uid': session_id, 'i': yaml_filename, 'userid': the_user_id, 'secret': secret, 'sid': int_sid}))
r.publish(int_sid, json.dumps({'messagetype': 'chatpartner', 'sid': mon_sid}))
break
if chatstatus in ('waiting', 'hangup'):
chatstatus = 'standby'
update_session(yaml_filename, chatstatus=chatstatus)
obj['chatstatus'] = chatstatus
else:
if peer_ok:
if chatstatus == 'ringing':
lkey = 'da:ready:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)
pipe = r.pipeline()
failure = True
for the_key in r.keys('da:interviewsession:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:*'):
the_key = the_key.decode()
if the_key != chat_session_key:
pipe.rpush(lkey, the_key)
failure = False
if not failure:
pipe.expire(lkey, 6000)
pipe.execute()
chatstatus = 'ready'
update_session(yaml_filename, chatstatus=chatstatus)
obj['chatstatus'] = chatstatus
elif chatstatus in ('waiting', 'hangup'):
chatstatus = 'standby'
update_session(yaml_filename, chatstatus=chatstatus)
obj['chatstatus'] = chatstatus
else:
if chatstatus in ('standby', 'ready', 'ringing', 'hangup'):
chatstatus = 'waiting'
update_session(yaml_filename, chatstatus=chatstatus)
obj['chatstatus'] = chatstatus
if peer_ok:
for sess_key in r.keys('da:session:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:*'):
if sess_key.decode() != key:
num_peers += 1
help_available = len(potential_partners)
html_key = 'da:html:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)
if old_chatstatus != chatstatus:
html = r.get(html_key)
if html is not None:
html_obj = json.loads(html.decode())
if 'browser_title' in html_obj:
obj['browser_title'] = html_obj['browser_title']
obj['blocked'] = bool(r.exists('da:block:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)))
r.publish('da:monitor', json.dumps({'messagetype': 'sessionupdate', 'key': key, 'session': obj}))
else:
logmessage("checkin: the html was not found at " + str(html_key))
pipe = r.pipeline()
pipe.set(key, pickle.dumps(obj))
pipe.expire(key, 60)
pipe.expire(html_key, 60)
pipe.execute()
ocontrol_key = 'da:control:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)
ocontrol = r.get(ocontrol_key)
        observer_control = ocontrol is not None
parameters = request.form.get('raw_parameters', None)
if parameters is not None:
key = 'da:input:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)
r.publish(key, parameters)
worker_key = 'da:worker:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)
worker_len = r.llen(worker_key)
if worker_len > 0:
workers_inspected = 0
while workers_inspected <= worker_len:
worker_id = r.lpop(worker_key)
if worker_id is not None:
try:
result = docassemble.webapp.worker.workerapp.AsyncResult(id=worker_id)
if result.ready():
if isinstance(result.result, ReturnValue):
commands.append({'value': docassemble.base.functions.safe_json(result.result.value), 'extra': result.result.extra})
else:
r.rpush(worker_key, worker_id)
except Exception as errstr:
logmessage("checkin: got error " + str(errstr))
r.rpush(worker_key, worker_id)
workers_inspected += 1
if peer_ok or help_ok:
return jsonify_with_cache(success=True, chat_status=chatstatus, num_peers=num_peers, help_available=help_available, phone=call_forwarding_message, observerControl=observer_control, commands=commands, checkin_code=checkin_code)
return jsonify_with_cache(success=True, chat_status=chatstatus, phone=call_forwarding_message, observerControl=observer_control, commands=commands, checkin_code=checkin_code)
return jsonify_with_cache(success=False)
@app.before_request
def setup_variables():
# logmessage("Request on " + str(os.getpid()) + " " + str(threading.current_thread().ident) + " for " + request.path + " at " + time.strftime("%Y-%m-%d %H:%M:%S"))
# g.request_start_time = time.time()
# docassemble.base.functions.reset_thread_variables()
docassemble.base.functions.reset_local_variables()
@app.after_request
def apply_security_headers(response):
if app.config['SESSION_COOKIE_SECURE']:
response.headers['Strict-Transport-Security'] = 'max-age=31536000'
if 'embed' in g:
return response
response.headers["X-Content-Type-Options"] = 'nosniff'
response.headers["X-XSS-Protection"] = '1'
if daconfig.get('allow embedding', False) is not True:
response.headers["X-Frame-Options"] = 'SAMEORIGIN'
response.headers["Content-Security-Policy"] = "frame-ancestors 'self';"
elif daconfig.get('cross site domains', []):
response.headers["Content-Security-Policy"] = "frame-ancestors 'self' " + ' '.join(daconfig['cross site domains']) + ';'
return response
# @app.after_request
# def print_time_of_request(response):
# time_spent = time.time() - g.request_start_time
# logmessage("Request on " + str(os.getpid()) + " " + str(threading.current_thread().ident) + " complete after " + str("%.5fs" % time_spent))
# if time_spent > 3.0:
# if hasattr(g, 'start_index'):
# logmessage("Duration to beginning: %fs" % (g.start_index - g.request_start_time))
# if hasattr(g, 'got_dict'):
# logmessage("Duration to getting dictionary: %fs" % (g.got_dict - g.request_start_time))
# if hasattr(g, 'before_interview'):
# logmessage("Duration to before interview: %fs" % (g.before_interview - g.request_start_time))
# if hasattr(g, 'after_interview'):
# logmessage("Duration to after interview: %fs" % (g.after_interview - g.request_start_time))
# if hasattr(g, 'status_created'):
# logmessage("Duration to status: %fs" % (g.status_created - g.request_start_time))
# if hasattr(g, 'assembly_start'):
# logmessage("Duration to assembly start: %fs" % (g.assembly_start - g.request_start_time))
# if hasattr(g, 'assembly_end'):
# logmessage("Duration to assembly end: %fs" % (g.assembly_end - g.request_start_time))
# logmessage("Duration to end of request: %fs" % time_spent)
# if hasattr(g, 'interview') and hasattr(g, 'interview_status'):
# logmessage(to_text(get_history(g.interview, g.interview_status)))
# return response
# @app.before_request
# def setup_celery():
# docassemble.webapp.worker.workerapp.set_current()
# @app.before_request
# def before_request():
# docassemble.base.functions.reset_thread_variables()
# docassemble.base.functions.reset_local_variables()
# g.request_start_time = time.time()
# g.request_time = lambda: "%.5fs" % (time.time() - g.request_start_time)
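# get_variables: return the current interview session's variables as JSON,
# unless variable access has been disabled for the interview (and the server
# is not in debug mode).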
@app.route("/vars", methods=['POST', 'GET'])
def get_variables():
yaml_filename = request.args.get('i', None)
if yaml_filename is None:
return ("Invalid request", 400)
session_info = get_session(yaml_filename)
if session_info is None:
return ("Invalid request", 400)
session_id = session_info['uid']
if 'visitor_secret' in request.cookies:
secret = request.cookies['visitor_secret']
else:
secret = request.cookies.get('secret', None)
if secret is not None:
secret = str(secret)
# session_cookie_id = request.cookies.get('session', None)
if session_id is None or yaml_filename is None:
return jsonify(success=False)
# logmessage("get_variables: fetch_user_dict")
docassemble.base.functions.this_thread.current_info = current_info(yaml=yaml_filename, req=request, interface='vars', device_id=request.cookies.get('ds', None))
try:
steps, user_dict, is_encrypted = fetch_user_dict(session_id, yaml_filename, secret=secret)
assert user_dict is not None
except:
return jsonify(success=False)
if (not DEBUG) and '_internal' in user_dict and 'misc' in user_dict['_internal'] and 'variable_access' in user_dict['_internal']['misc'] and user_dict['_internal']['misc']['variable_access'] is False:
return jsonify(success=False)
variables = docassemble.base.functions.serializable_dict(user_dict, include_internal=True)
# variables['_internal'] = docassemble.base.functions.serializable_dict(user_dict['_internal'])
return jsonify(success=True, variables=variables, steps=steps, encrypted=is_encrypted, uid=session_id, i=yaml_filename)
@app.route("/", methods=['GET'])
def rootindex():
# setup_translation()
if current_user.is_anonymous and not daconfig.get('allow anonymous access', True):
return redirect(url_for('user.login'))
url = daconfig.get('root redirect url', None)
if url is not None:
return redirect(url)
yaml_filename = request.args.get('i', None)
if yaml_filename is None:
if 'default interview' not in daconfig and len(daconfig['dispatch']):
return redirect(url_for('interview_start'))
yaml_filename = final_default_yaml_filename
if COOKIELESS_SESSIONS:
return html_index()
the_args = {}
for key, val in request.args.items():
the_args[key] = val
the_args['i'] = yaml_filename
request.args = the_args
return index(refer=['root'])
def title_converter(content, part, status):
if part in ('exit link', 'exit url', 'title url', 'title url opens in other window'):
return content
if part in ('title', 'subtitle', 'short title', 'tab title', 'exit label', 'back button label', 'corner back button label', 'logo', 'short logo', 'navigation bar html'):
return docassemble.base.util.markdown_to_html(content, status=status, trim=True, do_terms=False)
return docassemble.base.util.markdown_to_html(content, status=status)
@app.route("/test_embed", methods=['GET'])
@login_required
@roles_required(['admin', 'developer'])
def test_embed():
setup_translation()
yaml_filename = request.args.get('i', final_default_yaml_filename)
user_dict = fresh_dictionary()
interview = docassemble.base.interview_cache.get_interview(yaml_filename)
the_current_info = current_info(yaml=yaml_filename, req=request, action=None, location=None, interface='web', device_id=request.cookies.get('ds', None))
docassemble.base.functions.this_thread.current_info = the_current_info
interview_status = docassemble.base.parse.InterviewStatus(current_info=the_current_info)
try:
interview.assemble(user_dict, interview_status)
except:
pass
current_language = docassemble.base.functions.get_language()
page_title = word("Embed test")
start_part = standard_html_start(interview_language=current_language, debug=False, bootstrap_theme=interview_status.question.interview.get_bootstrap_theme(), external=True, page_title=page_title, social=daconfig['social'], yaml_filename=yaml_filename) + global_css + additional_css(interview_status)
scripts = standard_scripts(interview_language=current_language, external=True) + additional_scripts(interview_status, yaml_filename) + global_js
response = make_response(render_template('pages/test_embed.html', scripts=scripts, start_part=start_part, interview_url=url_for('index', i=yaml_filename, js_target='dablock', _external=True), page_title=page_title), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
@app.route("/launch", methods=['GET'])
def launch():
# setup_translation()
if COOKIELESS_SESSIONS:
return html_index()
code = request.args.get('c', None)
if code is None:
abort(403)
the_key = 'da:resume_interview:' + str(code)
data = r.get(the_key)
if data is None:
raise DAError(word("The link has expired."), code=403)
data = json.loads(data.decode())
if data.get('once', False):
r.delete(the_key)
if 'url_args' in data:
args = data['url_args']
else:
args = {}
for key, val in request.args.items():
if key not in ('session', 'c'):
args[key] = val
args['i'] = data['i']
if 'session' in data:
delete_session_for_interview(data['i'])
session['alt_session'] = [data['i'], data['session']]
else:
args['new_session'] = '1'
request.args = args
return index(refer=['launch'])
@app.route("/resume", methods=['POST'])
@csrf.exempt
def resume():
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
if 'session' not in post_data or 'i' not in post_data:
abort(403)
update_session(post_data['i'], uid=post_data['session'])
del post_data['session']
if 'ajax' in post_data:
ajax_value = int(post_data['ajax'])
del post_data['ajax']
if ajax_value:
return jsonify(action='redirect', url=url_for('index', **post_data), csrf_token=generate_csrf())
return redirect(url_for('index', **post_data))
def json64unquote(text):
try:
return json.loads(myb64unquote(text))
except:
return {}
def tidy_action(action):
result = {}
if not isinstance(action, dict):
return result
if 'action' in action:
result['action'] = action['action']
if 'arguments' in action:
result['arguments'] = action['arguments']
return result
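# make_response_wrapper: return a callback that attaches the secret and
# device-id cookies to an outgoing response when they need to be set.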
def make_response_wrapper(set_cookie, secret, set_device_id, device_id, expire_visitor_secret):
def the_wrapper(response):
if set_cookie:
response.set_cookie('secret', secret, httponly=True, secure=app.config['SESSION_COOKIE_SECURE'], samesite=app.config['SESSION_COOKIE_SAMESITE'])
if expire_visitor_secret:
response.set_cookie('visitor_secret', '', expires=0)
if set_device_id:
response.set_cookie('ds', device_id, httponly=True, secure=app.config['SESSION_COOKIE_SECURE'], samesite=app.config['SESSION_COOKIE_SAMESITE'], expires=datetime.datetime.now() + datetime.timedelta(weeks=520))
return the_wrapper
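# populate_social: merge interview metadata into the social media meta tag
# settings, dropping null entries and escaping characters that are unsafe in
# HTML attribute values.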
def populate_social(social, metadata):
for key in ('image', 'description'):
if key in metadata:
if metadata[key] is None:
if key in social:
del social[key]
elif isinstance(metadata[key], str):
                social[key] = metadata[key].replace('\n', ' ').replace('"', '&quot;').strip()
for key in ('og', 'fb', 'twitter'):
if key in metadata and isinstance(metadata[key], dict):
for subkey, val in metadata[key].items():
if val is None:
if subkey in social[key]:
del social[key][subkey]
elif isinstance(val, str):
                    social[key][subkey] = val.replace('\n', ' ').replace('"', '&quot;').strip()
if COOKIELESS_SESSIONS:
index_path = '/i'
html_index_path = '/interview'
else:
index_path = '/interview'
html_index_path = '/i'
def refresh_or_continue(interview, post_data):
return_val = False
try:
if interview.questions_by_name[post_data['_question_name']].fields[0].choices[int(post_data['X211bHRpcGxlX2Nob2ljZQ'])]['key'].question_type in ('refresh', 'continue'):
return_val = True
except:
pass
return return_val
def update_current_info_with_session_info(the_current_info, session_info):
if session_info is not None:
user_code = session_info['uid']
encrypted = session_info['encrypted']
else:
user_code = None
encrypted = True
the_current_info.update({'session': user_code, 'encrypted': encrypted})
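# index: the main interview endpoint; resolves the interview file and session,
# processes actions and posted form data, and produces the next screen.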
@app.route(index_path, methods=['POST', 'GET'])
def index(action_argument=None, refer=None):
# if refer is None and request.method == 'GET':
# setup_translation()
is_ajax = bool(request.method == 'POST' and 'ajax' in request.form and int(request.form['ajax']))
docassemble.base.functions.this_thread.misc['call'] = refer
return_fake_html = False
if (request.method == 'POST' and 'json' in request.form and as_int(request.form['json'])) or ('json' in request.args and as_int(request.args['json'])):
the_interface = 'json'
is_json = True
is_js = False
js_target = False
elif 'js_target' in request.args and request.args['js_target'] != '':
the_interface = 'web'
is_json = False
docassemble.base.functions.this_thread.misc['jsembed'] = request.args['js_target']
if is_ajax:
js_target = False
else:
js_target = request.args['js_target']
is_js = True
else:
the_interface = 'web'
is_json = False
is_js = False
js_target = False
if current_user.is_anonymous:
if 'tempuser' not in session:
new_temp_user = TempUser()
db.session.add(new_temp_user)
db.session.commit()
session['tempuser'] = new_temp_user.id
elif not current_user.is_authenticated:
response = do_redirect(url_for('user.login'), is_ajax, is_json, js_target)
response.set_cookie('remember_token', '', expires=0)
response.set_cookie('visitor_secret', '', expires=0)
response.set_cookie('secret', '', expires=0)
response.set_cookie('session', '', expires=0)
return response
elif 'user_id' not in session:
session['user_id'] = current_user.id
expire_visitor_secret = False
if 'visitor_secret' in request.cookies:
if 'session' in request.args:
secret = request.cookies.get('secret', None)
expire_visitor_secret = True
else:
secret = request.cookies['visitor_secret']
else:
secret = request.cookies.get('secret', None)
use_cache = int(request.args.get('cache', 1))
reset_interview = int(request.args.get('reset', 0))
new_interview = int(request.args.get('new_session', 0))
if secret is None:
secret = random_string(16)
set_cookie = True
set_device_id = True
else:
secret = str(secret)
set_cookie = False
set_device_id = False
device_id = request.cookies.get('ds', None)
if device_id is None:
device_id = random_string(16)
set_device_id = True
steps = 1
need_to_reset = False
if 'i' not in request.args and 'state' in request.args:
try:
yaml_filename = re.sub(r'\^.*', '', from_safeid(request.args['state']))
except:
yaml_filename = guess_yaml_filename()
else:
yaml_filename = request.args.get('i', guess_yaml_filename())
if yaml_filename is None:
if current_user.is_anonymous and not daconfig.get('allow anonymous access', True):
logmessage("Redirecting to login because no YAML filename provided and no anonymous access is allowed.")
return redirect(url_for('user.login'))
if len(daconfig['dispatch']) > 0:
logmessage("Redirecting to dispatch page because no YAML filename provided.")
return redirect(url_for('interview_start'))
yaml_filename = final_default_yaml_filename
action = None
if '_action' in request.form and 'in error' not in session:
action = tidy_action(json64unquote(request.form['_action']))
no_defs = True
elif 'action' in request.args and 'in error' not in session:
action = tidy_action(json64unquote(request.args['action']))
no_defs = True
elif action_argument:
action = tidy_action(action_argument)
no_defs = False
else:
no_defs = False
disregard_input = not bool(request.method == 'POST' and not no_defs)
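    # Copy the posted form data (if input should be processed), identify the user
    # id to attribute changes to, and capture any browser-reported location.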
if disregard_input:
post_data = {}
else:
post_data = request.form.copy()
if current_user.is_anonymous:
the_user_id = 't' + str(session['tempuser'])
else:
the_user_id = current_user.id
if '_track_location' in post_data and post_data['_track_location']:
the_location = json.loads(post_data['_track_location'])
else:
the_location = None
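    # Look up the existing session for this interview and build the current_info
    # structure that the interview logic will see.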
session_info = get_session(yaml_filename)
session_parameter = request.args.get('session', None)
the_current_info = current_info(yaml=yaml_filename, req=request, action=None, location=the_location, interface=the_interface, session_info=session_info, secret=secret, device_id=device_id)
docassemble.base.functions.this_thread.current_info = the_current_info
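    # If there is no session yet, or a reset/new session was requested, start or
    # restart an interview session, enforcing login and access restrictions.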
if session_info is None or reset_interview or new_interview:
was_new = True
if 'alt_session' in session and yaml_filename == session['alt_session'][0]:
session_parameter = session['alt_session'][1]
del session['alt_session']
if (PREVENT_DEMO) and (yaml_filename.startswith('docassemble.base:') or yaml_filename.startswith('docassemble.demo:')) and (current_user.is_anonymous or not (current_user.has_role('admin', 'developer') or current_user.can_do('demo_interviews'))):
raise DAError(word("Not authorized"), code=403)
if current_user.is_anonymous and not daconfig.get('allow anonymous access', True):
logmessage("Redirecting to login because no anonymous access allowed.")
return redirect(url_for('user.login', next=url_for('index', **request.args)))
if yaml_filename.startswith('docassemble.playground'):
if not app.config['ENABLE_PLAYGROUND']:
raise DAError(word("Not authorized"), code=403)
else:
yaml_filename = re.sub(r':([^\/]+)$', r':data/questions/\1', yaml_filename)
docassemble.base.functions.this_thread.current_info['yaml_filename'] = yaml_filename
show_flash = False
interview = docassemble.base.interview_cache.get_interview(yaml_filename)
if session_info is None and request.args.get('from_list', None) is None and not yaml_filename.startswith("docassemble.playground") and not yaml_filename.startswith("docassemble.base") and not yaml_filename.startswith("docassemble.demo") and SHOW_LOGIN and not new_interview and len(session['sessions']) > 0:
show_flash = True
if current_user.is_authenticated and current_user.has_role('admin', 'developer', 'advocate'):
show_flash = False
if session_parameter is None:
if show_flash:
if current_user.is_authenticated:
# word("Starting a new interview. To go back to your previous interview, go to My Interviews on the menu.")
message = "Starting a new interview. To go back to your previous interview, go to My Interviews on the menu."
else:
# word("Starting a new interview. To go back to your previous interview, log in to see a list of your interviews.")
message = "Starting a new interview. To go back to your previous interview, log in to see a list of your interviews."
if reset_interview and session_info is not None:
reset_user_dict(session_info['uid'], yaml_filename)
unique_sessions = interview.consolidated_metadata.get('sessions are unique', False)
if unique_sessions is not False and not current_user.is_authenticated:
delete_session_for_interview(yaml_filename)
flash(word("You need to be logged in to access this interview."), "info")
logmessage("Redirecting to login because sessions are unique.")
return redirect(url_for('user.login', next=url_for('index', **request.args)))
if interview.consolidated_metadata.get('temporary session', False):
if session_info is not None:
reset_user_dict(session_info['uid'], yaml_filename)
if current_user.is_authenticated:
while True:
session_id, encrypted = get_existing_session(yaml_filename, secret)
if session_id:
reset_user_dict(session_id, yaml_filename)
else:
break
the_current_info['session'] = session_id
the_current_info['encrypted'] = encrypted
reset_interview = 1
if current_user.is_anonymous:
if (not interview.allowed_to_initiate(is_anonymous=True)) or (not interview.allowed_to_access(is_anonymous=True)):
delete_session_for_interview(yaml_filename)
flash(word("You need to be logged in to access this interview."), "info")
logmessage("Redirecting to login because anonymous user not allowed to access this interview.")
return redirect(url_for('user.login', next=url_for('index', **request.args)))
elif not interview.allowed_to_initiate(has_roles=[role.name for role in current_user.roles]):
delete_session_for_interview(yaml_filename)
raise DAError(word("You are not allowed to access this interview."), code=403)
elif not interview.allowed_to_access(has_roles=[role.name for role in current_user.roles]):
raise DAError(word('You are not allowed to access this interview.'), code=403)
session_id = None
if reset_interview == 2:
delete_session_sessions()
if (not reset_interview) and (unique_sessions is True or (isinstance(unique_sessions, list) and len(unique_sessions) and current_user.has_role(*unique_sessions))):
session_id, encrypted = get_existing_session(yaml_filename, secret)
if session_id is None:
user_code, user_dict = reset_session(yaml_filename, secret)
add_referer(user_dict)
save_user_dict(user_code, user_dict, yaml_filename, secret=secret)
release_lock(user_code, yaml_filename)
need_to_reset = True
session_info = get_session(yaml_filename)
update_current_info_with_session_info(the_current_info, session_info)
else:
unique_sessions = interview.consolidated_metadata.get('sessions are unique', False)
if unique_sessions is not False and not current_user.is_authenticated:
delete_session_for_interview(yaml_filename)
session['alt_session'] = [yaml_filename, session_parameter]
flash(word("You need to be logged in to access this interview."), "info")
logmessage("Redirecting to login because sessions are unique.")
return redirect(url_for('user.login', next=url_for('index', **request.args)))
if current_user.is_anonymous:
if (not interview.allowed_to_initiate(is_anonymous=True)) or (not interview.allowed_to_access(is_anonymous=True)):
delete_session_for_interview(yaml_filename)
session['alt_session'] = [yaml_filename, session_parameter]
flash(word("You need to be logged in to access this interview."), "info")
logmessage("Redirecting to login because anonymous user not allowed to access this interview.")
return redirect(url_for('user.login', next=url_for('index', **request.args)))
elif not interview.allowed_to_initiate(has_roles=[role.name for role in current_user.roles]):
delete_session_for_interview(yaml_filename)
raise DAError(word("You are not allowed to access this interview."), code=403)
elif not interview.allowed_to_access(has_roles=[role.name for role in current_user.roles]):
raise DAError(word('You are not allowed to access this interview.'), code=403)
if reset_interview:
reset_user_dict(session_parameter, yaml_filename)
if reset_interview == 2:
delete_session_sessions()
user_code, user_dict = reset_session(yaml_filename, secret)
add_referer(user_dict)
save_user_dict(user_code, user_dict, yaml_filename, secret=secret)
release_lock(user_code, yaml_filename)
session_info = get_session(yaml_filename)
update_current_info_with_session_info(the_current_info, session_info)
need_to_reset = True
else:
session_info = update_session(yaml_filename, uid=session_parameter)
update_current_info_with_session_info(the_current_info, session_info)
need_to_reset = True
if show_flash:
if current_user.is_authenticated:
# word("Entering a different interview. To go back to your previous interview, go to My Interviews on the menu.")
message = "Entering a different interview. To go back to your previous interview, go to My Interviews on the menu."
else:
# word("Entering a different interview. To go back to your previous interview, log in to see a list of your interviews.")
message = "Entering a different interview. To go back to your previous interview, log in to see a list of your interviews."
if show_flash:
flash(word(message), 'info')
else:
was_new = False
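        # An existing session was found; switch to an explicitly requested session
        # if one was given in the URL.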
if session_parameter is not None and not need_to_reset:
session_info = update_session(yaml_filename, uid=session_parameter)
update_current_info_with_session_info(the_current_info, session_info)
need_to_reset = True
user_code = session_info['uid']
encrypted = session_info['encrypted']
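    # Lock the session and fetch the stored interview answers (the user dict),
    # starting over if they cannot be retrieved.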
obtain_lock(user_code, yaml_filename)
try:
steps, user_dict, is_encrypted = fetch_user_dict(user_code, yaml_filename, secret=secret)
except Exception as the_err:
try:
logmessage("index: there was an exception " + str(the_err.__class__.__name__) + ": " + str(the_err) + " after fetch_user_dict with %s and %s, so we need to reset" % (user_code, yaml_filename))
except:
logmessage("index: there was an exception " + str(the_err.__class__.__name__) + " after fetch_user_dict with %s and %s, so we need to reset" % (user_code, yaml_filename))
release_lock(user_code, yaml_filename)
logmessage("index: dictionary fetch failed")
clear_session(yaml_filename)
if session_parameter is not None:
redirect_url = daconfig.get('session error redirect url', None)
if isinstance(redirect_url, str) and redirect_url:
redirect_url = redirect_url.format(i=urllibquote(yaml_filename), error=urllibquote('answers_fetch_fail'))
logmessage("Session error because failure to get user dictionary.")
return do_redirect(redirect_url, is_ajax, is_json, js_target)
logmessage("Redirecting back to index because of failure to get user dictionary.")
response = do_redirect(url_for('index', i=yaml_filename), is_ajax, is_json, js_target)
if session_parameter is not None:
flash(word("Unable to retrieve interview session. Starting a new session instead."), "error")
return response
if user_dict is None:
logmessage("index: no user_dict found after fetch_user_dict with %s and %s, so we need to reset" % (user_code, yaml_filename))
release_lock(user_code, yaml_filename)
logmessage("index: dictionary fetch returned no results")
clear_session(yaml_filename)
redirect_url = daconfig.get('session error redirect url', None)
if isinstance(redirect_url, str) and redirect_url:
redirect_url = redirect_url.format(i=urllibquote(yaml_filename), error=urllibquote('answers_missing'))
logmessage("Session error because user dictionary was None.")
return do_redirect(redirect_url, is_ajax, is_json, js_target)
logmessage("Redirecting back to index because user dictionary was None.")
response = do_redirect(url_for('index', i=yaml_filename), is_ajax, is_json, js_target)
flash(word("Unable to locate interview session. Starting a new session instead."), "error")
return response
if encrypted != is_encrypted:
update_session(yaml_filename, encrypted=is_encrypted)
encrypted = is_encrypted
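    # Encryption is switched off for multi-user interviews and back on for
    # single-user interviews, keeping the stored answers and session record in sync.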
if user_dict.get('multi_user', False) is True and encrypted is True:
encrypted = False
update_session(yaml_filename, encrypted=encrypted)
decrypt_session(secret, user_code=user_code, filename=yaml_filename)
if user_dict.get('multi_user', False) is False and encrypted is False:
encrypt_session(secret, user_code=user_code, filename=yaml_filename)
encrypted = True
update_session(yaml_filename, encrypted=encrypted)
the_current_info['encrypted'] = encrypted
if not session_info['key_logged']:
save_user_dict_key(user_code, yaml_filename)
update_session(yaml_filename, key_logged=True)
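    # Record any non-reserved URL arguments in url_args, noting whether they
    # actually changed.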
url_args_changed = False
if len(request.args) > 0:
for argname in request.args:
if argname in reserved_argnames:
continue
if not url_args_changed:
old_url_args = copy.deepcopy(user_dict['url_args'])
url_args_changed = True
user_dict['url_args'][argname] = request.args.get(argname)
if url_args_changed:
if old_url_args == user_dict['url_args']:
url_args_changed = False
index_params = {'i': yaml_filename}
if analytics_configured:
for argname in request.args:
if argname in ('utm_source', 'utm_medium', 'utm_campaign', 'utm_term', 'utm_content'):
index_params[argname] = request.args[argname]
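    # If the session changed or cookies need to be set, optionally refresh the
    # interview source and prepare a wrapper that attaches the appropriate cookies
    # to the outgoing response.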
if need_to_reset or set_device_id:
if use_cache == 0:
docassemble.base.parse.interview_source_from_string(yaml_filename).update_index()
response_wrapper = make_response_wrapper(set_cookie, secret, set_device_id, device_id, expire_visitor_secret)
else:
response_wrapper = None
interview = docassemble.base.interview_cache.get_interview(yaml_filename)
interview_status = docassemble.base.parse.InterviewStatus(current_info=the_current_info, tracker=user_dict['_internal']['tracker'])
old_user_dict = None
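    # Handle the Back button: if the question allows it, restore the previous
    # step's answers and discard the posted input.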
if '_back_one' in post_data and steps > 1:
ok_to_go_back = True
if STRICT_MODE:
interview.assemble(user_dict, interview_status=interview_status)
if not interview_status.question.can_go_back:
ok_to_go_back = False
if ok_to_go_back:
action = None
the_current_info = current_info(yaml=yaml_filename, req=request, action=action, location=the_location, interface=the_interface, session_info=session_info, secret=secret, device_id=device_id)
docassemble.base.functions.this_thread.current_info = the_current_info
old_user_dict = user_dict
steps, user_dict, is_encrypted = fetch_previous_user_dict(user_code, yaml_filename, secret)
if encrypted != is_encrypted:
encrypted = is_encrypted
update_session(yaml_filename, encrypted=encrypted)
the_current_info['encrypted'] = encrypted
interview_status = docassemble.base.parse.InterviewStatus(current_info=the_current_info, tracker=user_dict['_internal']['tracker'])
post_data = {}
disregard_input = True
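    # Decode the hidden bookkeeping fields that map encoded field names to real
    # variable names and record which fields were actually visible on screen.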
known_varnames = {}
all_invisible = False
if '_varnames' in post_data:
known_varnames = json.loads(myb64unquote(post_data['_varnames']))
if '_visible' in post_data and post_data['_visible'] != "":
visible_field_names = json.loads(myb64unquote(post_data['_visible']))
if len(visible_field_names) == 0 and '_question_name' in post_data and len(known_varnames) > 0:
all_invisible = True
else:
visible_field_names = []
known_varnames_visible = {}
for key, val in known_varnames.items():
if key in visible_field_names:
known_varnames_visible[key] = val
all_field_numbers = {}
field_numbers = {}
numbered_fields = {}
visible_fields = set()
raw_visible_fields = set()
for field_name in visible_field_names:
try:
m = re.search(r'(.*)(\[[^\]]+\])$', from_safeid(field_name))
if m:
if safeid(m.group(1)) in known_varnames:
visible_fields.add(safeid(from_safeid(known_varnames[safeid(m.group(1))]) + m.group(2)))
except:
pass
raw_visible_fields.add(field_name)
if field_name in known_varnames:
visible_fields.add(known_varnames[field_name])
else:
visible_fields.add(field_name)
for kv_key, kv_var in known_varnames.items():
try:
field_identifier = myb64unquote(kv_key)
m = re.search(r'_field(?:_[0-9]+)?_([0-9]+)', field_identifier)
if m:
numbered_fields[kv_var] = kv_key
if kv_key in raw_visible_fields or kv_var in raw_visible_fields:
field_numbers[kv_var] = int(m.group(1))
m = re.search(r'_field_((?:[0-9]+_)?[0-9]+)', field_identifier)
if m:
if kv_var not in all_field_numbers:
all_field_numbers[kv_var] = set()
if '_' in m.group(1):
all_field_numbers[kv_var].add(m.group(1))
else:
all_field_numbers[kv_var].add(int(m.group(1)))
except:
logmessage("index: error where kv_key is " + str(kv_key) + " and kv_var is " + str(kv_var))
list_collect_list = None
if not STRICT_MODE:
if '_list_collect_list' in post_data:
the_list = json.loads(myb64unquote(post_data['_list_collect_list']))
if not illegal_variable_name(the_list):
list_collect_list = the_list
exec(list_collect_list + '._allow_appending()', user_dict)
if '_checkboxes' in post_data:
checkbox_fields = json.loads(myb64unquote(post_data['_checkboxes'])) # post_data['_checkboxes'].split(",")
for checkbox_field, checkbox_value in checkbox_fields.items():
if checkbox_field in visible_fields and checkbox_field not in post_data and not (checkbox_field in numbered_fields and numbered_fields[checkbox_field] in post_data):
post_data.add(checkbox_field, checkbox_value)
if '_empties' in post_data:
empty_fields = json.loads(myb64unquote(post_data['_empties']))
for empty_field in empty_fields:
if empty_field not in post_data:
post_data.add(empty_field, 'None')
else:
empty_fields = {}
if '_ml_info' in post_data:
ml_info = json.loads(myb64unquote(post_data['_ml_info']))
else:
ml_info = {}
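    # Compare the tracker value posted with the form against the one stored in the
    # answers to detect whether the interview has moved on since the question was
    # shown.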
something_changed = False
if '_tracker' in post_data and re.search(r'^-?[0-9]+$', post_data['_tracker']) and user_dict['_internal']['tracker'] != int(post_data['_tracker']):
if user_dict['_internal']['tracker'] > int(post_data['_tracker']):
logmessage("index: the assemble function has been run since the question was posed.")
else:
logmessage("index: the tracker in the dictionary is behind the tracker in the question.")
something_changed = True
user_dict['_internal']['tracker'] = max(int(post_data['_tracker']), user_dict['_internal']['tracker'])
interview_status.tracker = user_dict['_internal']['tracker']
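    # Decide whether the interview must be assembled before the posted values can
    # be processed (object and numeric datatypes, or variables that require
    # pre-assembly).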
should_assemble = False
known_datatypes = {}
if not STRICT_MODE:
if '_datatypes' in post_data:
known_datatypes = json.loads(myb64unquote(post_data['_datatypes']))
for data_type in known_datatypes.values():
if data_type.startswith('object') or data_type in ('integer', 'float', 'currency', 'number'):
should_assemble = True
if not should_assemble:
for key in post_data:
if key.startswith('_') or key in ('csrf_token', 'ajax', 'json', 'informed'):
continue
try:
the_key = from_safeid(key)
if the_key.startswith('_field_'):
if key in known_varnames:
if not (known_varnames[key] in post_data and post_data[known_varnames[key]] != '' and post_data[key] == ''):
the_key = from_safeid(known_varnames[key])
else:
m = re.search(r'^(_field(?:_[0-9]+)?_[0-9]+)(\[.*\])', key)
if m:
base_orig_key = safeid(m.group(1))
if base_orig_key in known_varnames:
the_key = myb64unquote(known_varnames[base_orig_key]) + m.group(2)
if key_requires_preassembly.search(the_key):
if the_key == '_multiple_choice' and '_question_name' in post_data:
if refresh_or_continue(interview, post_data):
continue
should_assemble = True
break
except Exception as the_err:
logmessage("index: bad key was " + str(key) + " and error was " + the_err.__class__.__name__)
try:
logmessage("index: bad key error message was " + str(the_err))
except:
pass
if not interview.from_cache and len(interview.mlfields):
ensure_training_loaded(interview)
debug_mode = interview.debug
vars_set = set()
old_values = {}
new_values = {}
if ('_email_attachments' in post_data and '_attachment_email_address' in post_data) or '_download_attachments' in post_data:
should_assemble = True
error_messages = []
already_assembled = False
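    # Assemble the interview now if necessary, and discard the posted input if it
    # does not correspond to the question currently being asked.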
if (STRICT_MODE and not disregard_input) or should_assemble or something_changed:
interview.assemble(user_dict, interview_status=interview_status)
already_assembled = True
if STRICT_MODE and ('_question_name' not in post_data or post_data['_question_name'] != interview_status.question.name):
if refresh_or_continue(interview, post_data) is False and action is None and len([key for key in post_data if not (key.startswith('_') or key in ('csrf_token', 'ajax', 'json', 'informed'))]) > 0:
error_messages.append(("success", word("Input not processed. Please try again.")))
post_data = {}
disregard_input = True
elif should_assemble and '_question_name' in post_data and post_data['_question_name'] != interview_status.question.name:
logmessage("index: not the same question name: " + str(post_data['_question_name']) + " versus " + str(interview_status.question.name))
if REQUIRE_IDEMPOTENT:
error_messages.append(("success", word("Input not processed because the question changed. Please continue.")))
post_data = {}
disregard_input = True
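    # In strict mode, derive datatypes, checkbox defaults, empty fields, and the
    # set of variables the question is allowed to set from the assembled question
    # itself rather than from hidden form fields.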
if STRICT_MODE and not disregard_input:
field_info = interview_status.get_field_info()
known_datatypes = field_info['datatypes']
list_collect_list = field_info['list_collect_list']
if list_collect_list is not None:
exec(list_collect_list + '._allow_appending()', user_dict)
for checkbox_field, checkbox_value in field_info['checkboxes'].items():
if checkbox_field in visible_fields and checkbox_field not in post_data and not (checkbox_field in numbered_fields and numbered_fields[checkbox_field] in post_data):
for k, v in known_varnames_visible.items():
if v == checkbox_field:
checkbox_field = k
break
post_data.add(checkbox_field, checkbox_value)
empty_fields = field_info['hiddens']
for empty_field in empty_fields:
if empty_field not in post_data:
post_data.add(empty_field, 'None')
ml_info = field_info['ml_info']
authorized_fields = [from_safeid(field.saveas) for field in interview_status.get_fields_and_sub_fields_and_collect_fields(user_dict) if hasattr(field, 'saveas')]
if 'allowed_to_set' in interview_status.extras:
authorized_fields.extend(interview_status.extras['allowed_to_set'])
if interview_status.question.question_type == "multiple_choice":
authorized_fields.append('_multiple_choice')
authorized_fields = set(authorized_fields).union(interview_status.get_all_fields_used(user_dict))
if interview_status.extras.get('list_collect_is_final', False) and interview_status.extras['list_collect'].auto_gather:
if interview_status.extras['list_collect'].ask_number:
authorized_fields.add(interview_status.extras['list_collect'].instanceName + ".target_number")
else:
authorized_fields.add(interview_status.extras['list_collect'].instanceName + ".there_is_another")
else:
if STRICT_MODE:
empty_fields = []
authorized_fields = set()
changed = False
if '_null_question' in post_data or all_invisible:
changed = True
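    # Handle a request to e-mail the question's attachments, queuing the e-mail
    # through the background worker.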
if '_email_attachments' in post_data and '_attachment_email_address' in post_data:
success = False
attachment_email_address = post_data['_attachment_email_address'].strip()
if '_attachment_include_editable' in post_data:
include_editable = bool(post_data['_attachment_include_editable'] == 'True')
del post_data['_attachment_include_editable']
else:
include_editable = False
del post_data['_email_attachments']
del post_data['_attachment_email_address']
if len(interview_status.attachments) > 0:
attached_file_count = 0
attachment_info = []
for the_attachment in interview_status.attachments:
file_formats = []
if 'pdf' in the_attachment['valid_formats'] or '*' in the_attachment['valid_formats']:
file_formats.append('pdf')
if include_editable or 'pdf' not in file_formats:
if 'rtf' in the_attachment['valid_formats'] or '*' in the_attachment['valid_formats']:
file_formats.append('rtf')
if 'docx' in the_attachment['valid_formats']:
file_formats.append('docx')
if 'rtf to docx' in the_attachment['valid_formats']:
file_formats.append('rtf to docx')
if 'md' in the_attachment['valid_formats']:
file_formats.append('md')
if 'raw' in the_attachment['valid_formats']:
file_formats.append('raw')
for the_format in file_formats:
if the_format == 'raw':
attachment_info.append({'filename': str(the_attachment['filename']) + the_attachment['raw'], 'number': the_attachment['file'][the_format], 'mimetype': the_attachment['mimetype'][the_format], 'attachment': the_attachment})
else:
attachment_info.append({'filename': str(the_attachment['filename']) + '.' + str(docassemble.base.parse.extension_of_doc_format[the_format]), 'number': the_attachment['file'][the_format], 'mimetype': the_attachment['mimetype'][the_format], 'attachment': the_attachment})
attached_file_count += 1
worker_key = 'da:worker:uid:' + str(user_code) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)
for email_address in re.split(r' *[,;] *', attachment_email_address):
try:
result = docassemble.webapp.worker.email_attachments.delay(user_code, email_address, attachment_info, docassemble.base.functions.get_language(), subject=interview_status.extras.get('email_subject', None), body=interview_status.extras.get('email_body', None), html=interview_status.extras.get('email_html', None), config=interview.consolidated_metadata.get('email config', None))
r.rpush(worker_key, result.id)
success = True
except Exception as errmess:
success = False
logmessage("index: failed with " + str(errmess))
break
if success:
flash(word("Your documents will be e-mailed to") + " " + str(attachment_email_address) + ".", 'success')
else:
flash(word("Unable to e-mail your documents to") + " " + str(attachment_email_address) + ".", 'error')
else:
flash(word("Unable to find documents to e-mail."), 'error')
if '_download_attachments' in post_data:
success = False
if '_attachment_include_editable' in post_data:
include_editable = bool(post_data['_attachment_include_editable'] == 'True')
del post_data['_attachment_include_editable']
else:
include_editable = False
del post_data['_download_attachments']
if len(interview_status.attachments) > 0:
attached_file_count = 0
files_to_zip = []
if 'zip_filename' in interview_status.extras and interview_status.extras['zip_filename']:
zip_file_name = interview_status.extras['zip_filename']
else:
zip_file_name = 'file.zip'
for the_attachment in interview_status.attachments:
file_formats = []
if 'pdf' in the_attachment['valid_formats'] or '*' in the_attachment['valid_formats']:
file_formats.append('pdf')
if include_editable or 'pdf' not in file_formats:
if 'rtf' in the_attachment['valid_formats'] or '*' in the_attachment['valid_formats']:
file_formats.append('rtf')
if 'docx' in the_attachment['valid_formats']:
file_formats.append('docx')
if 'rtf to docx' in the_attachment['valid_formats']:
file_formats.append('rtf to docx')
for the_format in file_formats:
files_to_zip.append(str(the_attachment['file'][the_format]))
attached_file_count += 1
the_zip_file = docassemble.base.util.zip_file(*files_to_zip, filename=zip_file_name)
response = send_file(the_zip_file.path(), mimetype='application/zip', as_attachment=True, download_name=zip_file_name)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
if response_wrapper:
response_wrapper(response)
return response
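    # Handle a posted signature image: decode the canvas data, save it as a file,
    # and assign a DAFile to the signature variable.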
if '_the_image' in post_data and (STRICT_MODE is False or interview_status.question.question_type == 'signature'):
if STRICT_MODE:
file_field = from_safeid(field_info['signature_saveas'])
else:
file_field = from_safeid(post_data['_save_as'])
if illegal_variable_name(file_field):
error_messages.append(("error", "Error: Invalid character in file_field: " + str(file_field)))
else:
if not already_assembled:
interview.assemble(user_dict, interview_status)
already_assembled = True
initial_string = 'import docassemble.base.util'
try:
exec(initial_string, user_dict)
except Exception as errMess:
error_messages.append(("error", "Error: " + str(errMess)))
file_field_tr = sub_indices(file_field, user_dict)
if '_success' in post_data and post_data['_success']:
theImage = base64.b64decode(re.search(r'base64,(.*)', post_data['_the_image']).group(1) + '==')
filename = secure_filename('canvas.png')
file_number = get_new_file_number(user_code, filename, yaml_file_name=yaml_filename)
extension, mimetype = get_ext_and_mimetype(filename)
new_file = SavedFile(file_number, extension=extension, fix=True, should_not_exist=True)
new_file.write_content(theImage, binary=True)
new_file.finalize()
the_string = file_field + " = docassemble.base.util.DAFile(" + repr(file_field_tr) + ", filename='" + str(filename) + "', number=" + str(file_number) + ", mimetype='" + str(mimetype) + "', make_pngs=True, extension='" + str(extension) + "')"
else:
the_string = file_field + " = docassemble.base.util.DAFile(" + repr(file_field_tr) + ")"
process_set_variable(file_field, user_dict, vars_set, old_values)
try:
exec(the_string, user_dict)
changed = True
except Exception as errMess:
try:
logmessage(errMess.__class__.__name__ + ": " + str(errMess) + " after running " + the_string)
except:
pass
error_messages.append(("error", "Error: " + errMess.__class__.__name__ + ": " + str(errMess)))
if '_next_action_to_set' in post_data:
next_action_to_set = json.loads(myb64unquote(post_data['_next_action_to_set']))
else:
next_action_to_set = None
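    # Identify the question that generated this submission, assembling the
    # interview if its fields carry validation code or permissions.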
if '_question_name' in post_data and post_data['_question_name'] in interview.questions_by_name:
if already_assembled:
the_question = interview_status.question
else:
the_question = interview.questions_by_name[post_data['_question_name']]
if not already_assembled:
uses_permissions = False
for the_field in the_question.fields:
if hasattr(the_field, 'permissions'):
uses_permissions = True
if uses_permissions or the_question.validation_code is not None:
interview.assemble(user_dict, interview_status)
else:
for the_field in the_question.fields:
if hasattr(the_field, 'validate'):
interview.assemble(user_dict, interview_status)
break
elif already_assembled:
the_question = interview_status.question
else:
the_question = None
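    # First pass over the posted data: translate generic _field_N names back to
    # real variable names and make sure gathered checkbox/multiselect objects
    # exist before values are assigned to them.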
key_to_orig_key = {}
for orig_key in copy.deepcopy(post_data):
if orig_key in ('_checkboxes', '_empties', '_ml_info', '_back_one', '_files', '_files_inline', '_question_name', '_the_image', '_save_as', '_success', '_datatypes', '_event', '_visible', '_tracker', '_track_location', '_varnames', '_next_action', '_next_action_to_set', 'ajax', 'json', 'informed', 'csrf_token', '_action', '_order_changes', '_collect', '_collect_delete', '_list_collect_list', '_null_question') or orig_key.startswith('_ignore'):
continue
try:
key = myb64unquote(orig_key)
except:
continue
if key.startswith('_field_'):
if orig_key in known_varnames:
if not (known_varnames[orig_key] in post_data and post_data[known_varnames[orig_key]] != '' and post_data[orig_key] == ''):
post_data[known_varnames[orig_key]] = post_data[orig_key]
key_to_orig_key[from_safeid(known_varnames[orig_key])] = orig_key
else:
m = re.search(r'^(_field(?:_[0-9]+)?_[0-9]+)(\[.*\])', key)
if m:
base_orig_key = safeid(m.group(1))
if base_orig_key in known_varnames:
the_key = myb64unquote(known_varnames[base_orig_key]) + m.group(2)
key_to_orig_key[the_key] = orig_key
full_key = safeid(the_key)
post_data[full_key] = post_data[orig_key]
if key.endswith('.gathered'):
if STRICT_MODE and key not in authorized_fields:
raise DAError("The variable " + repr(key) + " was not in the allowed fields, which were " + repr(authorized_fields))
objname = re.sub(r'\.gathered$', '', key)
if illegal_variable_name(objname):
error_messages.append(("error", "Error: Invalid key " + objname))
break
try:
eval(objname, user_dict)
except:
safe_objname = safeid(objname)
if safe_objname in known_datatypes:
if known_datatypes[safe_objname] in ('object_multiselect', 'object_checkboxes'):
docassemble.base.parse.ensure_object_exists(objname, 'object_checkboxes', user_dict)
elif known_datatypes[safe_objname] in ('multiselect', 'checkboxes'):
docassemble.base.parse.ensure_object_exists(objname, known_datatypes[safe_objname], user_dict)
field_error = {}
validated = True
pre_user_dict = user_dict
imported_core = False
special_question = None
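    # Main loop over the posted fields: convert each raw value according to its
    # datatype, validate it, and build a Python assignment to execute against the
    # interview answers.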
for orig_key in post_data:
if orig_key in ('_checkboxes', '_empties', '_ml_info', '_back_one', '_files', '_files_inline', '_question_name', '_the_image', '_save_as', '_success', '_datatypes', '_event', '_visible', '_tracker', '_track_location', '_varnames', '_next_action', '_next_action_to_set', 'ajax', 'json', 'informed', 'csrf_token', '_action', '_order_changes', '', '_collect', '_collect_delete', '_list_collect_list', '_null_question') or orig_key.startswith('_ignore'):
continue
raw_data = post_data[orig_key]
try:
key = myb64unquote(orig_key)
except:
raise DAError("index: invalid name " + str(orig_key))
if key.startswith('_field_'):
continue
bracket_expression = None
if orig_key in empty_fields:
set_to_empty = empty_fields[orig_key]
else:
set_to_empty = None
if match_brackets.search(key):
match = match_inside_and_outside_brackets.search(key)
try:
key = match.group(1)
except:
try:
error_message = "index: invalid bracket name " + str(match.group(1)) + " in " + repr(key)
except:
error_message = "index: invalid bracket name in " + repr(key)
raise DAError(error_message)
real_key = safeid(key)
b_match = match_inside_brackets.search(match.group(2))
if b_match:
if b_match.group(1) in ('B', 'R', 'O'):
try:
bracket_expression = from_safeid(b_match.group(2))
except:
bracket_expression = b_match.group(2)
else:
bracket_expression = b_match.group(2)
bracket = match_inside_brackets.sub(process_bracket_expression, match.group(2))
parse_result = docassemble.base.parse.parse_var_name(key)
if not parse_result['valid']:
error_messages.append(("error", "Error: Invalid key " + key + ": " + parse_result['reason']))
break
pre_bracket_key = key
key = key + bracket
core_key_name = parse_result['final_parts'][0]
whole_key = core_key_name + parse_result['final_parts'][1]
real_key = safeid(whole_key)
if STRICT_MODE and (pre_bracket_key not in authorized_fields or pre_bracket_key + '.gathered' not in authorized_fields) and (key not in authorized_fields):
raise DAError("The variables " + repr(pre_bracket_key) + " and " + repr(key) + " were not in the allowed fields, which were " + repr(authorized_fields))
if illegal_variable_name(whole_key) or illegal_variable_name(core_key_name) or illegal_variable_name(key):
error_messages.append(("error", "Error: Invalid key " + whole_key))
break
if whole_key in user_dict:
it_exists = True
else:
try:
the_object = eval(whole_key, user_dict) # noqa: F841 # pylint: disable=unused-variable
it_exists = True
except:
it_exists = False
if not it_exists:
method = None
commands = []
if parse_result['final_parts'][1] != '':
if parse_result['final_parts'][1][0] == '.':
try:
core_key = eval(core_key_name, user_dict)
if hasattr(core_key, 'instanceName'):
method = 'attribute'
except:
pass
elif parse_result['final_parts'][1][0] == '[':
try:
core_key = eval(core_key_name, user_dict)
if hasattr(core_key, 'instanceName'):
method = 'index'
except:
pass
datatype = known_datatypes.get(real_key, None)
if not imported_core:
commands.append("import docassemble.base.util")
imported_core = True
if method == 'attribute':
attribute_name = parse_result['final_parts'][1][1:]
if datatype in ('multiselect', 'checkboxes'):
commands.append(core_key_name + ".initializeAttribute(" + repr(attribute_name) + ", docassemble.base.util.DADict, auto_gather=False, gathered=True)")
elif datatype in ('object_multiselect', 'object_checkboxes'):
commands.append(core_key_name + ".initializeAttribute(" + repr(attribute_name) + ", docassemble.base.util.DAList, auto_gather=False, gathered=True)")
process_set_variable(core_key_name + '.' + attribute_name, user_dict, vars_set, old_values)
elif method == 'index':
index_name = parse_result['final_parts'][1][1:-1]
orig_index_name = index_name
if index_name in ('i', 'j', 'k', 'l', 'm', 'n'):
index_name = repr(user_dict.get(index_name, index_name))
if datatype in ('multiselect', 'checkboxes'):
commands.append(core_key_name + ".initializeObject(" + index_name + ", docassemble.base.util.DADict, auto_gather=False, gathered=True)")
elif datatype in ('object_multiselect', 'object_checkboxes'):
commands.append(core_key_name + ".initializeObject(" + index_name + ", docassemble.base.util.DAList, auto_gather=False, gathered=True)")
process_set_variable(core_key_name + '[' + orig_index_name + ']', user_dict, vars_set, old_values)
else:
whole_key_tr = sub_indices(whole_key, user_dict)
if datatype in ('multiselect', 'checkboxes'):
commands.append(whole_key + ' = docassemble.base.util.DADict(' + repr(whole_key_tr) + ', auto_gather=False, gathered=True)')
elif datatype in ('object_multiselect', 'object_checkboxes'):
commands.append(whole_key + ' = docassemble.base.util.DAList(' + repr(whole_key_tr) + ', auto_gather=False, gathered=True)')
process_set_variable(whole_key, user_dict, vars_set, old_values)
for command in commands:
exec(command, user_dict)
else:
real_key = orig_key
parse_result = docassemble.base.parse.parse_var_name(key)
if not parse_result['valid']:
error_messages.append(("error", "Error: Invalid character in key: " + key))
break
if STRICT_MODE and key not in authorized_fields:
raise DAError("The variable " + repr(key) + " was not in the allowed fields, which were " + repr(authorized_fields))
if illegal_variable_name(key):
error_messages.append(("error", "Error: Invalid key " + key))
break
do_append = False
do_opposite = False
is_ml = False
is_date = False
is_object = False
test_data = raw_data
if real_key in known_datatypes:
if known_datatypes[real_key] in ('boolean', 'multiselect', 'checkboxes'):
if raw_data == "True":
data = "True"
test_data = True
elif raw_data == "False":
data = "False"
test_data = False
else:
data = "None"
test_data = None
elif known_datatypes[real_key] == 'threestate':
if raw_data == "True":
data = "True"
test_data = True
elif raw_data == "False":
data = "False"
test_data = False
else:
data = "None"
test_data = None
elif known_datatypes[real_key] in ('date', 'datetime', 'datetime-local'):
if isinstance(raw_data, str):
raw_data = raw_data.strip()
if raw_data != '':
try:
dateutil.parser.parse(raw_data)
except:
validated = False
if known_datatypes[real_key] == 'date':
field_error[orig_key] = word("You need to enter a valid date.")
else:
field_error[orig_key] = word("You need to enter a valid date and time.")
new_values[key] = repr(raw_data)
continue
test_data = raw_data
is_date = True
data = 'docassemble.base.util.as_datetime(' + repr(raw_data) + ')'
else:
data = repr('')
else:
data = repr('')
elif known_datatypes[real_key] == 'time':
if isinstance(raw_data, str):
raw_data = raw_data.strip()
if raw_data != '':
try:
dateutil.parser.parse(raw_data)
except:
validated = False
field_error[orig_key] = word("You need to enter a valid time.")
new_values[key] = repr(raw_data)
continue
test_data = raw_data
is_date = True
data = 'docassemble.base.util.as_datetime(' + repr(raw_data) + ').time()'
else:
data = repr('')
else:
data = repr('')
elif known_datatypes[real_key] == 'integer':
raw_data = raw_data.replace(',', '')
if raw_data.strip() == '':
raw_data = '0'
try:
test_data = int(raw_data)
except:
validated = False
field_error[orig_key] = word("You need to enter a valid number.")
new_values[key] = repr(raw_data)
continue
data = "int(" + repr(raw_data) + ")"
elif known_datatypes[real_key] in ('ml', 'mlarea'):
is_ml = True
data = "None"
elif known_datatypes[real_key] in ('number', 'float', 'currency', 'range'):
raw_data = raw_data.replace('%', '')
raw_data = raw_data.replace(',', '')
if raw_data == '':
raw_data = 0.0
try:
test_data = float(raw_data)
except:
validated = False
field_error[orig_key] = word("You need to enter a valid number.")
new_values[key] = repr(raw_data)
continue
data = "float(" + repr(raw_data) + ")"
elif known_datatypes[real_key] in ('object', 'object_radio'):
if raw_data == '' or set_to_empty:
continue
if raw_data == 'None':
data = 'None'
else:
data = "_internal['objselections'][" + repr(key) + "][" + repr(raw_data) + "]"
elif known_datatypes[real_key] in ('object_multiselect', 'object_checkboxes') and bracket_expression is not None:
if raw_data not in ('True', 'False', 'None') or set_to_empty:
continue
do_append = True
if raw_data == 'False':
do_opposite = True
data = "_internal['objselections'][" + repr(from_safeid(real_key)) + "][" + repr(bracket_expression) + "]"
elif set_to_empty in ('object_multiselect', 'object_checkboxes'):
continue
elif known_datatypes[real_key] in ('file', 'files', 'camera', 'user', 'environment'):
continue
elif known_datatypes[real_key] in docassemble.base.functions.custom_types:
info = docassemble.base.functions.custom_types[known_datatypes[real_key]]
if info['is_object']:
is_object = True
if set_to_empty:
if info['skip_if_empty']:
continue
test_data = info['class'].empty()
if is_object:
                        user_dict['__DANEWOBJECT'] = test_data  # store the empty object produced above, not the raw posted string
data = '__DANEWOBJECT'
else:
data = repr(test_data)
else:
try:
if not info['class'].validate(raw_data):
raise DAValidationError(word("You need to enter a valid value."))
except DAValidationError as err:
validated = False
if key in key_to_orig_key:
field_error[key_to_orig_key[key]] = word(str(err))
else:
field_error[orig_key] = word(str(err))
new_values[key] = repr(raw_data)
continue
test_data = info['class'].transform(raw_data)
if is_object:
user_dict['__DANEWOBJECT'] = test_data
data = '__DANEWOBJECT'
else:
data = repr(test_data)
elif known_datatypes[real_key] == 'raw':
if raw_data == "None" and set_to_empty is not None:
test_data = None
data = "None"
else:
test_data = raw_data
data = repr(raw_data)
else:
if isinstance(raw_data, str):
raw_data = BeautifulSoup(raw_data, "html.parser").get_text('\n')
if raw_data == "None" and set_to_empty is not None:
test_data = None
data = "None"
else:
test_data = raw_data
data = repr(raw_data)
if known_datatypes[real_key] in ('object_multiselect', 'object_checkboxes'):
do_append = True
elif orig_key in known_datatypes:
if known_datatypes[orig_key] in ('boolean', 'multiselect', 'checkboxes'):
if raw_data == "True":
data = "True"
test_data = True
elif raw_data == "False":
data = "False"
test_data = False
else:
data = "None"
test_data = None
elif known_datatypes[orig_key] == 'threestate':
if raw_data == "True":
data = "True"
test_data = True
elif raw_data == "False":
data = "False"
test_data = False
else:
data = "None"
test_data = None
elif known_datatypes[orig_key] in ('date', 'datetime'):
if isinstance(raw_data, str):
raw_data = raw_data.strip()
if raw_data != '':
try:
dateutil.parser.parse(raw_data)
except:
validated = False
if known_datatypes[orig_key] == 'date':
field_error[orig_key] = word("You need to enter a valid date.")
else:
field_error[orig_key] = word("You need to enter a valid date and time.")
new_values[key] = repr(raw_data)
continue
test_data = raw_data
is_date = True
data = 'docassemble.base.util.as_datetime(' + repr(raw_data) + ')'
else:
data = repr('')
else:
data = repr('')
elif known_datatypes[orig_key] == 'time':
if isinstance(raw_data, str):
raw_data = raw_data.strip()
if raw_data != '':
try:
dateutil.parser.parse(raw_data)
except:
validated = False
field_error[orig_key] = word("You need to enter a valid time.")
new_values[key] = repr(raw_data)
continue
test_data = raw_data
is_date = True
data = 'docassemble.base.util.as_datetime(' + repr(raw_data) + ').time()'
else:
data = repr('')
else:
data = repr('')
elif known_datatypes[orig_key] == 'integer':
raw_data = raw_data.replace(',', '')
if raw_data.strip() == '':
raw_data = '0'
try:
test_data = int(raw_data)
except:
validated = False
field_error[orig_key] = word("You need to enter a valid number.")
new_values[key] = repr(raw_data)
continue
data = "int(" + repr(raw_data) + ")"
elif known_datatypes[orig_key] in ('ml', 'mlarea'):
is_ml = True
data = "None"
elif known_datatypes[orig_key] in ('number', 'float', 'currency', 'range'):
raw_data = raw_data.replace(',', '')
raw_data = raw_data.replace('%', '')
if raw_data == '':
raw_data = '0.0'
                try:
                    test_data = float(raw_data)
                except:
                    validated = False
                    field_error[orig_key] = word("You need to enter a valid number.")
                    new_values[key] = repr(raw_data)
                    continue
data = "float(" + repr(raw_data) + ")"
elif known_datatypes[orig_key] in ('object', 'object_radio'):
if raw_data == '' or set_to_empty:
continue
if raw_data == 'None':
data = 'None'
else:
data = "_internal['objselections'][" + repr(key) + "][" + repr(raw_data) + "]"
elif set_to_empty in ('object_multiselect', 'object_checkboxes'):
continue
elif real_key in known_datatypes and known_datatypes[real_key] in ('file', 'files', 'camera', 'user', 'environment'):
continue
elif known_datatypes[orig_key] in docassemble.base.functions.custom_types:
info = docassemble.base.functions.custom_types[known_datatypes[orig_key]]
if set_to_empty:
if info['skip_if_empty']:
continue
test_data = info['class'].empty()
data = repr(test_data)
else:
try:
if not info['class'].validate(raw_data):
raise DAValidationError(word("You need to enter a valid value."))
except DAValidationError as err:
validated = False
if key in key_to_orig_key:
field_error[key_to_orig_key[key]] = word(str(err))
else:
field_error[orig_key] = word(str(err))
new_values[key] = repr(raw_data)
continue
test_data = info['class'].transform(raw_data)
data = repr(test_data)
else:
if isinstance(raw_data, str):
raw_data = raw_data.strip()
test_data = raw_data
data = repr(raw_data)
elif key == "_multiple_choice":
data = "int(" + repr(raw_data) + ")"
else:
data = repr(raw_data)
if key == "_multiple_choice":
if '_question_name' in post_data:
question_name = post_data['_question_name']
if question_name == 'Question_Temp':
key = '_internal["answers"][' + repr(interview_status.question.extended_question_name(user_dict)) + ']'
else:
key = '_internal["answers"][' + repr(interview.questions_by_name[question_name].extended_question_name(user_dict)) + ']'
if is_integer.match(str(post_data[orig_key])):
the_choice = int(str(post_data[orig_key]))
if len(interview.questions_by_name[question_name].fields[0].choices) > the_choice and 'key' in interview.questions_by_name[question_name].fields[0].choices[the_choice] and hasattr(interview.questions_by_name[question_name].fields[0].choices[the_choice]['key'], 'question_type'):
if interview.questions_by_name[question_name].fields[0].choices[the_choice]['key'].question_type in ('restart', 'exit', 'logout', 'exit_logout', 'leave'):
special_question = interview.questions_by_name[question_name].fields[0].choices[the_choice]['key']
elif interview.questions_by_name[question_name].fields[0].choices[the_choice]['key'].question_type == 'continue' and 'continue button field' in interview.questions_by_name[question_name].fields[0].extras:
key = interview.questions_by_name[question_name].fields[0].extras['continue button field']
data = 'True'
if is_date:
try:
exec("import docassemble.base.util", user_dict)
except Exception as errMess:
error_messages.append(("error", "Error: " + str(errMess)))
key_tr = sub_indices(key, user_dict)
if is_ml:
try:
exec("import docassemble.base.util", user_dict)
except Exception as errMess:
error_messages.append(("error", "Error: " + str(errMess)))
if orig_key in ml_info and 'train' in ml_info[orig_key]:
if not ml_info[orig_key]['train']:
use_for_training = 'False'
else:
use_for_training = 'True'
else:
use_for_training = 'True'
if orig_key in ml_info and 'group_id' in ml_info[orig_key]:
data = 'docassemble.base.util.DAModel(' + repr(key_tr) + ', group_id=' + repr(ml_info[orig_key]['group_id']) + ', text=' + repr(raw_data) + ', store=' + repr(interview.get_ml_store()) + ', use_for_training=' + use_for_training + ')'
else:
data = 'docassemble.base.util.DAModel(' + repr(key_tr) + ', text=' + repr(raw_data) + ', store=' + repr(interview.get_ml_store()) + ', use_for_training=' + use_for_training + ')'
if set_to_empty:
if set_to_empty in ('multiselect', 'checkboxes'):
try:
exec("import docassemble.base.util", user_dict)
except Exception as errMess:
error_messages.append(("error", "Error: " + str(errMess)))
data = 'docassemble.base.util.DADict(' + repr(key_tr) + ', auto_gather=False, gathered=True)'
else:
data = 'None'
if do_append and not set_to_empty:
key_to_use = from_safeid(real_key)
if illegal_variable_name(data):
logmessage("Received illegal variable name " + str(data))
continue
if illegal_variable_name(key_to_use):
logmessage("Received illegal variable name " + str(key_to_use))
continue
if do_opposite:
the_string = 'if ' + data + ' in ' + key_to_use + '.elements:\n ' + key_to_use + '.remove(' + data + ')'
else:
the_string = 'if ' + data + ' not in ' + key_to_use + '.elements:\n ' + key_to_use + '.append(' + data + ')'
if key_to_use not in new_values:
new_values[key_to_use] = []
new_values[key_to_use].append(data)
else:
process_set_variable(key, user_dict, vars_set, old_values)
the_string = key + ' = ' + data
new_values[key] = data
if orig_key in field_numbers and the_question is not None and len(the_question.fields) > field_numbers[orig_key] and hasattr(the_question.fields[field_numbers[orig_key]], 'validate'):
field_name = safeid('_field_' + str(field_numbers[orig_key]))
if field_name in post_data:
the_key = field_name
else:
the_key = orig_key
the_func = eval(the_question.fields[field_numbers[orig_key]].validate['compute'], user_dict)
try:
the_result = the_func(test_data)
if not the_result:
field_error[the_key] = word("Please enter a valid value.")
validated = False
continue
except Exception as errstr:
field_error[the_key] = str(errstr)
validated = False
continue
try:
exec(the_string, user_dict)
changed = True
except Exception as errMess:
error_messages.append(("error", "Error: " + errMess.__class__.__name__ + ": " + str(errMess)))
try:
logmessage("Tried to run " + the_string + " and got error " + errMess.__class__.__name__ + ": " + str(errMess))
except:
pass
if is_object:
if '__DANEWOBJECT' in user_dict:
del user_dict['__DANEWOBJECT']
if key not in key_to_orig_key:
key_to_orig_key[key] = orig_key
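    # Initialize any hidden or empty fields (e.g., unchecked checkbox groups and
    # unfilled object fields) that were not covered by the posted values.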
if validated and special_question is None and not disregard_input:
for orig_key in empty_fields:
key = myb64unquote(orig_key)
if STRICT_MODE and key not in authorized_fields:
raise DAError("The variable " + repr(key) + " was not in the allowed fields, which were " + repr(authorized_fields))
process_set_variable(key + '.gathered', user_dict, vars_set, old_values)
if illegal_variable_name(key):
logmessage("Received illegal variable name " + str(key))
continue
if empty_fields[orig_key] in ('object_multiselect', 'object_checkboxes'):
docassemble.base.parse.ensure_object_exists(key, empty_fields[orig_key], user_dict)
exec(key + '.clear()', user_dict)
exec(key + '.gathered = True', user_dict)
elif empty_fields[orig_key] in ('object', 'object_radio'):
process_set_variable(key, user_dict, vars_set, old_values)
try:
eval(key, user_dict)
except:
exec(key + ' = None', user_dict)
new_values[key] = 'None'
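    # If the input validated (and no special button was pressed), apply any table
    # reordering and process uploaded files.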
if validated and special_question is None:
if '_order_changes' in post_data:
orderChanges = json.loads(post_data['_order_changes'])
for tableName, changes in orderChanges.items():
tableName = myb64unquote(tableName)
# if STRICT_MODE and tableName not in authorized_fields:
# raise DAError("The variable " + repr(tableName) + " was not in the allowed fields, which were " + repr(authorized_fields))
if illegal_variable_name(tableName):
error_messages.append(("error", "Error: Invalid character in table reorder: " + str(tableName)))
continue
try:
the_table_list = eval(tableName, user_dict)
assert isinstance(the_table_list, DAList)
except:
error_messages.append(("error", "Error: Invalid table: " + str(tableName)))
continue
for item in changes:
if not (isinstance(item, list) and len(item) == 2 and isinstance(item[0], int) and isinstance(item[1], int)):
error_messages.append(("error", "Error: Invalid row number in table reorder: " + str(tableName) + " " + str(item)))
break
exec(tableName + '._reorder(' + ', '.join([repr(item) for item in changes]) + ')', user_dict)
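        # Process files uploaded inline (base64-encoded in the form), saving each
        # one and assigning a DAFileList to the corresponding variable.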
inline_files_processed = []
if '_files_inline' in post_data:
fileDict = json.loads(myb64unquote(post_data['_files_inline']))
if not isinstance(fileDict, dict):
raise DAError("inline files was not a dict")
file_fields = fileDict['keys']
has_invalid_fields = False
should_assemble_now = False
empty_file_vars = set()
for orig_file_field in file_fields:
if orig_file_field in known_varnames:
orig_file_field = known_varnames[orig_file_field]
if orig_file_field not in visible_fields:
empty_file_vars.add(orig_file_field)
try:
file_field = from_safeid(orig_file_field)
except:
error_messages.append(("error", "Error: Invalid file_field: " + orig_file_field))
break
if STRICT_MODE and file_field not in authorized_fields:
raise DAError("The variable " + repr(file_field) + " was not in the allowed fields, which were " + repr(authorized_fields))
if illegal_variable_name(file_field):
has_invalid_fields = True
error_messages.append(("error", "Error: Invalid character in file_field: " + str(file_field)))
break
if key_requires_preassembly.search(file_field):
should_assemble_now = True
if not has_invalid_fields:
initial_string = 'import docassemble.base.util'
try:
exec(initial_string, user_dict)
except Exception as errMess:
error_messages.append(("error", "Error: " + str(errMess)))
if should_assemble_now and not already_assembled:
interview.assemble(user_dict, interview_status)
already_assembled = True
for orig_file_field_raw in file_fields:
if orig_file_field_raw in known_varnames:
orig_file_field_raw = known_varnames[orig_file_field_raw]
# set_empty = bool(orig_file_field_raw not in visible_fields)
if not validated:
break
orig_file_field = orig_file_field_raw
var_to_store = orig_file_field_raw
if orig_file_field not in fileDict['values'] and len(known_varnames):
for key, val in known_varnames_visible.items():
if val == orig_file_field_raw:
orig_file_field = key
var_to_store = val
break
if orig_file_field in fileDict['values']:
the_files = fileDict['values'][orig_file_field]
if the_files:
files_to_process = []
for the_file in the_files:
temp_file = tempfile.NamedTemporaryFile(prefix="datemp", delete=False)
start_index = 0
char_index = 0
for char in the_file['content']:
char_index += 1
if char == ',':
start_index = char_index
break
temp_file.write(codecs.decode(bytearray(the_file['content'][start_index:], encoding='utf-8'), 'base64'))
temp_file.close()
filename = secure_filename(the_file['name'])
extension, mimetype = get_ext_and_mimetype(filename)
try:
img = Image.open(temp_file.name)
the_format = img.format.lower()
the_format = re.sub(r'jpeg', 'jpg', the_format)
except:
the_format = extension
logmessage("Could not read file type from file " + str(filename))
if the_format != extension:
filename = re.sub(r'\.[^\.]+$', '', filename) + '.' + the_format
extension, mimetype = get_ext_and_mimetype(filename)
file_number = get_new_file_number(user_code, filename, yaml_file_name=yaml_filename)
saved_file = SavedFile(file_number, extension=extension, fix=True, should_not_exist=True)
process_file(saved_file, temp_file.name, mimetype, extension)
files_to_process.append((filename, file_number, mimetype, extension))
try:
file_field = from_safeid(var_to_store)
except:
error_messages.append(("error", "Error: Invalid file_field: " + str(var_to_store)))
break
if STRICT_MODE and file_field not in authorized_fields:
raise DAError("The variable " + repr(file_field) + " was not in the allowed fields, which were " + repr(authorized_fields))
if illegal_variable_name(file_field):
error_messages.append(("error", "Error: Invalid character in file_field: " + str(file_field)))
break
file_field_tr = sub_indices(file_field, user_dict)
if len(files_to_process) > 0:
elements = []
indexno = 0
for (filename, file_number, mimetype, extension) in files_to_process:
elements.append("docassemble.base.util.DAFile(" + repr(file_field_tr + "[" + str(indexno) + "]") + ", filename=" + repr(filename) + ", number=" + str(file_number) + ", make_pngs=True, mimetype=" + repr(mimetype) + ", extension=" + repr(extension) + ")")
indexno += 1
the_file_list = "docassemble.base.util.DAFileList(" + repr(file_field_tr) + ", elements=[" + ", ".join(elements) + "])"
if var_to_store in field_numbers and the_question is not None and len(the_question.fields) > field_numbers[var_to_store]:
the_field = the_question.fields[field_numbers[var_to_store]]
add_permissions_for_field(the_field, interview_status, files_to_process)
if hasattr(the_field, 'validate'):
the_key = orig_file_field
the_func = eval(the_field.validate['compute'], user_dict)
try:
the_result = the_func(eval(the_file_list))
if not the_result:
field_error[the_key] = word("Please enter a valid value.")
validated = False
break
except Exception as errstr:
field_error[the_key] = str(errstr)
validated = False
break
the_string = file_field + " = " + the_file_list
inline_files_processed.append(file_field)
else:
the_string = file_field + " = None"
key_to_orig_key[file_field] = orig_file_field
process_set_variable(file_field, user_dict, vars_set, old_values)
try:
exec(the_string, user_dict)
changed = True
except Exception as errMess:
try:
logmessage("Error: " + errMess.__class__.__name__ + ": " + str(errMess) + " after trying to run " + the_string)
except:
pass
error_messages.append(("error", "Error: " + errMess.__class__.__name__ + ": " + str(errMess)))
else:
try:
file_field = from_safeid(var_to_store)
except:
error_messages.append(("error", "Error: Invalid file_field: " + str(var_to_store)))
break
if STRICT_MODE and file_field not in authorized_fields:
raise DAError("The variable " + repr(file_field) + " was not in the allowed fields, which were " + repr(authorized_fields))
if illegal_variable_name(file_field):
error_messages.append(("error", "Error: Invalid character in file_field: " + str(file_field)))
break
the_string = file_field + " = None"
key_to_orig_key[file_field] = orig_file_field
process_set_variable(file_field, user_dict, vars_set, old_values)
try:
exec(the_string, user_dict)
changed = True
except Exception as errMess:
logmessage("Error: " + errMess.__class__.__name__ + ": " + str(errMess) + " after running " + the_string)
error_messages.append(("error", "Error: " + errMess.__class__.__name__ + ": " + str(errMess)))
if '_files' in post_data or (STRICT_MODE and (not disregard_input) and len(field_info['files']) > 0):
if STRICT_MODE:
file_fields = field_info['files']
else:
file_fields = json.loads(myb64unquote(post_data['_files']))
has_invalid_fields = False
should_assemble_now = False
empty_file_vars = set()
for orig_file_field in file_fields:
if orig_file_field not in raw_visible_fields:
continue
file_field_to_use = orig_file_field
if file_field_to_use in known_varnames:
file_field_to_use = known_varnames[orig_file_field]
if file_field_to_use not in visible_fields:
empty_file_vars.add(orig_file_field)
try:
file_field = from_safeid(file_field_to_use)
except:
error_messages.append(("error", "Error: Invalid file_field: " + str(file_field_to_use)))
break
if STRICT_MODE and file_field not in authorized_fields:
raise DAError("The variable " + repr(file_field) + " was not in the allowed fields, which were " + repr(authorized_fields))
if illegal_variable_name(file_field):
has_invalid_fields = True
error_messages.append(("error", "Error: Invalid character in file_field: " + str(file_field)))
break
if key_requires_preassembly.search(file_field):
should_assemble_now = True
key_to_orig_key[file_field] = orig_file_field
if not has_invalid_fields:
initial_string = 'import docassemble.base.util'
try:
exec(initial_string, user_dict)
except Exception as errMess:
error_messages.append(("error", "Error: " + str(errMess)))
if not already_assembled:
interview.assemble(user_dict, interview_status)
already_assembled = True
for orig_file_field_raw in file_fields:
if orig_file_field_raw not in raw_visible_fields:
continue
if orig_file_field_raw in known_varnames:
orig_file_field_raw = known_varnames[orig_file_field_raw]
if orig_file_field_raw not in visible_fields:
continue
if not validated:
break
orig_file_field = orig_file_field_raw
var_to_store = orig_file_field_raw
if (orig_file_field not in request.files or request.files[orig_file_field].filename == "") and len(known_varnames):
for key, val in known_varnames_visible.items():
if val == orig_file_field_raw:
orig_file_field = key
var_to_store = val
break
if orig_file_field in request.files and request.files[orig_file_field].filename != "":
the_files = request.files.getlist(orig_file_field)
if the_files:
files_to_process = []
for the_file in the_files:
if is_ajax:
return_fake_html = True
filename = secure_filename(the_file.filename)
file_number = get_new_file_number(user_code, filename, yaml_file_name=yaml_filename)
extension, mimetype = get_ext_and_mimetype(filename)
saved_file = SavedFile(file_number, extension=extension, fix=True, should_not_exist=True)
temp_file = tempfile.NamedTemporaryFile(prefix="datemp", suffix='.' + extension, delete=False)
the_file.save(temp_file.name)
process_file(saved_file, temp_file.name, mimetype, extension)
files_to_process.append((filename, file_number, mimetype, extension))
try:
file_field = from_safeid(var_to_store)
except:
error_messages.append(("error", "Error: Invalid file_field: " + str(var_to_store)))
break
if STRICT_MODE and file_field not in authorized_fields:
raise DAError("The variable " + repr(file_field) + " was not in the allowed fields, which were " + repr(authorized_fields))
if illegal_variable_name(file_field):
error_messages.append(("error", "Error: Invalid character in file_field: " + str(file_field)))
break
file_field_tr = sub_indices(file_field, user_dict)
if len(files_to_process) > 0:
elements = []
indexno = 0
for (filename, file_number, mimetype, extension) in files_to_process:
elements.append("docassemble.base.util.DAFile(" + repr(file_field_tr + '[' + str(indexno) + ']') + ", filename=" + repr(filename) + ", number=" + str(file_number) + ", make_pngs=True, mimetype=" + repr(mimetype) + ", extension=" + repr(extension) + ")")
indexno += 1
the_file_list = "docassemble.base.util.DAFileList(" + repr(file_field_tr) + ", elements=[" + ", ".join(elements) + "])"
if var_to_store in field_numbers and the_question is not None and len(the_question.fields) > field_numbers[var_to_store]:
the_field = the_question.fields[field_numbers[var_to_store]]
add_permissions_for_field(the_field, interview_status, files_to_process)
if hasattr(the_field, 'validate'):
the_key = orig_file_field
the_func = eval(the_field.validate['compute'], user_dict)
try:
the_result = the_func(eval(the_file_list))
if not the_result:
field_error[the_key] = word("Please enter a valid value.")
validated = False
break
except Exception as errstr:
field_error[the_key] = str(errstr)
validated = False
break
the_string = file_field + " = " + the_file_list
else:
the_string = file_field + " = None"
process_set_variable(file_field, user_dict, vars_set, old_values)
if validated:
try:
exec(the_string, user_dict)
changed = True
except Exception as errMess:
logmessage("Error: " + errMess.__class__.__name__ + ": " + str(errMess) + "after running " + the_string)
error_messages.append(("error", "Error: " + errMess.__class__.__name__ + ": " + str(errMess)))
else:
try:
file_field = from_safeid(var_to_store)
except:
error_messages.append(("error", "Error: Invalid file_field: " + str(var_to_store)))
break
if file_field in inline_files_processed:
continue
if STRICT_MODE and file_field not in authorized_fields:
raise DAError("The variable " + repr(file_field) + " was not in the allowed fields, which were " + repr(authorized_fields))
if illegal_variable_name(file_field):
error_messages.append(("error", "Error: Invalid character in file_field: " + str(file_field)))
break
the_string = file_field + " = None"
process_set_variable(file_field, user_dict, vars_set, old_values)
try:
exec(the_string, user_dict)
changed = True
except Exception as errMess:
logmessage("Error: " + errMess.__class__.__name__ + ": " + str(errMess) + "after running " + the_string)
error_messages.append(("error", "Error: " + errMess.__class__.__name__ + ": " + str(errMess)))
if validated:
if 'informed' in request.form:
user_dict['_internal']['informed'][the_user_id] = {}
for key in request.form['informed'].split(','):
user_dict['_internal']['informed'][the_user_id][key] = 1
if changed and '_question_name' in post_data and post_data['_question_name'] not in user_dict['_internal']['answers']:
try:
interview.questions_by_name[post_data['_question_name']].mark_as_answered(user_dict)
except:
logmessage("index: question name could not be found")
if ('_event' in post_data or (STRICT_MODE and (not disregard_input) and field_info['orig_sought'] is not None)) and 'event_stack' in user_dict['_internal']:
if STRICT_MODE:
events_list = [field_info['orig_sought']]
else:
events_list = json.loads(myb64unquote(post_data['_event']))
if len(events_list) > 0:
session_uid = interview_status.current_info['user']['session_uid']
if session_uid in user_dict['_internal']['event_stack'] and len(user_dict['_internal']['event_stack'][session_uid]):
for event_name in events_list:
if user_dict['_internal']['event_stack'][session_uid][0]['action'] == event_name:
user_dict['_internal']['event_stack'][session_uid].pop(0)
if 'action' in interview_status.current_info and interview_status.current_info['action'] == event_name:
del interview_status.current_info['action']
if 'arguments' in interview_status.current_info:
del interview_status.current_info['arguments']
break
if len(user_dict['_internal']['event_stack'][session_uid]) == 0:
break
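# Add index-substituted variants of the variables that were set, then pop any
# pending event-stack actions that were waiting on those variables.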
for var_name in list(vars_set):
vars_set.add(sub_indices(var_name, user_dict))
if len(vars_set) > 0 and 'event_stack' in user_dict['_internal']:
session_uid = interview_status.current_info['user']['session_uid']
popped = True
while popped:
popped = False
if session_uid in user_dict['_internal']['event_stack'] and len(user_dict['_internal']['event_stack'][session_uid]):
for var_name in vars_set:
if user_dict['_internal']['event_stack'][session_uid][0]['action'] == var_name:
popped = True
user_dict['_internal']['event_stack'][session_uid].pop(0)
if len(user_dict['_internal']['event_stack'][session_uid]) == 0:
break
else:
steps, user_dict, is_encrypted = fetch_user_dict(user_code, yaml_filename, secret=secret)
else:
steps, user_dict, is_encrypted = fetch_user_dict(user_code, yaml_filename, secret=secret)
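# When the input was valid, apply list-collection deletions and additions and run
# the question's validation code; on failure, reload the saved interview dictionary.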
if validated and special_question is None:
if '_collect_delete' in post_data and list_collect_list is not None:
to_delete = json.loads(post_data['_collect_delete'])
is_ok = True
for item in to_delete:
if not isinstance(item, int):
is_ok = False
if is_ok:
exec(list_collect_list + ' ._remove_items_by_number(' + ', '.join(map(str, to_delete)) + ')', user_dict)
changed = True
if '_collect' in post_data and list_collect_list is not None:
collect = json.loads(myb64unquote(post_data['_collect']))
if collect['function'] == 'add':
add_action_to_stack(interview_status, user_dict, '_da_list_add', {'list': list_collect_list, 'complete': False})
if list_collect_list is not None:
exec(list_collect_list + '._disallow_appending()', user_dict)
if the_question is not None and the_question.validation_code:
try:
exec(the_question.validation_code, user_dict)
except Exception as validation_error:
the_error_message = str(validation_error)
logmessage("index: exception during validation: " + the_error_message)
if the_error_message == '':
the_error_message = word("Please enter a valid value.")
if isinstance(validation_error, DAValidationError) and isinstance(validation_error.field, str):
the_field = validation_error.field
logmessage("field is " + the_field)
if the_field not in key_to_orig_key:
for item in key_to_orig_key.keys():
if item.startswith(the_field + '['):
the_field = item
break
if the_field in key_to_orig_key:
field_error[key_to_orig_key[the_field]] = the_error_message
else:
error_messages.append(("error", the_error_message))
else:
error_messages.append(("error", the_error_message))
validated = False
steps, user_dict, is_encrypted = fetch_user_dict(user_code, yaml_filename, secret=secret)
if validated:
for var_name in vars_set:
if var_name in interview.invalidation_todo:
interview.invalidate_dependencies(var_name, user_dict, old_values)
elif var_name in interview.onchange_todo:
if not already_assembled:
interview.assemble(user_dict, interview_status)
already_assembled = True
interview.invalidate_dependencies(var_name, user_dict, old_values)
try:
del user_dict['_internal']['dirty'][var_name]
except:
pass
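# Re-assemble the interview with the updated variables (and any pending action)
# to determine the next screen to present.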
if action is not None:
interview_status.current_info.update(action)
interview.assemble(user_dict, interview_status, old_user_dict, force_question=special_question)
current_language = docassemble.base.functions.get_language()
session['language'] = current_language
if not interview_status.can_go_back:
user_dict['_internal']['steps_offset'] = steps
if was_new:
docassemble.base.functions.this_thread.misc['save_status'] = 'overwrite'
if not changed and url_args_changed:
changed = True
validated = True
if interview_status.question.question_type == "restart":
manual_checkout(manual_filename=yaml_filename)
url_args = user_dict['url_args']
referer = user_dict['_internal'].get('referer', None)
user_dict = fresh_dictionary()
user_dict['url_args'] = url_args
user_dict['_internal']['referer'] = referer
the_current_info = current_info(yaml=yaml_filename, req=request, interface=the_interface, session_info=session_info, secret=secret, device_id=device_id)
docassemble.base.functions.this_thread.current_info = the_current_info
interview_status = docassemble.base.parse.InterviewStatus(current_info=the_current_info)
reset_user_dict(user_code, yaml_filename)
if 'visitor_secret' not in request.cookies:
save_user_dict_key(user_code, yaml_filename)
update_session(yaml_filename, uid=user_code, key_logged=True)
steps = 1
changed = False
interview.assemble(user_dict, interview_status)
elif interview_status.question.question_type == "new_session":
manual_checkout(manual_filename=yaml_filename)
url_args = user_dict['url_args']
referer = user_dict['_internal'].get('referer', None)
the_current_info = current_info(yaml=yaml_filename, req=request, interface=the_interface, session_info=session_info, secret=secret, device_id=device_id)
docassemble.base.functions.this_thread.current_info = the_current_info
interview_status = docassemble.base.parse.InterviewStatus(current_info=the_current_info)
release_lock(user_code, yaml_filename)
user_code, user_dict = reset_session(yaml_filename, secret)
user_dict['url_args'] = url_args
user_dict['_internal']['referer'] = referer
if 'visitor_secret' not in request.cookies:
save_user_dict_key(user_code, yaml_filename)
update_session(yaml_filename, uid=user_code, key_logged=True)
steps = 1
changed = False
interview.assemble(user_dict, interview_status)
title_info = interview.get_title(user_dict, status=interview_status, converter=lambda content, part: title_converter(content, part, interview_status))
save_status = docassemble.base.functions.this_thread.misc.get('save_status', 'new')
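# Handle the special question types that end or redirect the session
# (exit, logout, refresh, signin, register, leave, response, sendfile, redirect).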
if interview_status.question.question_type == "interview_exit":
exit_link = title_info.get('exit link', 'exit')
if exit_link in ('exit', 'leave', 'logout'):
interview_status.question.question_type = exit_link
if interview_status.question.question_type == "exit":
manual_checkout(manual_filename=yaml_filename)
reset_user_dict(user_code, yaml_filename)
delete_session_for_interview(i=yaml_filename)
release_lock(user_code, yaml_filename)
session["_flashes"] = []
logmessage("Redirecting because of an exit.")
if interview_status.questionText != '':
response = do_redirect(interview_status.questionText, is_ajax, is_json, js_target)
else:
response = do_redirect(title_info.get('exit url', None) or exit_page, is_ajax, is_json, js_target)
if return_fake_html:
fake_up(response, current_language)
if response_wrapper:
response_wrapper(response)
return response
if interview_status.question.question_type in ("exit_logout", "logout"):
manual_checkout(manual_filename=yaml_filename)
if interview_status.question.question_type == "exit_logout":
reset_user_dict(user_code, yaml_filename)
release_lock(user_code, yaml_filename)
delete_session_info()
logmessage("Redirecting because of a logout.")
if interview_status.questionText != '':
response = do_redirect(interview_status.questionText, is_ajax, is_json, js_target)
else:
response = do_redirect(title_info.get('exit url', None) or exit_page, is_ajax, is_json, js_target)
if current_user.is_authenticated:
docassemble_flask_user.signals.user_logged_out.send(current_app._get_current_object(), user=current_user)
logout_user()
delete_session_info()
session.clear()
response.set_cookie('remember_token', '', expires=0)
response.set_cookie('visitor_secret', '', expires=0)
response.set_cookie('secret', '', expires=0)
response.set_cookie('session', '', expires=0)
if return_fake_html:
fake_up(response, current_language)
return response
if interview_status.question.question_type == "refresh":
release_lock(user_code, yaml_filename)
response = do_refresh(is_ajax, yaml_filename)
if return_fake_html:
fake_up(response, current_language)
if response_wrapper:
response_wrapper(response)
return response
if interview_status.question.question_type == "signin":
release_lock(user_code, yaml_filename)
logmessage("Redirecting because of a signin.")
response = do_redirect(url_for('user.login', next=url_for('index', i=yaml_filename, session=user_code)), is_ajax, is_json, js_target)
if return_fake_html:
fake_up(response, current_language)
if response_wrapper:
response_wrapper(response)
return response
if interview_status.question.question_type == "register":
release_lock(user_code, yaml_filename)
logmessage("Redirecting because of a register.")
response = do_redirect(url_for('user.register', next=url_for('index', i=yaml_filename, session=user_code)), is_ajax, is_json, js_target)
if return_fake_html:
fake_up(response, current_language)
if response_wrapper:
response_wrapper(response)
return response
if interview_status.question.question_type == "leave":
release_lock(user_code, yaml_filename)
session["_flashes"] = []
logmessage("Redirecting because of a leave.")
if interview_status.questionText != '':
response = do_redirect(interview_status.questionText, is_ajax, is_json, js_target)
else:
response = do_redirect(title_info.get('exit url', None) or exit_page, is_ajax, is_json, js_target)
if return_fake_html:
fake_up(response, current_language)
if response_wrapper:
response_wrapper(response)
return response
if interview.use_progress_bar and interview_status.question.progress is not None:
if interview_status.question.progress == -1:
user_dict['_internal']['progress'] = None
elif user_dict['_internal']['progress'] is None or interview_status.question.interview.options.get('strict progress', False) or interview_status.question.progress > user_dict['_internal']['progress']:
user_dict['_internal']['progress'] = interview_status.question.progress
if interview.use_navigation and interview_status.question.section is not None and docassemble.base.functions.this_thread.current_section:
user_dict['nav'].set_section(docassemble.base.functions.this_thread.current_section)
if interview_status.question.question_type == "response":
if is_ajax:
release_lock(user_code, yaml_filename)
response = jsonify(action='resubmit', csrf_token=generate_csrf())
if return_fake_html:
fake_up(response, current_language)
if response_wrapper:
response_wrapper(response)
return response
if hasattr(interview_status.question, 'response_code'):
resp_code = interview_status.question.response_code
else:
resp_code = 200
if hasattr(interview_status.question, 'all_variables'):
if hasattr(interview_status.question, 'include_internal'):
include_internal = interview_status.question.include_internal
else:
include_internal = False
response_to_send = make_response(docassemble.base.functions.dict_as_json(user_dict, include_internal=include_internal).encode('utf-8'), resp_code)
elif hasattr(interview_status.question, 'binaryresponse'):
response_to_send = make_response(interview_status.question.binaryresponse, resp_code)
else:
response_to_send = make_response(interview_status.questionText.encode('utf-8'), resp_code)
response_to_send.headers['Content-Type'] = interview_status.extras['content_type']
elif interview_status.question.question_type == "sendfile":
if is_ajax:
release_lock(user_code, yaml_filename)
response = jsonify(action='resubmit', csrf_token=generate_csrf())
if return_fake_html:
fake_up(response, current_language)
if response_wrapper:
response_wrapper(response)
return response
if interview_status.question.response_file is not None:
the_path = interview_status.question.response_file.path()
else:
logmessage("index: could not send file because the response was None")
return ('File not found', 404)
if not os.path.isfile(the_path):
logmessage("index: could not send file because file (" + the_path + ") not found")
return ('File not found', 404)
response_to_send = send_file(the_path, mimetype=interview_status.extras['content_type'])
response_to_send.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
elif interview_status.question.question_type == "redirect":
logmessage("Redirecting because of a redirect.")
session["_flashes"] = []
response_to_send = do_redirect(interview_status.questionText, is_ajax, is_json, js_target)
else:
response_to_send = None
if (not interview_status.followed_mc) and len(user_dict['_internal']['answers']):
user_dict['_internal']['answers'].clear()
if not validated:
changed = False
if changed and validated:
if save_status == 'new':
steps += 1
user_dict['_internal']['steps'] = steps
if action and not changed:
changed = True
if save_status == 'new':
steps += 1
user_dict['_internal']['steps'] = steps
if changed and interview.use_progress_bar and interview_status.question.progress is None and save_status == 'new':
advance_progress(user_dict, interview)
title_info = interview.get_title(user_dict, status=interview_status, converter=lambda content, part: title_converter(content, part, interview_status))
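# Save the updated interview dictionary unless saving is suppressed, decrypting or
# encrypting the stored session if the multi_user setting changed during this request.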
if save_status != 'ignore':
if save_status == 'overwrite':
changed = False
save_user_dict(user_code, user_dict, yaml_filename, secret=secret, changed=changed, encrypt=encrypted, steps=steps)
if user_dict.get('multi_user', False) is True and encrypted is True:
encrypted = False
update_session(yaml_filename, encrypted=encrypted)
decrypt_session(secret, user_code=user_code, filename=yaml_filename)
if user_dict.get('multi_user', False) is False and encrypted is False:
encrypt_session(secret, user_code=user_code, filename=yaml_filename)
encrypted = True
update_session(yaml_filename, encrypted=encrypted)
if response_to_send is not None:
release_lock(user_code, yaml_filename)
if return_fake_html:
fake_up(response_to_send, current_language)
if response_wrapper:
response_wrapper(response_to_send)
return response_to_send
messages = get_flashed_messages(with_categories=True) + error_messages
if messages:
notification_interior = ''
for classname, message in messages:
if classname == 'error':
classname = 'danger'
notification_interior += NOTIFICATION_MESSAGE % (classname, str(message))
flash_content = NOTIFICATION_CONTAINER % (notification_interior,)
else:
flash_content = ''
if 'reload_after' in interview_status.extras:
reload_after = 1000 * int(interview_status.extras['reload_after'])
else:
reload_after = 0
allow_going_back = bool(interview_status.question.can_go_back and (steps - user_dict['_internal']['steps_offset']) > 1)
if hasattr(interview_status.question, 'id'):
question_id = interview_status.question.id
else:
question_id = None
question_id_dict = {'id': question_id}
if interview.options.get('analytics on', True):
if 'segment' in interview_status.extras:
question_id_dict['segment'] = interview_status.extras['segment']
if 'ga_id' in interview_status.extras:
question_id_dict['ga'] = interview_status.extras['ga_id']
append_script_urls = []
append_javascript = ''
if not is_ajax:
scripts = standard_scripts(interview_language=current_language) + additional_scripts(interview_status, yaml_filename)
if is_js:
append_javascript += additional_scripts(interview_status, yaml_filename, as_javascript=True)
if 'javascript' in interview.external_files:
for packageref, fileref in interview.external_files['javascript']:
the_url = get_url_from_file_reference(fileref, _package=packageref)
if the_url is not None:
scripts += "\n" + ' <script src="' + get_url_from_file_reference(fileref, _package=packageref) + '"></script>'
if is_js:
append_script_urls.append(get_url_from_file_reference(fileref, _package=packageref))
else:
logmessage("index: could not find javascript file " + str(fileref))
if interview_status.question.checkin is not None:
do_action = json.dumps(interview_status.question.checkin)
else:
do_action = 'null'
chat_available = user_dict['_internal']['livehelp']['availability']
chat_mode = user_dict['_internal']['livehelp']['mode']
if chat_available == 'unavailable':
chat_status = 'off'
update_session(yaml_filename, chatstatus='off')
elif chat_available == 'observeonly':
chat_status = 'observeonly'
update_session(yaml_filename, chatstatus='observeonly')
else:
chat_status = session_info['chatstatus']
if chat_status in ('ready', 'on'):
chat_status = 'ringing'
update_session(yaml_filename, chatstatus='ringing')
if chat_status != 'off':
send_changes = 'true'
else:
if do_action != 'null':
send_changes = 'true'
else:
send_changes = 'false'
if current_user.is_authenticated:
user_id_string = str(current_user.id)
if current_user.has_role('admin', 'developer', 'advocate'):
is_user = 'false'
else:
is_user = 'true'
else:
user_id_string = 't' + str(session['tempuser'])
is_user = 'true'
if r.get('da:control:uid:' + str(user_code) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)) is not None:
being_controlled = 'true'
else:
being_controlled = 'false'
if debug_mode:
debug_readability_help = """
$("#dareadability-help").show();
$("#dareadability-question").hide();
"""
debug_readability_question = """
$("#dareadability-help").hide();
$("#dareadability-question").show();
"""
else:
debug_readability_help = ''
debug_readability_question = ''
if interview.force_fullscreen is True or (re.search(r'mobile', str(interview.force_fullscreen).lower()) and is_mobile_or_tablet()):
forceFullScreen = """
if (data.steps > 1 && window != top) {
top.location.href = location.href;
return;
}
"""
else:
forceFullScreen = ''
the_checkin_interval = interview.options.get('checkin interval', CHECKIN_INTERVAL)
if interview.options.get('analytics on', True):
if ga_configured:
ga_ids = google_config.get('analytics id')
else:
ga_ids = None
if 'segment id' in daconfig:
segment_id = daconfig['segment id']
else:
segment_id = None
else:
ga_ids = None
segment_id = None
page_sep = "#page"
if refer is None:
location_bar = url_for('index', **index_params)
elif refer[0] in ('start', 'run'):
location_bar = url_for('run_interview_in_package', package=refer[1], filename=refer[2])
page_sep = "#/"
elif refer[0] in ('start_dispatch', 'run_dispatch'):
location_bar = url_for('run_interview', dispatch=refer[1])
page_sep = "#/"
elif refer[0] in ('start_directory', 'run_directory'):
location_bar = url_for('run_interview_in_package_directory', package=refer[1], directory=refer[2], filename=refer[3])
page_sep = "#/"
else:
location_bar = None
for k, v in daconfig['dispatch'].items():
if v == yaml_filename:
location_bar = url_for('run_interview', dispatch=k)
page_sep = "#/"
break
if location_bar is None:
location_bar = url_for('index', **index_params)
index_params_external = copy.copy(index_params)
index_params_external['_external'] = True
if daconfig.get("auto color scheme", True) and not is_js:
color_scheme = """\
if (window.matchMedia('(prefers-color-scheme: dark)').matches) {
document.documentElement.setAttribute('data-bs-theme', 'dark');
}
"""
else:
color_scheme = ""
the_js = color_scheme + """\
if (typeof($) == 'undefined'){
var $ = jQuery.noConflict();
}
var daRequestPending = false;
var isAndroid = /android/i.test(navigator.userAgent.toLowerCase());
var daMapInfo = null;
var daThicknessScalingFactor = """ + str(daconfig.get("signature pen thickness scaling factor")) + """;
var daWhichButton = null;
var daSocket = null;
var daChatHistory = [];
var daCheckinCode = null;
var daCheckingIn = 0;
var daShowingHelp = 0;
var daIframeEmbed;
if ( window.location !== window.parent.location ) {
daIframeEmbed = true;
}
else {
daIframeEmbed = false;
}
var daJsEmbed = """ + (json.dumps(js_target) if is_js else 'false') + """;
var daAllowGoingBack = """ + ('true' if allow_going_back else 'false') + """;
var daSteps = """ + str(steps) + """;
var daIsUser = """ + is_user + """;
var daUserId = """ + ('null' if current_user.is_anonymous else str(current_user.id)) + """;
var daChatStatus = """ + json.dumps(chat_status) + """;
var daChatAvailable = """ + json.dumps(chat_available) + """;
var daChatPartnersAvailable = 0;
var daPhoneAvailable = false;
var daChatMode = """ + json.dumps(chat_mode) + """;
var daSendChanges = """ + send_changes + """;
var daInitialized = false;
var daNotYetScrolled = true;
var daBeingControlled = """ + being_controlled + """;
var daInformedChanged = false;
var daInformed = """ + json.dumps(user_dict['_internal']['informed'].get(user_id_string, {})) + """;
var daShowingSpinner = false;
var daSpinnerTimeout = null;
var daSubmitter = null;
var daUsingGA = """ + ("true" if ga_ids is not None else 'false') + """;
var daGAConfigured = false;
var daUsingSegment = """ + ("true" if segment_id is not None else 'false') + """;
var daDoAction = """ + do_action + """;
var daQuestionID = """ + json.dumps(question_id_dict) + """;
var daCsrf = """ + json.dumps(generate_csrf()) + """;
var daComboboxButtonLabel = """ + json.dumps(word("Dropdown")) + """;
var daShowIfInProcess = false;
var daFieldsToSkip = ['_checkboxes', '_empties', '_ml_info', '_back_one', '_files', '_files_inline', '_question_name', '_the_image', '_save_as', '_success', '_datatypes', '_event', '_visible', '_tracker', '_track_location', '_varnames', '_next_action', '_next_action_to_set', 'ajax', 'json', 'informed', 'csrf_token', '_action', '_order_changes', '_collect', '_list_collect_list', '_null_question'];
var daVarLookup = Object();
var daVarLookupRev = Object();
var daVarLookupMulti = Object();
var daVarLookupRevMulti = Object();
var daVarLookupSelect = Object();
var daVarLookupCheckbox = Object();
var daVarLookupOption = Object();
var daTargetDiv;
var daComboBoxes = Object();
var daGlobalEval = eval;
var daInterviewUrl = """ + json.dumps(url_for('index', **index_params)) + """;
var daLocationBar = """ + json.dumps(location_bar) + """;
var daPostURL = """ + json.dumps(url_for('index', **index_params_external)) + """;
var daYamlFilename = """ + json.dumps(yaml_filename) + """;
var daFetchAcceptIncoming = false;
var daFetchAjaxTimeout = null;
var daFetchAjaxTimeoutRunning = null;
var daFetchAjaxTimeoutFetchAfter = null;
var daShowHideHappened = false;
if (daJsEmbed){
daTargetDiv = '#' + daJsEmbed;
}
else{
daTargetDiv = "#dabody";
}
var daNotificationContainer = """ + json.dumps(NOTIFICATION_CONTAINER) + """;
var daNotificationMessage = """ + json.dumps(NOTIFICATION_MESSAGE) + """;
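// Minimal sprintf-style string formatter, used below to fill the notification HTML templates.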
Object.defineProperty(String.prototype, "daSprintf", {
value: function () {
var args = Array.from(arguments),
i = 0;
function defaultNumber(iValue) {
return iValue != undefined && !isNaN(iValue) ? iValue : "0";
}
function defaultString(iValue) {
return iValue == undefined ? "" : "" + iValue;
}
return this.replace(
/%%|%([+\\-])?([^1-9])?(\\d+)?(\\.\\d+)?([deEfhHioQqs])/g,
function (match, sign, filler, scale, precision, type) {
var strOut, space, value;
var asNumber = false;
if (match == "%%") return "%";
if (i >= args.length) return match;
value = args[i];
while (Array.isArray(value)) {
args.splice(i, 1);
for (var j = i; value.length > 0; j++)
args.splice(j, 0, value.shift());
value = args[i];
}
i++;
if (filler == undefined) filler = " "; // default
if (scale == undefined && !isNaN(filler)) {
scale = filler;
filler = " ";
}
if (sign == undefined) sign = "sqQ".indexOf(type) >= 0 ? "+" : "-"; // default
if (scale == undefined) scale = 0; // default
if (precision == undefined) precision = ".0"; // default
scale = parseInt(scale);
precision = parseInt(precision.substr(1));
switch (type) {
case "d":
case "i":
// decimal integer
asNumber = true;
strOut = parseInt(defaultNumber(value));
if (precision > 0) strOut += "." + "0".repeat(precision);
break;
case "e":
case "E":
// float in exponential notation
asNumber = true;
strOut = parseFloat(defaultNumber(value));
if (precision == 0) strOut = strOut.toExponential();
else strOut = strOut.toExponential(precision);
if (type == "E") strOut = strOut.replace("e", "E");
break;
case "f":
// decimal float
asNumber = true;
strOut = parseFloat(defaultNumber(value));
if (precision != 0) strOut = strOut.toFixed(precision);
break;
case "o":
case "h":
case "H":
// Octal or Hexagesimal integer notation
strOut =
"\\\\" +
(type == "o" ? "0" : type) +
parseInt(defaultNumber(value)).toString(type == "o" ? 8 : 16);
break;
case "q":
// single quoted string
strOut = "'" + defaultString(value) + "'";
break;
case "Q":
// double quoted string
strOut = '"' + defaultString(value) + '"';
break;
default:
// string
strOut = defaultString(value);
break;
}
if (typeof strOut != "string") strOut = "" + strOut;
if ((space = strOut.length) < scale) {
if (asNumber) {
if (sign == "-") {
if (strOut.indexOf("-") < 0)
strOut = filler.repeat(scale - space) + strOut;
else
strOut =
"-" +
filler.repeat(scale - space) +
strOut.replace("-", "");
} else {
if (strOut.indexOf("-") < 0)
strOut = "+" + filler.repeat(scale - space - 1) + strOut;
else
strOut =
"-" +
filler.repeat(scale - space) +
strOut.replace("-", "");
}
} else {
if (sign == "-") strOut = filler.repeat(scale - space) + strOut;
else strOut = strOut + filler.repeat(scale - space);
}
} else if (asNumber && sign == "+" && strOut.indexOf("-") < 0)
strOut = "+" + strOut;
return strOut;
}
);
},
});
Object.defineProperty(window, "daSprintf", {
value: function (str, ...rest) {
if (typeof str == "string")
return String.prototype.daSprintf.apply(str, rest);
return "";
},
});
function daGoToAnchor(target){
var scrollTarget = null;
if (daJsEmbed){
scrollTarget = $(target).first().position().top - 60;
}
else{
scrollTarget = $(target).first().offset().top - 60;
}
if (scrollTarget != null){
if (daJsEmbed){
$(daTargetDiv).animate({
scrollTop: scrollTarget
}, 500);
}
else{
$("html, body").animate({
scrollTop: scrollTarget
}, 500);
}
}
}
function atou(b64) {
return decodeURIComponent(escape(atob(b64)));
}
function utoa(data) {
return btoa(unescape(encodeURIComponent(data)));
}
function dabtoa(str) {
return window.utoa(str).replace(/[\\n=]/g, '');
}
function daatob(str) {
return atou(str);
}
function hideTablist() {
var anyTabs = $("#daChatAvailable").is(":visible")
|| $("daPhoneAvailable").is(":visible")
|| $("#dahelptoggle").is(":visible");
if (anyTabs) {
$("#nav-bar-tab-list").removeClass("dainvisible");
$("#daquestionlabel").parent().removeClass("dainvisible");
} else {
$("#nav-bar-tab-list").addClass("dainvisible");
$("#daquestionlabel").parent().addClass("dainvisible");
}
}
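// Return the decoded docassemble variable names of all fields known on the current page.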
function getFields(){
var allFields = [];
for (var rawFieldName in daVarLookup){
if (daVarLookup.hasOwnProperty(rawFieldName)){
var fieldName = atou(rawFieldName);
if (allFields.indexOf(fieldName) == -1){
allFields.push(fieldName);
}
}
}
return allFields;
}
var daGetFields = getFields;
function daAppendIfExists(fieldName, theArray){
var elem = $("[name='" + fieldName + "']");
if (elem.length > 0){
for (var i = 0; i < theArray.length; ++i){
if (theArray[i] == elem[0]){
return;
}
}
theArray.push(elem[0]);
}
}
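// Resolve a docassemble variable name to the visible, enabled form element that represents it,
// checking the checkbox and multiselect lookups before falling back to the name-based lookup tables.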
function getField(fieldName, notInDiv){
if (daVarLookupCheckbox[fieldName]){
var n = daVarLookupCheckbox[fieldName].length;
for (var i = 0; i < n; ++i){
var elem = daVarLookupCheckbox[fieldName][i].checkboxes[0].elem;
if (!$(elem).prop('disabled')){
var showifParents = $(elem).parents(".dajsshowif,.dashowif");
if (showifParents.length == 0 || $(showifParents[0]).data("isVisible") == '1'){
if (notInDiv && $.contains(notInDiv, elem)){
continue;
}
return daVarLookupCheckbox[fieldName][i].elem;
}
}
}
}
if (daVarLookupSelect[fieldName]){
var n = daVarLookupSelect[fieldName].length;
for (var i = 0; i < n; ++i){
var elem = daVarLookupSelect[fieldName][i].select;
if (!$(elem).prop('disabled')){
var showifParents = $(elem).parents(".dajsshowif,.dashowif");
if (showifParents.length == 0 || $(showifParents[0]).data("isVisible") == '1'){
if (notInDiv && $.contains(notInDiv, elem)){
continue;
}
return elem;
}
}
}
}
var fieldNameEscaped = dabtoa(fieldName);
var possibleElements = [];
daAppendIfExists(fieldNameEscaped, possibleElements);
if (daVarLookupMulti.hasOwnProperty(fieldNameEscaped)){
for (var i = 0; i < daVarLookupMulti[fieldNameEscaped].length; ++i){
daAppendIfExists(daVarLookupMulti[fieldNameEscaped][i], possibleElements);
}
}
var returnVal = null;
for (var i = 0; i < possibleElements.length; ++i){
if (!$(possibleElements[i]).prop('disabled') || $(possibleElements[i]).parents(".file-input.is-locked").length > 0 ){
var showifParents = $(possibleElements[i]).parents(".dajsshowif,.dashowif");
if (showifParents.length == 0 || $(showifParents[0]).data("isVisible") == '1'){
if (notInDiv && $.contains(notInDiv, possibleElements[i])){
continue;
}
returnVal = possibleElements[i];
}
}
}
if ($(returnVal).hasClass('da-to-labelauty') && $(returnVal).parents('fieldset').length > 0){
var fieldSet = $(returnVal).parents('fieldset')[0];
if (!$(fieldSet).hasClass('da-field-checkbox') && !$(fieldSet).hasClass('da-field-checkboxes')){
return fieldSet;
}
}
return returnVal;
}
var daGetField = getField;
function setChoices(fieldName, choices){
var elem = daGetField(fieldName);
if (elem == null){
console.log("setChoices: reference to non-existent field " + fieldName);
return;
}
var isCombobox = ($(elem).attr('type') == "hidden" && $(elem).parents('.combobox-container').length > 0);
if (isCombobox){
var comboInput = $(elem).parents('.combobox-container').first().find('input.combobox').first();
var comboObject = daComboBoxes[$(comboInput).attr('id')];
var oldComboVal = comboObject.$target.val();
elem = comboObject.$source;
}
if ($(elem).prop('tagName') != "SELECT"){
console.log("setField: field " + fieldName + " is not a dropdown field");
return;
}
var oldVal = $(elem).val();
$(elem).find("option[value!='']").each(function(){
$(this).remove();
});
var n = choices.length;
for (var i = 0; i < n; i++){
var opt = $("<option>");
opt.val(choices[i][0]);
opt.text(choices[i][1]);
if (oldVal == choices[i][0]){
opt.attr("selected", "selected")
}
$(elem).append(opt);
}
if (isCombobox){
comboObject.refresh();
comboObject.clearTarget();
if (oldComboVal != ""){
daSetField(fieldName, oldComboVal);
}
}
}
var daSetChoices = setChoices;
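// Programmatically set a field's value, dispatching on the kind of input
// (checkbox, radio, hidden/combobox, checkbox fieldset, multiselect, or plain input).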
function setField(fieldName, theValue){
var elem = daGetField(fieldName);
if (elem == null){
console.log('setField: reference to non-existent field ' + fieldName);
return;
}
if ($(elem).prop('tagName') == "FIELDSET" && $(elem).hasClass("da-field-radio")){
elem = $(elem).find('input')[0];
}
if ($(elem).attr('type') == "checkbox"){
if (theValue){
if ($(elem).prop('checked') != true){
$(elem).click();
}
}
else{
if ($(elem).prop('checked') != false){
$(elem).click();
}
}
}
else if ($(elem).attr('type') == "radio"){
var fieldNameEscaped = $(elem).attr('name').replace(/(:|\.|\[|\]|,|=)/g, "\\\\$1");
var wasSet = false;
if (theValue === true){
theValue = 'True';
}
if (theValue === false){
theValue = 'False';
}
$("input[name='" + fieldNameEscaped + "']").each(function(){
if ($(this).val() == theValue){
if ($(this).prop('checked') != true){
$(this).prop('checked', true);
$(this).trigger('change');
}
wasSet = true;
return false;
}
});
if (!wasSet){
console.log('setField: could not set radio button ' + fieldName + ' to ' + theValue);
}
}
else if ($(elem).attr('type') == "hidden"){
if ($(elem).val() != theValue){
if ($(elem).parents('.combobox-container').length > 0){
var comboInput = $(elem).parents('.combobox-container').first().find('input.combobox').first();
daComboBoxes[$(comboInput).attr('id')].manualSelect(theValue);
}
else{
$(elem).val(theValue);
$(elem).trigger('change');
}
}
}
else if ($(elem).prop('tagName') == "FIELDSET" && $(elem).hasClass("da-field-checkboxes")){
if (!Array.isArray(theValue)){
throw new Error('setField: value must be an array');
}
var n = theValue.length;
$(elem).find('input').each(function(){
if ($(this).hasClass('danota-checkbox')){
$(this).prop('checked', n == 0);
$(this).trigger('change');
return;
}
if ($(this).hasClass('daaota-checkbox')){
$(this).prop('checked', false);
$(this).trigger('change');
return;
}
if ($(this).attr('name').substr(0, 7) === '_ignore'){
return;
}
var theVal = atou($(this).data('cbvalue'));
if ($(elem).hasClass("daobject")){
theVal = atou(theVal);
}
var oldVal = $(this).prop('checked') == true;
var newVal = false;
for (var i = 0; i < n; ++i){
if (theValue[i] == theVal){
newVal = true;
}
}
if (oldVal != newVal){
$(this).click();
}
});
}
else if ($(elem).prop('tagName') == "SELECT" && $(elem).hasClass('damultiselect')){
if (daVarLookupSelect[fieldName]){
var n = daVarLookupSelect[fieldName].length;
for (var i = 0; i < n; ++i){
if (daVarLookupSelect[fieldName][i].select === elem){
var oldValue = $(daVarLookupSelect[fieldName][i].option).prop('selected') == true;
if (oldValue != theValue){
$(daVarLookupSelect[fieldName][i].option).prop('selected', theValue);
$(elem).trigger('change');
}
}
}
}
else{
if (!Array.isArray(theValue)){
throw new Error('setField: value must be an array');
}
var n = theValue.length;
var changed = false;
$(elem).find('option').each(function(){
var thisVal = daVarLookupOption[$(this).val()];
var oldVal = $(this).prop('selected') == true;
var newVal = false;
for (var i = 0; i < n; ++i){
if (thisVal == theValue[i]){
newVal = true;
}
}
if (newVal !== oldVal){
changed = true;
$(this).prop('selected', newVal);
}
});
if (changed){
$(elem).trigger('change');
}
}
}
else{
if ($(elem).val() != theValue){
$(elem).val(theValue);
$(elem).trigger('change');
}
}
}
var daSetField = setField;
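// Read a field's current value, returning booleans for checkboxes and True/False radios
// and arrays for checkbox fieldsets and multi-selects.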
function val(fieldName){
var elem = daGetField(fieldName);
if (elem == null){
return null;
}
if ($(elem).prop('tagName') == "FIELDSET" && $(elem).hasClass("da-field-radio")){
elem = $(elem).find('input')[0];
}
if ($(elem).attr('type') == "checkbox"){
if ($(elem).prop('checked')){
theVal = true;
}
else{
theVal = false;
}
}
else if ($(elem).attr('type') == "radio"){
var fieldNameEscaped = $(elem).attr('name').replace(/(:|\.|\[|\]|,|=)/g, "\\\\$1");
theVal = $("input[name='" + fieldNameEscaped + "']:checked").val();
if (typeof(theVal) == 'undefined'){
theVal = null;
}
else{
if ($(elem).hasClass("daobject")){
theVal = atou(theVal);
}
else if (theVal == 'True'){
theVal = true;
}
else if (theVal == 'False'){
theVal = false;
}
}
}
else if ($(elem).prop('tagName') == "FIELDSET" && $(elem).hasClass("da-field-checkboxes")){
var cbSelected = [];
$(elem).find('input').each(function(){
if ($(this).attr('name').substr(0,7) === '_ignore'){
return;
}
var theVal = atou($(this).data('cbvalue'));
if ($(elem).hasClass("daobject")){
theVal = atou(theVal);
}
if ($(this).prop('checked')){
cbSelected.push(theVal);
}
});
return cbSelected;
}
else if ($(elem).prop('tagName') == "SELECT" && $(elem).hasClass('damultiselect')){
if (daVarLookupSelect[fieldName]){
var n = daVarLookupSelect[fieldName].length;
for (var i = 0; i < n; ++i){
if (daVarLookupSelect[fieldName][i].select === elem){
return $(daVarLookupSelect[fieldName][i].option).prop('selected');
}
}
}
else{
var selectedVals = [];
$(elem).find('option').each(function(){
if ($(this).prop('selected')){
if (daVarLookupOption[$(this).val()]){
selectedVals.push(daVarLookupOption[$(this).val()]);
}
}
});
return selectedVals;
}
}
else if ($(elem).prop('tagName') == "SELECT" && $(elem).hasClass('daobject')){
theVal = atou($(elem).val());
}
else{
theVal = $(elem).val();
}
return theVal;
}
var da_val = val;
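// Serialize the current form into a JSON string keyed by decoded variable names,
// noting which field (if any) triggered the change.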
function daFormAsJSON(elem){
var isInitial = false;
var formData = $("#daform").serializeArray();
var data = Object();
if (elem == 'initial'){
elem = null;
data['_initial'] = true;
}
else{
data['_initial'] = false;
}
if (elem !== null && $(elem).hasClass('combobox')){
elem = $(elem).parent().find('input[type="hidden"]');
}
data['_changed'] = null;
var n = formData.length;
for (var i = 0; i < n; ++i){
var key = formData[i]['name'];
var val = formData[i]['value'];
if ($.inArray(key, daFieldsToSkip) != -1 || key.indexOf('_ignore') == 0){
continue;
}
var isChangedElem = false;
if (elem !== null && key == $(elem).attr('name')){
isChangedElem = true;
}
if (typeof daVarLookupRev[key] != "undefined"){
data[atou(daVarLookupRev[key])] = val;
if (isChangedElem){
data['_changed'] = atou(daVarLookupRev[key])
}
}
else{
data[atou(key)] = val;
if (isChangedElem){
data['_changed'] = atou(key)
}
}
}
return JSON.stringify(data);
}
var daMessageLog = JSON.parse(atou(""" + json.dumps(safeid(json.dumps(docassemble.base.functions.get_message_log()))) + """));
function daPreloadImage(url){
var img = new Image();
img.src = url;
}
daPreloadImage('""" + str(url_for('static', filename='app/chat.ico', v=da_version)) + """');
function daShowHelpTab(){
$('#dahelptoggle').tab('show');
}
function addCsrfHeader(xhr, settings){
if (daJsEmbed && !/^(GET|HEAD|OPTIONS|TRACE)$/i.test(settings.type)){
xhr.setRequestHeader("X-CSRFToken", daCsrf);
}
}
function flash(message, priority, clear){
if (priority == null){
priority = 'info'
}
if (!$("#daflash").length){
$(daTargetDiv).append(daSprintf(daNotificationContainer, ""));
}
if (clear){
$("#daflash").empty();
}
if (message != null){
$("#daflash").append(daSprintf(daNotificationMessage, priority, message));
if (priority == 'success'){
setTimeout(function(){
$("#daflash .alert-success").hide(300, function(){
$(this).remove();
});
}, 3000);
}
}
}
var da_flash = flash;
function url_action(action, args){
if (args == null){
args = {};
}
data = {action: action, arguments: args};
var url;
if (daJsEmbed){
url = daPostURL + "&action=" + encodeURIComponent(utoa(JSON_stringify(data)))
}
else{
if (daLocationBar.indexOf('?') !== -1){
url = daLocationBar + "&action=" + encodeURIComponent(utoa(JSON_stringify(data)))
}
else {
url = daLocationBar + "?action=" + encodeURIComponent(utoa(JSON_stringify(data)))
}
}
return url;
}
var da_url_action = url_action;
function action_call(action, args, callback, forgetPrior=false){
if (args == null){
args = {};
}
if (forgetPrior){
args = {_action: action, _arguments: args};
action = '_da_priority_action';
}
if (callback == null){
callback = function(){};
}
var data = {action: action, arguments: args};
var url;
if (daJsEmbed){
url = daPostURL + "&action=" + encodeURIComponent(utoa(JSON_stringify(data)))
}
else{
url = daInterviewUrl + "&action=" + encodeURIComponent(utoa(JSON_stringify(data)))
}
return $.ajax({
type: "GET",
url: url,
success: callback,
beforeSend: addCsrfHeader,
xhrFields: {
withCredentials: true
},
error: function(xhr, status, error){
setTimeout(function(){
daProcessAjaxError(xhr, status, error);
}, 0);
}
});
}
var da_action_call = action_call;
var url_action_call = action_call;
function action_perform(action, args, forgetPrior=false){
if (args == null){
args = {};
}
if (forgetPrior){
args = {_action: action, _arguments: args};
action = '_da_priority_action';
}
var data = {action: action, arguments: args};
daSpinnerTimeout = setTimeout(daShowSpinner, 1000);
daRequestPending = true;
return $.ajax({
type: "POST",
url: daInterviewUrl,
beforeSend: addCsrfHeader,
xhrFields: {
withCredentials: true
},
data: $.param({_action: utoa(JSON_stringify(data)), csrf_token: daCsrf, ajax: 1}),
success: function(data){
setTimeout(function(){
daProcessAjax(data, $("#daform"), 1);
}, 0);
},
error: function(xhr, status, error){
setTimeout(function(){
daProcessAjaxError(xhr, status, error);
}, 0);
},
dataType: 'json'
});
}
var da_action_perform = action_perform;
var url_action_perform = action_perform;
function action_perform_with_next(action, args, next_data, forgetPrior=false){
//console.log("action_perform_with_next: " + action + " | " + next_data)
if (args == null){
args = {};
}
if (forgetPrior){
args = {_action: action, _arguments: args};
action = '_da_priority_action';
}
var data = {action: action, arguments: args};
daSpinnerTimeout = setTimeout(daShowSpinner, 1000);
daRequestPending = true;
return $.ajax({
type: "POST",
url: daInterviewUrl,
beforeSend: addCsrfHeader,
xhrFields: {
withCredentials: true
},
data: $.param({_action: utoa(JSON_stringify(data)), _next_action_to_set: utoa(JSON_stringify(next_data)), csrf_token: daCsrf, ajax: 1}),
success: function(data){
setTimeout(function(){
daProcessAjax(data, $("#daform"), 1);
}, 0);
},
error: function(xhr, status, error){
setTimeout(function(){
daProcessAjaxError(xhr, status, error);
}, 0);
},
dataType: 'json'
});
}
var da_action_perform_with_next = action_perform_with_next;
var url_action_perform_with_next = action_perform_with_next;
function get_interview_variables(callback){
if (callback == null){
callback = function(){};
}
return $.ajax({
type: "GET",
url: """ + '"' + url_for('get_variables', i=yaml_filename) + '"' + """,
success: callback,
beforeSend: addCsrfHeader,
xhrFields: {
withCredentials: true
},
error: function(xhr, status, error){
setTimeout(function(){
daProcessAjaxError(xhr, status, error);
}, 0);
}
});
}
var da_get_interview_variables = get_interview_variables;
function daInformAbout(subject, chatMessage){
if (subject in daInformed || (subject != 'chatmessage' && !daIsUser)){
return;
}
if (daShowingHelp && subject != 'chatmessage'){
daInformed[subject] = 1;
daInformedChanged = true;
return;
}
if (daShowingHelp && subject == 'chatmessage'){
return;
}
var target;
var message;
var waitPeriod = 3000;
if (subject == 'chat'){
target = "#daChatAvailable a";
message = """ + json.dumps(word("Get help through live chat by clicking here.")) + """;
}
else if (subject == 'chatmessage'){
target = "#daChatAvailable a";
//message = """ + json.dumps(word("A chat message has arrived.")) + """;
message = chatMessage;
}
else if (subject == 'phone'){
target = "#daPhoneAvailable a";
message = """ + json.dumps(word("Click here to get help over the phone.")) + """;
}
else{
return;
}
if (subject != 'chatmessage'){
daInformed[subject] = 1;
daInformedChanged = true;
}
if (subject == 'chatmessage'){
$(target).popover({"content": message, "placement": "bottom", "trigger": "manual", "container": "body", "title": """ + json.dumps(word("New chat message")) + """});
}
else {
$(target).popover({"content": message, "placement": "bottom", "trigger": "manual", "container": "body", "title": """ + json.dumps(word("Live chat is available")) + """});
}
$(target).popover('show');
setTimeout(function(){
$(target).popover('dispose');
$(target).removeAttr('title');
}, waitPeriod);
}
// function daCloseSocket(){
// if (typeof daSocket !== 'undefined' && daSocket.connected){
// //daSocket.emit('terminate');
// //io.unwatch();
// }
// }
function daPublishMessage(data){
var newDiv = document.createElement('li');
$(newDiv).addClass("list-group-item");
if (data.is_self){
$(newDiv).addClass("list-group-item-primary dalistright");
}
else{
$(newDiv).addClass("list-group-item-secondary dalistleft");
}
//var newSpan = document.createElement('span');
//$(newSpan).html(data.message);
//$(newSpan).appendTo($(newDiv));
//var newName = document.createElement('span');
//$(newName).html(userNameString(data));
//$(newName).appendTo($(newDiv));
$(newDiv).html(data.message);
$("#daCorrespondence").append(newDiv);
}
function daScrollChat(){
var chatScroller = $("#daCorrespondence");
if (chatScroller.length){
var height = chatScroller[0].scrollHeight;
//console.log("Slow scrolling to " + height);
if (height == 0){
daNotYetScrolled = true;
return;
}
chatScroller.animate({scrollTop: height}, 800);
}
else{
console.log("daScrollChat: error");
}
}
function daScrollChatFast(){
var chatScroller = $("#daCorrespondence");
if (chatScroller.length){
var height = chatScroller[0].scrollHeight;
if (height == 0){
daNotYetScrolled = true;
return;
}
//console.log("Scrolling to " + height + " where there are " + chatScroller[0].childElementCount + " children");
chatScroller.scrollTop(height);
}
else{
console.log("daScrollChatFast: error");
}
}
function daSender(){
//console.log("daSender");
if ($("#daMessage").val().length){
daSocket.emit('chatmessage', {data: $("#daMessage").val(), i: daYamlFilename});
$("#daMessage").val("");
$("#daMessage").focus();
}
return false;
}
function daShowControl(mode){
//console.log("You are now being controlled");
if ($("body").hasClass("dacontrolled")){
return;
}
$('input[type="submit"], button[type="submit"]').prop("disabled", true);
$("body").addClass("dacontrolled");
var newDiv = document.createElement('div');
$(newDiv).addClass("datop-alert col-xs-10 col-sm-7 col-md-6 col-lg-5 dacol-centered");
$(newDiv).html(""" + json.dumps(word("Your screen is being controlled by an operator.")) + """)
$(newDiv).attr('id', "dacontrolAlert");
$(newDiv).css("display", "none");
$(newDiv).appendTo($(daTargetDiv));
if (mode == 'animated'){
$(newDiv).slideDown();
}
else{
$(newDiv).show();
}
}
function daHideControl(){
//console.log("You are no longer being controlled");
if (! $("body").hasClass("dacontrolled")){
return;
}
$('input[type="submit"], button[type="submit"]').prop("disabled", false);
$("body").removeClass("dacontrolled");
$("#dacontrolAlert").html(""" + json.dumps(word("The operator is no longer controlling your screen.")) + """);
setTimeout(function(){
$("#dacontrolAlert").slideUp(300, function(){
$("#dacontrolAlert").remove();
});
}, 2000);
}
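// Establish (or reuse) the Socket.IO connection used for live chat and operator screen control.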
function daInitializeSocket(){
if (daSocket != null){
if (daSocket.connected){
//console.log("Calling connectagain");
if (daChatStatus == 'ready'){
daSocket.emit('connectagain', {i: daYamlFilename});
}
if (daBeingControlled){
daShowControl('animated');
daSocket.emit('start_being_controlled', {i: daYamlFilename});
}
}
else{
//console.log('daInitializeSocket: daSocket.connect()');
daSocket.connect();
}
return;
}
if (location.protocol === 'http:' || document.location.protocol === 'http:'){
daSocket = io.connect('http://' + document.domain + '/wsinterview', {path: '""" + ROOT + """ws/socket.io', query: "i=" + daYamlFilename});
}
if (location.protocol === 'https:' || document.location.protocol === 'https:'){
daSocket = io.connect('https://' + document.domain + '/wsinterview', {path: '""" + ROOT + """ws/socket.io', query: "i=" + daYamlFilename});
}
//console.log("daInitializeSocket: socket is " + daSocket);
if (daSocket != null){
daSocket.on('connect', function() {
if (daSocket == null){
console.log("Error: socket is null");
return;
}
//console.log("Connected socket with sid " + daSocket.id);
if (daChatStatus == 'ready'){
daChatStatus = 'on';
daDisplayChat();
daPushChanges();
//daTurnOnChat();
//console.log("Emitting chat_log from on connect");
daSocket.emit('chat_log', {i: daYamlFilename});
}
if (daBeingControlled){
daShowControl('animated')
daSocket.emit('start_being_controlled', {i: daYamlFilename});
}
});
daSocket.on('chat_log', function(arg) {
//console.log("Got chat_log");
$("#daCorrespondence").html('');
daChatHistory = [];
var messages = arg.data;
for (var i = 0; i < messages.length; ++i){
daChatHistory.push(messages[i]);
daPublishMessage(messages[i]);
}
daScrollChatFast();
});
daSocket.on('chatready', function(data) {
//var key = 'da:session:uid:' + data.uid + ':i:' + data.i + ':userid:' + data.userid
//console.log('chatready');
});
daSocket.on('terminate', function() {
//console.log("interview: terminating socket");
daSocket.disconnect();
});
daSocket.on('controllerstart', function(){
daBeingControlled = true;
daShowControl('animated');
});
daSocket.on('controllerexit', function(){
daBeingControlled = false;
//console.log("Hiding control 2");
daHideControl();
if (daChatStatus != 'on'){
if (daSocket != null && daSocket.connected){
//console.log('Terminating interview socket because control over');
daSocket.emit('terminate');
}
}
});
daSocket.on('disconnect', function() {
//console.log("Manual disconnect");
//daSocket.emit('manual_disconnect', {i: daYamlFilename});
//console.log("Disconnected socket");
//daSocket = null;
});
daSocket.on('reconnected', function() {
//console.log("Reconnected");
daChatStatus = 'on';
daDisplayChat();
daPushChanges();
daTurnOnChat();
//console.log("Emitting chat_log from reconnected");
daSocket.emit('chat_log', {i: daYamlFilename});
});
daSocket.on('mymessage', function(arg) {
//console.log("Received " + arg.data);
$("#daPushResult").html(arg.data);
});
daSocket.on('departure', function(arg) {
//console.log("Departure " + arg.numpartners);
if (arg.numpartners == 0){
daCloseChat();
}
});
daSocket.on('chatmessage', function(arg) {
//console.log("Received chat message " + arg.data);
daChatHistory.push(arg.data);
daPublishMessage(arg.data);
daScrollChat();
daInformAbout('chatmessage', arg.data.message);
});
daSocket.on('newpage', function(incoming) {
//console.log("newpage received");
var data = incoming.obj;
daProcessAjax(data, $("#daform"), 1);
});
daSocket.on('controllerchanges', function(data) {
//console.log("controllerchanges: " + data.parameters);
var valArray = Object();
var values = JSON.parse(data.parameters);
for (var i = 0; i < values.length; i++) {
valArray[values[i].name] = values[i].value;
}
//console.log("valArray is " + JSON.stringify(valArray));
$("#daform").each(function(){
$(this).find(':input').each(function(){
var type = $(this).attr('type');
var id = $(this).attr('id');
var name = $(this).attr('name');
if (type == 'checkbox'){
if (name in valArray){
if (valArray[name] == 'True'){
if ($(this).prop('checked') != true){
$(this).prop('checked', true);
$(this).trigger('change');
}
}
else{
if ($(this).prop('checked') != false){
$(this).prop('checked', false);
$(this).trigger('change');
}
}
}
else{
if ($(this).prop('checked') != false){
$(this).prop('checked', false);
$(this).trigger('change');
}
}
}
else if (type == 'radio'){
if (name in valArray){
if (valArray[name] == $(this).val()){
if ($(this).prop('checked') != true){
$(this).prop('checked', true);
$(this).trigger('change');
}
}
else{
if ($(this).prop('checked') != false){
$(this).prop('checked', false);
$(this).trigger('change');
}
}
}
}
else if ($(this).data().hasOwnProperty('sliderMax')){
$(this).slider('setValue', parseInt(valArray[name]));
}
else{
if (name in valArray){
$(this).val(valArray[name]);
}
}
});
});
if (data.clicked){
//console.log("Need to click " + data.clicked);
$(data.clicked).prop("disabled", false);
$(data.clicked).addClass("da-click-selected");
if ($(data.clicked).prop("tagName") == 'A' && typeof $(data.clicked).attr('href') != 'undefined' && ($(data.clicked).attr('href').indexOf('javascript') == 0 || $(data.clicked).attr('href').indexOf('#') == 0)){
setTimeout(function(){
$(data.clicked).removeClass("da-click-selected");
}, 2200);
}
setTimeout(function(){
//console.log("Clicking it now");
$(data.clicked).click();
//console.log("Clicked it.");
}, 200);
}
});
}
}
var daCheckinSeconds = """ + str(the_checkin_interval) + """;
var daCheckinInterval = null;
var daReloader = null;
var daDisable = null;
var daChatRoles = """ + json.dumps(user_dict['_internal']['livehelp']['roles']) + """;
var daChatPartnerRoles = """ + json.dumps(user_dict['_internal']['livehelp']['partner_roles']) + """;
function daUnfakeHtmlResponse(text){
text = text.substr(text.indexOf('ABCDABOUNDARYSTARTABC') + 21);
text = text.substr(0, text.indexOf('ABCDABOUNDARYENDABC')).replace(/\s/g, '');
text = atou(text);
return text;
}
function daInjectTrim(handler){
return function (element, event) {
if (element.tagName === "TEXTAREA" || (element.tagName === "INPUT" && element.type !== "password" && element.type !== "date" && element.type !== "datetime" && element.type !== "file")) {
setTimeout(function(){
element.value = $.trim(element.value);
}, 10);
}
return handler.call(this, element, event);
};
}
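// When client-side validation fails, scroll the page (or the embedded target
// div) to the first field that has an error.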
function daInvalidHandler(form, validator){
var errors = validator.numberOfInvalids();
var scrollTarget = null;
if (errors && $(validator.errorList[0].element).parents('.da-form-group').length > 0) {
if (daJsEmbed){
scrollTarget = $(validator.errorList[0].element).parents('.da-form-group').first().position().top - 60;
}
else{
scrollTarget = $(validator.errorList[0].element).parents('.da-form-group').first().offset().top - 60;
}
}
if (scrollTarget != null){
if (daJsEmbed){
$(daTargetDiv).animate({
scrollTop: scrollTarget
}, 1000);
}
else{
$("html, body").animate({
scrollTop: scrollTarget
}, 1000);
}
}
}
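// Runs when the form passes client-side validation: records which fields were
// visible, encodes table reordering and list-collect deletions as hidden
// inputs, reads any inline file uploads, and then submits the form by AJAX or,
// when file uploads require it, through a hidden iframe.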
function daValidationHandler(form){
//form.submit();
//console.log("daValidationHandler");
var visibleElements = [];
var seen = Object();
$(form).find("input, select, textarea").filter(":not(:disabled)").each(function(){
//console.log("Considering an element");
if ($(this).attr('name') && $(this).attr('type') != "hidden" && (($(this).hasClass('da-active-invisible') && $(this).parent().is(":visible")) || $(this).is(":visible"))){
var theName = $(this).attr('name');
//console.log("Including an element " + theName);
if (!seen.hasOwnProperty(theName)){
visibleElements.push(theName);
seen[theName] = 1;
}
}
});
$(form).find("input[name='_visible']").val(utoa(JSON_stringify(visibleElements)));
$(form).each(function(){
$(this).find(':input').off('change', daOnChange);
});
$("meta[name=viewport]").attr('content', "width=device-width, minimum-scale=1.0, maximum-scale=1.0, initial-scale=1.0");
if (daCheckinInterval != null){
clearInterval(daCheckinInterval);
}
daDisable = setTimeout(function(){
$(form).find('input[type="submit"]').prop("disabled", true);
$(form).find('button[type="submit"]').prop("disabled", true);
}, 1);
if (daWhichButton != null){
$(".da-field-buttons .btn-da").each(function(){
if (this != daWhichButton){
$(this).removeClass(""" + '"' + app.config['BUTTON_STYLE'] + """primary """ + app.config['BUTTON_STYLE'] + """info """ + app.config['BUTTON_STYLE'] + """warning """ + app.config['BUTTON_STYLE'] + """danger """ + app.config['BUTTON_STYLE'] + """secondary");
$(this).addClass(""" + '"' + app.config['BUTTON_STYLE'] + """light");
}
});
if ($(daWhichButton).hasClass(""" + '"' + app.config['BUTTON_STYLE'] + """success")){
$(daWhichButton).removeClass(""" + '"' + app.config['BUTTON_STYLE'] + """success");
$(daWhichButton).addClass(""" + '"' + app.config['BUTTON_STYLE'] + """primary");
}
else{
$(daWhichButton).removeClass(""" + '"' + app.config['BUTTON_STYLE'] + """primary """ + app.config['BUTTON_STYLE'] + """info """ + app.config['BUTTON_STYLE'] + """warning """ + app.config['BUTTON_STYLE'] + """danger """ + app.config['BUTTON_STYLE'] + """success """ + app.config['BUTTON_STYLE'] + """light");
$(daWhichButton).addClass(""" + '"' + app.config['BUTTON_STYLE'] + """secondary");
}
}
var tableOrder = {};
var tableOrderChanges = {};
$("a.datableup").each(function(){
var tableName = $(this).data('tablename');
if (!tableOrder.hasOwnProperty(tableName)){
tableOrder[tableName] = [];
}
tableOrder[tableName].push(parseInt($(this).data('tableitem')));
});
var tableChanged = false;
for (var tableName in tableOrder){
if (tableOrder.hasOwnProperty(tableName)){
var n = tableOrder[tableName].length;
for (var i = 0; i < n; ++i){
if (i != tableOrder[tableName][i]){
tableChanged = true;
if (!tableOrderChanges.hasOwnProperty(tableName)){
tableOrderChanges[tableName] = [];
}
tableOrderChanges[tableName].push([tableOrder[tableName][i], i])
}
}
}
}
if (tableChanged){
$('<input>').attr({
type: 'hidden',
name: '_order_changes',
value: JSON.stringify(tableOrderChanges)
}).appendTo($(form));
}
var collectToDelete = [];
$(".dacollectunremove:visible").each(function(){
collectToDelete.push(parseInt($(this).parent().parent().data('collectnum')));
});
var lastOk = parseInt($(".dacollectremove:visible, .dacollectremoveexisting:visible").last().parent().parent().data('collectnum'));
$(".dacollectremove, .dacollectremoveexisting").each(function(){
if (parseInt($(this).parent().parent().data('collectnum')) > lastOk){
collectToDelete.push(parseInt($(this).parent().parent().data('collectnum')));
}
});
if (collectToDelete.length > 0){
$('<input>').attr({
type: 'hidden',
name: '_collect_delete',
value: JSON.stringify(collectToDelete)
}).appendTo($(form));
}
$("select.damultiselect:not(:disabled)").each(function(){
var showifParents = $(this).parents(".dajsshowif,.dashowif");
if (showifParents.length == 0 || $(showifParents[0]).data("isVisible") == '1'){
$(this).find('option').each(function(){
$('<input>').attr({
type: 'hidden',
name: $(this).val(),
value: $(this).prop('selected') ? 'True' : 'False'
}).appendTo($(form));
});
}
$(this).prop('disabled', true);
});
daWhichButton = null;
if (daSubmitter != null && daSubmitter.name && $('input[name="' + daSubmitter.name + '"]').length == 0){
$('<input>').attr({
type: 'hidden',
name: daSubmitter.name,
value: daSubmitter.value
}).appendTo($(form));
}
if (daInformedChanged){
$("<input>").attr({
type: 'hidden',
name: 'informed',
value: Object.keys(daInformed).join(',')
}).appendTo($(form));
}
$('<input>').attr({
type: 'hidden',
name: 'ajax',
value: '1'
}).appendTo($(form));
daSpinnerTimeout = setTimeout(daShowSpinner, 1000);
var do_iframe_upload = false;
  var inline_succeeded = false;
if ($('input[name="_files"]').length){
var filesToRead = 0;
var filesRead = 0;
var newFileList = Array();
var nullFileList = Array();
var fileArray = {keys: Array(), values: Object()};
var file_list = JSON.parse(atou($('input[name="_files"]').val()));
var inline_file_list = Array();
var namesWithImages = Object();
for (var i = 0; i < file_list.length; i++){
var the_file_input = $('#' + file_list[i].replace(/(:|\.|\[|\]|,|=|\/|\")/g, '\\\\$1'))[0];
var the_max_size = $(the_file_input).data('maximagesize');
var the_image_type = $(the_file_input).data('imagetype');
var hasImages = false;
if (typeof the_max_size != 'undefined' || typeof the_image_type != 'undefined'){
for (var j = 0; j < the_file_input.files.length; j++){
var the_file = the_file_input.files[j];
if (the_file.type.match(/image.*/)){
hasImages = true;
}
}
}
if (hasImages || (daJsEmbed && the_file_input.files.length > 0)){
for (var j = 0; j < the_file_input.files.length; j++){
var the_file = the_file_input.files[j];
filesToRead++;
}
inline_file_list.push(file_list[i]);
}
else if (the_file_input.files.length > 0){
newFileList.push(file_list[i]);
}
else{
nullFileList.push(file_list[i]);
}
namesWithImages[file_list[i]] = hasImages;
}
if (inline_file_list.length > 0){
var originalFileList = atou($('input[name="_files"]').val())
if (newFileList.length == 0 && nullFileList.length == 0){
$('input[name="_files"]').remove();
}
else{
$('input[name="_files"]').val(utoa(JSON_stringify(newFileList.concat(nullFileList))));
}
for (var i = 0; i < inline_file_list.length; i++){
fileArray.keys.push(inline_file_list[i])
fileArray.values[inline_file_list[i]] = Array()
var fileInfoList = fileArray.values[inline_file_list[i]];
var file_input = $('#' + inline_file_list[i].replace(/(:|\.|\[|\]|,|=|\/|\")/g, '\\\\$1'))[0];
var max_size;
var image_type;
var image_mime_type;
var this_has_images = false;
if (namesWithImages[inline_file_list[i]]){
this_has_images = true;
max_size = parseInt($(file_input).data('maximagesize'));
image_type = $(file_input).data('imagetype');
image_mime_type = null;
if (image_type){
if (image_type == 'png'){
image_mime_type = 'image/png';
}
else if (image_type == 'bmp'){
image_mime_type = 'image/bmp';
}
else {
image_mime_type = 'image/jpeg';
image_type = 'jpg';
}
}
}
for (var j = 0; j < file_input.files.length; j++){
var a_file = file_input.files[j];
var tempFunc = function(the_file, max_size, has_images){
var reader = new FileReader();
var thisFileInfo = {name: the_file.name, size: the_file.size, type: the_file.type};
fileInfoList.push(thisFileInfo);
reader.onload = function(readerEvent){
if (has_images && the_file.type.match(/image.*/) && !(the_file.type.indexOf('image/svg') == 0)){
var convertedName = the_file.name;
var convertedType = the_file.type;
if (image_type){
var pos = the_file.name.lastIndexOf(".");
convertedName = the_file.name.substr(0, pos < 0 ? the_file.name.length : pos) + "." + image_type;
convertedType = image_mime_type;
thisFileInfo.name = convertedName;
thisFileInfo.type = convertedType;
}
var image = new Image();
image.onload = function(imageEvent) {
var canvas = document.createElement('canvas'),
width = image.width,
height = image.height;
if (width > height) {
if (width > max_size) {
height *= max_size / width;
width = max_size;
}
}
else {
if (height > max_size) {
width *= max_size / height;
height = max_size;
}
}
canvas.width = width;
canvas.height = height;
canvas.getContext('2d').drawImage(image, 0, 0, width, height);
thisFileInfo['content'] = canvas.toDataURL(convertedType);
filesRead++;
if (filesRead >= filesToRead){
daResumeUploadSubmission(form, fileArray, inline_file_list, newFileList);
}
};
image.src = reader.result;
}
else{
thisFileInfo['content'] = reader.result;
filesRead++;
if (filesRead >= filesToRead){
daResumeUploadSubmission(form, fileArray, inline_file_list, newFileList);
}
}
};
reader.readAsDataURL(the_file);
};
tempFunc(a_file, max_size, this_has_images);
inline_succeeded = true;
}
}
}
if (newFileList.length == 0){
//$('input[name="_files"]').remove();
}
else{
do_iframe_upload = true;
}
}
if (inline_succeeded){
return(false);
}
if (do_iframe_upload){
$("#dauploadiframe").remove();
var iframe = $('<iframe name="dauploadiframe" id="dauploadiframe" style="display: none"><\/iframe>');
$(daTargetDiv).append(iframe);
$(form).attr("target", "dauploadiframe");
iframe.bind('load', function(){
setTimeout(function(){
try {
daProcessAjax($.parseJSON(daUnfakeHtmlResponse($("#dauploadiframe").contents().text())), form, 1);
}
catch (e){
try {
daProcessAjax($.parseJSON($("#dauploadiframe").contents().text()), form, 1);
}
catch (f){
daShowErrorScreen(document.getElementById('dauploadiframe').contentWindow.document.body.innerHTML, f);
}
}
}, 0);
});
form.submit();
}
else{
daRequestPending = true;
$.ajax({
type: "POST",
url: daInterviewUrl,
data: $(form).serialize(),
beforeSend: addCsrfHeader,
xhrFields: {
withCredentials: true
},
success: function(data){
setTimeout(function(){
daProcessAjax(data, form, 1);
}, 0);
},
error: function(xhr, status, error){
setTimeout(function(){
daProcessAjaxError(xhr, status, error);
}, 0);
}
});
}
return(false);
}
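// Submit the signature form by AJAX and hand the response to daProcessAjax.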
function daSignatureSubmit(event){
  var theForm = $(this);
  theForm.find("input[name='ajax']").val(1);
  daRequestPending = true;
  $.ajax({
    type: "POST",
    url: daInterviewUrl,
    data: theForm.serialize(),
    beforeSend: addCsrfHeader,
    xhrFields: {
      withCredentials: true
    },
    success: function(data){
      setTimeout(function(){
        daProcessAjax(data, theForm, 1);
      }, 0);
},
error: function(xhr, status, error){
setTimeout(function(){
daProcessAjaxError(xhr, status, error);
}, 0);
}
});
event.preventDefault();
event.stopPropagation();
return(false);
}
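// Like JSON.stringify, but with non-ASCII characters escaped as backslash-u
// sequences so the result is plain ASCII.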
function JSON_stringify(s){
var json = JSON.stringify(s);
return json.replace(/[\\u007f-\\uffff]/g,
function(c) {
return '\\\\u'+('0000'+c.charCodeAt(0).toString(16)).slice(-4);
}
);
}
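// Called once all inline file uploads have been read into data URLs; adds the
// encoded file data to the form and finishes the submission by AJAX or iframe.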
function daResumeUploadSubmission(form, fileArray, inline_file_list, newFileList){
$('<input>').attr({
type: 'hidden',
name: '_files_inline',
value: utoa(JSON_stringify(fileArray))
}).appendTo($(form));
for (var i = 0; i < inline_file_list.length; ++i){
document.getElementById(inline_file_list[i]).disabled = true;
}
if (newFileList.length > 0){
$("#dauploadiframe").remove();
var iframe = $('<iframe name="dauploadiframe" id="dauploadiframe" style="display: none"><\/iframe>');
$(daTargetDiv).append(iframe);
$(form).attr("target", "dauploadiframe");
iframe.bind('load', function(){
setTimeout(function(){
daProcessAjax($.parseJSON($("#dauploadiframe").contents().text()), form, 1);
}, 0);
});
form.submit();
}
else{
daRequestPending = true;
$.ajax({
type: "POST",
url: daInterviewUrl,
data: $(form).serialize(),
beforeSend: addCsrfHeader,
xhrFields: {
withCredentials: true
},
success: function(data){
setTimeout(function(){
daProcessAjax(data, form, 1);
}, 0);
},
error: function(xhr, status, error){
setTimeout(function(){
daProcessAjaxError(xhr, status, error);
}, 0);
}
});
}
}
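// Change handler for form inputs: sends an immediate checkin for the changed
// element and restarts the periodic checkin timer.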
function daOnChange(){
if (daCheckinSeconds == 0 || daShowIfInProcess){
return true;
}
if (daCheckinInterval != null){
clearInterval(daCheckinInterval);
}
var oThis = this;
daCheckin(oThis);
daCheckinInterval = setInterval(daCheckin, daCheckinSeconds);
return true;
}
function daPushChanges(){
//console.log("daPushChanges");
if (daCheckinSeconds == 0 || daShowIfInProcess){
return true;
}
if (daCheckinInterval != null){
clearInterval(daCheckinInterval);
}
daCheckin(null);
daCheckinInterval = setInterval(daCheckin, daCheckinSeconds);
return true;
}
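// Show the raw error response (with scripts stripped) in place of the
// interview when an AJAX request fails.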
function daProcessAjaxError(xhr, status, error){
daRequestPending = false;
if (xhr.responseType == undefined || xhr.responseType == '' || xhr.responseType == 'text'){
var theHtml = xhr.responseText;
if (theHtml == undefined){
$(daTargetDiv).html("error");
}
else{
theHtml = theHtml.replace(/<script[^>]*>[^<]*<\/script>/g, '');
$(daTargetDiv).html(theHtml);
}
if (daJsEmbed){
$(daTargetDiv)[0].scrollTo(0, 1);
}
else{
window.scrollTo(0, 1);
}
}
else {
console.log("daProcessAjaxError: response was not text");
}
}
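// Load an external script by appending a script tag to the document head.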
function daAddScriptToHead(src){
var head = document.getElementsByTagName("head")[0];
var script = document.createElement("script");
script.type = "text/javascript";
script.src = src;
script.async = true;
script.defer = true;
head.appendChild(script);
}
$(document).on('keydown', function(e){
if (e.which == 13){
if (daShowingHelp == 0){
var tag = $( document.activeElement ).prop("tagName");
if (tag != "INPUT" && tag != "TEXTAREA" && tag != "A" && tag != "LABEL" && tag != "BUTTON"){
e.preventDefault();
e.stopPropagation();
if ($("#daform .da-field-buttons button").not('.danonsubmit').length == 1){
$("#daform .da-field-buttons button").not('.danonsubmit').click();
}
return false;
}
}
if ($(document.activeElement).hasClass("btn-file")){
e.preventDefault();
e.stopPropagation();
$(document.activeElement).find('input').click();
return false;
}
}
});
function daShowErrorScreen(data, error){
console.log('daShowErrorScreen: ' + error);
if ("activeElement" in document){
document.activeElement.blur();
}
$(daTargetDiv).html(data);
}
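// Handle the JSON response to an AJAX submission.  Depending on data.action,
// replace the question body and re-initialize the page, redirect, refresh,
// reload, or fall back to a normal (non-AJAX) resubmission.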
function daProcessAjax(data, form, doScroll, actionURL){
daRequestPending = false;
daInformedChanged = false;
if (daDisable != null){
clearTimeout(daDisable);
}
daCsrf = data.csrf_token;
if (data.question_data){
daQuestionData = data.question_data;
}
if (data.action == 'body'){""" + forceFullScreen + """
if ("activeElement" in document){
document.activeElement.blur();
}
$(daTargetDiv).html("");
if (daJsEmbed){
$(daTargetDiv)[0].scrollTo(0, 1);
}
else{
window.scrollTo(0, 1);
}
$(daTargetDiv).html(data.body);
$(daTargetDiv).parent().removeClass();
$(daTargetDiv).parent().addClass(data.bodyclass);
$("meta[name=viewport]").attr('content', "width=device-width, initial-scale=1");
daDoAction = data.do_action;
//daNextAction = data.next_action;
daChatAvailable = data.livehelp.availability;
daChatMode = data.livehelp.mode;
daChatRoles = data.livehelp.roles;
daChatPartnerRoles = data.livehelp.partner_roles;
daSteps = data.steps;
//console.log("daProcessAjax: pushing " + daSteps);
if (!daJsEmbed && !daIframeEmbed){
if (history.state != null && daSteps > history.state.steps){
history.pushState({steps: daSteps}, data.browser_title + " - page " + daSteps, daLocationBar + """ + json.dumps(page_sep) + """ + daSteps);
}
else{
history.replaceState({steps: daSteps}, "", daLocationBar + """ + json.dumps(page_sep) + """ + daSteps);
}
}
daAllowGoingBack = data.allow_going_back;
daQuestionID = data.id_dict;
daMessageLog = data.message_log;
daInitialize(doScroll);
var tempDiv = document.createElement('div');
tempDiv.innerHTML = data.extra_scripts;
var scripts = tempDiv.getElementsByTagName('script');
for (var i = 0; i < scripts.length; i++){
//console.log("Found one script");
if (scripts[i].src != ""){
//console.log("Added script to head");
daAddScriptToHead(scripts[i].src);
}
else{
daGlobalEval(scripts[i].innerHTML);
}
}
$(".da-group-has-error").each(function(){
if ($(this).is(":visible")){
if (daJsEmbed){
var scrollToTarget = $(this).position().top - 60;
setTimeout(function(){
$(daTargetDiv).animate({scrollTop: scrollToTarget}, 1000);
}, 100);
}
else{
var scrollToTarget = $(this).offset().top - 60;
setTimeout(function(){
$(daTargetDiv).parent().parent().animate({scrollTop: scrollToTarget}, 1000);
}, 100);
}
return false;
}
});
for (var i = 0; i < data.extra_css.length; i++){
$("head").append(data.extra_css[i]);
}
document.title = data.browser_title;
if ($("html").attr("lang") != data.lang){
$("html").attr("lang", data.lang);
}
$(document).trigger('daPageLoad');
if (daReloader != null){
clearTimeout(daReloader);
}
if (data.reload_after != null && data.reload_after > 0){
//daReloader = setTimeout(function(){location.reload();}, data.reload_after);
daReloader = setTimeout(function(){daRefreshSubmit();}, data.reload_after);
}
daUpdateHeight();
}
else if (data.action == 'redirect'){
if (daSpinnerTimeout != null){
clearTimeout(daSpinnerTimeout);
daSpinnerTimeout = null;
}
if (daShowingSpinner){
daHideSpinner();
}
window.location = data.url;
}
else if (data.action == 'refresh'){
daRefreshSubmit();
}
else if (data.action == 'reload'){
location.reload(true);
}
else if (data.action == 'resubmit'){
    if (form == null){
      window.location = actionURL;
      return;
    }
$("input[name='ajax']").remove();
if (daSubmitter != null && daSubmitter.name && $('input[name="' + daSubmitter.name + '"]').length == 0){
var input = $("<input>")
.attr("type", "hidden")
.attr("name", daSubmitter.name).val(daSubmitter.value);
$(form).append($(input));
}
form.submit();
}
}
function daEmbeddedJs(e){
//console.log("using embedded js");
var data = decodeURIComponent($(this).data('js'));
daGlobalEval(data);
e.preventDefault();
return false;
}
function daEmbeddedAction(e){
if (daRequestPending){
e.preventDefault();
$(this).blur();
return false;
}
if ($(this).hasClass("daremovebutton")){
if (confirm(""" + json.dumps(word("Are you sure you want to delete this item?")) + """)){
return true;
}
e.preventDefault();
$(this).blur();
return false;
}
var actionData = decodeURIComponent($(this).data('embaction'));
var theURL = $(this).attr("href");
daRequestPending = true;
$.ajax({
type: "POST",
url: daInterviewUrl,
data: $.param({_action: actionData, csrf_token: daCsrf, ajax: 1}),
beforeSend: addCsrfHeader,
xhrFields: {
withCredentials: true
},
success: function(data){
setTimeout(function(){
daProcessAjax(data, null, 1, theURL);
}, 0);
},
error: function(xhr, status, error){
setTimeout(function(){
daProcessAjaxError(xhr, status, error);
}, 0);
},
dataType: 'json'
});
daSpinnerTimeout = setTimeout(daShowSpinner, 1000);
e.preventDefault();
return false;
}
function daReviewAction(e){
if (daRequestPending){
e.preventDefault();
$(this).blur();
return false;
}
//action_perform_with_next($(this).data('action'), null, daNextAction);
var info = $.parseJSON(atou($(this).data('action')));
da_action_perform(info['action'], info['arguments']);
e.preventDefault();
return false;
}
function daRingChat(){
daChatStatus = 'ringing';
daPushChanges();
}
function daTurnOnChat(){
//console.log("Publishing from daTurnOnChat");
$("#daChatOnButton").addClass("dainvisible");
$("#daChatBox").removeClass("dainvisible");
$("#daCorrespondence").html('');
for(var i = 0; i < daChatHistory.length; i++){
daPublishMessage(daChatHistory[i]);
}
daScrollChatFast();
$("#daMessage").prop('disabled', false);
if (daShowingHelp){
$("#daMessage").focus();
}
}
function daCloseChat(){
//console.log('daCloseChat');
daChatStatus = 'hangup';
daPushChanges();
if (daSocket != null && daSocket.connected){
daSocket.disconnect();
}
}
// function daTurnOffChat(){
// $("#daChatOnButton").removeClass("dainvisible");
// $("#daChatBox").addClass("dainvisible");
// //daCloseSocket();
// $("#daMessage").prop('disabled', true);
// $("#daSend").unbind();
// //daStartCheckingIn();
// }
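// Show or hide the chat interface elements according to daChatStatus and the
// number of available chat partners.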
function daDisplayChat(){
if (daChatStatus == 'off' || daChatStatus == 'observeonly'){
$("#daChatBox").addClass("dainvisible");
$("#daChatAvailable").addClass("dainvisible");
$("#daChatOnButton").addClass("dainvisible");
}
else{
if (daChatStatus == 'waiting'){
if (daChatPartnersAvailable > 0){
$("#daChatBox").removeClass("dainvisible");
}
}
else {
$("#daChatBox").removeClass("dainvisible");
}
}
if (daChatStatus == 'waiting'){
//console.log("I see waiting")
if (daChatHistory.length > 0){
$("#daChatAvailable a i").removeClass("da-chat-active");
$("#daChatAvailable a i").addClass("da-chat-inactive");
$("#daChatAvailable").removeClass("dainvisible");
}
else{
$("#daChatAvailable a i").removeClass("da-chat-active");
$("#daChatAvailable a i").removeClass("da-chat-inactive");
$("#daChatAvailable").addClass("dainvisible");
}
$("#daChatOnButton").addClass("dainvisible");
$("#daChatOffButton").addClass("dainvisible");
$("#daMessage").prop('disabled', true);
$("#daSend").prop('disabled', true);
}
if (daChatStatus == 'standby' || daChatStatus == 'ready'){
//console.log("I see standby")
$("#daChatAvailable").removeClass("dainvisible");
$("#daChatAvailable a i").removeClass("da-chat-inactive");
$("#daChatAvailable a i").addClass("da-chat-active");
$("#daChatOnButton").removeClass("dainvisible");
$("#daChatOffButton").addClass("dainvisible");
$("#daMessage").prop('disabled', true);
$("#daSend").prop('disabled', true);
daInformAbout('chat');
}
if (daChatStatus == 'on'){
$("#daChatAvailable").removeClass("dainvisible");
$("#daChatAvailable a i").removeClass("da-chat-inactive");
$("#daChatAvailable a i").addClass("da-chat-active");
$("#daChatOnButton").addClass("dainvisible");
$("#daChatOffButton").removeClass("dainvisible");
$("#daMessage").prop('disabled', false);
if (daShowingHelp){
$("#daMessage").focus();
}
$("#daSend").prop('disabled', false);
daInformAbout('chat');
}
hideTablist();
}
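// Rebuild the chat history panel from the list of messages returned by the
// server.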
function daChatLogCallback(data){
if (data.action && data.action == 'reload'){
location.reload(true);
}
//console.log("daChatLogCallback: success is " + data.success);
if (data.success){
$("#daCorrespondence").html('');
daChatHistory = [];
var messages = data.messages;
for (var i = 0; i < messages.length; ++i){
daChatHistory.push(messages[i]);
daPublishMessage(messages[i]);
}
daDisplayChat();
daScrollChatFast();
}
}
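// Re-request the current question by AJAX without submitting any field data,
// so the screen refreshes in place.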
function daRefreshSubmit(){
daRequestPending = true;
$.ajax({
type: "POST",
url: daInterviewUrl,
data: 'csrf_token=' + daCsrf + '&ajax=1',
beforeSend: addCsrfHeader,
xhrFields: {
withCredentials: true
},
success: function(data){
setTimeout(function(){
daProcessAjax(data, $("#daform"), 0);
}, 0);
},
error: function(xhr, status, error){
setTimeout(function(){
daProcessAjaxError(xhr, status, error);
}, 0);
}
});
}
function daResetCheckinCode(){
daCheckinCode = Math.random();
}
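// Process the response to a checkin: run any queued commands (flash messages,
// javascript, field updates, background responses), update phone and chat
// availability, and open or close the live chat connection as needed.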
function daCheckinCallback(data){
if (data.action && data.action == 'reload'){
location.reload(true);
}
daCheckingIn = 0;
//console.log("daCheckinCallback: success is " + data.success);
if (data.checkin_code != daCheckinCode){
console.log("Ignoring checkincallback because code is wrong");
return;
}
if (data.success){
if (data.commands.length > 0){
for (var i = 0; i < data.commands.length; ++i){
var command = data.commands[i];
if (command.extra == 'flash'){
if (!$("#daflash").length){
$(daTargetDiv).append(daSprintf(daNotificationContainer, ""));
}
$("#daflash").append(daSprintf(daNotificationMessage, "info", command.value));
//console.log("command is " + command.value);
}
else if (command.extra == 'refresh'){
daRefreshSubmit();
}
else if (command.extra == 'javascript'){
//console.log("I should eval" + command.value);
daGlobalEval(command.value);
}
else if (command.extra == 'fields'){
for (var key in command.value){
if (command.value.hasOwnProperty(key)){
if (typeof command.value[key] === 'object' && command.value[key] !== null){
if (command.value[key].hasOwnProperty('choices')){
daSetChoices(key, command.value[key]['choices']);
}
if (command.value[key].hasOwnProperty('value')){
daSetField(key, command.value[key]['value']);
}
}
else{
daSetField(key, command.value[key]);
}
}
}
}
else if (command.extra == 'backgroundresponse'){
var assignments = Array();
if (command.value.hasOwnProperty('target') && command.value.hasOwnProperty('content')){
assignments.push({target: command.value.target, content: command.value.content});
}
if (Array.isArray(command.value)){
for (i = 0; i < command.value.length; ++i){
var possible_assignment = command.value[i];
if (possible_assignment.hasOwnProperty('target') && possible_assignment.hasOwnProperty('content')){
assignments.push({target: possible_assignment.target, content: possible_assignment.content});
}
}
}
for (i = 0; i < assignments.length; ++i){
var assignment = assignments[i];
          $('.datarget' + assignment.target.replace(/[^A-Za-z0-9_]/g, '')).prop('innerHTML', assignment.content);
}
//console.log("Triggering daCheckIn");
$(document).trigger('daCheckIn', [command.action, command.value]);
}
}
// setTimeout(function(){
// $("#daflash .daalert-interlocutory").hide(300, function(){
// $(self).remove();
// });
// }, 5000);
}
oldDaChatStatus = daChatStatus;
//console.log("daCheckinCallback: from " + daChatStatus + " to " + data.chat_status);
if (data.phone == null){
$("#daPhoneMessage").addClass("dainvisible");
$("#daPhoneMessage p").html('');
$("#daPhoneAvailable").addClass("dainvisible");
daPhoneAvailable = false;
}
else{
$("#daPhoneMessage").removeClass("dainvisible");
$("#daPhoneMessage p").html(data.phone);
$("#daPhoneAvailable").removeClass("dainvisible");
daPhoneAvailable = true;
daInformAbout('phone');
}
var statusChanged;
if (daChatStatus == data.chat_status){
statusChanged = false;
}
else{
statusChanged = true;
}
if (statusChanged){
daChatStatus = data.chat_status;
daDisplayChat();
if (daChatStatus == 'ready'){
//console.log("calling initialize socket because ready");
daInitializeSocket();
}
}
daChatPartnersAvailable = 0;
if (daChatMode == 'peer' || daChatMode == 'peerhelp'){
daChatPartnersAvailable += data.num_peers;
if (data.num_peers == 1){
$("#dapeerMessage").html('<span class="badge bg-info">' + data.num_peers + ' ' + """ + json.dumps(word("other user")) + """ + '<\/span>');
}
else{
$("#dapeerMessage").html('<span class="badge bg-info">' + data.num_peers + ' ' + """ + json.dumps(word("other users")) + """ + '<\/span>');
}
$("#dapeerMessage").removeClass("dainvisible");
}
else{
$("#dapeerMessage").addClass("dainvisible");
}
if (daChatMode == 'peerhelp' || daChatMode == 'help'){
if (data.help_available == 1){
$("#dapeerHelpMessage").html('<span class="badge bg-primary">' + data.help_available + ' ' + """ + json.dumps(word("operator")) + """ + '<\/span>');
}
else{
$("#dapeerHelpMessage").html('<span class="badge bg-primary">' + data.help_available + ' ' + """ + json.dumps(word("operators")) + """ + '<\/span>');
}
$("#dapeerHelpMessage").removeClass("dainvisible");
}
else{
$("#dapeerHelpMessage").addClass("dainvisible");
}
if (daBeingControlled){
if (!data.observerControl){
daBeingControlled = false;
//console.log("Hiding control 1");
daHideControl();
if (daChatStatus != 'on'){
if (daSocket != null && daSocket.connected){
//console.log('Terminating interview socket because control is over');
daSocket.emit('terminate');
}
}
}
}
else{
if (data.observerControl){
daBeingControlled = true;
daInitializeSocket();
}
}
}
hideTablist();
}
function daCheckoutCallback(data){
}
function daInitialCheckin(){
daCheckin('initial');
}
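// Post a checkin to the server; when a form is on screen and the session is
// not under observer control, the current form contents are included.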
function daCheckin(elem){
//console.log("daCheckin");
var elem = (typeof elem === 'undefined') ? null : elem;
daCheckingIn += 1;
//if (daCheckingIn > 1 && !(daCheckingIn % 3)){
if (elem === null && daCheckingIn > 1){
//console.log("daCheckin: request already pending, not re-sending");
return;
}
var datastring;
if ((daChatStatus != 'off') && $("#daform").length > 0 && !daBeingControlled){
if (daDoAction != null){
datastring = $.param({action: 'checkin', chatstatus: daChatStatus, chatmode: daChatMode, csrf_token: daCsrf, checkinCode: daCheckinCode, parameters: daFormAsJSON(elem), raw_parameters: JSON.stringify($("#daform").serializeArray()), do_action: daDoAction, ajax: '1'});
}
else{
datastring = $.param({action: 'checkin', chatstatus: daChatStatus, chatmode: daChatMode, csrf_token: daCsrf, checkinCode: daCheckinCode, parameters: daFormAsJSON(elem), raw_parameters: JSON.stringify($("#daform").serializeArray()), ajax: '1'});
}
}
else{
if (daDoAction != null){
datastring = $.param({action: 'checkin', chatstatus: daChatStatus, chatmode: daChatMode, csrf_token: daCsrf, checkinCode: daCheckinCode, do_action: daDoAction, parameters: daFormAsJSON(elem), ajax: '1'});
}
else{
datastring = $.param({action: 'checkin', chatstatus: daChatStatus, chatmode: daChatMode, csrf_token: daCsrf, checkinCode: daCheckinCode, ajax: '1'});
}
}
//console.log("Doing checkin with " + daChatStatus);
$.ajax({
type: 'POST',
url: """ + "'" + url_for('checkin', i=yaml_filename) + "'" + """,
beforeSend: addCsrfHeader,
xhrFields: {
withCredentials: true
},
data: datastring,
success: daCheckinCallback,
dataType: 'json'
});
return true;
}
function daCheckout(){
$.ajax({
type: 'POST',
url: """ + "'" + url_for('checkout', i=yaml_filename) + "'" + """,
beforeSend: addCsrfHeader,
xhrFields: {
withCredentials: true
},
data: 'csrf_token=' + daCsrf + '&ajax=1&action=checkout',
success: daCheckoutCallback,
dataType: 'json'
});
return true;
}
function daStopCheckingIn(){
daCheckout();
if (daCheckinInterval != null){
clearInterval(daCheckinInterval);
}
}
function daShowSpinner(){
if ($("#daquestion").length > 0){
$('<div id="daSpinner" class="da-spinner-container da-top-for-navbar"><div class="container"><div class="row"><div class="col text-center"><span class="da-spinner"><i class="fas fa-spinner fa-spin"><\/i><\/span><\/div><\/div><\/div><\/div>').appendTo(daTargetDiv);
}
else{
var newSpan = document.createElement('span');
var newI = document.createElement('i');
$(newI).addClass("fas fa-spinner fa-spin");
$(newI).appendTo(newSpan);
$(newSpan).attr("id", "daSpinner");
$(newSpan).addClass("da-sig-spinner da-top-for-navbar");
$(newSpan).appendTo("#dasigtoppart");
}
daShowingSpinner = true;
}
function daHideSpinner(){
$("#daSpinner").remove();
daShowingSpinner = false;
daSpinnerTimeout = null;
}
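// Resize an embedded text input to fit its contents by measuring the escaped
// text in a temporary hidden span.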
function daAdjustInputWidth(e){
var contents = $(this).val();
var leftBracket = new RegExp('<', 'g');
var rightBracket = new RegExp('>', 'g');
  contents = contents.replace(/&/g, '&amp;').replace(leftBracket, '&lt;').replace(rightBracket, '&gt;').replace(/ /g, '&nbsp;');
$('<span class="dainput-embedded" id="dawidth">').html( contents ).appendTo('#daquestion');
$("#dawidth").css('min-width', $(this).css('min-width'));
$("#dawidth").css('background-color', $(daTargetDiv).css('background-color'));
$("#dawidth").css('color', $(daTargetDiv).css('background-color'));
$(this).width($('#dawidth').width() + 16);
setTimeout(function(){
$("#dawidth").remove();
}, 0);
}
function daShowNotifications(){
var n = daMessageLog.length;
for (var i = 0; i < n; i++){
var message = daMessageLog[i];
if (message.priority == 'console'){
console.log(message.message);
}
else if (message.priority == 'javascript'){
daGlobalEval(message.message);
}
else if (message.priority == 'success' || message.priority == 'warning' || message.priority == 'danger' || message.priority == 'secondary' || message.priority == 'tertiary' || message.priority == 'info' || message.priority == 'dark' || message.priority == 'light' || message.priority == 'primary'){
da_flash(message.message, message.priority);
}
else{
da_flash(message.message, 'info');
}
}
}
function daIgnoreAllButTab(event){
event = event || window.event;
var code = event.keyCode;
if (code != 9){
if (code == 13){
$(event.target).parents(".file-caption-main").find("input.dafile").click();
}
event.preventDefault();
return false;
}
}
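// Enable or disable the matched inputs unless they sit inside a hidden
// show-if container; comboboxes, file inputs, and sliders are toggled through
// their own widget methods.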
function daDisableIfNotHidden(query, value){
$(query).each(function(){
var showIfParent = $(this).parents('.dashowif,.dajsshowif');
if (!(showIfParent.length && ($(showIfParent[0]).data('isVisible') == '0' || !$(showIfParent[0]).is(":visible")))){
if ($(this).hasClass('combobox')){
if (value){
daComboBoxes[$(this).attr('id')].disable();
}
else {
daComboBoxes[$(this).attr('id')].enable();
}
}
else if ($(this).hasClass('dafile')){
if (value){
$(this).data("fileinput").disable();
}
else{
$(this).data("fileinput").enable();
}
}
else if ($(this).hasClass('daslider')){
if (value){
$(this).slider('disable');
}
else{
$(this).slider('enable');
}
}
else {
$(this).prop("disabled", value);
}
}
});
}
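// Compare a field's value with a show-if target value, converting numeric
// strings to numbers and treating 'None' as equivalent to an empty string.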
function daShowIfCompare(theVal, showIfVal){
if (typeof theVal == 'string' && theVal.match(/^-?\d+\.\d+$/)){
theVal = parseFloat(theVal);
}
else if (typeof theVal == 'string' && theVal.match(/^-?\d+$/)){
theVal = parseInt(theVal);
}
if (typeof showIfVal == 'string' && showIfVal.match(/^-?\d+\.\d+$/)){
showIfVal = parseFloat(showIfVal);
}
else if (typeof showIfVal == 'string' && showIfVal.match(/^-?\d+$/)){
showIfVal = parseInt(showIfVal);
}
if (typeof theVal == 'string' || typeof showIfVal == 'string'){
if (String(showIfVal) == 'None' && String(theVal) == ''){
return true;
}
return (String(theVal) == String(showIfVal));
}
return (theVal == showIfVal);
}
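// Keep the list-collect rows consistent after an add or remove: reveal the
// next blank row when appropriate, hide trailing blank rows, and adjust which
// add/remove buttons are shown.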
function rationalizeListCollect(){
var finalNum = $(".dacollectextraheader").last().data('collectnum');
var num = $(".dacollectextraheader:visible").last().data('collectnum');
if (parseInt(num) < parseInt(finalNum)){
if ($('div.dacollectextraheader[data-collectnum="' + num + '"]').find(".dacollectadd").hasClass('dainvisible')){
$('div.dacollectextraheader[data-collectnum="' + (num + 1) + '"]').show('fast');
}
}
var n = parseInt(finalNum);
var firstNum = parseInt($(".dacollectextraheader").first().data('collectnum'));
while (n-- > firstNum){
if ($('div.dacollectextraheader[data-collectnum="' + (n + 1) + '"]:visible').length > 0){
if (!$('div.dacollectextraheader[data-collectnum="' + (n + 1) + '"]').find(".dacollectadd").hasClass('dainvisible') && $('div.dacollectextraheader[data-collectnum="' + n + '"]').find(".dacollectremove").hasClass('dainvisible')){
$('div.dacollectextraheader[data-collectnum="' + (n + 1) + '"]').hide();
}
}
}
var n = parseInt(finalNum);
var seenAddAnother = false;
while (n-- > firstNum){
if ($('div.dacollectextraheader[data-collectnum="' + (n + 1) + '"]:visible').length > 0){
if (!$('div.dacollectextraheader[data-collectnum="' + (n + 1) + '"]').find(".dacollectadd").hasClass('dainvisible')){
seenAddAnother = true;
}
var current = $('div.dacollectextraheader[data-collectnum="' + n + '"]');
if (seenAddAnother && !$(current).find(".dacollectadd").hasClass('dainvisible')){
$(current).find(".dacollectadd").addClass('dainvisible');
$(current).find(".dacollectunremove").removeClass('dainvisible');
}
}
}
}
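// Populate an AJAX combobox by calling its server-side action with the text
// typed so far; requests are throttled to roughly one every two seconds.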
function daFetchAjax(elem, cb, doShow){
var wordStart = $(elem).val();
if (wordStart.length < parseInt(cb.$source.data('trig'))){
if (cb.shown){
cb.hide();
}
return;
}
if (daFetchAjaxTimeout != null && daFetchAjaxTimeoutRunning){
daFetchAjaxTimeoutFetchAfter = true;
return;
}
if (doShow){
daFetchAjaxTimeout = setTimeout(function(){
daFetchAjaxTimeoutRunning = false;
if (daFetchAjaxTimeoutFetchAfter){
daFetchAjax(elem, cb, doShow);
daFetchAjaxTimeoutFetchAfter = false;
}
}, 2000);
daFetchAjaxTimeoutRunning = true;
daFetchAjaxTimeoutFetchAfter = false;
}
da_action_call(cb.$source.data('action'), {wordstart: wordStart}, function(data){
wordStart = $(elem).val();
if (typeof data == "object"){
var upperWordStart = wordStart.toUpperCase()
cb.$source.empty();
var emptyItem = $("<option>");
emptyItem.val("");
emptyItem.text("");
cb.$source.append(emptyItem);
var notYetSelected = true;
var selectedValue = null;
if (Array.isArray(data)){
for (var i = 0; i < data.length; ++i){
if (Array.isArray(data[i])){
if (data[i].length >= 2){
var item = $("<option>");
if (notYetSelected && ((doShow && data[i][1].toString() == wordStart) || data[i][0].toString() == wordStart)){
item.prop('selected', true);
notYetSelected = false;
selectedValue = data[i][1]
}
item.text(data[i][1]);
item.val(data[i][0]);
cb.$source.append(item);
}
else if (data[i].length == 1){
var item = $("<option>");
if (notYetSelected && ((doShow && data[i][0].toString() == wordStart) || data[i][0].toString() == wordStart)){
item.prop('selected', true);
notYetSelected = false;
selectedValue = data[i][0]
}
item.text(data[i][0]);
item.val(data[i][0]);
cb.$source.append(item);
}
}
else if (typeof data[i] == "object"){
for (var key in data[i]){
if (data[i].hasOwnProperty(key)){
var item = $("<option>");
if (notYetSelected && ((doShow && key.toString() == wordStart) || key.toString() == wordStart)){
item.prop('selected', true);
notYetSelected = false;
selectedValue = data[i][key];
}
item.text(data[i][key]);
item.val(key);
cb.$source.append(item);
}
}
}
else{
var item = $("<option>");
if (notYetSelected && ((doShow && data[i].toString().toUpperCase() == upperWordStart) || data[i].toString() == wordStart)){
item.prop('selected', true);
notYetSelected = false;
selectedValue = data[i];
}
item.text(data[i]);
item.val(data[i]);
cb.$source.append(item);
}
}
}
else if (typeof data == "object"){
var keyList = Array();
for (var key in data){
if (data.hasOwnProperty(key)){
keyList.push(key);
}
}
keyList = keyList.sort();
for (var i = 0; i < keyList.length; ++i){
var item = $("<option>");
if (notYetSelected && ((doShow && keyList[i].toString().toUpperCase() == upperWordStart) || keyList[i].toString() == wordStart)){
item.prop('selected', true);
notYetSelected = false;
selectedValue = data[keyList[i]];
}
item.text(data[keyList[i]]);
item.val(keyList[i]);
cb.$source.append(item);
}
}
if (doShow){
cb.refresh();
cb.clearTarget();
cb.$target.val(cb.$element.val());
cb.lookup();
}
else{
if (!notYetSelected){
cb.$element.val(selectedValue);
}
}
}
});
}
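// (Re)initialize the question screen after a page load or AJAX update: set up
// file inputs, table reordering, list-collect controls, checkbox and combobox
// behavior, chat buttons, keyboard shortcuts, and initial focus.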
function daInitialize(doScroll){
daResetCheckinCode();
daComboBoxes = Object();
daVarLookupSelect = Object();
daVarLookupCheckbox = Object();
if (daSpinnerTimeout != null){
clearTimeout(daSpinnerTimeout);
daSpinnerTimeout = null;
}
if (daShowingSpinner){
daHideSpinner();
}
daNotYetScrolled = true;
// $(".dahelptrigger").click(function(e) {
// e.preventDefault();
// $(this).tab('show');
// });
$("input.dafile").fileinput({theme: "fas", language: document.documentElement.lang, allowedPreviewTypes: ['image']});
$(".datableup,.databledown").click(function(e){
e.preventDefault();
$(this).blur();
var row = $(this).parents("tr").first();
if ($(this).is(".datableup")) {
var prev = row.prev();
if (prev.length == 0){
return false;
}
row.addClass("datablehighlighted");
setTimeout(function(){
row.insertBefore(prev);
}, 200);
}
else {
var next = row.next();
if (next.length == 0){
return false;
}
row.addClass("datablehighlighted");
setTimeout(function(){
row.insertAfter(row.next());
}, 200);
}
setTimeout(function(){
row.removeClass("datablehighlighted");
}, 1000);
return false;
});
$(".dacollectextra").find('input, textarea, select').prop("disabled", true);
$(".dacollectextra").find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].disable();
});
$(".dacollectextra").find('input.daslider').each(function(){
$(this).slider('disable');
});
$(".dacollectextra").find('input.dafile').each(function(){
$(this).data("fileinput").disable();
});
$("#da-extra-collect").on('click', function(){
$("<input>").attr({
type: 'hidden',
name: '_collect',
value: $(this).val()
}).appendTo($("#daform"));
$("#daform").submit();
event.preventDefault();
return false;
});
$(".dacollectadd").on('click', function(e){
e.preventDefault();
if ($("#daform").valid()){
var num = $(this).parent().parent().data('collectnum');
$('div[data-collectnum="' + num + '"]').show('fast');
$('div[data-collectnum="' + num + '"]').find('input, textarea, select').prop("disabled", false);
$('div[data-collectnum="' + num + '"]').find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].enable();
});
$('div[data-collectnum="' + num + '"]').find('input.daslider').each(function(){
$(this).slider('enable');
});
$('div[data-collectnum="' + num + '"]').find('input.dafile').each(function(){
$(this).data("fileinput").enable();
});
$(this).parent().find("button.dacollectremove").removeClass("dainvisible");
$(this).parent().find("span.dacollectnum").removeClass("dainvisible");
$(this).addClass("dainvisible");
$(".da-first-delete").removeClass("dainvisible");
rationalizeListCollect();
var elem = $('div[data-collectnum="' + num + '"]').find('input, textarea, select').first();
if ($(elem).visible()){
$(elem).focus();
}
}
return false;
});
$("#dasigform").on('submit', daSignatureSubmit);
$(".dacollectremove").on('click', function(e){
e.preventDefault();
var num = $(this).parent().parent().data('collectnum');
$('div[data-collectnum="' + num + '"]:not(.dacollectextraheader, .dacollectheader, .dacollectfirstheader)').hide('fast');
$('div[data-collectnum="' + num + '"]').find('input, textarea, select').prop("disabled", true);
$('div[data-collectnum="' + num + '"]').find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].disable();
});
$('div[data-collectnum="' + num + '"]').find('input.daslider').each(function(){
$(this).slider('disable');
});
$('div[data-collectnum="' + num + '"]').find('input.dafile').each(function(){
$(this).data("fileinput").disable();
});
$(this).parent().find("button.dacollectadd").removeClass("dainvisible");
$(this).parent().find("span.dacollectnum").addClass("dainvisible");
$(this).addClass("dainvisible");
rationalizeListCollect();
return false;
});
$(".dacollectremoveexisting").on('click', function(e){
e.preventDefault();
var num = $(this).parent().parent().data('collectnum');
$('div[data-collectnum="' + num + '"]:not(.dacollectextraheader, .dacollectheader, .dacollectfirstheader)').hide('fast');
$('div[data-collectnum="' + num + '"]').find('input, textarea, select').prop("disabled", true);
$('div[data-collectnum="' + num + '"]').find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].disable();
});
$('div[data-collectnum="' + num + '"]').find('input.daslider').each(function(){
$(this).slider('disable');
});
$('div[data-collectnum="' + num + '"]').find('input.dafile').each(function(){
$(this).data("fileinput").disable();
});
$(this).parent().find("button.dacollectunremove").removeClass("dainvisible");
$(this).parent().find("span.dacollectremoved").removeClass("dainvisible");
$(this).addClass("dainvisible");
rationalizeListCollect();
return false;
});
$(".dacollectunremove").on('click', function(e){
e.preventDefault();
var num = $(this).parent().parent().data('collectnum');
$('div[data-collectnum="' + num + '"]').show('fast');
$('div[data-collectnum="' + num + '"]').find('input, textarea, select').prop("disabled", false);
$('div[data-collectnum="' + num + '"]').find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].enable();
});
$('div[data-collectnum="' + num + '"]').find('input.daslider').each(function(){
$(this).slider('enable');
});
$('div[data-collectnum="' + num + '"]').find('input.dafile').each(function(){
$(this).data("fileinput").enable();
});
$(this).parent().find("button.dacollectremoveexisting").removeClass("dainvisible");
$(this).parent().find("button.dacollectremove").removeClass("dainvisible");
$(this).parent().find("span.dacollectnum").removeClass("dainvisible");
$(this).parent().find("span.dacollectremoved").addClass("dainvisible");
$(this).addClass("dainvisible");
rationalizeListCollect();
return false;
});
//$('#daquestionlabel').click(function(e) {
// e.preventDefault();
// $(this).tab('show');
//});
//$('#dapagetitle').click(function(e) {
// if ($(this).prop('href') == '#'){
// e.preventDefault();
// //$('#daquestionlabel').tab('show');
// }
//});
$('select.damultiselect').each(function(){
var isObject = $(this).hasClass('daobject');
var varname = atou($(this).data('varname'));
var theSelect = this;
$(this).find('option').each(function(){
var theVal = atou($(this).data('valname'));
if (isObject){
theVal = atou(theVal);
}
var key = varname + '["' + theVal + '"]';
if (!daVarLookupSelect[key]){
daVarLookupSelect[key] = [];
}
daVarLookupSelect[key].push({'select': theSelect, 'option': this, 'value': theVal});
key = varname + "['" + theVal + "']"
if (!daVarLookupSelect[key]){
daVarLookupSelect[key] = [];
}
daVarLookupSelect[key].push({'select': theSelect, 'option': this, 'value': theVal});
});
})
$('fieldset.da-field-checkboxes').each(function(){
var isObject = $(this).hasClass('daobject');
var varname = atou($(this).data('varname'));
var cbList = [];
if (!daVarLookupCheckbox[varname]){
daVarLookupCheckbox[varname] = [];
}
$(this).find('input').each(function(){
if ($(this).attr('name').substr(0,7) === '_ignore'){
return;
}
var theVal = atou($(this).data('cbvalue'));
var theType = $(this).data('cbtype');
var key;
if (theType == 'R'){
key = varname + '[' + theVal + ']';
}
else {
key = varname + '["' + theVal + '"]';
}
cbList.push({'variable': key, 'value': theVal, 'type': theType, 'elem': this})
});
daVarLookupCheckbox[varname].push({'elem': this, 'checkboxes': cbList, 'isObject': isObject});
$(this).find('input.danota-checkbox').each(function(){
if (!daVarLookupCheckbox[varname + '[nota]']){
daVarLookupCheckbox[varname + '[nota]'] = [];
}
daVarLookupCheckbox[varname + '[nota]'].push({'elem': this, 'checkboxes': [{'variable': varname + '[nota]', 'type': 'X', 'elem': this}], 'isObject': isObject});
});
$(this).find('input.daaota-checkbox').each(function(){
if (!daVarLookupCheckbox[varname + '[aota]']){
daVarLookupCheckbox[varname + '[aota]'] = [];
}
daVarLookupCheckbox[varname + '[aota]'].push({'elem': this, 'checkboxes': [{'variable': varname + '[aota]', 'type': 'X', 'elem': this}], 'isObject': isObject});
});
});
$('.dacurrency').each(function(){
var theVal = $(this).val().toString();
if (theVal.indexOf('.') >= 0){
theVal = theVal.replace(',', '');
var num = parseFloat(theVal);
var cleanNum = num.toFixed(""" + str(daconfig.get('currency decimal places', 2)) + """).toString();
if (cleanNum != 'NaN') {
$(this).val(cleanNum);
}
}
});
$('.dacurrency').on('blur', function(){
var theVal = $(this).val().toString();
if (theVal.indexOf('.') >= 0){
theVal = theVal.replace(',', '');
var num = parseFloat(theVal);
var cleanNum = num.toFixed(""" + str(daconfig.get('currency decimal places', 2)) + """).toString();
if (cleanNum != 'NaN') {
$(this).val(cleanNum);
}
}
});
// iOS will truncate text in `select` options. Adding an empty optgroup fixes that
if (navigator.userAgent.match(/(iPad|iPhone|iPod touch);/i)) {
var selects = document.querySelectorAll("select");
for (var i = 0; i < selects.length; i++){
selects[i].appendChild(document.createElement("optgroup"));
}
}
$(".da-to-labelauty").labelauty({ class: "labelauty da-active-invisible dafullwidth" });
$(".da-to-labelauty-icon").labelauty({ label: false });
$("button").on('click', function(){
daWhichButton = this;
return true;
});
$('#dasource').on('shown.bs.collapse', function (e) {
if (daJsEmbed){
var scrollTarget = $("#dasource").first().position().top - 60;
$(daTargetDiv).animate({
scrollTop: scrollTarget
}, 1000);
}
else{
var scrollTarget = $("#dasource").first().offset().top - 60;
$("html, body").animate({
scrollTop: scrollTarget
}, 1000);
}
});
$('button[data-bs-target="#dahelp"]').on('shown.bs.tab', function (e) {
daShowingHelp = 1;
if (daNotYetScrolled){
daScrollChatFast();
daNotYetScrolled = false;
}""" + debug_readability_help + """
});
$('button[data-bs-target="#daquestion"]').on('shown.bs.tab', function (e) {
daShowingHelp = 0;""" + debug_readability_question + """
});
$("input.daaota-checkbox").click(function(){
var anyChanged = false;
var firstEncountered = null;
$(this).parents('fieldset').find('input.danon-nota-checkbox').each(function(){
if (firstEncountered === null){
firstEncountered = this;
}
var existing_val = $(this).prop('checked');
$(this).prop('checked', true);
if (existing_val != true){
$(this).trigger('change');
anyChanged = true;
}
});
if (firstEncountered !== null && anyChanged === false){
$(firstEncountered).trigger('change');
}
$(this).parents('fieldset').find('input.danota-checkbox').each(function(){
var existing_val = $(this).prop('checked');
$(this).prop('checked', false);
if (existing_val != false){
$(this).trigger('change');
}
});
});
$("input.danota-checkbox").click(function(){
var anyChanged = false;
var firstEncountered = null;
$(this).parents('fieldset').find('input.danon-nota-checkbox').each(function(){
if (firstEncountered === null){
firstEncountered = this;
}
var existing_val = $(this).prop('checked');
$(this).prop('checked', false);
if (existing_val != false){
$(this).trigger('change');
anyChanged = true;
}
});
if (firstEncountered !== null && anyChanged === false){
$(firstEncountered).trigger('change');
}
$(this).parents('fieldset').find('input.daaota-checkbox').each(function(){
var existing_val = $(this).prop('checked');
$(this).prop('checked', false);
if (existing_val != false){
$(this).trigger('change');
}
});
});
$("input.danon-nota-checkbox").click(function(){
$(this).parents('fieldset').find('input.danota-checkbox').each(function(){
var existing_val = $(this).prop('checked');
$(this).prop('checked', false);
if (existing_val != false){
$(this).trigger('change');
}
});
if (!$(this).prop('checked')){
$(this).parents('fieldset').find('input.daaota-checkbox').each(function(){
var existing_val = $(this).prop('checked');
$(this).prop('checked', false);
if (existing_val != false){
$(this).trigger('change');
}
});
}
});
$('select.combobox').combobox({buttonLabel: daComboboxButtonLabel});
$('select.da-ajax-combobox').combobox({clearIfNoMatch: true, buttonLabel: daComboboxButtonLabel});
$('input.da-ajax-combobox').each(function(){
var cb = daComboBoxes[$(this).attr("id")];
daFetchAjax(this, cb, false);
$(this).on('keyup', function(e){
switch(e.keyCode){
case 40:
case 39: // right arrow
case 38: // up arrow
case 37: // left arrow
case 36: // home
case 35: // end
case 16: // shift
case 17: // ctrl
case 9: // tab
case 13: // enter
case 27: // escape
case 18: // alt
return;
}
daFetchAjax(this, cb, true);
daFetchAcceptIncoming = true;
e.preventDefault();
return false;
});
});
$("#daemailform").validate({'submitHandler': daValidationHandler, 'rules': {'_attachment_email_address': {'minlength': 1, 'required': true, 'email': true}}, 'messages': {'_attachment_email_address': {'required': """ + json.dumps(word("An e-mail address is required.")) + """, 'email': """ + json.dumps(word("You need to enter a complete e-mail address.")) + """}}, 'errorClass': 'da-has-error invalid-feedback'});
$("a[data-embaction]").click(daEmbeddedAction);
$("a[data-js]").click(daEmbeddedJs);
$("a.da-review-action").click(daReviewAction);
$("input.dainput-embedded").on('keyup', daAdjustInputWidth);
$("input.dainput-embedded").each(daAdjustInputWidth);
var daPopoverTriggerList = [].slice.call(document.querySelectorAll('[data-bs-toggle="popover"]'));
var daPopoverList = daPopoverTriggerList.map(function (daPopoverTriggerEl) {
return new bootstrap.Popover(daPopoverTriggerEl, {trigger: """ + json.dumps(interview.options.get('popover trigger', 'focus')) + """, html: true});
});
$('label a[data-bs-toggle="popover"]').on('click', function(event){
event.preventDefault();
event.stopPropagation();
var thePopover = bootstrap.Popover.getOrCreateInstance(this);
thePopover.show();
return false;
});
if (daPhoneAvailable){
$("#daPhoneAvailable").removeClass("dainvisible");
}
$(".daquestionbackbutton").on('click', function(event){
event.preventDefault();
$("#dabackbutton").submit();
return false;
});
$("#dabackbutton").on('submit', function(event){
if (daShowingHelp){
event.preventDefault();
$('#daquestionlabel').tab('show');
return false;
}
$("#dabackbutton").addClass("dabackiconpressed");
var informed = '';
if (daInformedChanged){
informed = '&informed=' + Object.keys(daInformed).join(',');
}
var url;
if (daJsEmbed){
url = daPostURL;
}
else{
url = $("#dabackbutton").attr('action');
}
daRequestPending = true;
$.ajax({
type: "POST",
url: url,
beforeSend: addCsrfHeader,
xhrFields: {
withCredentials: true
},
data: $("#dabackbutton").serialize() + '&ajax=1' + informed,
success: function(data){
setTimeout(function(){
        daProcessAjax(data, document.getElementById('dabackbutton'), 1);
}, 0);
},
error: function(xhr, status, error){
setTimeout(function(){
daProcessAjaxError(xhr, status, error);
}, 0);
}
});
daSpinnerTimeout = setTimeout(daShowSpinner, 1000);
event.preventDefault();
});
$("#daChatOnButton").click(daRingChat);
$("#daChatOffButton").click(daCloseChat);
$('#daMessage').bind('keypress keydown keyup', function(e){
var theCode = e.which || e.keyCode;
if(theCode == 13) { daSender(); e.preventDefault(); }
});
$('#daform button[type="submit"]').click(function(){
daSubmitter = this;
document.activeElement.blur();
return true;
});
$('#daform input[type="submit"]').click(function(){
daSubmitter = this;
document.activeElement.blur();
return true;
});
$('#daemailform button[type="submit"]').click(function(){
daSubmitter = this;
return true;
});
$('#dadownloadform button[type="submit"]').click(function(){
daSubmitter = this;
return true;
});
$(".danavlinks a.daclickable").click(function(e){
if (daRequestPending){
e.preventDefault();
$(this).blur();
return false;
}
var the_key = $(this).data('key');
da_action_perform("_da_priority_action", {_action: the_key});
e.preventDefault();
return false;
});
$(".danav-vertical .danavnested").each(function(){
var box = this;
var prev = $(this).prev();
if (prev && !prev.hasClass('active')){
var toggler;
if ($(box).hasClass('danotshowing')){
toggler = $('<a href="#" class="toggler" role="button" aria-pressed="false">');
$('<i class="fas fa-caret-right">').appendTo(toggler);
$('<span class="visually-hidden">""" + word("Toggle") + """</span>').appendTo(toggler);
}
else{
toggler = $('<a href="#" class="toggler" role="button" aria-pressed="true">');
$('<i class="fas fa-caret-down">').appendTo(toggler);
$('<span class="visually-hidden">""" + word("Toggle") + """</span>').appendTo(toggler);
}
toggler.appendTo(prev);
toggler.on('click', function(e){
var oThis = this;
$(this).find("svg").each(function(){
if ($(this).attr('data-icon') == 'caret-down'){
$(this).removeClass('fa-caret-down');
$(this).addClass('fa-caret-right');
$(this).attr('data-icon', 'caret-right');
$(box).hide();
$(oThis).attr('aria-pressed', 'false');
$(box).toggleClass('danotshowing');
}
else if ($(this).attr('data-icon') == 'caret-right'){
$(this).removeClass('fa-caret-right');
$(this).addClass('fa-caret-down');
$(this).attr('data-icon', 'caret-down');
$(box).show();
$(oThis).attr('aria-pressed', 'true');
$(box).toggleClass('danotshowing');
}
});
e.stopPropagation();
e.preventDefault();
return false;
});
}
});
$("body").focus();
if (!daJsEmbed && !isAndroid){
setTimeout(function(){
var firstInput = $("#daform .da-field-container").not(".da-field-container-note").first().find("input, textarea, select").filter(":visible").first();
if (firstInput.length > 0 && $(firstInput).visible()){
$(firstInput).focus();
var inputType = $(firstInput).attr('type');
if ($(firstInput).prop('tagName') != 'SELECT' && inputType != "checkbox" && inputType != "radio" && inputType != "hidden" && inputType != "submit" && inputType != "file" && inputType != "range" && inputType != "number" && inputType != "date" && inputType != "time"){
var strLength = $(firstInput).val().length * 2;
if (strLength > 0){
try {
$(firstInput)[0].setSelectionRange(strLength, strLength);
}
catch(err) {
console.log(err.message);
}
}
}
}
else {
var firstButton = $("#danavbar-collapse .nav-link").filter(':visible').first();
if (firstButton.length > 0 && $(firstButton).visible()){
setTimeout(function(){
$(firstButton).focus();
$(firstButton).blur();
}, 0);
}
}
}, 15);
}
$("input.dauncheckspecificothers").on('change', function(){
if ($(this).is(":checked")){
var theIds = $.parseJSON(atou($(this).data('unchecklist')));
var n = theIds.length;
for (var i = 0; i < n; ++i){
var elem = document.getElementById(theIds[i]);
$(elem).prop("checked", false);
$(elem).trigger('change');
}
}
});
$("input.dauncheckspecificothers").each(function(){
var theIds = $.parseJSON(atou($(this).data('unchecklist')));
var n = theIds.length;
var oThis = this;
for (var i = 0; i < n; ++i){
var elem = document.getElementById(theIds[i]);
$(elem).on('change', function(){
if ($(this).is(":checked")){
$(oThis).prop("checked", false);
$(oThis).trigger('change');
}
});
}
});
$("input.dauncheckothers").on('change', function(){
if ($(this).is(":checked")){
$("input.dauncheckable,input.dacheckothers").each(function(){
if ($(this).is(":checked")){
$(this).prop("checked", false);
$(this).trigger('change');
}
});
}
});
$("input.dacheckspecificothers").on('change', function(){
if ($(this).is(":checked")){
var theIds = $.parseJSON(atou($(this).data('checklist')));
var n = theIds.length;
for (var i = 0; i < n; ++i){
var elem = document.getElementById(theIds[i]);
$(elem).prop("checked", true);
$(elem).trigger('change');
}
}
});
$("input.dacheckspecificothers").each(function(){
var theIds = $.parseJSON(atou($(this).data('checklist')));
var n = theIds.length;
var oThis = this;
for (var i = 0; i < n; ++i){
var elem = document.getElementById(theIds[i]);
$(elem).on('change', function(){
if (!$(this).is(":checked")){
$(oThis).prop("checked", false);
$(oThis).trigger('change');
}
});
}
});
$("input.dacheckothers").on('change', function(){
if ($(this).is(":checked")){
$("input.dauncheckable").each(function(){
if (!$(this).is(":checked")){
$(this).prop("checked", true);
$(this).trigger('change');
}
});
$("input.dauncheckothers").each(function(){
if (!$(this).is(":checked")){
$(this).prop("checked", false);
$(this).trigger('change');
}
});
}
});
$("input.dauncheckable").on('change', function(){
if ($(this).is(":checked")){
$("input.dauncheckothers").each(function(){
if ($(this).is(":checked")){
$(this).prop("checked", false);
$(this).trigger('change');
}
});
}
else{
$("input.dacheckothers").each(function(){
if ($(this).is(":checked")){
$(this).prop("checked", false);
$(this).trigger('change');
}
});
}
});
var navMain = $("#danavbar-collapse");
navMain.on("click", "a", null, function () {
if (!($(this).hasClass("dropdown-toggle"))){
navMain.collapse('hide');
}
});
$("button[data-bs-target='#dahelp']").on("shown.bs.tab", function(){
if (daJsEmbed){
$(daTargetDiv)[0].scrollTo(0, 1);
}
else{
window.scrollTo(0, 1);
}
$("#dahelptoggle").removeClass('daactivetext');
$("#dahelptoggle").blur();
});
$("#dasourcetoggle").on("click", function(){
$(this).parent().toggleClass("active");
$(this).blur();
});
$('#dabackToQuestion').click(function(event){
$('#daquestionlabel').tab('show');
});
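// Lookup tables that map between the base64-encoded Python variable names
// used by the interview and the names of the corresponding form fields.
// The *Multi variants collect every known alias for a name, and
// daVarLookupOption records the decoded option associated with a checkbox field.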
daVarLookup = Object();
daVarLookupRev = Object();
daVarLookupMulti = Object();
daVarLookupRevMulti = Object();
daVarLookupOption = Object();
if ($("input[name='_varnames']").length){
the_hash = $.parseJSON(atou($("input[name='_varnames']").val()));
for (var key in the_hash){
if (the_hash.hasOwnProperty(key)){
daVarLookup[the_hash[key]] = key;
daVarLookupRev[key] = the_hash[key];
if (!daVarLookupMulti.hasOwnProperty(the_hash[key])){
daVarLookupMulti[the_hash[key]] = [];
}
daVarLookupMulti[the_hash[key]].push(key);
if (!daVarLookupRevMulti.hasOwnProperty(key)){
daVarLookupRevMulti[key] = [];
}
daVarLookupRevMulti[key].push(the_hash[key]);
}
}
}
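// Decode the _checkboxes hidden input. Field names ending in [B'...'],
// [O'...'] or [R'...'] are checkbox items (boolean, object-based and raw,
// respectively); each one is registered in the lookup tables under the
// bracketed forms of its base variable name so that show-if handlers can
// find the field from the Python variable reference.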
if ($("input[name='_checkboxes']").length){
var patt = new RegExp(/\[B['"][^\]]*['"]\]$/);
var pattObj = new RegExp(/\[O['"][^\]]*['"]\]$/);
var pattRaw = new RegExp(/\[R['"][^\]]*['"]\]$/);
the_hash = $.parseJSON(atou($("input[name='_checkboxes']").val()));
for (var key in the_hash){
if (the_hash.hasOwnProperty(key)){
var checkboxName = atou(key);
var baseName = checkboxName;
if (patt.test(baseName)){
bracketPart = checkboxName.replace(/^.*(\[B?['"][^\]]*['"]\])$/, "$1");
checkboxName = checkboxName.replace(/^.*\[B?['"]([^\]]*)['"]\]$/, "$1");
baseName = baseName.replace(/^(.*)\[.*/, "$1");
var transBaseName = baseName;
if (($("[name='" + key + "']").length == 0) && (typeof daVarLookup[utoa(transBaseName).replace(/[\\n=]/g, '')] != "undefined")){
transBaseName = atou(daVarLookup[utoa(transBaseName).replace(/[\\n=]/g, '')]);
}
var convertedName;
try {
convertedName = atou(checkboxName);
}
catch (e) {
continue;
}
var daNameOne = utoa(transBaseName + bracketPart).replace(/[\\n=]/g, '');
var daNameTwo = utoa(baseName + "['" + convertedName + "']").replace(/[\\n=]/g, '');
var daNameThree = utoa(baseName + '["' + convertedName + '"]').replace(/[\\n=]/g, '');
var daNameBase = utoa(baseName).replace(/[\\n=]/g, '');
daVarLookupRev[daNameOne] = daNameTwo;
daVarLookup[daNameTwo] = daNameOne;
daVarLookup[daNameThree] = daNameOne;
daVarLookupOption[key] = convertedName;
if (!daVarLookupRevMulti.hasOwnProperty(daNameOne)){
daVarLookupRevMulti[daNameOne] = [];
}
daVarLookupRevMulti[daNameOne].push(daNameTwo);
if (!daVarLookupMulti.hasOwnProperty(daNameTwo)){
daVarLookupMulti[daNameTwo] = [];
}
daVarLookupMulti[daNameTwo].push(daNameOne);
if (!daVarLookupMulti.hasOwnProperty(daNameThree)){
daVarLookupMulti[daNameThree] = [];
}
daVarLookupMulti[daNameThree].push(daNameOne);
if (!daVarLookupMulti.hasOwnProperty(daNameBase)){
daVarLookupMulti[daNameBase] = [];
}
daVarLookupMulti[daNameBase].push(daNameOne);
}
else if (pattObj.test(baseName)){
bracketPart = checkboxName.replace(/^.*(\[O?['"][^\]]*['"]\])$/, "$1");
checkboxName = checkboxName.replace(/^.*\[O?['"]([^\]]*)['"]\]$/, "$1");
baseName = baseName.replace(/^(.*)\[.*/, "$1");
var transBaseName = baseName;
if (($("[name='" + key + "']").length == 0) && (typeof daVarLookup[utoa(transBaseName).replace(/[\\n=]/g, '')] != "undefined")){
transBaseName = atou(daVarLookup[utoa(transBaseName).replace(/[\\n=]/g, '')]);
}
var convertedName;
try {
convertedName = atou(atou(checkboxName));
}
catch (e) {
continue;
}
var daNameOne = utoa(transBaseName + bracketPart).replace(/[\\n=]/g, '');
var daNameTwo = utoa(baseName + "['" + convertedName + "']").replace(/[\\n=]/g, '');
var daNameThree = utoa(baseName + '["' + convertedName + '"]').replace(/[\\n=]/g, '');
var daNameBase = utoa(baseName).replace(/[\\n=]/g, '');
daVarLookupRev[daNameOne] = daNameTwo;
daVarLookup[daNameTwo] = daNameOne;
daVarLookup[daNameThree] = daNameOne;
daVarLookupOption[key] = convertedName;
if (!daVarLookupRevMulti.hasOwnProperty(daNameOne)){
daVarLookupRevMulti[daNameOne] = [];
}
daVarLookupRevMulti[daNameOne].push(daNameTwo);
if (!daVarLookupMulti.hasOwnProperty(daNameTwo)){
daVarLookupMulti[daNameTwo] = [];
}
daVarLookupMulti[daNameTwo].push(daNameOne);
if (!daVarLookupMulti.hasOwnProperty(daNameThree)){
daVarLookupMulti[daNameThree] = [];
}
daVarLookupMulti[daNameThree].push(daNameOne);
if (!daVarLookupMulti.hasOwnProperty(daNameBase)){
daVarLookupMulti[daNameBase] = [];
}
daVarLookupMulti[daNameBase].push(daNameOne);
}
else if (pattRaw.test(baseName)){
bracketPart = checkboxName.replace(/^.*(\[R?['"][^\]]*['"]\])$/, "$1");
checkboxName = checkboxName.replace(/^.*\[R?['"]([^\]]*)['"]\]$/, "$1");
baseName = baseName.replace(/^(.*)\[.*/, "$1");
var transBaseName = baseName;
if (($("[name='" + key + "']").length == 0) && (typeof daVarLookup[utoa(transBaseName).replace(/[\\n=]/g, '')] != "undefined")){
transBaseName = atou(daVarLookup[utoa(transBaseName).replace(/[\\n=]/g, '')]);
}
var convertedName;
try {
convertedName = atou(checkboxName);
}
catch (e) {
continue;
}
var daNameOne = utoa(transBaseName + bracketPart).replace(/[\\n=]/g, '');
var daNameTwo = utoa(baseName + "[" + convertedName + "]").replace(/[\\n=]/g, '')
var daNameBase = utoa(baseName).replace(/[\\n=]/g, '');
daVarLookupRev[daNameOne] = daNameTwo;
daVarLookup[daNameTwo] = daNameOne;
daVarLookupOption[key] = convertedName;
if (!daVarLookupRevMulti.hasOwnProperty(daNameOne)){
daVarLookupRevMulti[daNameOne] = [];
}
daVarLookupRevMulti[daNameOne].push(daNameTwo);
if (!daVarLookupMulti.hasOwnProperty(daNameTwo)){
daVarLookupMulti[daNameTwo] = [];
}
daVarLookupMulti[daNameTwo].push(daNameOne);
if (!daVarLookupMulti.hasOwnProperty(daNameBase)){
daVarLookupMulti[daNameBase] = [];
}
daVarLookupMulti[daNameBase].push(daNameOne);
}
}
}
}
daShowIfInProcess = true;
var daTriggerQueries = [];
var daInputsSeen = {};
function daOnlyUnique(value, index, self){
return self.indexOf(value) === index;
}
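// "js show if" fields: re-evaluate the stored JavaScript expression whenever
// any field it references changes, and show or hide the wrapped field
// container (or only enable/disable it, depending on the mode).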
$(".dajsshowif").each(function(){
var showIfDiv = this;
var jsInfo = JSON.parse(atou($(this).data('jsshowif')));
var showIfSign = jsInfo['sign'];
var showIfMode = jsInfo['mode'];
var jsExpression = jsInfo['expression'];
jsInfo['vars'].forEach(function(infoItem, i){
var showIfVars = [];
var initShowIfVar = utoa(infoItem).replace(/[\\n=]/g, '');
var initShowIfVarEscaped = initShowIfVar.replace(/(:|\.|\[|\]|,|=)/g, "\\\\$1");
var elem = $("[name='" + initShowIfVarEscaped + "']");
if (elem.length > 0){
showIfVars.push(initShowIfVar);
}
if (daVarLookupMulti.hasOwnProperty(initShowIfVar)){
for (var j = 0; j < daVarLookupMulti[initShowIfVar].length; j++){
var altShowIfVar = daVarLookupMulti[initShowIfVar][j];
var altShowIfVarEscaped = altShowIfVar.replace(/(:|\.|\[|\]|,|=)/g, "\\\\$1");
var altElem = $("[name='" + altShowIfVarEscaped + "']");
if (altElem.length > 0 && !$.contains(showIfDiv, altElem[0])){
showIfVars.push(altShowIfVar);
}
}
}
if (showIfVars.length == 0){
console.log("ERROR: reference to non-existent field " + infoItem);
}
showIfVars.forEach(function(showIfVar){
var showIfVarEscaped = showIfVar.replace(/(:|\.|\[|\]|,|=)/g, "\\\\$1");
var varToUse = infoItem;
var showHideDiv = function(speed){
var elem = daGetField(varToUse);
if (elem != null && !$(elem).parents('.da-form-group').first().is($(this).parents('.da-form-group').first())){
return;
}
var resultt = eval(jsExpression);
if(resultt){
if (showIfSign){
if ($(showIfDiv).data('isVisible') != '1'){
daShowHideHappened = true;
}
if (showIfMode == 0){
$(showIfDiv).show(speed);
}
$(showIfDiv).data('isVisible', '1');
$(showIfDiv).find('input, textarea, select').prop("disabled", false);
$(showIfDiv).find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].enable();
});
$(showIfDiv).find('input.daslider').each(function(){
$(this).slider('enable');
});
$(showIfDiv).find('input.dafile').each(function(){
$(this).data("fileinput").enable();
});
}
else{
if ($(showIfDiv).data('isVisible') != '0'){
daShowHideHappened = true;
}
if (showIfMode == 0){
$(showIfDiv).hide(speed);
}
$(showIfDiv).data('isVisible', '0');
$(showIfDiv).find('input, textarea, select').prop("disabled", true);
$(showIfDiv).find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].disable();
});
$(showIfDiv).find('input.daslider').each(function(){
$(this).slider('disable');
});
$(showIfDiv).find('input.dafile').each(function(){
$(this).data("fileinput").disable();
});
}
}
else{
if (showIfSign){
if ($(showIfDiv).data('isVisible') != '0'){
daShowHideHappened = true;
}
if (showIfMode == 0){
$(showIfDiv).hide(speed);
}
$(showIfDiv).data('isVisible', '0');
$(showIfDiv).find('input, textarea, select').prop("disabled", true);
$(showIfDiv).find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].disable();
});
$(showIfDiv).find('input.daslider').each(function(){
$(this).slider('disable');
});
$(showIfDiv).find('input.dafile').each(function(){
$(this).data("fileinput").disable();
});
}
else{
if ($(showIfDiv).data('isVisible') != '1'){
daShowHideHappened = true;
}
if (showIfMode == 0){
$(showIfDiv).show(speed);
}
$(showIfDiv).data('isVisible', '1');
$(showIfDiv).find('input, textarea, select').prop("disabled", false);
$(showIfDiv).find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].enable();
});
$(showIfDiv).find('input.daslider').each(function(){
$(this).slider('enable');
});
$(showIfDiv).find('input.dafile').each(function(){
$(this).data("fileinput").enable();
});
}
}
var leader = false;
if (!daShowIfInProcess){
daShowIfInProcess = true;
daInputsSeen = {};
leader = true;
}
$(showIfDiv).find(":input").not("[type='file']").each(function(){
if (!daInputsSeen.hasOwnProperty($(this).attr('id'))){
$(this).trigger('change');
}
daInputsSeen[$(this).attr('id')] = true;
});
if (leader){
daShowIfInProcess = false;
}
};
var showHideDivImmediate = function(){
showHideDiv.apply(this, [null]);
}
var showHideDivFast = function(){
showHideDiv.apply(this, ['fast']);
}
daTriggerQueries.push("#" + showIfVarEscaped);
daTriggerQueries.push("input[type='radio'][name='" + showIfVarEscaped + "']");
daTriggerQueries.push("input[type='checkbox'][name='" + showIfVarEscaped + "']");
$("#" + showIfVarEscaped).change(showHideDivFast);
$("input[type='radio'][name='" + showIfVarEscaped + "']").change(showHideDivFast);
$("input[type='checkbox'][name='" + showIfVarEscaped + "']").change(showHideDivFast);
$("input.dafile[name='" + showIfVarEscaped + "']").on('filecleared', showHideDivFast);
$("#" + showIfVarEscaped).on('daManualTrigger', showHideDivImmediate);
$("input[type='radio'][name='" + showIfVarEscaped + "']").on('daManualTrigger', showHideDivImmediate);
$("input[type='checkbox'][name='" + showIfVarEscaped + "']").on('daManualTrigger', showHideDivImmediate);
});
});
});
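// Ordinary "show if" fields: compare the current value of the controlling
// field against the stored target value and show or hide (or only
// enable/disable) the wrapped container. References ending in [nota] or
// [aota] point at the "none of the above" / "all of the above" checkboxes
// of a checkbox group.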
$(".dashowif").each(function(){
var showIfVars = [];
var showIfSign = $(this).data('showif-sign');
var showIfMode = parseInt($(this).data('showif-mode'));
var initShowIfVar = $(this).data('showif-var');
var varName = atou(initShowIfVar);
var elem = [];
if (varName.endsWith('[nota]') || varName.endsWith('[aota]')){
var signifier = varName.endsWith('[nota]') ? 'nota' : 'aota';
var cbVarName = varName.replace(/\[[na]ota\]$/, '');
$('fieldset.da-field-checkboxes').each(function(){
var thisVarName = atou($(this).data('varname'));
if (thisVarName == cbVarName){
elem = $(this).find('input.da' + signifier + '-checkbox');
initShowIfVar = $(elem).attr('name');
}
});
}
else {
var initShowIfVarEscaped = initShowIfVar.replace(/(:|\.|\[|\]|,|=)/g, "\\\\$1");
elem = $("[name='" + initShowIfVarEscaped + "']");
}
if (elem.length > 0){
showIfVars.push(initShowIfVar);
}
if (daVarLookupMulti.hasOwnProperty(initShowIfVar)){
var n = daVarLookupMulti[initShowIfVar].length;
for (var i = 0; i < n; i++){
var altShowIfVar = daVarLookupMulti[initShowIfVar][i];
var altShowIfVarEscaped = altShowIfVar.replace(/(:|\.|\[|\]|,|=)/g, "\\\\$1");
var altElem = $("[name='" + altShowIfVarEscaped + "']");
if (altElem.length > 0 && !$.contains(this, altElem[0])){
showIfVars.push(altShowIfVar);
}
}
}
var showIfVal = $(this).data('showif-val');
var saveAs = $(this).data('saveas');
var showIfDiv = this;
showIfVars.forEach(function(showIfVar){
var showIfVarEscaped = showIfVar.replace(/(:|\.|\[|\]|,|=)/g, "\\\\$1");
var showHideDiv = function(speed){
var elem = daGetField(varName, showIfDiv);
if (elem != null && !$(elem).parents('.da-form-group').first().is($(this).parents('.da-form-group').first())){
return;
}
var theVal;
var showifParents = $(this).parents(".dashowif");
if (showifParents.length !== 0 && !($(showifParents[0]).data("isVisible") == '1')){
theVal = '';
//console.log("Setting theVal to blank.");
}
else if ($(this).attr('type') == "checkbox"){
theVal = $("input[name='" + showIfVarEscaped + "']:checked").val();
if (typeof(theVal) == 'undefined'){
//console.log('manually setting checkbox value to False');
theVal = 'False';
}
}
else if ($(this).attr('type') == "radio"){
theVal = $("input[name='" + showIfVarEscaped + "']:checked").val();
if (typeof(theVal) == 'undefined'){
theVal = '';
}
else if (theVal != '' && $("input[name='" + showIfVarEscaped + "']:checked").hasClass("daobject")){
try{
theVal = atou(theVal);
}
catch(e){
}
}
}
else{
theVal = $(this).val();
if (theVal != '' && $(this).hasClass("daobject")){
try{
theVal = atou(theVal);
}
catch(e){
}
}
}
// console.log("There was a trigger on " + $(this).attr('id') + ". This handler was installed based on varName " + varName + ", showIfVar " + atou(showIfVar) + ". This handler was installed for the benefit of the .dashowif div encompassing the field for " + atou(saveAs) + ". The comparison value is " + String(showIfVal) + " and the current value of the element on the screen is " + String(theVal) + ".");
if(daShowIfCompare(theVal, showIfVal)){
if (showIfSign){
if ($(showIfDiv).data('isVisible') != '1'){
daShowHideHappened = true;
}
if (showIfMode == 0){
$(showIfDiv).show(speed);
}
$(showIfDiv).data('isVisible', '1');
var firstChild = $(showIfDiv).children()[0];
if (!$(firstChild).hasClass('dacollectextra') || $(firstChild).is(":visible")){
$(showIfDiv).find('input, textarea, select').prop("disabled", false);
$(showIfDiv).find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].enable();
});
$(showIfDiv).find('input.daslider').each(function(){
$(this).slider('enable');
});
$(showIfDiv).find('input.dafile').each(function(){
$(this).data("fileinput").enable();
});
}
}
else{
if ($(showIfDiv).data('isVisible') != '0'){
daShowHideHappened = true;
}
if (showIfMode == 0){
$(showIfDiv).hide(speed);
}
$(showIfDiv).data('isVisible', '0');
$(showIfDiv).find('input, textarea, select').prop("disabled", true);
$(showIfDiv).find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].disable();
});
$(showIfDiv).find('input.daslider').each(function(){
$(this).slider('disable');
});
$(showIfDiv).find('input.dafile').each(function(){
$(this).data("fileinput").disable();
});
}
}
else{
if (showIfSign){
if ($(showIfDiv).data('isVisible') != '0'){
daShowHideHappened = true;
}
if (showIfMode == 0){
$(showIfDiv).hide(speed);
}
$(showIfDiv).data('isVisible', '0');
$(showIfDiv).find('input, textarea, select').prop("disabled", true);
$(showIfDiv).find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].disable();
});
$(showIfDiv).find('input.daslider').each(function(){
$(this).slider('disable');
});
$(showIfDiv).find('input.dafile').each(function(){
$(this).data("fileinput").disable();
});
}
else{
if ($(showIfDiv).data('isVisible') != '1'){
daShowHideHappened = true;
}
if (showIfMode == 0){
$(showIfDiv).show(speed);
}
$(showIfDiv).data('isVisible', '1');
var firstChild = $(showIfDiv).children()[0];
if (!$(firstChild).hasClass('dacollectextra') || $(firstChild).is(":visible")){
$(showIfDiv).find('input, textarea, select').prop("disabled", false);
$(showIfDiv).find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].enable();
});
$(showIfDiv).find('input.daslider').each(function(){
$(this).slider('enable');
});
$(showIfDiv).find('input.dafile').each(function(){
$(this).data("fileinput").enable();
});
}
}
}
var leader = false;
if (!daShowIfInProcess){
daShowIfInProcess = true;
daInputsSeen = {};
leader = true;
}
$(showIfDiv).find(":input").not("[type='file']").each(function(){
if (!daInputsSeen.hasOwnProperty($(this).attr('id'))){
$(this).trigger('change');
}
daInputsSeen[$(this).attr('id')] = true;
});
if (leader){
daShowIfInProcess = false;
}
};
var showHideDivImmediate = function(){
showHideDiv.apply(this, [null]);
}
var showHideDivFast = function(){
showHideDiv.apply(this, ['fast']);
}
daTriggerQueries.push("#" + showIfVarEscaped);
daTriggerQueries.push("input[type='radio'][name='" + showIfVarEscaped + "']");
daTriggerQueries.push("input[type='checkbox'][name='" + showIfVarEscaped + "']");
$("#" + showIfVarEscaped).change(showHideDivFast);
$("#" + showIfVarEscaped).on('daManualTrigger', showHideDivImmediate);
$("input[type='radio'][name='" + showIfVarEscaped + "']").change(showHideDivFast);
$("input[type='radio'][name='" + showIfVarEscaped + "']").on('daManualTrigger', showHideDivImmediate);
$("input[type='checkbox'][name='" + showIfVarEscaped + "']").change(showHideDivFast);
$("input[type='checkbox'][name='" + showIfVarEscaped + "']").on('daManualTrigger', showHideDivImmediate);
$("input.dafile[name='" + showIfVarEscaped + "']").on('filecleared', showHideDivFast);
});
});
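// Fire a manual trigger on every show-if controller, repeating until no
// visibility change occurs; capped at 100 passes to avoid looping forever
// on contradictory conditions.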
function daTriggerAllShowHides(){
var daUniqueTriggerQueries = daTriggerQueries.filter(daOnlyUnique);
var daFirstTime = true;
var daTries = 0;
while ((daFirstTime || daShowHideHappened) && ++daTries < 100){
daShowHideHappened = false;
daFirstTime = false;
var n = daUniqueTriggerQueries.length;
for (var i = 0; i < n; ++i){
$(daUniqueTriggerQueries[i]).trigger('daManualTrigger');
}
}
if (daTries >= 100){
console.log("Too many contradictory 'show if' conditions");
}
}
if (daTriggerQueries.length > 0){
daTriggerAllShowHides();
}
$(".danavlink").last().addClass('thelast');
$(".danavlink").each(function(){
if ($(this).hasClass('btn') && !$(this).hasClass('danotavailableyet')){
var the_a = $(this);
var the_delay = 1000 + 250 * parseInt($(this).data('index'));
setTimeout(function(){
$(the_a).removeClass('""" + app.config['BUTTON_STYLE'] + """secondary');
if ($(the_a).hasClass('active')){
$(the_a).addClass('""" + app.config['BUTTON_STYLE'] + """success');
}
else{
$(the_a).addClass('""" + app.config['BUTTON_STYLE'] + """warning');
}
}, the_delay);
}
});
daShowIfInProcess = false;
$("#daSend").click(daSender);
if (daChatAvailable == 'unavailable'){
daChatStatus = 'off';
}
if (daChatAvailable == 'observeonly'){
daChatStatus = 'observeonly';
}
if ((daChatStatus == 'off' || daChatStatus == 'observeonly') && daChatAvailable == 'available'){
daChatStatus = 'waiting';
}
daDisplayChat();
if (daBeingControlled){
daShowControl('fast');
}
if (daChatStatus == 'ready' || daBeingControlled){
daInitializeSocket();
}
if (daInitialized == false && daCheckinSeconds > 0){ // why was this set to always retrieve the chat log?
setTimeout(function(){
//console.log("daInitialize call to chat_log in checkin");
$.ajax({
type: 'POST',
url: """ + "'" + url_for('checkin', i=yaml_filename) + "'" + """,
beforeSend: addCsrfHeader,
xhrFields: {
withCredentials: true
},
data: $.param({action: 'chat_log', ajax: '1', csrf_token: daCsrf}),
success: daChatLogCallback,
dataType: 'json'
});
}, 200);
}
if (daInitialized == true){
//console.log("Publishing from memory");
$("#daCorrespondence").html('');
for(var i = 0; i < daChatHistory.length; i++){
daPublishMessage(daChatHistory[i]);
}
}
if (daChatStatus != 'off'){
daSendChanges = true;
}
else{
if (daDoAction == null){
daSendChanges = false;
}
else{
daSendChanges = true;
}
}
if (daSendChanges){
$("#daform").each(function(){
$(this).find(':input').change(daOnChange);
});
}
daInitialized = true;
daShowingHelp = 0;
daSubmitter = null;
setTimeout(function(){
$("#daflash .alert-success").hide(300, function(){
$(this).remove();
});
}, 3000);
if (doScroll){
setTimeout(function () {
if (daJsEmbed){
$(daTargetDiv)[0].scrollTo(0, 1);
if (daSteps > 1){
$(daTargetDiv)[0].scrollIntoView();
}
}
else{
window.scrollTo(0, 1);
}
}, 20);
}
if (daShowingSpinner){
daHideSpinner();
}
if (daCheckinInterval != null){
clearInterval(daCheckinInterval);
}
if (daCheckinSeconds > 0){
setTimeout(daInitialCheckin, 100);
daCheckinInterval = setInterval(daCheckin, daCheckinSeconds);
}
daShowNotifications();
if (daUsingGA){
daPageview();
}
if (daUsingSegment){
daSegmentEvent();
}
hideTablist();
}
$(document).ready(function(){
daInitialize(1);
//console.log("ready: replaceState " + daSteps);
if (!daJsEmbed && !daIframeEmbed){
history.replaceState({steps: daSteps}, "", daLocationBar + """ + json.dumps(page_sep) + """ + daSteps);
}
var daReloadAfter = """ + str(int(reload_after)) + """;
if (daReloadAfter > 0){
daReloader = setTimeout(function(){daRefreshSubmit();}, daReloadAfter);
}
window.onpopstate = function(event) {
if (event.state != null && event.state.steps < daSteps && daAllowGoingBack){
$("#dabackbutton").submit();
}
};
$( window ).bind('unload', function() {
daStopCheckingIn();
if (daSocket != null && daSocket.connected){
//console.log('Terminating interview socket because window unloaded');
daSocket.emit('terminate');
}
});
var daDefaultAllowList = bootstrap.Tooltip.Default.allowList;
daDefaultAllowList['*'].push('style');
daDefaultAllowList['a'].push('style');
daDefaultAllowList['img'].push('style');
if (daJsEmbed){
$.ajax({
type: "POST",
url: daPostURL,
beforeSend: addCsrfHeader,
xhrFields: {
withCredentials: true
},
data: 'csrf_token=' + daCsrf + '&ajax=1',
success: function(data){
setTimeout(function(){
daProcessAjax(data, $("#daform"), 0);
}, 0);
},
error: function(xhr, status, error){
setTimeout(function(){
daProcessAjaxError(xhr, status, error);
}, 0);
}
});
}
$(document).trigger('daPageLoad');
});
$(window).ready(daUpdateHeight);
$(window).resize(daUpdateHeight);
function daUpdateHeight(){
$(".dagoogleMap").each(function(){
var size = $( this ).width();
$( this ).css('height', size);
});
}
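// Configure the jQuery Validation plugin: Bootstrap-style highlighting of
// invalid fields and placement of the error message next to the offending
// widget, with special handling for embedded, combobox, file, and checkbox
// inputs.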
$.validator.setDefaults({
highlight: function(element) {
$(element).closest('.da-form-group').addClass('da-group-has-error');
$(element).addClass('is-invalid');
},
unhighlight: function(element) {
$(element).closest('.da-form-group').removeClass('da-group-has-error');
$(element).removeClass('is-invalid');
},
errorElement: 'span',
errorClass: 'da-has-error invalid-feedback',
errorPlacement: function(error, element) {
$(error).addClass('invalid-feedback');
var elementName = $(element).attr("name");
var lastInGroup = $.map(daValidationRules['groups'], function(thefields, thename){
var fieldsArr;
if (thefields.indexOf(elementName) >= 0) {
fieldsArr = thefields.split(" ");
return fieldsArr[fieldsArr.length - 1];
}
else {
return null;
}
})[0];
if (element.hasClass('dainput-embedded')){
error.insertAfter(element);
}
else if (element.hasClass('dafile-embedded')){
error.insertAfter(element);
}
else if (element.hasClass('daradio-embedded')){
element.parent().append(error);
}
else if (element.hasClass('dacheckbox-embedded')){
element.parent().append(error);
}
else if (element.hasClass('dauncheckable') && lastInGroup){
$("input[name='" + lastInGroup + "']").parent().append(error);
}
else if (element.parent().hasClass('combobox-container')){
error.insertAfter(element.parent());
}
else if (element.hasClass('dafile')){
var fileContainer = $(element).parents(".file-input").first();
if (fileContainer.length > 0){
$(fileContainer).append(error);
}
else{
error.insertAfter(element.parent());
}
}
else if (element.parent('.input-group').length) {
error.insertAfter(element.parent());
}
else if (element.hasClass('da-active-invisible')){
var choice_with_help = $(element).parents(".dachoicewithhelp").first();
if (choice_with_help.length > 0){
$(choice_with_help).parent().append(error);
}
else{
element.parent().append(error);
}
}
else if (element.hasClass('danon-nota-checkbox')){
element.parents('fieldset').append(error);
}
else {
error.insertAfter(element);
}
}
});
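// Custom validation methods referenced by the generated validation rules.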
$.validator.addMethod("datetime", function(a, b){
return true;
});
$.validator.addMethod("ajaxrequired", function(value, element, params){
var realElement = $("#" + $(element).attr('name') + "combobox");
var realValue = $(realElement).val();
if (!$(realElement).parent().is(":visible")){
return true;
}
if (realValue == null || realValue.replace(/\s/g, '') == ''){
return false;
}
return true;
});
$.validator.addMethod('checkone', function(value, element, params){
var number_needed = params[0];
var css_query = params[1];
if ($(css_query).length >= number_needed){
return true;
}
else{
return false;
}
});
$.validator.addMethod('checkatleast', function(value, element, params){
if ($(element).attr('name') != '_ignore' + params[0]){
return true;
}
if ($('.dafield' + params[0] + ':checked').length >= params[1]){
return true;
}
else{
return false;
}
});
$.validator.addMethod('checkatmost', function(value, element, params){
if ($(element).attr('name') != '_ignore' + params[0]){
return true;
}
if ($('.dafield' + params[0] + ':checked').length > params[1]){
return false;
}
else{
return true;
}
});
$.validator.addMethod('checkexactly', function(value, element, params){
if ($(element).attr('name') != '_ignore' + params[0]){
return true;
}
if ($('.dafield' + params[0] + ':checked').length != params[1]){
return false;
}
else{
return true;
}
});
$.validator.addMethod('selectexactly', function(value, element, params){
if ($(element).find('option:selected').length == params[0]){
return true;
}
else {
return false;
}
});
$.validator.addMethod('mindate', function(value, element, params){
if (value == null || value == ''){
return true;
}
try {
var date = new Date(value);
var comparator = new Date(params);
if (date >= comparator) {
return true;
}
} catch (e) {}
return false;
});
$.validator.addMethod('maxdate', function(value, element, params){
if (value == null || value == ''){
return true;
}
try {
var date = new Date(value);
var comparator = new Date(params);
if (date <= comparator) {
return true;
}
} catch (e) {}
return false;
});
$.validator.addMethod('minmaxdate', function(value, element, params){
if (value == null || value == ''){
return true;
}
try {
var date = new Date(value);
var before_comparator = new Date(params[0]);
var after_comparator = new Date(params[1]);
if (date >= before_comparator && date <= after_comparator) {
return true;
}
} catch (e) {}
return false;
});
$.validator.addMethod('mintime', function(value, element, params){
if (value == null || value == ''){
return true;
}
try {
var time = new Date('1970-01-01T' + value + 'Z');
var comparator = new Date('1970-01-01T' + params + 'Z');
if (time >= comparator) {
return true;
}
} catch (e) {}
return false;
});
$.validator.addMethod('maxtime', function(value, element, params){
if (value == null || value == ''){
return true;
}
try {
var time = new Date('1970-01-01T' + value + 'Z');
var comparator = new Date('1970-01-01T' + params + 'Z');
if (time <= comparator) {
return true;
}
} catch (e) {}
return false;
});
$.validator.addMethod('minmaxtime', function(value, element, params){
if (value == null || value == ''){
return true;
}
try {
var time = new Date('1970-01-01T' + value + 'Z');
var before_comparator = new Date('1970-01-01T' + params[0] + 'Z');
var after_comparator = new Date('1970-01-01T' + params[1] + 'Z');
if (time >= before_comparator && time <= after_comparator) {
return true;
}
} catch (e) {}
return false;
});
$.validator.addMethod('mindatetime', function(value, element, params){
if (value == null || value == ''){
return true;
}
try {
var datetime = new Date(value + 'Z');
var comparator = new Date(params + 'Z');
if (datetime >= comparator) {
return true;
}
} catch (e) {}
return false;
});
$.validator.addMethod('maxdatetime', function(value, element, params){
if (value == null || value == ''){
return true;
}
try {
var datetime = new Date(value + 'Z');
var comparator = new Date(params + 'Z');
if (datetime <= comparator) {
return true;
}
} catch (e) {}
return false;
});
$.validator.addMethod('minmaxdatetime', function(value, element, params){
if (value == null || value == ''){
return true;
}
try {
var datetime = new Date(value + 'Z');
var before_comparator = new Date(params[0] + 'Z');
var after_comparator = new Date(params[1] + 'Z');
if (datetime >= before_comparator && datetime <= after_comparator) {
return true;
}
} catch (e) {}
return false;
});
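// Estimate the total size of an upload against the server limit; large
// images that will be resized in the browser are counted at an estimated
// post-resize size rather than their raw file size.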
$.validator.addMethod('maxuploadsize', function(value, element, param){
try {
var limit = parseInt(param) - 2000;
if (limit <= 0){
return true;
}
var maxImageSize;
if ($(element).data('maximagesize')){
maxImageSize = (parseInt($(element).data('maximagesize')) * parseInt($(element).data('maximagesize'))) * 2;
}
else {
maxImageSize = 0;
}
if ($(element).attr("type") === "file"){
if (element.files && element.files.length) {
var totalSize = 0;
for (var i = 0; i < element.files.length; i++) {
if (maxImageSize > 0 && element.files[i].size > (0.20 * maxImageSize) && element.files[i].type.match(/image.*/) && !(element.files[i].type.indexOf('image/svg') == 0)){
totalSize += maxImageSize;
}
else {
totalSize += element.files[i].size;
}
}
if (totalSize > limit){
return false;
}
}
return true;
}
} catch (e) {}
return false;
});""" # noqa: W605
for custom_type in interview.custom_data_types:
info = docassemble.base.functions.custom_types[custom_type]
if isinstance(info['javascript'], str):
the_js += "\n try {\n" + indent_by(info['javascript'].strip(), 8).rstrip() + "\n }\n catch {\n console.log('Error with JavaScript code of CustomDataType " + info['class'].__name__ + "');\n }"
if interview.options.get('send question data', False):
the_js += "\n daQuestionData = " + json.dumps(interview_status.as_data(user_dict))
scripts += """
<script type="text/javascript">
""" + the_js + """
</script>"""
if interview_status.question.language != '*':
interview_language = interview_status.question.language
else:
interview_language = current_language
validation_rules = {'rules': {}, 'messages': {}, 'errorClass': 'da-has-error invalid-feedback', 'debug': False}
interview_status.exit_url = title_info.get('exit url', None)
interview_status.exit_link = title_info.get('exit link', 'exit')
interview_status.exit_label = title_info.get('exit label', word('Exit'))
interview_status.title = title_info.get('full', default_title)
interview_status.display_title = title_info.get('logo', interview_status.title)
interview_status.tabtitle = title_info.get('tab', interview_status.title)
interview_status.short_title = title_info.get('short', title_info.get('full', default_short_title))
interview_status.display_short_title = title_info.get('short logo', title_info.get('logo', interview_status.short_title))
interview_status.title_url = title_info.get('title url', None)
interview_status.title_url_opens_in_other_window = title_info.get('title url opens in other window', True)
interview_status.nav_item = title_info.get('navigation bar html', '')
the_main_page_parts = main_page_parts.get(interview_language, main_page_parts.get('*'))
interview_status.pre = title_info.get('pre', the_main_page_parts['main page pre'])
interview_status.post = title_info.get('post', the_main_page_parts['main page post'])
interview_status.footer = title_info.get('footer', the_main_page_parts['main page footer'] or get_part('global footer'))
if interview_status.footer:
interview_status.footer = re.sub(r'</?p.*?>', '', str(interview_status.footer), flags=re.IGNORECASE).strip()
if interview_status.footer == 'off':
interview_status.footer = ''
interview_status.submit = title_info.get('submit', the_main_page_parts['main page submit'])
interview_status.back = title_info.get('back button label', the_main_page_parts['main page back button label'] or interview_status.question.back())
interview_status.cornerback = title_info.get('corner back button label', the_main_page_parts['main page corner back button label'] or interview_status.question.back())
bootstrap_theme = interview.get_bootstrap_theme()
if not is_ajax:
social = copy.deepcopy(daconfig['social'])
if 'social' in interview.consolidated_metadata and isinstance(interview.consolidated_metadata['social'], dict):
populate_social(social, interview.consolidated_metadata['social'])
standard_header_start = standard_html_start(interview_language=interview_language, debug=debug_mode, bootstrap_theme=bootstrap_theme, page_title=interview_status.title, social=social, yaml_filename=yaml_filename)
if interview_status.question.question_type == "signature":
if 'pen color' in interview_status.extras and 0 in interview_status.extras['pen color']:
pen_color = interview_status.extras['pen color'][0].strip()
else:
pen_color = '#000'
interview_status.extra_scripts.append('<script>$( document ).ready(function() {daInitializeSignature(' + json.dumps(pen_color) + ');});</script>')
if interview.options.get('hide navbar', False):
bodyclass = "dasignature navbarhidden"
else:
bodyclass = "dasignature da-pad-for-navbar"
else:
if interview.options.get('hide navbar', False):
bodyclass = "dabody"
else:
bodyclass = "dabody da-pad-for-navbar"
if 'cssClass' in interview_status.extras:
bodyclass += ' ' + re.sub(r'[^A-Za-z0-9\_]+', '-', interview_status.extras['cssClass'])
elif hasattr(interview_status.question, 'id'):
bodyclass += ' question-' + re.sub(r'[^A-Za-z0-9]+', '-', interview_status.question.id.lower())
if interview_status.footer:
bodyclass += ' da-pad-for-footer'
if debug_mode:
interview_status.screen_reader_text = {}
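# When "speak text" is enabled, work out which language, dialect, and voice
# to use for the text-to-speech audio, falling back to the defaults if no
# match is found in the voicerss configuration.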
if 'speak_text' in interview_status.extras and interview_status.extras['speak_text']:
interview_status.initialize_screen_reader()
util_language = docassemble.base.functions.get_language()
util_dialect = docassemble.base.functions.get_dialect()
util_voice = docassemble.base.functions.get_voice()
question_language = interview_status.question.language
if len(interview.translations) > 0:
the_language = util_language
elif question_language != '*':
the_language = question_language
else:
the_language = util_language
if voicerss_config and 'language map' in voicerss_config and isinstance(voicerss_config['language map'], dict) and the_language in voicerss_config['language map']:
the_language = voicerss_config['language map'][the_language]
if the_language == util_language and util_dialect is not None:
the_dialect = util_dialect
elif voicerss_config and 'dialects' in voicerss_config and isinstance(voicerss_config['dialects'], dict) and the_language in voicerss_config['dialects']:
the_dialect = voicerss_config['dialects'][the_language]
elif the_language in valid_voicerss_dialects:
the_dialect = valid_voicerss_dialects[the_language][0]
else:
logmessage("index: unable to determine dialect; reverting to default")
the_language = DEFAULT_LANGUAGE
the_dialect = DEFAULT_DIALECT
if the_language == util_language and the_dialect == util_dialect and util_voice is not None:
the_voice = util_voice
elif voicerss_config and 'voices' in voicerss_config and isinstance(voicerss_config['voices'], dict) and the_language in voicerss_config['voices'] and isinstance(voicerss_config['voices'][the_language], dict) and the_dialect in voicerss_config['voices'][the_language]:
the_voice = voicerss_config['voices'][the_language][the_dialect]
elif voicerss_config and 'voices' in voicerss_config and isinstance(voicerss_config['voices'], dict) and the_language in voicerss_config['voices'] and isinstance(voicerss_config['voices'][the_language], str):
the_voice = voicerss_config['voices'][the_language]
elif the_language == DEFAULT_LANGUAGE and the_dialect == DEFAULT_DIALECT:
the_voice = DEFAULT_VOICE
else:
the_voice = None
for question_type in ('question', 'help'):
for audio_format in ('mp3', 'ogg'):
interview_status.screen_reader_links[question_type].append([url_for('speak_file', i=yaml_filename, question=interview_status.question.number, digest='XXXTHEXXX' + question_type + 'XXXHASHXXX', type=question_type, format=audio_format, language=the_language, dialect=the_dialect, voice=the_voice or ''), audio_mimetype_table[audio_format]])
if (not validated) and the_question.name == interview_status.question.name:
for def_key, def_val in new_values.items():
safe_def_key = safeid(def_key)
if isinstance(def_val, list):
def_val = '[' + ','.join(def_val) + ']'
if safe_def_key in all_field_numbers:
for number in all_field_numbers[safe_def_key]:
try:
interview_status.defaults[number] = eval(def_val, pre_user_dict)
except:
pass
else:
try:
interview_status.other_defaults[def_key] = eval(def_val, pre_user_dict)
except:
pass
the_field_errors = field_error
else:
the_field_errors = None
# restore this, maybe
# if next_action_to_set:
# interview_status.next_action.append(next_action_to_set)
if next_action_to_set:
if 'event_stack' not in user_dict['_internal']:
user_dict['_internal']['event_stack'] = {}
session_uid = interview_status.current_info['user']['session_uid']
if session_uid not in user_dict['_internal']['event_stack']:
user_dict['_internal']['event_stack'][session_uid] = []
already_there = False
for event_item in user_dict['_internal']['event_stack'][session_uid]:
if event_item['action'] == next_action_to_set['action']:
already_there = True
break
if not already_there:
user_dict['_internal']['event_stack'][session_uid].insert(0, next_action_to_set)
if interview.use_progress_bar and (interview_status.question.progress is None or interview_status.question.progress >= 0):
the_progress_bar = progress_bar(user_dict['_internal']['progress'], interview)
else:
the_progress_bar = None
if interview.use_navigation and user_dict['nav'].visible():
if interview.use_navigation_on_small_screens == 'dropdown':
current_dict = {}
dropdown_nav_bar = navigation_bar(user_dict['nav'], interview, wrapper=False, a_class='dropdown-item', hide_inactive_subs=False, always_open=True, return_dict=current_dict)
if dropdown_nav_bar != '':
dropdown_nav_bar = ' <div class="col d-md-none text-end">\n <div class="dropdown danavlinks">\n <button class="btn btn-primary dropdown-toggle" type="button" id="daDropdownSections" data-bs-toggle="dropdown" aria-haspopup="true" aria-expanded="false">' + current_dict.get('title', word("Sections")) + '</button>\n <div class="dropdown-menu" aria-labelledby="daDropdownSections">' + dropdown_nav_bar + '\n </div>\n </div>\n </div>\n'
else:
dropdown_nav_bar = ''
if interview.use_navigation == 'horizontal':
if interview.use_navigation_on_small_screens is not True:
nav_class = ' d-none d-md-block'
else:
nav_class = ''
the_nav_bar = navigation_bar(user_dict['nav'], interview, wrapper=False, inner_div_class='nav flex-row justify-content-center align-items-center nav-pills danav danavlinks danav-horiz danavnested-horiz')
if the_nav_bar != '':
the_nav_bar = dropdown_nav_bar + ' <div class="col' + nav_class + '">\n <div class="nav flex-row justify-content-center align-items-center nav-pills danav danavlinks danav-horiz">\n ' + the_nav_bar + '\n </div>\n </div>\n </div>\n <div class="row tab-content">\n'
else:
if interview.use_navigation_on_small_screens == 'dropdown':
if dropdown_nav_bar:
horiz_nav_bar = dropdown_nav_bar + '\n </div>\n <div class="row tab-content">\n'
else:
horiz_nav_bar = ''
elif interview.use_navigation_on_small_screens:
horiz_nav_bar = navigation_bar(user_dict['nav'], interview, wrapper=False, inner_div_class='nav flex-row justify-content-center align-items-center nav-pills danav danavlinks danav-horiz danavnested-horiz')
if horiz_nav_bar != '':
horiz_nav_bar = dropdown_nav_bar + ' <div class="col d-md-none">\n <div class="nav flex-row justify-content-center align-items-center nav-pills danav danavlinks danav-horiz">\n ' + horiz_nav_bar + '\n </div>\n </div>\n </div>\n <div class="row tab-content">\n'
else:
horiz_nav_bar = ''
the_nav_bar = navigation_bar(user_dict['nav'], interview)
if the_nav_bar != '':
if interview.use_navigation == 'horizontal':
interview_status.using_navigation = 'horizontal'
else:
interview_status.using_navigation = 'vertical'
else:
interview_status.using_navigation = False
else:
the_nav_bar = ''
interview_status.using_navigation = False
content = as_html(interview_status, debug_mode, url_for('index', **index_params), validation_rules, the_field_errors, the_progress_bar, steps - user_dict['_internal']['steps_offset'])
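# In debug mode, compute readability statistics for the question and help
# text and build the report shown in the developer tools panel.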
if debug_mode:
readability = {}
for question_type in ('question', 'help'):
if question_type not in interview_status.screen_reader_text:
continue
phrase = to_text(interview_status.screen_reader_text[question_type])
if (not phrase) or len(phrase) < 10:
phrase = "The sky is blue."
phrase = re.sub(r'[^A-Za-z 0-9\.\,\?\#\!\%\&\(\)]', r' ', phrase)
readability[question_type] = [('Flesch Reading Ease', textstat.flesch_reading_ease(phrase)),
('Flesch-Kincaid Grade Level', textstat.flesch_kincaid_grade(phrase)),
('Gunning FOG Scale', textstat.gunning_fog(phrase)),
('SMOG Index', textstat.smog_index(phrase)),
('Automated Readability Index', textstat.automated_readability_index(phrase)),
('Coleman-Liau Index', textstat.coleman_liau_index(phrase)),
('Linsear Write Formula', textstat.linsear_write_formula(phrase)),
('Dale-Chall Readability Score', textstat.dale_chall_readability_score(phrase)),
('Readability Consensus', textstat.text_standard(phrase))]
readability_report = ''
for question_type in ('question', 'help'):
if question_type in readability:
readability_report += ' <div id="dareadability-' + question_type + '"' + (' style="display: none;"' if question_type == 'help' else '') + '>\n'
if question_type == 'question':
readability_report += ' <h3>' + word("Readability of question") + '</h3>\n'
else:
readability_report += ' <h3>' + word("Readability of help text") + '</h3>\n'
readability_report += ' <table class="table">' + "\n"
readability_report += ' <tr><th>' + word("Formula") + '</th><th>' + word("Score") + '</th></tr>' + "\n"
for read_type, value in readability[question_type]:
readability_report += ' <tr><td>' + read_type + '</td><td>' + str(value) + "</td></tr>\n"
readability_report += ' </table>' + "\n"
readability_report += ' </div>' + "\n"
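# Cache each screen-reader phrase in the SpeakList table (encrypted when the
# session is encrypted) and substitute its hash into the audio URLs.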
if interview_status.using_screen_reader:
for question_type in ('question', 'help'):
if question_type not in interview_status.screen_reader_text:
continue
phrase = to_text(interview_status.screen_reader_text[question_type])
if encrypted:
the_phrase = encrypt_phrase(phrase, secret)
else:
the_phrase = pack_phrase(phrase)
the_hash = MD5Hash(data=phrase).hexdigest()
content = re.sub(r'XXXTHEXXX' + question_type + 'XXXHASHXXX', the_hash, content)
params = {'filename': yaml_filename, 'key': user_code, 'question': interview_status.question.number, 'digest': the_hash, 'type': question_type, 'language': the_language, 'dialect': the_dialect}
if the_voice:
params['voice'] = the_voice
existing_entry = db.session.execute(select(SpeakList).filter_by(**params).with_for_update()).scalar()
if existing_entry:
if existing_entry.encrypted:
existing_phrase = decrypt_phrase(existing_entry.phrase, secret)
else:
existing_phrase = unpack_phrase(existing_entry.phrase)
if phrase != existing_phrase:
logmessage("index: the phrase changed; updating it")
existing_entry.phrase = the_phrase
existing_entry.upload = None
existing_entry.encrypted = encrypted
else:
new_entry = SpeakList(filename=yaml_filename, key=user_code, phrase=the_phrase, question=interview_status.question.number, digest=the_hash, type=question_type, language=the_language, dialect=the_dialect, encrypted=encrypted, voice=the_voice)
db.session.add(new_entry)
db.session.commit()
append_css_urls = []
if not is_ajax:
start_output = standard_header_start
if 'css' in interview.external_files:
for packageref, fileref in interview.external_files['css']:
the_url = get_url_from_file_reference(fileref, _package=packageref)
if is_js:
append_css_urls.append(the_url)
if the_url is not None:
start_output += "\n" + ' <link href="' + the_url + '" rel="stylesheet">'
else:
logmessage("index: could not find css file " + str(fileref))
start_output += global_css + additional_css(interview_status)
if is_js:
append_javascript += additional_css(interview_status, js_only=True)
start_output += '\n <title>' + interview_status.tabtitle + '</title>\n </head>\n <body class="' + bodyclass + '">\n <div id="dabody">\n'
if interview.options.get('hide navbar', False):
output = make_navbar(interview_status, (steps - user_dict['_internal']['steps_offset']), interview.consolidated_metadata.get('show login', SHOW_LOGIN), user_dict['_internal']['livehelp'], debug_mode, index_params, extra_class='dainvisible')
else:
output = make_navbar(interview_status, (steps - user_dict['_internal']['steps_offset']), interview.consolidated_metadata.get('show login', SHOW_LOGIN), user_dict['_internal']['livehelp'], debug_mode, index_params)
output += flash_content + ' <div class="container">' + "\n " + '<div class="row tab-content">' + "\n"
if the_nav_bar != '':
if interview_status.using_navigation == 'vertical':
output += horiz_nav_bar
output += the_nav_bar
output += content
if 'rightText' in interview_status.extras:
if interview_status.using_navigation == 'vertical':
output += ' <section id="daright" role="complementary" class="' + daconfig['grid classes']['vertical navigation']['right'] + ' daright">\n'
else:
if interview_status.flush_left():
output += ' <section id="daright" role="complementary" class="' + daconfig['grid classes']['flush left']['right'] + ' daright">\n'
else:
output += ' <section id="daright" role="complementary" class="' + daconfig['grid classes']['centered']['right'] + ' daright">\n'
output += docassemble.base.util.markdown_to_html(interview_status.extras['rightText'], trim=False, status=interview_status) + "\n"
output += ' </section>\n'
output += " </div>\n"
if interview_status.question.question_type != "signature" and interview_status.post:
output += ' <div class="row">' + "\n"
if interview_status.using_navigation == 'vertical':
output += ' <div class="' + daconfig['grid classes']['vertical navigation']['body'] + ' daattributions" id="daattributions">\n'
else:
if interview_status.flush_left():
output += ' <div class="' + daconfig['grid classes']['flush left']['body'] + ' daattributions" id="daattributions">\n'
else:
output += ' <div class="' + daconfig['grid classes']['centered']['body'] + ' daattributions" id="daattributions">\n'
output += interview_status.post
output += ' </div>\n'
output += ' </div>' + "\n"
if len(interview_status.attributions) > 0:
output += ' <div class="row">' + "\n"
if interview_status.using_navigation == 'vertical':
output += ' <div class="' + daconfig['grid classes']['vertical navigation']['body'] + ' daattributions" id="daattributions">\n'
else:
if interview_status.flush_left():
output += ' <div class="' + daconfig['grid classes']['flush left']['body'] + ' daattributions" id="daattributions">\n'
else:
output += ' <div class="' + daconfig['grid classes']['centered']['body'] + ' daattributions" id="daattributions">\n'
output += ' <br/><br/><br/><br/><br/><br/><br/>\n'
for attribution in sorted(interview_status.attributions):
output += ' <div><p><cite><small>' + docassemble.base.util.markdown_to_html(attribution, status=interview_status, strip_newlines=True, trim=True) + '</small></cite></p></div>\n'
output += ' </div>\n'
output += ' </div>' + "\n"
if debug_mode:
output += ' <div id="dasource" class="collapse mt-3">' + "\n"
output += ' <h2 class="visually-hidden">Information for developers</h2>\n'
output += ' <div class="row">' + "\n"
output += ' <div class="col-md-12">' + "\n"
if interview_status.using_screen_reader:
output += ' <h3>' + word('Plain text of sections') + '</h3>' + "\n"
for question_type in ('question', 'help'):
if question_type in interview_status.screen_reader_text:
output += '<pre style="white-space: pre-wrap;">' + to_text(interview_status.screen_reader_text[question_type]) + '</pre>\n'
output += ' <hr>\n'
output += ' <h3>' + word('Source code for question') + '<a class="float-end btn btn-info" target="_blank" href="' + url_for('get_variables', i=yaml_filename) + '">' + word('Show variables and values') + '</a></h3>' + "\n"
if interview_status.question.from_source.path != interview.source.path and interview_status.question.from_source.path is not None:
output += ' <p style="font-weight: bold;"><small>(' + word('from') + ' ' + interview_status.question.from_source.path + ")</small></p>\n"
if (not hasattr(interview_status.question, 'source_code')) or interview_status.question.source_code is None:
output += ' <p>' + word('unavailable') + '</p>'
else:
output += highlight(interview_status.question.source_code, YamlLexer(), HtmlFormatter(cssclass='bg-light highlight dahighlight'))
if len(interview_status.seeking) > 1:
output += ' <h4>' + word('How question came to be asked') + '</h4>' + "\n"
output += get_history(interview, interview_status)
output += ' </div>' + "\n"
output += ' </div>' + "\n"
output += ' <div class="row mt-4">' + "\n"
output += ' <div class="col-md-8 col-lg-6">' + "\n"
output += readability_report
output += ' </div>' + "\n"
output += ' </div>' + "\n"
output += ' </div>' + "\n"
output += ' </div>'
if interview_status.footer:
output += """
<footer class=""" + '"' + app.config['FOOTER_CLASS'] + '"' + """>
<div class="container">
""" + interview_status.footer + """
</div>
</footer>
"""
if not is_ajax:
end_output = scripts + global_js + "\n" + indent_by("".join(interview_status.extra_scripts).strip(), 4).rstrip() + "\n </div>\n </body>\n</html>"
key = 'da:html:uid:' + str(user_code) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)
pipe = r.pipeline()
pipe.set(key, json.dumps({'body': output, 'extra_scripts': interview_status.extra_scripts, 'global_css': global_css, 'extra_css': interview_status.extra_css, 'browser_title': interview_status.tabtitle, 'lang': interview_language, 'bodyclass': bodyclass, 'bootstrap_theme': bootstrap_theme}))
pipe.expire(key, 60)
pipe.execute()
if user_dict['_internal']['livehelp']['availability'] != 'unavailable':
inputkey = 'da:input:uid:' + str(user_code) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)
r.publish(inputkey, json.dumps({'message': 'newpage', 'key': key}))
if is_json:
data = {'browser_title': interview_status.tabtitle, 'lang': interview_language, 'csrf_token': generate_csrf(), 'steps': steps, 'allow_going_back': allow_going_back, 'message_log': docassemble.base.functions.get_message_log(), 'id_dict': question_id_dict}
data.update(interview_status.as_data(user_dict))
if reload_after and reload_after > 0:
data['reload_after'] = reload_after
if 'action' in data and data['action'] == 'redirect' and 'url' in data:
logmessage("Redirecting because of a redirect action.")
response = redirect(data['url'])
else:
response = jsonify(**data)
elif is_ajax:
if interview_status.question.checkin is not None:
do_action = interview_status.question.checkin
else:
do_action = None
if interview.options.get('send question data', False):
response = jsonify(action='body', body=output, extra_scripts=interview_status.extra_scripts, extra_css=interview_status.extra_css, browser_title=interview_status.tabtitle, lang=interview_language, bodyclass=bodyclass, reload_after=reload_after, livehelp=user_dict['_internal']['livehelp'], csrf_token=generate_csrf(), do_action=do_action, steps=steps, allow_going_back=allow_going_back, message_log=docassemble.base.functions.get_message_log(), id_dict=question_id_dict, question_data=interview_status.as_data(user_dict))
else:
response = jsonify(action='body', body=output, extra_scripts=interview_status.extra_scripts, extra_css=interview_status.extra_css, browser_title=interview_status.tabtitle, lang=interview_language, bodyclass=bodyclass, reload_after=reload_after, livehelp=user_dict['_internal']['livehelp'], csrf_token=generate_csrf(), do_action=do_action, steps=steps, allow_going_back=allow_going_back, message_log=docassemble.base.functions.get_message_log(), id_dict=question_id_dict)
if response_wrapper:
response_wrapper(response)
if return_fake_html:
fake_up(response, interview_language)
elif is_js:
output = the_js + "\n" + append_javascript
if 'global css' in daconfig:
for fileref in daconfig['global css']:
append_css_urls.append(get_url_from_file_reference(fileref))
if 'global javascript' in daconfig:
for fileref in daconfig['global javascript']:
append_script_urls.append(get_url_from_file_reference(fileref))
if len(append_css_urls) > 0:
output += """
var daLink;"""
for path in append_css_urls:
output += """
daLink = document.createElement('link');
daLink.href = """ + json.dumps(path) + """;
daLink.rel = "stylesheet";
document.head.appendChild(daLink);
"""
if len(append_script_urls) > 0:
output += """
var daScript;"""
for path in append_script_urls:
output += """
daScript = document.createElement('script');
daScript.src = """ + json.dumps(path) + """;
document.head.appendChild(daScript);
"""
response = make_response(output.encode('utf-8'), '200 OK')
response.headers['Content-type'] = 'application/javascript; charset=utf-8'
else:
output = start_output + output + end_output
response = make_response(output.encode('utf-8'), '200 OK')
response.headers['Content-type'] = 'text/html; charset=utf-8'
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
release_lock(user_code, yaml_filename)
if 'in error' in session:
del session['in error']
if response_wrapper:
response_wrapper(response)
return response
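# Record that a field is being set and remember its prior value, if any.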
def process_set_variable(field_name, user_dict, vars_set, old_values):
vars_set.add(field_name)
try:
old_values[field_name] = eval(field_name, user_dict)
except:
pass
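# Apply any privacy, persistence, user-access, and privilege-access
# permissions declared on an upload field to the files it produced.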
def add_permissions_for_field(the_field, interview_status, files_to_process):
if hasattr(the_field, 'permissions'):
if the_field.number in interview_status.extras['permissions']:
permissions = interview_status.extras['permissions'][the_field.number]
if 'private' in permissions or 'persistent' in permissions:
for (filename, file_number, mimetype, extension) in files_to_process: # pylint: disable=unused-variable
attribute_args = {}
if 'private' in permissions:
attribute_args['private'] = permissions['private']
if 'persistent' in permissions:
attribute_args['persistent'] = permissions['persistent']
file_set_attributes(file_number, **attribute_args)
if 'allow_users' in permissions:
for (filename, file_number, mimetype, extension) in files_to_process:
allow_user_id = []
allow_email = []
for item in permissions['allow_users']:
if isinstance(item, int):
allow_user_id.append(item)
else:
allow_email.append(item)
file_user_access(file_number, allow_user_id=allow_user_id, allow_email=allow_email)
if 'allow_privileges' in permissions:
for (filename, file_number, mimetype, extension) in files_to_process:
file_privilege_access(file_number, allow=permissions['allow_privileges'])
def fake_up(response, interview_language):
response.set_data('<!DOCTYPE html><html lang="' + interview_language + '"><head><meta charset="utf-8"><title>Response</title></head><body><pre>ABCDABOUNDARYSTARTABC' + codecs.encode(response.get_data(), 'base64').decode() + 'ABCDABOUNDARYENDABC</pre></body></html>')
response.headers['Content-type'] = 'text/html; charset=utf-8'
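# Push an action onto the per-session event stack, replacing an identical
# action that is already at the top so it is not queued twice.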
def add_action_to_stack(interview_status, user_dict, action, arguments):
unique_id = interview_status.current_info['user']['session_uid']
if 'event_stack' not in user_dict['_internal']:
user_dict['_internal']['event_stack'] = {}
if unique_id not in user_dict['_internal']['event_stack']:
user_dict['_internal']['event_stack'][unique_id] = []
if len(user_dict['_internal']['event_stack'][unique_id]) > 0 and user_dict['_internal']['event_stack'][unique_id][0]['action'] == action and user_dict['_internal']['event_stack'][unique_id][0]['arguments'] == arguments:
user_dict['_internal']['event_stack'][unique_id].pop(0)
user_dict['_internal']['event_stack'][unique_id].insert(0, {'action': action, 'arguments': arguments})
def sub_indices(the_var, the_user_dict):
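    """Rewrite a variable name, substituting the instance name of the current object for a leading 'x' and the current values of the iterator variables i, j, k, l, m, and n for bracketed indices."""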
try:
if the_var.startswith('x.') and 'x' in the_user_dict and isinstance(the_user_dict['x'], DAObject):
the_var = re.sub(r'^x\.', the_user_dict['x'].instanceName + '.', the_var)
if the_var.startswith('x[') and 'x' in the_user_dict and isinstance(the_user_dict['x'], DAObject):
the_var = re.sub(r'^x\[', the_user_dict['x'].instanceName + '[', the_var)
if re.search(r'\[[ijklmn]\]', the_var):
the_var = re.sub(r'\[([ijklmn])\]', lambda m: '[' + repr(the_user_dict[m.group(1)]) + ']', the_var)
except KeyError as the_err:
missing_var = str(the_err)
raise DAError("Reference to variable " + missing_var + " that was not defined")
return the_var
def fixstr(data):
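    """Return data re-encoded as UTF-8 bytes, silently dropping anything that cannot be decoded."""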
return bytearray(data, encoding='utf-8').decode('utf-8', 'ignore').encode("utf-8")
def get_history(interview, interview_status):
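    """Return an HTML summary of the interview's "seeking" history: the blocks it ran, the questions it considered, and the variables it needed on the way to the current question."""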
output = ''
has_question = bool(hasattr(interview_status, 'question'))
the_index = 0
seeking_len = len(interview_status.seeking)
if seeking_len:
starttime = interview_status.seeking[0]['time']
seen_done = False
for stage in interview_status.seeking:
if seen_done:
output = ''
seen_done = False
the_index += 1
if the_index < seeking_len and 'reason' in interview_status.seeking[the_index] and interview_status.seeking[the_index]['reason'] in ('asking', 'running') and interview_status.seeking[the_index]['question'] is stage['question'] and 'question' in stage and 'reason' in stage and stage['reason'] == 'considering':
continue
the_time = " at %.5fs" % (stage['time'] - starttime)
if 'question' in stage and 'reason' in stage and (has_question is False or the_index < (seeking_len - 1) or stage['question'] is not interview_status.question):
if stage['reason'] == 'initial':
output += " <h5>Ran initial code" + the_time + "</h5>\n"
elif stage['reason'] == 'mandatory question':
output += " <h5>Tried to ask mandatory question" + the_time + "</h5>\n"
elif stage['reason'] == 'mandatory code':
output += " <h5>Tried to run mandatory code" + the_time + "</h5>\n"
elif stage['reason'] == 'asking':
output += " <h5>Tried to ask question" + the_time + "</h5>\n"
elif stage['reason'] == 'running':
output += " <h5>Tried to run block" + the_time + "</h5>\n"
elif stage['reason'] == 'considering':
output += " <h5>Considered using block" + the_time + "</h5>\n"
elif stage['reason'] == 'objects from file':
output += " <h5>Tried to load objects from file" + the_time + "</h5>\n"
elif stage['reason'] == 'data':
output += " <h5>Tried to load data" + the_time + "</h5>\n"
elif stage['reason'] == 'objects':
output += " <h5>Tried to load objects" + the_time + "</h5>\n"
elif stage['reason'] == 'result of multiple choice':
output += " <h5>Followed the result of multiple choice selection" + the_time + "</h5>\n"
if stage['question'].from_source.path != interview.source.path and stage['question'].from_source.path is not None:
output += ' <p style="font-weight: bold;"><small>(' + word('from') + ' ' + stage['question'].from_source.path + ")</small></p>\n"
if (not hasattr(stage['question'], 'source_code')) or stage['question'].source_code is None:
output += word('(embedded question, source code not available)')
else:
output += highlight(stage['question'].source_code, YamlLexer(), HtmlFormatter(cssclass='bg-light highlight dahighlight'))
elif 'variable' in stage:
output += ' <h5>Needed definition of <code class="da-variable-needed">' + str(stage['variable']) + "</code>" + the_time + "</h5>\n"
elif 'done' in stage:
output += " <h5>Completed processing" + the_time + "</h5>\n"
seen_done = True
return output
def is_mobile_or_tablet():
ua_string = request.headers.get('User-Agent', None)
if ua_string is not None:
response = ua_parse(ua_string)
if response.is_mobile or response.is_tablet:
return True
return False
def get_referer():
return request.referrer or None
def add_referer(user_dict, referer=None):
if referer:
user_dict['_internal']['referer'] = referer
elif request.referrer:
user_dict['_internal']['referer'] = request.referrer
else:
user_dict['_internal']['referer'] = None
@app.template_filter('word')
def word_filter(text):
return docassemble.base.functions.word(str(text))
def get_part(part, default=None):
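    """Return the custom page component named part for the current language, falling back to the default language, then to '*', then to default."""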
if default is None:
default = str()
if part not in page_parts:
return default
if 'language' in session:
lang = session['language']
else:
lang = DEFAULT_LANGUAGE
if lang in page_parts[part]:
return page_parts[part][lang]
if lang != DEFAULT_LANGUAGE and DEFAULT_LANGUAGE in page_parts[part]:
return page_parts[part][DEFAULT_LANGUAGE]
if '*' in page_parts[part]:
return page_parts[part]['*']
return default
@app.context_processor
def utility_processor():
def user_designator(the_user):
if the_user.email:
return the_user.email
return re.sub(r'.*\$', '', the_user.social_id)
if 'language' in session:
docassemble.base.functions.set_language(session['language'])
lang = session['language']
elif 'Accept-Language' in request.headers:
langs = docassemble.base.functions.parse_accept_language(request.headers['Accept-Language'])
if len(langs) > 0:
docassemble.base.functions.set_language(langs[0])
lang = langs[0]
else:
docassemble.base.functions.set_language(DEFAULT_LANGUAGE)
lang = DEFAULT_LANGUAGE
else:
docassemble.base.functions.set_language(DEFAULT_LANGUAGE)
lang = DEFAULT_LANGUAGE
def in_debug():
return DEBUG
return {'word': docassemble.base.functions.word, 'in_debug': in_debug, 'user_designator': user_designator, 'get_part': get_part, 'current_language': lang}
@app.route('/speakfile', methods=['GET'])
def speak_file():
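    """Serve the audio rendition (mp3 or ogg) of a question's text, generating it through the VoiceRSS API and caching the result if it does not already exist."""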
audio_file = None
filename = request.args.get('i', None)
if filename is None:
return ('You must pass the filename (i) to read it out loud', 400)
session_info = get_session(filename)
if session_info is None:
return ("You must include a session to read a screen out loud", 400)
key = session_info['uid']
# encrypted = session_info['encrypted']
question = request.args.get('question', None)
question_type = request.args.get('type', None)
file_format = request.args.get('format', None)
the_language = request.args.get('language', None)
the_dialect = request.args.get('dialect', None)
the_voice = request.args.get('voice', '')
if the_voice == '':
the_voice = None
the_hash = request.args.get('digest', None)
secret = request.cookies.get('secret', None)
if secret is not None:
secret = str(secret)
if file_format not in ('mp3', 'ogg') or not (filename and key and question and question_type and file_format and the_language and the_dialect):
logmessage("speak_file: could not serve speak file because invalid or missing data was provided: filename " + str(filename) + " and key " + str(key) + " and question number " + str(question) + " and question type " + str(question_type) + " and language " + str(the_language) + " and dialect " + str(the_dialect))
return ('File not found', 404)
params = {'filename': filename, 'key': key, 'question': question, 'digest': the_hash, 'type': question_type, 'language': the_language, 'dialect': the_dialect}
if the_voice:
params['voice'] = the_voice
entry = db.session.execute(select(SpeakList).filter_by(**params)).scalar()
if not entry:
logmessage("speak_file: could not serve speak file because no entry could be found in speaklist for filename " + str(filename) + " and key " + str(key) + " and question number " + str(question) + " and question type " + str(question_type) + " and language " + str(the_language) + " and dialect " + str(the_dialect) + " and voice " + str(the_voice))
return ('File not found', 404)
if not entry.upload:
existing_entry = db.session.execute(select(SpeakList).where(and_(SpeakList.phrase == entry.phrase, SpeakList.language == entry.language, SpeakList.dialect == entry.dialect, SpeakList.voice == entry.voice, SpeakList.upload != None, SpeakList.encrypted == entry.encrypted))).scalar() # noqa: E711 # pylint: disable=singleton-comparison
if existing_entry:
logmessage("speak_file: found existing entry: " + str(existing_entry.id) + ". Setting to " + str(existing_entry.upload))
entry.upload = existing_entry.upload
else:
if not VOICERSS_ENABLED:
logmessage("speak_file: could not serve speak file because voicerss not enabled")
return ('File not found', 404)
new_file_number = get_new_file_number(key, 'speak.mp3', yaml_file_name=filename)
# phrase = codecs.decode(entry.phrase, 'base64')
if entry.encrypted:
phrase = decrypt_phrase(entry.phrase, secret)
else:
phrase = unpack_phrase(entry.phrase)
url = voicerss_config.get('url', "https://api.voicerss.org/")
# logmessage("Retrieving " + url)
audio_file = SavedFile(new_file_number, extension='mp3', fix=True, should_not_exist=True)
voicerss_parameters = {'f': voicerss_config.get('format', '16khz_16bit_stereo'), 'key': voicerss_config['key'], 'src': phrase, 'hl': str(entry.language) + '-' + str(entry.dialect)}
if the_voice is not None:
voicerss_parameters['v'] = the_voice
audio_file.fetch_url_post(url, voicerss_parameters)
if audio_file.size_in_bytes() > 100:
call_array = [daconfig.get('pacpl', 'pacpl'), '-t', 'ogg', audio_file.path + '.mp3']
logmessage("speak_file: calling " + " ".join(call_array))
result = subprocess.run(call_array, check=False).returncode
if result != 0:
logmessage("speak_file: failed to convert downloaded mp3 (" + audio_file.path + '.mp3' + ") to ogg")
return ('File not found', 404)
entry.upload = new_file_number
audio_file.finalize()
db.session.commit()
else:
logmessage("speak_file: download from voicerss (" + url + ") failed")
return ('File not found', 404)
if not entry.upload:
logmessage("speak_file: upload file number was not set")
return ('File not found', 404)
if not audio_file:
audio_file = SavedFile(entry.upload, extension='mp3', fix=True)
the_path = audio_file.path + '.' + file_format
if not os.path.isfile(the_path):
logmessage("speak_file: could not serve speak file because file (" + the_path + ") not found")
return ('File not found', 404)
response = send_file(the_path, mimetype=audio_mimetype_table[file_format])
return response
def interview_menu(absolute_urls=False, start_new=False, tag=None):
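    """Build the list of dispatchable interviews for the start page, skipping unlisted interviews and ones the current user may not see, optionally filtered by tag."""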
interview_info = []
for key, yaml_filename in sorted(daconfig['dispatch'].items()):
try:
interview = docassemble.base.interview_cache.get_interview(yaml_filename)
if interview.is_unlisted():
continue
if current_user.is_anonymous:
if not interview.allowed_to_see_listed(is_anonymous=True):
continue
else:
if not interview.allowed_to_see_listed(has_roles=[role.name for role in current_user.roles]):
continue
if interview.source is None:
package = None
else:
package = interview.source.get_package()
titles = interview.get_title({'_internal': {}})
tags = interview.get_tags({'_internal': {}})
metadata = copy.deepcopy(interview.consolidated_metadata)
if 'tags' in metadata:
del metadata['tags']
interview_title = titles.get('full', titles.get('short', word('Untitled')))
subtitle = titles.get('sub', None)
status_class = None
subtitle_class = None
except:
interview_title = yaml_filename
tags = set()
metadata = {}
package = None
subtitle = None
status_class = 'dainterviewhaserror'
subtitle_class = 'dainvisible'
logmessage("interview_dispatch: unable to load interview file " + yaml_filename)
if tag is not None and tag not in tags:
continue
if absolute_urls:
if start_new:
url = url_for('run_interview', dispatch=key, _external=True, reset='1')
else:
url = url_for('redirect_to_interview', dispatch=key, _external=True)
else:
if start_new:
url = url_for('run_interview', dispatch=key, reset='1')
else:
url = url_for('redirect_to_interview', dispatch=key)
interview_info.append({'link': url, 'title': interview_title, 'status_class': status_class, 'subtitle': subtitle, 'subtitle_class': subtitle_class, 'filename': yaml_filename, 'package': package, 'tags': sorted(tags), 'metadata': metadata})
return interview_info
@app.route('/list', methods=['GET'])
def interview_start():
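    """Show the list of available interviews (the /list page), or redirect to the configured 'dispatch interview' if one is set."""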
if current_user.is_anonymous and not daconfig.get('allow anonymous access', True):
return redirect(url_for('user.login', next=url_for('interview_start', **request.args)))
if not current_user.is_anonymous and not current_user.is_authenticated:
response = redirect(url_for('interview_start'))
response.set_cookie('remember_token', '', expires=0)
response.set_cookie('visitor_secret', '', expires=0)
response.set_cookie('secret', '', expires=0)
response.set_cookie('session', '', expires=0)
return response
setup_translation()
if len(daconfig['dispatch']) == 0:
return redirect(url_for('index', i=final_default_yaml_filename))
is_json = bool(('json' in request.form and as_int(request.form['json'])) or ('json' in request.args and as_int(request.args['json'])))
tag = request.args.get('tag', None)
if daconfig.get('dispatch interview', None) is not None:
if is_json:
if tag:
return redirect(url_for('index', i=daconfig.get('dispatch interview'), from_list='1', json='1', tag=tag))
return redirect(url_for('index', i=daconfig.get('dispatch interview'), from_list='1', json='1'))
if tag:
return redirect(url_for('index', i=daconfig.get('dispatch interview'), from_list='1', tag=tag))
return redirect(url_for('index', i=daconfig.get('dispatch interview'), from_list='1'))
if 'embedded' in request.args and int(request.args['embedded']):
the_page = 'pages/start-embedded.html'
embed = True
else:
embed = False
interview_info = interview_menu(absolute_urls=embed, tag=tag)
if is_json:
return jsonify(action='menu', interviews=interview_info)
argu = {'version_warning': None, 'interview_info': interview_info}
if embed:
the_page = 'pages/start-embedded.html'
else:
if 'start page template' in daconfig and daconfig['start page template']:
the_page = docassemble.base.functions.package_template_filename(daconfig['start page template'])
if the_page is None:
raise DAError("Could not find start page template " + daconfig['start page template'])
with open(the_page, 'r', encoding='utf-8') as fp:
template_string = fp.read()
return render_template_string(template_string, **argu)
else:
the_page = 'pages/start.html'
resp = make_response(render_template(the_page, **argu))
if embed:
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp
@app.route('/start/<package>/<directory>/<filename>/', methods=['GET'])
def redirect_to_interview_in_package_directory(package, directory, filename):
# setup_translation()
if COOKIELESS_SESSIONS:
return html_index()
arguments = {}
for arg in request.args:
arguments[arg] = request.args[arg]
arguments['i'] = 'docassemble.' + package + ':data/questions/' + directory + '/' + filename + '.yml'
if 'session' not in arguments:
arguments['new_session'] = '1'
request.args = arguments
return index(refer=['start_directory', package, directory, filename])
@app.route('/start/<package>/<filename>/', methods=['GET'])
def redirect_to_interview_in_package(package, filename):
# setup_translation()
if COOKIELESS_SESSIONS:
return html_index()
arguments = {}
for arg in request.args:
arguments[arg] = request.args[arg]
if re.search(r'playground[0-9]', package):
arguments['i'] = 'docassemble.' + package + ':' + filename + '.yml'
else:
arguments['i'] = 'docassemble.' + package + ':data/questions/' + filename + '.yml'
if 'session' not in arguments:
arguments['new_session'] = '1'
request.args = arguments
return index(refer=['start', package, filename])
@app.route('/start/<dispatch>/', methods=['GET'])
def redirect_to_interview(dispatch):
# setup_translation()
# logmessage("redirect_to_interview: the dispatch is " + str(dispatch))
if COOKIELESS_SESSIONS:
return html_index()
yaml_filename = daconfig['dispatch'].get(dispatch, None)
if yaml_filename is None:
return ('File not found', 404)
arguments = {}
for arg in request.args:
arguments[arg] = request.args[arg]
arguments['i'] = yaml_filename
if 'session' not in arguments:
arguments['new_session'] = '1'
request.args = arguments
return index(refer=['start_dispatch', dispatch])
@app.route('/run/<package>/<directory>/<filename>/', methods=['GET'])
def run_interview_in_package_directory(package, directory, filename):
# setup_translation()
if COOKIELESS_SESSIONS:
return html_index()
arguments = {}
for arg in request.args:
arguments[arg] = request.args[arg]
arguments['i'] = 'docassemble.' + package + ':data/questions/' + directory + '/' + filename + '.yml'
request.args = arguments
return index(refer=['run_directory', package, directory, filename])
@app.route('/run/<package>/<filename>/', methods=['GET'])
def run_interview_in_package(package, filename):
# setup_translation()
if COOKIELESS_SESSIONS:
return html_index()
arguments = {}
for arg in request.args:
arguments[arg] = request.args[arg]
if re.search(r'playground[0-9]', package):
arguments['i'] = 'docassemble.' + package + ':' + filename + '.yml'
else:
arguments['i'] = 'docassemble.' + package + ':data/questions/' + filename + '.yml'
request.args = arguments
return index(refer=['run', package, filename])
@app.route('/run/<dispatch>/', methods=['GET'])
def run_interview(dispatch):
# setup_translation()
if COOKIELESS_SESSIONS:
return html_index()
yaml_filename = daconfig['dispatch'].get(dispatch, None)
if yaml_filename is None:
return ('File not found', 404)
arguments = {}
for arg in request.args:
arguments[arg] = request.args[arg]
arguments['i'] = yaml_filename
request.args = arguments
return index(refer=['run_dispatch', dispatch])
@app.route('/storedfile/<uid>/<number>/<filename>.<extension>', methods=['GET'])
def serve_stored_file(uid, number, filename, extension):
return do_serve_stored_file(uid, number, filename, extension)
@app.route('/storedfiledownload/<uid>/<number>/<filename>.<extension>', methods=['GET'])
def serve_stored_file_download(uid, number, filename, extension):
return do_serve_stored_file(uid, number, filename, extension, download=True)
def do_serve_stored_file(uid, number, filename, extension, download=False):
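    """Send a stored file to the browser after verifying that the given session uid is allowed to access the file number."""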
number = re.sub(r'[^0-9]', '', str(number))
if not can_access_file_number(number, uids=[uid]):
return ('File not found', 404)
try:
file_info = get_info_from_file_number(number, privileged=True, uids=get_session_uids())
except:
return ('File not found', 404)
if 'path' not in file_info:
return ('File not found', 404)
if not os.path.isfile(file_info['path']):
return ('File not found', 404)
response = send_file(file_info['path'], mimetype=file_info['mimetype'], download_name=filename + '.' + extension)
if download:
response.headers['Content-Disposition'] = 'attachment; filename=' + json.dumps(filename + '.' + extension)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
@app.route('/tempfile/<code>/<filename>.<extension>', methods=['GET'])
def serve_temporary_file(code, filename, extension):
return do_serve_temporary_file(code, filename, extension)
@app.route('/tempfiledownload/<code>/<filename>.<extension>', methods=['GET'])
def serve_temporary_file_download(code, filename, extension):
return do_serve_temporary_file(code, filename, extension, download=True)
def do_serve_temporary_file(code, filename, extension, download=False):
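    """Look up a temporary file by its redis code and send it to the browser."""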
file_info = r.get('da:tempfile:' + str(code))
if file_info is None:
logmessage("serve_temporary_file: file_info was none")
return ('File not found', 404)
(section, file_number) = file_info.decode().split('^')
the_file = SavedFile(file_number, fix=True, section=section)
the_path = the_file.path
if not os.path.isfile(the_path):
return ('File not found', 404)
(extension, mimetype) = get_ext_and_mimetype(filename + '.' + extension)
response = send_file(the_path, mimetype=mimetype, download_name=filename + '.' + extension)
if download:
response.headers['Content-Disposition'] = 'attachment; filename=' + json.dumps(filename + '.' + extension)
return response
@app.route('/packagezip', methods=['GET'])
@login_required
@roles_required(['admin', 'developer'])
def download_zip_package():
package_name = request.args.get('package', None)
if not package_name:
return ('File not found', 404)
package_name = werkzeug.utils.secure_filename(package_name)
package = db.session.execute(select(Package).filter_by(active=True, name=package_name, type='zip')).scalar()
if package is None:
return ('File not found', 404)
if not current_user.has_role('admin'):
auth = db.session.execute(select(PackageAuth).filter_by(package_id=package.id, user_id=current_user.id)).scalar()
if auth is None:
return ('File not found', 404)
try:
file_info = get_info_from_file_number(package.upload, privileged=True)
except:
return ('File not found', 404)
filename = re.sub(r'\.', '-', package_name) + '.zip'
response = send_file(file_info['path'] + '.zip', mimetype='application/zip', download_name=filename)
response.headers['Content-Disposition'] = 'attachment; filename=' + json.dumps(filename)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
@app.route('/uploadedfile/<number>/<filename>.<extension>', methods=['GET'])
def serve_uploaded_file_with_filename_and_extension(number, filename, extension):
return do_serve_uploaded_file_with_filename_and_extension(number, filename, extension)
@app.route('/uploadedfiledownload/<number>/<filename>.<extension>', methods=['GET'])
def serve_uploaded_file_with_filename_and_extension_download(number, filename, extension):
return do_serve_uploaded_file_with_filename_and_extension(number, filename, extension, download=True)
def do_serve_uploaded_file_with_filename_and_extension(number, filename, extension, download=False):
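    """Send an uploaded file identified by number, filename, and extension, redirecting to a temporary cloud URL when 'use cloud urls' is enabled."""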
filename = secure_filename_spaces_ok(filename)
extension = werkzeug.utils.secure_filename(extension)
privileged = bool(current_user.is_authenticated and current_user.has_role('admin', 'advocate'))
number = re.sub(r'[^0-9]', '', str(number))
if cloud is not None and daconfig.get('use cloud urls', False):
if not (privileged or can_access_file_number(number, uids=get_session_uids())):
return ('File not found', 404)
the_file = SavedFile(number)
if download:
return redirect(the_file.temp_url_for(_attachment=True))
return redirect(the_file.temp_url_for())
try:
file_info = get_info_from_file_number(number, privileged=privileged, uids=get_session_uids())
except:
return ('File not found', 404)
if 'path' not in file_info:
return ('File not found', 404)
# logmessage("Filename is " + file_info['path'] + '.' + extension)
if os.path.isfile(file_info['path'] + '.' + extension):
# logmessage("Using " + file_info['path'] + '.' + extension)
extension, mimetype = get_ext_and_mimetype(file_info['path'] + '.' + extension)
response = send_file(file_info['path'] + '.' + extension, mimetype=mimetype, download_name=filename + '.' + extension)
if download:
response.headers['Content-Disposition'] = 'attachment; filename=' + json.dumps(filename + '.' + extension)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
if os.path.isfile(os.path.join(os.path.dirname(file_info['path']), filename + '.' + extension)):
# logmessage("Using " + os.path.join(os.path.dirname(file_info['path']), filename + '.' + extension))
extension, mimetype = get_ext_and_mimetype(filename + '.' + extension)
response = send_file(os.path.join(os.path.dirname(file_info['path']), filename + '.' + extension), mimetype=mimetype, download_name=filename + '.' + extension)
if download:
response.headers['Content-Disposition'] = 'attachment; filename=' + json.dumps(filename + '.' + extension)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
return ('File not found', 404)
@app.route('/uploadedfile/<number>.<extension>', methods=['GET'])
def serve_uploaded_file_with_extension(number, extension):
return do_serve_uploaded_file_with_extension(number, extension)
@app.route('/uploadedfiledownload/<number>.<extension>', methods=['GET'])
def serve_uploaded_file_with_extension_download(number, extension):
return do_serve_uploaded_file_with_extension(number, extension, download=True)
def do_serve_uploaded_file_with_extension(number, extension, download=False):
extension = werkzeug.utils.secure_filename(extension)
privileged = bool(current_user.is_authenticated and current_user.has_role('admin', 'advocate'))
number = re.sub(r'[^0-9]', '', str(number))
if cloud is not None and daconfig.get('use cloud urls', False):
if not can_access_file_number(number, uids=get_session_uids()):
return ('File not found', 404)
the_file = SavedFile(number)
if download:
return redirect(the_file.temp_url_for(_attachment=True))
return redirect(the_file.temp_url_for())
try:
file_info = get_info_from_file_number(number, privileged=privileged, uids=get_session_uids())
except:
return ('File not found', 404)
if 'path' not in file_info:
return ('File not found', 404)
if os.path.isfile(file_info['path'] + '.' + extension):
extension, mimetype = get_ext_and_mimetype(file_info['path'] + '.' + extension)
response = send_file(file_info['path'] + '.' + extension, mimetype=mimetype, download_name=str(number) + '.' + extension)
if download:
response.headers['Content-Disposition'] = 'attachment; filename=' + json.dumps(str(number) + '.' + extension)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
return ('File not found', 404)
@app.route('/uploadedfile/<number>', methods=['GET'])
def serve_uploaded_file(number):
return do_serve_uploaded_file(number)
def do_serve_uploaded_file(number, download=False):
number = re.sub(r'[^0-9]', '', str(number))
privileged = bool(current_user.is_authenticated and current_user.has_role('admin', 'advocate'))
try:
file_info = get_info_from_file_number(number, privileged=privileged, uids=get_session_uids())
except:
return ('File not found', 404)
if 'path' not in file_info:
return ('File not found', 404)
if not os.path.isfile(file_info['path']):
return ('File not found', 404)
response = send_file(file_info['path'], mimetype=file_info['mimetype'], download_name=os.path.basename(file_info['path']))
if download:
response.headers['Content-Disposition'] = 'attachment; filename=' + json.dumps(os.path.basename(file_info['path']))
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
@app.route('/uploadedpage/<number>/<page>', methods=['GET'])
def serve_uploaded_page(number, page):
return do_serve_uploaded_page(number, page, size='page')
@app.route('/uploadedpagedownload/<number>/<page>', methods=['GET'])
def serve_uploaded_page_download(number, page):
return do_serve_uploaded_page(number, page, download=True, size='page')
@app.route('/uploadedpagescreen/<number>/<page>', methods=['GET'])
def serve_uploaded_pagescreen(number, page):
return do_serve_uploaded_page(number, page, size='screen')
@app.route('/uploadedpagescreendownload/<number>/<page>', methods=['GET'])
def serve_uploaded_pagescreen_download(number, page):
return do_serve_uploaded_page(number, page, download=True, size='screen')
def do_serve_uploaded_page(number, page, download=False, size='page'):
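    """Serve a PNG image of one page of an uploaded file, generating the thumbnail if necessary and falling back to a blank page image on failure."""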
number = re.sub(r'[^0-9]', '', str(number))
page = re.sub(r'[^0-9]', '', str(page))
privileged = bool(current_user.is_authenticated and current_user.has_role('admin', 'advocate'))
try:
file_info = get_info_from_file_number(number, privileged=privileged, uids=get_session_uids())
except Exception as err:
logmessage("do_serve_uploaded_page: " + err.__class__.__name__ + str(err))
return ('File not found', 404)
if 'path' not in file_info:
logmessage('serve_uploaded_page: no access to file number ' + str(number))
return ('File not found', 404)
try:
the_file = DAFile(mimetype=file_info['mimetype'], extension=file_info['extension'], number=number, make_thumbnail=page)
filename = the_file.page_path(page, size)
assert filename is not None
except Exception as err:
logmessage("Could not make thumbnail: " + err.__class__.__name__ + ": " + str(err))
filename = None
if filename is None:
logmessage("do_serve_uploaded_page: sending blank image")
the_file = docassemble.base.functions.package_data_filename('docassemble.base:data/static/blank_page.png')
response = send_file(the_file, mimetype='image/png', download_name='blank_page.png')
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
if os.path.isfile(filename):
response = send_file(filename, mimetype='image/png', download_name=os.path.basename(filename))
if download:
response.headers['Content-Disposition'] = 'attachment; filename=' + json.dumps(os.path.basename(filename))
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
logmessage('do_serve_uploaded_page: path ' + filename + ' is not a file')
return ('File not found', 404)
@app.route('/visit_interview', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'advocate'])
def visit_interview():
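    """Allow an administrator or advocate to join an existing interview session, updating the session and setting the visitor_secret cookie."""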
setup_translation()
i = request.args.get('i', None)
uid = request.args.get('uid', None)
userid = request.args.get('userid', None)
key = 'da:session:uid:' + str(uid) + ':i:' + str(i) + ':userid:' + str(userid)
try:
obj = fix_pickle_obj(r.get(key))
except:
return ('Interview not found', 404)
if 'secret' not in obj or 'encrypted' not in obj:
return ('Interview not found', 404)
update_session(i, uid=uid, encrypted=obj['encrypted'])
if 'user_id' not in session:
session['user_id'] = current_user.id
if 'tempuser' in session:
del session['tempuser']
response = redirect(url_for('index', i=i))
response.set_cookie('visitor_secret', obj['secret'], httponly=True, secure=app.config['SESSION_COOKIE_SECURE'], samesite=app.config['SESSION_COOKIE_SAMESITE'])
return response
@app.route('/observer', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'advocate'])
def observer():
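    """Render the page that lets an administrator or advocate observe, and optionally control, another user's interview session in real time."""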
setup_translation()
session['observer'] = 1
i = request.args.get('i', None)
uid = request.args.get('uid', None)
userid = request.args.get('userid', None)
observation_script = """
<script>
var isAndroid = /android/i.test(navigator.userAgent.toLowerCase());
var daMapInfo = null;
var daWhichButton = null;
var daSendChanges = false;
var daNoConnectionCount = 0;
var daConnected = false;
var daConfirmed = false;
var daObserverChangesInterval = null;
var daInitialized = false;
var daShowingSpinner = false;
var daSpinnerTimeout = null;
var daShowingHelp = false;
var daInformedChanged = false;
var daDisable = null;
var daCsrf = """ + json.dumps(generate_csrf()) + """;
var daShowIfInProcess = false;
var daFieldsToSkip = ['_checkboxes', '_empties', '_ml_info', '_back_one', '_files', '_files_inline', '_question_name', '_the_image', '_save_as', '_success', '_datatypes', '_event', '_visible', '_tracker', '_track_location', '_varnames', '_next_action', '_next_action_to_set', 'ajax', 'json', 'informed', 'csrf_token', '_action', '_order_changes', '_collect', '_list_collect_list', '_null_question'];
var daVarLookup = Object();
var daVarLookupRev = Object();
var daVarLookupMulti = Object();
var daVarLookupRevMulti = Object();
var daVarLookupSelect = Object();
var daVarLookupCheckbox = Object();
var daVarLookupOption = Object();
var daTargetDiv = "#dabody";
var daComboBoxes = Object();
var daLocationBar = """ + json.dumps(url_for('index', i=i)) + """;
var daPostURL = """ + json.dumps(url_for('index', i=i, _external=True)) + """;
var daYamlFilename = """ + json.dumps(i) + """;
var daGlobalEval = eval;
var daShowHideHappened = false;
function daShowSpinner(){
if ($("#daquestion").length > 0){
$('<div id="daSpinner" class="da-spinner-container da-top-for-navbar"><div class="container"><div class="row"><div class="col text-center"><span class="da-spinner"><i class="fas fa-spinner fa-spin"><\/i><\/span><\/div><\/div><\/div><\/div>').appendTo(daTargetDiv);
}
else{
var newSpan = document.createElement('span');
var newI = document.createElement('i');
$(newI).addClass("fas fa-spinner fa-spin");
$(newI).appendTo(newSpan);
$(newSpan).attr("id", "daSpinner");
$(newSpan).addClass("da-sig-spinner da-top-for-navbar");
$(newSpan).appendTo("#dasigtoppart");
}
daShowingSpinner = true;
}
function daHideSpinner(){
$("#daSpinner").remove();
daShowingSpinner = false;
daSpinnerTimeout = null;
}
function daDisableIfNotHidden(query, value){
$(query).each(function(){
var showIfParent = $(this).parents('.dashowif, .dajsshowif');
if (!(showIfParent.length && ($(showIfParent[0]).data('isVisible') == '0' || !$(showIfParent[0]).is(":visible")))){
if ($(this).hasClass('combobox')){
if (value){
daComboBoxes[$(this).attr('id')].disable();
}
else {
daComboBoxes[$(this).attr('id')].enable();
}
}
else if ($(this).hasClass('dafile')){
if (value){
$(this).data("fileinput").disable();
}
else{
$(this).data("fileinput").enable();
}
}
else if ($(this).hasClass('daslider')){
if (value){
$(this).slider('disable');
}
else{
$(this).slider('enable');
}
}
else {
$(this).prop("disabled", value);
}
}
});
}
function daShowIfCompare(theVal, showIfVal){
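        // Compare a field's current value with a show-if target value, coercing numeric-looking strings to numbers first.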
if (typeof theVal == 'string' && theVal.match(/^-?\d+\.\d+$/)){
theVal = parseFloat(theVal);
}
else if (typeof theVal == 'string' && theVal.match(/^-?\d+$/)){
theVal = parseInt(theVal);
}
if (typeof showIfVal == 'string' && showIfVal.match(/^-?\d+\.\d+$/)){
showIfVal = parseFloat(showIfVal);
}
else if (typeof showIfVal == 'string' && showIfVal.match(/^-?\d+$/)){
showIfVal = parseInt(showIfVal);
}
if (typeof theVal == 'string' || typeof showIfVal == 'string'){
if (String(showIfVal) == 'None' && String(theVal) == ''){
return true;
}
return (String(theVal) == String(showIfVal));
}
return (theVal == showIfVal);
}
function rationalizeListCollect(){
var finalNum = $(".dacollectextraheader").last().data('collectnum');
var num = $(".dacollectextraheader:visible").last().data('collectnum');
if (parseInt(num) < parseInt(finalNum)){
if ($('div.dacollectextraheader[data-collectnum="' + num + '"]').find(".dacollectadd").hasClass('dainvisible')){
$('div.dacollectextraheader[data-collectnum="' + (num + 1) + '"]').show('fast');
}
}
var n = parseInt(finalNum);
var firstNum = parseInt($(".dacollectextraheader").first().data('collectnum'));
while (n-- > firstNum){
if ($('div.dacollectextraheader[data-collectnum="' + (n + 1) + '"]:visible').length > 0){
if (!$('div.dacollectextraheader[data-collectnum="' + (n + 1) + '"]').find(".dacollectadd").hasClass('dainvisible') && $('div.dacollectextraheader[data-collectnum="' + n + '"]').find(".dacollectremove").hasClass('dainvisible')){
$('div.dacollectextraheader[data-collectnum="' + (n + 1) + '"]').hide();
}
}
}
var n = parseInt(finalNum);
var seenAddAnother = false;
while (n-- > firstNum){
if ($('div.dacollectextraheader[data-collectnum="' + (n + 1) + '"]:visible').length > 0){
if (!$('div.dacollectextraheader[data-collectnum="' + (n + 1) + '"]').find(".dacollectadd").hasClass('dainvisible')){
seenAddAnother = true;
}
var current = $('div.dacollectextraheader[data-collectnum="' + n + '"]');
if (seenAddAnother && !$(current).find(".dacollectadd").hasClass('dainvisible')){
$(current).find(".dacollectadd").addClass('dainvisible');
$(current).find(".dacollectunremove").removeClass('dainvisible');
}
}
}
}
var daNotificationContainer = """ + json.dumps(NOTIFICATION_CONTAINER) + """;
var daNotificationMessage = """ + json.dumps(NOTIFICATION_MESSAGE) + """;
Object.defineProperty(String.prototype, "daSprintf", {
value: function () {
var args = Array.from(arguments),
i = 0;
function defaultNumber(iValue) {
return iValue != undefined && !isNaN(iValue) ? iValue : "0";
}
function defaultString(iValue) {
return iValue == undefined ? "" : "" + iValue;
}
return this.replace(
/%%|%([+\\-])?([^1-9])?(\\d+)?(\\.\\d+)?([deEfhHioQqs])/g,
function (match, sign, filler, scale, precision, type) {
var strOut, space, value;
var asNumber = false;
if (match == "%%") return "%";
if (i >= args.length) return match;
value = args[i];
while (Array.isArray(value)) {
args.splice(i, 1);
for (var j = i; value.length > 0; j++)
args.splice(j, 0, value.shift());
value = args[i];
}
i++;
if (filler == undefined) filler = " "; // default
if (scale == undefined && !isNaN(filler)) {
scale = filler;
filler = " ";
}
if (sign == undefined) sign = "sqQ".indexOf(type) >= 0 ? "+" : "-"; // default
if (scale == undefined) scale = 0; // default
if (precision == undefined) precision = ".0"; // default
scale = parseInt(scale);
precision = parseInt(precision.substr(1));
switch (type) {
case "d":
case "i":
// decimal integer
asNumber = true;
strOut = parseInt(defaultNumber(value));
if (precision > 0) strOut += "." + "0".repeat(precision);
break;
case "e":
case "E":
// float in exponential notation
asNumber = true;
strOut = parseFloat(defaultNumber(value));
if (precision == 0) strOut = strOut.toExponential();
else strOut = strOut.toExponential(precision);
if (type == "E") strOut = strOut.replace("e", "E");
break;
case "f":
// decimal float
asNumber = true;
strOut = parseFloat(defaultNumber(value));
if (precision != 0) strOut = strOut.toFixed(precision);
break;
case "o":
case "h":
case "H":
// Octal or Hexagesimal integer notation
strOut =
"\\\\" +
(type == "o" ? "0" : type) +
parseInt(defaultNumber(value)).toString(type == "o" ? 8 : 16);
break;
case "q":
// single quoted string
strOut = "'" + defaultString(value) + "'";
break;
case "Q":
// double quoted string
strOut = '"' + defaultString(value) + '"';
break;
default:
// string
strOut = defaultString(value);
break;
}
if (typeof strOut != "string") strOut = "" + strOut;
if ((space = strOut.length) < scale) {
if (asNumber) {
if (sign == "-") {
if (strOut.indexOf("-") < 0)
strOut = filler.repeat(scale - space) + strOut;
else
strOut =
"-" +
filler.repeat(scale - space) +
strOut.replace("-", "");
} else {
if (strOut.indexOf("-") < 0)
strOut = "+" + filler.repeat(scale - space - 1) + strOut;
else
strOut =
"-" +
filler.repeat(scale - space) +
strOut.replace("-", "");
}
} else {
if (sign == "-") strOut = filler.repeat(scale - space) + strOut;
else strOut = strOut + filler.repeat(scale - space);
}
} else if (asNumber && sign == "+" && strOut.indexOf("-") < 0)
strOut = "+" + strOut;
return strOut;
}
);
},
});
Object.defineProperty(window, "daSprintf", {
value: function (str, ...rest) {
if (typeof str == "string")
return String.prototype.daSprintf.apply(str, rest);
return "";
},
});
function daGoToAnchor(target){
scrollTarget = $(target).first().offset().top - 60;
if (scrollTarget != null){
$("html, body").animate({
scrollTop: scrollTarget
}, 500);
}
}
function atou(b64) {
return decodeURIComponent(escape(atob(b64)));
}
function utoa(data) {
return btoa(unescape(encodeURIComponent(data)));
}
function dabtoa(str) {
return utoa(str).replace(/[\\n=]/g, '');
}
function daatob(str) {
return atou(str);
}
function getFields(){
var allFields = [];
for (var rawFieldName in daVarLookup){
if (daVarLookup.hasOwnProperty(rawFieldName)){
var fieldName = atou(rawFieldName);
if (allFields.indexOf(fieldName) == -1){
allFields.push(fieldName);
}
}
}
return allFields;
}
var daGetFields = getFields;
function daAppendIfExists(fieldName, theArray){
var elem = $("[name='" + fieldName + "']");
if (elem.length > 0){
for (var i = 0; i < theArray.length; ++i){
if (theArray[i] == elem[0]){
return;
}
}
theArray.push(elem[0]);
}
}
function getField(fieldName, notInDiv){
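        // Find the DOM element corresponding to an interview variable name, preferring enabled elements not hidden by show-if logic.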
if (daVarLookupCheckbox[fieldName]){
var n = daVarLookupCheckbox[fieldName].length;
for (var i = 0; i < n; ++i){
var elem = daVarLookupCheckbox[fieldName][i].checkboxes[0].elem;
if (!$(elem).prop('disabled')){
var showifParents = $(elem).parents(".dajsshowif,.dashowif");
if (showifParents.length == 0 || $(showifParents[0]).data("isVisible") == '1'){
if (notInDiv && $.contains(notInDiv, elem)){
continue;
}
return daVarLookupCheckbox[fieldName][i].elem;
}
}
}
}
if (daVarLookupSelect[fieldName]){
var n = daVarLookupSelect[fieldName].length;
for (var i = 0; i < n; ++i){
var elem = daVarLookupSelect[fieldName][i].select;
if (!$(elem).prop('disabled')){
var showifParents = $(elem).parents(".dajsshowif,.dashowif");
if (showifParents.length == 0 || $(showifParents[0]).data("isVisible") == '1'){
if (notInDiv && $.contains(notInDiv, elem)){
continue;
}
return elem;
}
}
}
}
var fieldNameEscaped = dabtoa(fieldName);
var possibleElements = [];
daAppendIfExists(fieldNameEscaped, possibleElements);
if (daVarLookupMulti.hasOwnProperty(fieldNameEscaped)){
for (var i = 0; i < daVarLookupMulti[fieldNameEscaped].length; ++i){
daAppendIfExists(daVarLookupMulti[fieldNameEscaped][i], possibleElements);
}
}
var returnVal = null;
for (var i = 0; i < possibleElements.length; ++i){
if (!$(possibleElements[i]).prop('disabled') || $(possibleElements[i]).parents(".file-input.is-locked").length > 0 ){
var showifParents = $(possibleElements[i]).parents(".dajsshowif,.dashowif");
if (showifParents.length == 0 || $(showifParents[0]).data("isVisible") == '1'){
if (notInDiv && $.contains(notInDiv, possibleElements[i])){
continue;
}
returnVal = possibleElements[i];
}
}
}
if ($(returnVal).hasClass('da-to-labelauty') && $(returnVal).parents('fieldset').length > 0){
var fieldSet = $(returnVal).parents('fieldset')[0];
if (!$(fieldSet).hasClass('da-field-checkbox') && !$(fieldSet).hasClass('da-field-checkboxes')){
return fieldSet;
}
}
return returnVal;
}
var daGetField = getField;
function setChoices(fieldName, choices){
var elem = daGetField(fieldName);
if (elem == null){
console.log("setChoices: reference to non-existent field " + fieldName);
return;
}
var isCombobox = ($(elem).attr('type') == "hidden" && $(elem).parents('.combobox-container').length > 0);
if (isCombobox){
var comboInput = $(elem).parents('.combobox-container').first().find('input.combobox').first();
var comboObject = daComboBoxes[$(comboInput).attr('id')];
var oldComboVal = comboObject.$target.val();
elem = comboObject.$source;
}
if ($(elem).prop('tagName') != "SELECT"){
console.log("setField: field " + fieldName + " is not a dropdown field");
return;
}
var oldVal = $(elem).val();
$(elem).find("option[value!='']").each(function(){
$(this).remove();
});
var n = choices.length;
for (var i = 0; i < n; i++){
var opt = $("<option>");
opt.val(choices[i][0]);
opt.text(choices[i][1]);
if (oldVal == choices[i][0]){
opt.attr("selected", "selected")
}
$(elem).append(opt);
}
if (isCombobox){
comboObject.refresh();
comboObject.clearTarget();
if (oldComboVal != ""){
daSetField(fieldName, oldComboVal);
}
}
}
var daSetChoices = setChoices;
function setField(fieldName, theValue){
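        // Set the value of the field for an interview variable, handling checkboxes, radio buttons, comboboxes, multi-selects, and plain inputs.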
var elem = daGetField(fieldName);
if (elem == null){
console.log('setField: reference to non-existent field ' + fieldName);
return;
}
if ($(elem).prop('tagName') == "FIELDSET" && $(elem).hasClass("da-field-radio")){
elem = $(elem).find('input')[0];
}
if ($(elem).attr('type') == "checkbox"){
if (theValue){
if ($(elem).prop('checked') != true){
$(elem).click();
}
}
else{
if ($(elem).prop('checked') != false){
$(elem).click();
}
}
}
else if ($(elem).attr('type') == "radio"){
var fieldNameEscaped = $(elem).attr('name').replace(/(:|\.|\[|\]|,|=)/g, "\\\\$1");
var wasSet = false;
if (theValue === true){
theValue = 'True';
}
if (theValue === false){
theValue = 'False';
}
$("input[name='" + fieldNameEscaped + "']").each(function(){
if ($(this).val() == theValue){
if ($(this).prop('checked') != true){
$(this).prop('checked', true);
$(this).trigger('change');
}
wasSet = true;
return false;
}
});
if (!wasSet){
console.log('setField: could not set radio button ' + fieldName + ' to ' + theValue);
}
}
else if ($(elem).attr('type') == "hidden"){
if ($(elem).val() != theValue){
if ($(elem).parents('.combobox-container').length > 0){
var comboInput = $(elem).parents('.combobox-container').first().find('input.combobox').first();
daComboBoxes[$(comboInput).attr('id')].manualSelect(theValue);
}
else{
$(elem).val(theValue);
$(elem).trigger('change');
}
}
}
else if ($(elem).prop('tagName') == "FIELDSET" && $(elem).hasClass("da-field-checkboxes")){
if (!Array.isArray(theValue)){
throw new Error('setField: value must be an array');
}
var n = theValue.length;
$(elem).find('input').each(function(){
if ($(this).hasClass('danota-checkbox')){
$(this).prop('checked', n == 0);
$(this).trigger('change');
return;
}
if ($(this).hasClass('daaota-checkbox')){
$(this).prop('checked', false);
$(this).trigger('change');
return;
}
if ($(this).attr('name').substr(0, 7) === '_ignore'){
return;
}
var theVal = atou($(this).data('cbvalue'));
if ($(elem).hasClass("daobject")){
theVal = atou(theVal);
}
var oldVal = $(this).prop('checked') == true;
var newVal = false;
for (var i = 0; i < n; ++i){
if (theValue[i] == theVal){
newVal = true;
}
}
if (oldVal != newVal){
$(this).click();
}
});
}
else if ($(elem).prop('tagName') == "SELECT" && $(elem).hasClass('damultiselect')){
if (daVarLookupSelect[fieldName]){
var n = daVarLookupSelect[fieldName].length;
for (var i = 0; i < n; ++i){
if (daVarLookupSelect[fieldName][i].select === elem){
var oldValue = $(daVarLookupSelect[fieldName][i].option).prop('selected') == true;
if (oldValue != theValue){
$(daVarLookupSelect[fieldName][i].option).prop('selected', theValue);
$(elem).trigger('change');
}
}
}
}
else{
if (!Array.isArray(theValue)){
throw new Error('setField: value must be an array');
}
var n = theValue.length;
var changed = false;
$(elem).find('option').each(function(){
var thisVal = daVarLookupOption[$(this).val()];
var oldVal = $(this).prop('selected') == true;
var newVal = false;
for (var i = 0; i < n; ++i){
if (thisVal == theValue[i]){
newVal = true;
}
}
if (newVal !== oldVal){
changed = true;
$(this).prop('selected', newVal);
}
});
if (changed){
$(elem).trigger('change');
}
}
}
else{
if ($(elem).val() != theValue){
$(elem).val(theValue);
$(elem).trigger('change');
}
}
}
var daSetField = setField;
function val(fieldName){
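        // Return the current value of the field for an interview variable, decoding object references and collecting checkbox selections as needed.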
var elem = daGetField(fieldName);
if (elem == null){
return null;
}
if ($(elem).prop('tagName') == "FIELDSET" && $(elem).hasClass("da-field-radio")){
elem = $(elem).find('input')[0];
}
if ($(elem).attr('type') == "checkbox"){
if ($(elem).prop('checked')){
theVal = true;
}
else{
theVal = false;
}
}
else if ($(elem).attr('type') == "radio"){
var fieldNameEscaped = $(elem).attr('name').replace(/(:|\.|\[|\]|,|=)/g, "\\\\$1");
theVal = $("input[name='" + fieldNameEscaped + "']:checked").val();
if (typeof(theVal) == 'undefined'){
theVal = null;
}
else{
if ($(elem).hasClass("daobject")){
theVal = atou(theVal);
}
else if (theVal == 'True'){
theVal = true;
}
else if (theVal == 'False'){
theVal = false;
}
}
}
else if ($(elem).prop('tagName') == "FIELDSET" && $(elem).hasClass("da-field-checkboxes")){
var cbSelected = [];
$(elem).find('input').each(function(){
if ($(this).attr('name').substr(0,7) === '_ignore'){
return;
}
var theVal = atou($(this).data('cbvalue'));
if ($(elem).hasClass("daobject")){
theVal = atou(theVal);
}
if ($(this).prop('checked')){
cbSelected.push(theVal);
}
});
return cbSelected;
}
else if ($(elem).prop('tagName') == "SELECT" && $(elem).hasClass('damultiselect')){
if (daVarLookupSelect[fieldName]){
var n = daVarLookupSelect[fieldName].length;
for (var i = 0; i < n; ++i){
if (daVarLookupSelect[fieldName][i].select === elem){
return $(daVarLookupSelect[fieldName][i].option).prop('selected');
}
}
}
else{
var selectedVals = [];
$(elem).find('option').each(function(){
if ($(this).prop('selected')){
if (daVarLookupOption[$(this).val()]){
selectedVals.push(daVarLookupOption[$(this).val()]);
}
}
});
return selectedVals;
}
}
else if ($(elem).prop('tagName') == "SELECT" && $(elem).hasClass('daobject')){
theVal = atou($(elem).val());
}
else{
theVal = $(elem).val();
}
return theVal;
}
var da_val = val;
window.daTurnOnControl = function(){
//console.log("Turning on control");
daSendChanges = true;
daNoConnectionCount = 0;
daResetPushChanges();
daSocket.emit('observerStartControl', {uid: """ + json.dumps(uid) + """, i: """ + json.dumps(i) + """, userid: """ + json.dumps(str(userid)) + """});
}
window.daTurnOffControl = function(){
//console.log("Turning off control");
if (!daSendChanges){
//console.log("Already turned off");
return;
}
daSendChanges = false;
daConfirmed = false;
daStopPushChanges();
daSocket.emit('observerStopControl', {uid: """ + json.dumps(uid) + """, i: """ + json.dumps(i) + """, userid: """ + json.dumps(str(userid)) + """});
return;
}
function daInjectTrim(handler){
return function (element, event) {
if (element.tagName === "TEXTAREA" || (element.tagName === "INPUT" && element.type !== "password" && element.type !== "date" && element.type !== "datetime" && element.type !== "file")) {
setTimeout(function(){
element.value = $.trim(element.value);
}, 10);
}
return handler.call(this, element, event);
};
}
function daInvalidHandler(form, validator){
var errors = validator.numberOfInvalids();
var scrollTarget = null;
if (errors && $(validator.errorList[0].element).parents('.da-form-group').length > 0) {
if (daJsEmbed){
scrollTarget = $(validator.errorList[0].element).parents('.da-form-group').first().position().top - 60;
}
else{
scrollTarget = $(validator.errorList[0].element).parents('.da-form-group').first().offset().top - 60;
}
}
if (scrollTarget != null){
if (daJsEmbed){
$(daTargetDiv).animate({
scrollTop: scrollTarget
}, 1000);
}
else{
$("html, body").animate({
scrollTop: scrollTarget
}, 1000);
}
}
}
function daValidationHandler(form){
//console.log("observer: daValidationHandler");
return(false);
}
function daStopPushChanges(){
if (daObserverChangesInterval != null){
clearInterval(daObserverChangesInterval);
}
}
function daResetPushChanges(){
if (daObserverChangesInterval != null){
clearInterval(daObserverChangesInterval);
}
daObserverChangesInterval = setInterval(daPushChanges, """ + str(CHECKIN_INTERVAL) + """);
}
function daOnChange(){
}
function daPushChanges(){
//console.log("Pushing changes");
if (daObserverChangesInterval != null){
clearInterval(daObserverChangesInterval);
}
if (!daSendChanges || !daConnected){
return;
}
daObserverChangesInterval = setInterval(daPushChanges, """ + str(CHECKIN_INTERVAL) + """);
daSocket.emit('observerChanges', {uid: """ + json.dumps(uid) + """, i: """ + json.dumps(i) + """, userid: """ + json.dumps(str(userid)) + """, parameters: JSON.stringify($("#daform").serializeArray())});
}
function daProcessAjaxError(xhr, status, error){
if (xhr.responseType == undefined || xhr.responseType == '' || xhr.responseType == 'text'){
var theHtml = xhr.responseText;
if (theHtml == undefined){
$(daTargetDiv).html("error");
}
else{
theHtml = theHtml.replace(/<script[^>]*>[^<]*<\/script>/g, '');
$(daTargetDiv).html(theHtml);
}
if (daJsEmbed){
$(daTargetDiv)[0].scrollTo(0, 1);
}
else{
window.scrollTo(0, 1);
}
}
else {
console.log("daProcessAjaxError: response was not text");
}
}
function daAddScriptToHead(src){
var head = document.getElementsByTagName("head")[0];
var script = document.createElement("script");
script.type = "text/javascript";
script.src = src;
script.async = true;
script.defer = true;
head.appendChild(script);
}
function daSubmitter(event){
if (!daSendChanges || !daConnected){
event.preventDefault();
return false;
}
var theAction = null;
if ($(this).hasClass('da-review-action')){
theAction = $(this).data('action');
}
var embeddedJs = $(this).data('js');
var embeddedAction = $(this).data('embaction');
var linkNum = $(this).data('linknum');
var theId = $(this).attr('id');
if (theId == 'dapagetitle'){
theId = 'daquestionlabel';
}
var theName = $(this).attr('name');
var theValue = $(this).val();
var skey;
if (linkNum){
skey = 'a[data-linknum="' + linkNum + '"]';
}
else if (embeddedAction){
skey = 'a[data-embaction="' + embeddedAction.replace(/(:|\.|\[|\]|,|=|\/|\")/g, '\\\\$1') + '"]';
}
else if (theAction){
skey = 'a[data-action="' + theAction.replace(/(:|\.|\[|\]|,|=|\/|\")/g, '\\\\$1') + '"]';
}
else if (theId){
skey = '#' + theId.replace(/(:|\.|\[|\]|,|=|\/|\")/g, '\\\\$1');
}
else if (theName){
skey = '#' + $(this).parents("form").attr('id') + ' ' + $(this).prop('tagName').toLowerCase() + '[name="' + theName.replace(/(:|\.|\[|\]|,|=|\/)/g, '\\\\$1') + '"]';
if (typeof theValue !== 'undefined'){
skey += '[value="' + theValue + '"]'
}
}
else{
skey = '#' + $(this).parents("form").attr('id') + ' ' + $(this).prop('tagName').toLowerCase() + '[type="submit"]';
}
//console.log("Need to click on " + skey);
if (daObserverChangesInterval != null && embeddedJs == null && theId != "dabackToQuestion" && theId != "dahelptoggle" && theId != "daquestionlabel"){
clearInterval(daObserverChangesInterval);
}
daSocket.emit('observerChanges', {uid: """ + json.dumps(uid) + """, i: """ + json.dumps(i) + """, userid: """ + json.dumps(str(userid)) + """, clicked: skey, parameters: JSON.stringify($("#daform").serializeArray())});
if (embeddedJs != null){
//console.log("Running the embedded js");
daGlobalEval(decodeURIComponent(embeddedJs));
}
if (theId != "dabackToQuestion" && theId != "dahelptoggle" && theId != "daquestionlabel"){
event.preventDefault();
return false;
}
}
function daAdjustInputWidth(e){
var contents = $(this).val();
var leftBracket = new RegExp('<', 'g');
var rightBracket = new RegExp('>', 'g');
        contents = contents.replace(/&/g,'&amp;').replace(leftBracket,'&lt;').replace(rightBracket,'&gt;').replace(/ /g, '&nbsp;');
$('<span class="dainput-embedded" id="dawidth">').html( contents ).appendTo('#daquestion');
$("#dawidth").css('min-width', $(this).css('min-width'));
$("#dawidth").css('background-color', $(daTargetDiv).css('background-color'));
$("#dawidth").css('color', $(daTargetDiv).css('background-color'));
$(this).width($('#dawidth').width() + 16);
setTimeout(function(){
$("#dawidth").remove();
}, 0);
}
function daShowHelpTab(){
//$('#dahelptoggle').tab('show');
}
function flash(message, priority, clear){
if (priority == null){
priority = 'info'
}
if (!$("#daflash").length){
$(daTargetDiv).append(daSprintf(daNotificationContainer, ""));
}
if (clear){
$("#daflash").empty();
}
if (message != null){
$("#daflash").append(daSprintf(daNotificationMessage, priority, message));
if (priority == 'success'){
setTimeout(function(){
$("#daflash .alert-success").hide(300, function(){
$(this).remove();
});
}, 3000);
}
}
}
var da_flash = flash;
function JSON_stringify(s){
var json = JSON.stringify(s);
return json.replace(/[\\u007f-\\uffff]/g,
function(c) {
return '\\\\u'+('0000'+c.charCodeAt(0).toString(16)).slice(-4);
}
);
}
function url_action(action, args){
//redo?
if (args == null){
args = {};
}
data = {action: action, arguments: args};
var url;
if (daJsEmbed){
url = daPostURL + "&action=" + encodeURIComponent(utoa(JSON_stringify(data)))
}
else{
url = daLocationBar + "&action=" + encodeURIComponent(utoa(JSON_stringify(data)))
}
return url;
}
var da_url_action = url_action;
function action_call(action, args, callback, forgetPrior=false){
//redo?
if (args == null){
args = {};
}
if (forgetPrior){
args = {_action: action, _arguments: args};
action = '_da_priority_action';
}
if (callback == null){
callback = function(){};
}
var data = {action: action, arguments: args};
var url;
if (daJsEmbed){
url = daPostURL + "&action=" + encodeURIComponent(utoa(JSON_stringify(data)))
}
else{
url = daLocationBar + "&action=" + encodeURIComponent(utoa(JSON_stringify(data)))
}
return $.ajax({
type: "GET",
url: url,
success: callback,
error: function(xhr, status, error){
setTimeout(function(){
daProcessAjaxError(xhr, status, error);
}, 0);
}
});
}
var da_action_call = action_call;
var url_action_call = action_call;
function action_perform(action, args, forgetPrior=false){
//redo
if (args == null){
args = {};
}
if (forgetPrior){
args = {_action: action, _arguments: args};
action = '_da_priority_action';
}
var data = {action: action, arguments: args};
daSpinnerTimeout = setTimeout(daShowSpinner, 1000);
return $.ajax({
type: "POST",
url: daLocationBar,
data: $.param({_action: utoa(JSON_stringify(data)), csrf_token: daCsrf, ajax: 1}),
success: function(data){
setTimeout(function(){
daProcessAjax(data, $("#daform"), 1);
}, 0);
},
error: function(xhr, status, error){
setTimeout(function(){
daProcessAjaxError(xhr, status, error);
}, 0);
},
dataType: 'json'
});
}
var da_action_perform = action_perform;
var url_action_perform = action_perform;
function action_perform_with_next(action, args, next_data, forgetPrior=false){
//redo
//console.log("action_perform_with_next: " + action + " | " + next_data)
if (args == null){
args = {};
}
if (forgetPrior){
args = {_action: action, _arguments: args};
action = '_da_priority_action';
}
var data = {action: action, arguments: args};
daSpinnerTimeout = setTimeout(daShowSpinner, 1000);
return $.ajax({
type: "POST",
url: daLocationBar,
data: $.param({_action: utoa(JSON_stringify(data)), _next_action_to_set: utoa(JSON_stringify(next_data)), csrf_token: daCsrf, ajax: 1}),
success: function(data){
setTimeout(function(){
daProcessAjax(data, $("#daform"), 1);
}, 0);
},
error: function(xhr, status, error){
setTimeout(function(){
daProcessAjaxError(xhr, status, error);
}, 0);
},
dataType: 'json'
});
}
var da_action_perform_with_next = action_perform_with_next;
var url_action_perform_with_next = action_perform_with_next;
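// Fetch all of the interview session variables as JSON and pass the response to "callback".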
function get_interview_variables(callback){
if (callback == null){
callback = function(){};
}
return $.ajax({
type: "GET",
url: """ + '"' + url_for('get_variables', i=i) + '"' + """,
success: callback,
error: function(xhr, status, error){
setTimeout(function(){
daProcessAjaxError(xhr, status, error);
}, 0);
}
});
}
var da_get_interview_variables = get_interview_variables;
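// (Re)attach all event handlers and widgets for the current question screen; called on initial page load and again every time the screen is redrawn from an ajax or websocket update.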
function daInitialize(doScroll){
daComboBoxes = Object();
daVarLookupSelect = Object();
daVarLookupCheckbox = Object();
if (daSpinnerTimeout != null){
clearTimeout(daSpinnerTimeout);
daSpinnerTimeout = null;
}
if (daShowingSpinner){
daHideSpinner();
}
$('button[type="submit"], input[type="submit"], a.da-review-action, #dabackToQuestion, #daquestionlabel, #dapagetitle, #dahelptoggle, a[data-linknum], a[data-embaction], #dabackbutton').click(daSubmitter);
$(".da-to-labelauty").labelauty({ class: "labelauty da-active-invisible dafullwidth" });
//$(".da-to-labelauty-icon").labelauty({ label: false });
var navMain = $("#danavbar-collapse");
navMain.on("click", "a", null, function () {
if (!($(this).hasClass("dropdown-toggle"))){
navMain.collapse('hide');
}
});
var daPopoverTriggerList = [].slice.call(document.querySelectorAll('[data-bs-toggle="popover"]'));
var daPopoverList = daPopoverTriggerList.map(function (daPopoverTriggerEl) {
return new bootstrap.Popover(daPopoverTriggerEl, {trigger: "focus", html: true});
});
$("input.daaota-checkbox").click(function(){
var anyChanged = false;
var firstEncountered = null;
$(this).parents('fieldset').find('input.danon-nota-checkbox').each(function(){
if (firstEncountered === null){
firstEncountered = this;
}
var existing_val = $(this).prop('checked');
$(this).prop('checked', true);
if (existing_val != true){
$(this).trigger('change');
anyChanged = true;
}
});
if (firstEncountered !== null && anyChanged === false){
$(firstEncountered).trigger('change');
}
$(this).parents('fieldset').find('input.danota-checkbox').each(function(){
var existing_val = $(this).prop('checked');
$(this).prop('checked', false);
if (existing_val != false){
$(this).trigger('change');
}
});
});
$("input.danota-checkbox").click(function(){
var anyChanged = false;
var firstEncountered = null;
$(this).parents('fieldset').find('input.danon-nota-checkbox').each(function(){
if (firstEncountered === null){
firstEncountered = this;
}
var existing_val = $(this).prop('checked');
$(this).prop('checked', false);
if (existing_val != false){
$(this).trigger('change');
anyChanged = true;
}
});
if (firstEncountered !== null && anyChanged === false){
$(firstEncountered).trigger('change');
}
$(this).parents('fieldset').find('input.daaota-checkbox').each(function(){
var existing_val = $(this).prop('checked');
$(this).prop('checked', false);
if (existing_val != false){
$(this).trigger('change');
}
});
});
$("input.danon-nota-checkbox").click(function(){
$(this).parents('fieldset').find('input.danota-checkbox').each(function(){
var existing_val = $(this).prop('checked');
$(this).prop('checked', false);
if (existing_val != false){
$(this).trigger('change');
}
});
if (!$(this).prop('checked')){
$(this).parents('fieldset').find('input.daaota-checkbox').each(function(){
var existing_val = $(this).prop('checked');
$(this).prop('checked', false);
if (existing_val != false){
$(this).trigger('change');
}
});
}
});
$("input.dainput-embedded").on('keyup', daAdjustInputWidth);
$("input.dainput-embedded").each(daAdjustInputWidth);
// $(".dahelptrigger").click(function(e) {
// e.preventDefault();
// $(this).tab('show');
// });
//$("#daquestionlabel").click(function(e) {
// e.preventDefault();
// $(this).tab('show');
//});
$('#dapagetitle').click(function(e) {
if ($(this).attr('href') == '#'){
e.preventDefault();
//$('#daquestionlabel').tab('show');
}
});
$('select.damultiselect').each(function(){
var isObject = $(this).hasClass('daobject');
var varname = atou($(this).data('varname'));
var theSelect = this;
$(this).find('option').each(function(){
var theVal = atou($(this).data('valname'));
if (isObject){
theVal = atou(theVal);
}
var key = varname + '["' + theVal + '"]';
if (!daVarLookupSelect[key]){
daVarLookupSelect[key] = [];
}
daVarLookupSelect[key].push({'select': theSelect, 'option': this, 'value': theVal});
key = varname + "['" + theVal + "']"
if (!daVarLookupSelect[key]){
daVarLookupSelect[key] = [];
}
daVarLookupSelect[key].push({'select': theSelect, 'option': this, 'value': theVal});
});
})
$('fieldset.da-field-checkboxes').each(function(){
var isObject = $(this).hasClass('daobject');
var varname = atou($(this).data('varname'));
var cbList = [];
if (!daVarLookupCheckbox[varname]){
daVarLookupCheckbox[varname] = [];
}
$(this).find('input').each(function(){
if ($(this).attr('name').substr(0,7) === '_ignore'){
return;
}
var theVal = atou($(this).data('cbvalue'));
var theType = $(this).data('cbtype');
var key;
if (theType == 'R'){
key = varname + '[' + theVal + ']';
}
else {
key = varname + '["' + theVal + '"]';
}
cbList.push({'variable': key, 'value': theVal, 'type': theType, 'elem': this})
});
daVarLookupCheckbox[varname].push({'elem': this, 'checkboxes': cbList, 'isObject': isObject});
});
$('.dacurrency').each(function(){
var theVal = $(this).val().toString();
if (theVal.indexOf('.') >= 0 || theVal.indexOf(',') >= 0){
var num = parseFloat(theVal);
var cleanNum = num.toFixed(""" + str(daconfig.get('currency decimal places', 2)) + """);
if (cleanNum != 'NaN') {
  $(this).val(cleanNum);
}
}
});
$('.dacurrency').on('blur', function(){
var theVal = $(this).val().toString();
if (theVal.indexOf('.') >= 0 || theVal.indexOf(',') >= 0){
var num = parseFloat(theVal);
var cleanNum = num.toFixed(""" + str(daconfig.get('currency decimal places', 2)) + """);
if (cleanNum != 'NaN') {
$(this).val(cleanNum);
}
}
});
$("#dahelp").on("shown.bs.tab", function(){
window.scrollTo(0, 1);
$("#dahelptoggle").removeClass('daactivetext')
$("#dahelptoggle").blur();
});
$("#dasourcetoggle").on("click", function(){
$(this).parent().toggleClass("active");
$(this).blur();
});
$('#dabackToQuestion').click(function(event){
$('#daquestionlabel').tab('show');
});
daShowIfInProcess = true;
var daTriggerQueries = [];
var daInputsSeen = {};
function daOnlyUnique(value, index, self){
return self.indexOf(value) === index;
}
$(".dajsshowif").each(function(){
var showIfDiv = this;
var jsInfo = JSON.parse(atou($(this).data('jsshowif')));
var showIfSign = jsInfo['sign'];
var showIfMode = jsInfo['mode'];
var jsExpression = jsInfo['expression'];
jsInfo['vars'].forEach(function(infoItem, i){
var showIfVars = [];
var initShowIfVar = utoa(infoItem).replace(/[\\n=]/g, '');
var initShowIfVarEscaped = initShowIfVar.replace(/(:|\.|\[|\]|,|=)/g, "\\\\$1");
var elem = $("[name='" + initShowIfVarEscaped + "']");
if (elem.length > 0){
showIfVars.push(initShowIfVar);
}
if (daVarLookupMulti.hasOwnProperty(initShowIfVar)){
for (var j = 0; j < daVarLookupMulti[initShowIfVar].length; j++){
var altShowIfVar = daVarLookupMulti[initShowIfVar][j];
var altShowIfVarEscaped = altShowIfVar.replace(/(:|\.|\[|\]|,|=)/g, "\\\\$1");
var altElem = $("[name='" + altShowIfVarEscaped + "']");
if (altElem.length > 0 && !$.contains(showIfDiv, altElem[0])){
showIfVars.push(altShowIfVar);
}
}
}
if (showIfVars.length == 0){
console.log("ERROR: reference to non-existent field " + infoItem);
}
showIfVars.forEach(function(showIfVar){
var showIfVarEscaped = showIfVar.replace(/(:|\.|\[|\]|,|=)/g, "\\\\$1");
var varToUse = infoItem;
var showHideDiv = function(speed){
var elem = daGetField(varToUse);
if (elem != null && !$(elem).parents('.da-form-group').first().is($(this).parents('.da-form-group').first())){
return;
}
var resultt = eval(jsExpression);
if(resultt){
if (showIfSign){
if ($(showIfDiv).data('isVisible') != '1'){
daShowHideHappened = true;
}
if (showIfMode == 0){
$(showIfDiv).show(speed);
}
$(showIfDiv).data('isVisible', '1');
$(showIfDiv).find('input, textarea, select').prop("disabled", false);
$(showIfDiv).find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].enable();
});
$(showIfDiv).find('input.daslider').each(function(){
$(this).slider('enable');
});
$(showIfDiv).find('input.dafile').each(function(){
$(this).data("fileinput").enable();
});
}
else{
if ($(showIfDiv).data('isVisible') != '0'){
daShowHideHappened = true;
}
if (showIfMode == 0){
$(showIfDiv).hide(speed);
}
$(showIfDiv).data('isVisible', '0');
$(showIfDiv).find('input, textarea, select').prop("disabled", true);
$(showIfDiv).find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].disable();
});
$(showIfDiv).find('input.daslider').each(function(){
$(this).slider('disable');
});
$(showIfDiv).find('input.dafile').each(function(){
$(this).data("fileinput").disable();
});
}
}
else{
if (showIfSign){
if ($(showIfDiv).data('isVisible') != '0'){
daShowHideHappened = true;
}
if (showIfMode == 0){
$(showIfDiv).hide(speed);
}
$(showIfDiv).data('isVisible', '0');
$(showIfDiv).find('input, textarea, select').prop("disabled", true);
$(showIfDiv).find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].disable();
});
$(showIfDiv).find('input.daslider').each(function(){
$(this).slider('disable');
});
$(showIfDiv).find('input.dafile').each(function(){
$(this).data("fileinput").disable();
});
}
else{
if ($(showIfDiv).data('isVisible') != '1'){
daShowHideHappened = true;
}
if (showIfMode == 0){
$(showIfDiv).show(speed);
}
$(showIfDiv).data('isVisible', '1');
$(showIfDiv).find('input, textarea, select').prop("disabled", false);
$(showIfDiv).find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].enable();
});
$(showIfDiv).find('input.daslider').each(function(){
$(this).slider('enable');
});
$(showIfDiv).find('input.dafile').each(function(){
$(this).data("fileinput").enable();
});
}
}
var leader = false;
if (!daShowIfInProcess){
daShowIfInProcess = true;
daInputsSeen = {};
leader = true;
}
$(showIfDiv).find(":input").not("[type='file']").each(function(){
if (!daInputsSeen.hasOwnProperty($(this).attr('id'))){
$(this).trigger('change');
}
daInputsSeen[$(this).attr('id')] = true;
});
if (leader){
daShowIfInProcess = false;
}
};
var showHideDivImmediate = function(){
showHideDiv.apply(this, [null]);
}
var showHideDivFast = function(){
showHideDiv.apply(this, ['fast']);
}
daTriggerQueries.push("#" + showIfVarEscaped);
daTriggerQueries.push("input[type='radio'][name='" + showIfVarEscaped + "']");
daTriggerQueries.push("input[type='checkbox'][name='" + showIfVarEscaped + "']");
$("#" + showIfVarEscaped).change(showHideDivFast);
$("input[type='radio'][name='" + showIfVarEscaped + "']").change(showHideDivFast);
$("input[type='checkbox'][name='" + showIfVarEscaped + "']").change(showHideDivFast);
$("input.dafile[name='" + showIfVarEscaped + "']").on('filecleared', showHideDivFast);
$("#" + showIfVarEscaped).on('daManualTrigger', showHideDivImmediate);
$("input[type='radio'][name='" + showIfVarEscaped + "']").on('daManualTrigger', showHideDivImmediate);
$("input[type='checkbox'][name='" + showIfVarEscaped + "']").on('daManualTrigger', showHideDivImmediate);
});
});
});
$(".dashowif").each(function(){
var showIfVars = [];
var showIfSign = $(this).data('showif-sign');
var showIfMode = parseInt($(this).data('showif-mode'));
var initShowIfVar = $(this).data('showif-var');
var varName = atou(initShowIfVar);
var elem = [];
if (varName.endsWith('[nota]') || varName.endsWith('[aota]')){
var signifier = varName.endsWith('[nota]') ? 'nota' : 'aota';
var cbVarName = varName.replace(/\[[na]ota\]$/, '');
$('fieldset.da-field-checkboxes').each(function(){
var thisVarName = atou($(this).data('varname'));
if (thisVarName == cbVarName){
elem = $(this).find('input.da' + signifier + '-checkbox');
initShowIfVar = $(elem).attr('name');
}
});
}
else {
var initShowIfVarEscaped = initShowIfVar.replace(/(:|\.|\[|\]|,|=)/g, "\\\\$1");
elem = $("[name='" + initShowIfVarEscaped + "']");
}
if (elem.length > 0){
showIfVars.push(initShowIfVar);
}
if (daVarLookupMulti.hasOwnProperty(initShowIfVar)){
var n = daVarLookupMulti[initShowIfVar].length;
for (var i = 0; i < n; i++){
var altShowIfVar = daVarLookupMulti[initShowIfVar][i];
var altShowIfVarEscaped = altShowIfVar.replace(/(:|\.|\[|\]|,|=)/g, "\\\\$1");
var altElem = $("[name='" + altShowIfVarEscaped + "']");
if (altElem.length > 0 && !$.contains(this, altElem[0])){
showIfVars.push(altShowIfVar);
}
}
}
var showIfVal = $(this).data('showif-val');
var saveAs = $(this).data('saveas');
var showIfDiv = this;
showIfVars.forEach(function(showIfVar){
var showIfVarEscaped = showIfVar.replace(/(:|\.|\[|\]|,|=)/g, "\\\\$1");
var showHideDiv = function(speed){
var elem = daGetField(varName, showIfDiv);
if (elem != null && !$(elem).parents('.da-form-group').first().is($(this).parents('.da-form-group').first())){
return;
}
var theVal;
var showifParents = $(this).parents(".dashowif");
if (showifParents.length !== 0 && !($(showifParents[0]).data("isVisible") == '1')){
theVal = '';
//console.log("Setting theVal to blank.");
}
else if ($(this).attr('type') == "checkbox"){
theVal = $("input[name='" + showIfVarEscaped + "']:checked").val();
if (typeof(theVal) == 'undefined'){
//console.log('manually setting checkbox value to False');
theVal = 'False';
}
}
else if ($(this).attr('type') == "radio"){
theVal = $("input[name='" + showIfVarEscaped + "']:checked").val();
if (typeof(theVal) == 'undefined'){
theVal = '';
}
else if (theVal != '' && $("input[name='" + showIfVarEscaped + "']:checked").hasClass("daobject")){
try{
theVal = atou(theVal);
}
catch(e){
}
}
}
else{
theVal = $(this).val();
if (theVal != '' && $(this).hasClass("daobject")){
try{
theVal = atou(theVal);
}
catch(e){
}
}
}
//console.log("this is " + $(this).attr('id') + " and saveAs is " + atou(saveAs) + " and showIfVar is " + atou(showIfVar) + " and val is " + String(theVal) + " and showIfVal is " + String(showIfVal));
if(daShowIfCompare(theVal, showIfVal)){
if (showIfSign){
if ($(showIfDiv).data('isVisible') != '1'){
daShowHideHappened = true;
}
if (showIfMode == 0){
$(showIfDiv).show(speed);
}
$(showIfDiv).data('isVisible', '1');
var firstChild = $(showIfDiv).children()[0];
if (!$(firstChild).hasClass('dacollectextra') || $(firstChild).is(":visible")){
$(showIfDiv).find('input, textarea, select').prop("disabled", false);
$(showIfDiv).find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].enable();
});
$(showIfDiv).find('input.daslider').each(function(){
$(this).slider('enable');
});
$(showIfDiv).find('input.dafile').each(function(){
$(this).data("fileinput").enable();
});
}
}
else{
if ($(showIfDiv).data('isVisible') != '0'){
daShowHideHappened = true;
}
if (showIfMode == 0){
$(showIfDiv).hide(speed);
}
$(showIfDiv).data('isVisible', '0');
$(showIfDiv).find('input, textarea, select').prop("disabled", true);
$(showIfDiv).find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].disable();
});
$(showIfDiv).find('input.daslider').each(function(){
$(this).slider('disable');
});
$(showIfDiv).find('input.dafile').each(function(){
$(this).data("fileinput").disable();
});
}
}
else{
if (showIfSign){
if ($(showIfDiv).data('isVisible') != '0'){
daShowHideHappened = true;
}
if (showIfMode == 0){
$(showIfDiv).hide(speed);
}
$(showIfDiv).data('isVisible', '0');
$(showIfDiv).find('input, textarea, select').prop("disabled", true);
$(showIfDiv).find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].disable();
});
$(showIfDiv).find('input.daslider').each(function(){
$(this).slider('disable');
});
$(showIfDiv).find('input.dafile').each(function(){
$(this).data("fileinput").disable();
});
}
else{
if ($(showIfDiv).data('isVisible') != '1'){
daShowHideHappened = true;
}
if (showIfMode == 0){
$(showIfDiv).show(speed);
}
$(showIfDiv).data('isVisible', '1');
var firstChild = $(showIfDiv).children()[0];
if (!$(firstChild).hasClass('dacollectextra') || $(firstChild).is(":visible")){
$(showIfDiv).find('input, textarea, select').prop("disabled", false);
$(showIfDiv).find('input.combobox').each(function(){
daComboBoxes[$(this).attr('id')].enable();
});
$(showIfDiv).find('input.daslider').each(function(){
$(this).slider('enable');
});
$(showIfDiv).find('input.dafile').each(function(){
$(this).data("fileinput").enable();
});
}
}
}
var leader = false;
if (!daShowIfInProcess){
daShowIfInProcess = true;
daInputsSeen = {};
leader = true;
}
$(showIfDiv).find(":input").not("[type='file']").each(function(){
if (!daInputsSeen.hasOwnProperty($(this).attr('id'))){
$(this).trigger('change');
}
daInputsSeen[$(this).attr('id')] = true;
});
if (leader){
daShowIfInProcess = false;
}
};
var showHideDivImmediate = function(){
showHideDiv.apply(this, [null]);
}
var showHideDivFast = function(){
showHideDiv.apply(this, ['fast']);
}
daTriggerQueries.push("#" + showIfVarEscaped);
daTriggerQueries.push("input[type='radio'][name='" + showIfVarEscaped + "']");
daTriggerQueries.push("input[type='checkbox'][name='" + showIfVarEscaped + "']");
$("#" + showIfVarEscaped).change(showHideDivFast);
$("#" + showIfVarEscaped).on('daManualTrigger', showHideDivImmediate);
$("input[type='radio'][name='" + showIfVarEscaped + "']").change(showHideDivFast);
$("input[type='radio'][name='" + showIfVarEscaped + "']").on('daManualTrigger', showHideDivImmediate);
$("input[type='checkbox'][name='" + showIfVarEscaped + "']").change(showHideDivFast);
$("input[type='checkbox'][name='" + showIfVarEscaped + "']").on('daManualTrigger', showHideDivImmediate);
$("input.dafile[name='" + showIfVarEscaped + "']").on('filecleared', showHideDivFast);
});
});
function daTriggerAllShowHides(){
var daUniqueTriggerQueries = daTriggerQueries.filter(daOnlyUnique);
var daFirstTime = true;
var daTries = 0;
while ((daFirstTime || daShowHideHappened) && ++daTries < 100){
daShowHideHappened = false;
daFirstTime = false;
var n = daUniqueTriggerQueries.length;
for (var i = 0; i < n; ++i){
$(daUniqueTriggerQueries[i]).trigger('daManualTrigger');
}
}
if (daTries >= 100){
console.log("Too many contradictory 'show if' conditions");
}
}
if (daTriggerQueries.length > 0){
daTriggerAllShowHides();
}
$(".danavlink").last().addClass('thelast');
$(".danavlink").each(function(){
if ($(this).hasClass('btn') && !$(this).hasClass('danotavailableyet')){
var the_a = $(this);
var the_delay = 1000 + 250 * parseInt($(this).data('index'));
setTimeout(function(){
$(the_a).removeClass('""" + app.config['BUTTON_STYLE'] + """secondary');
if ($(the_a).hasClass('active')){
$(the_a).addClass('""" + app.config['BUTTON_STYLE'] + """success');
}
else{
$(the_a).addClass('""" + app.config['BUTTON_STYLE'] + """warning');
}
}, the_delay);
}
});
daShowIfInProcess = false;
// daDisable = setTimeout(function(){
// $("#daform").find('button[type="submit"]').prop("disabled", true);
// //$("#daform").find(':input').prop("disabled", true);
// }, 1);
$("#daform").each(function(){
$(this).find(':input').on('change', daOnChange);
});
daInitialized = true;
daShowingHelp = 0;
setTimeout(function(){
$("#daflash .alert-success").hide(300, function(){
$(this).remove();
});
}, 3000);
}
$( document ).ready(function(){
daInitialize(1);
var daDefaultAllowList = bootstrap.Tooltip.Default.allowList;
daDefaultAllowList['*'].push('style');
daDefaultAllowList['a'].push('style');
daDefaultAllowList['img'].push('style');
$( window ).bind('unload', function() {
if (daSocket != null && daSocket.connected){
daSocket.emit('terminate');
}
});
if (location.protocol === 'http:' || document.location.protocol === 'http:'){
daSocket = io.connect('http://' + document.domain + '/observer', {path: '""" + ROOT + """ws/socket.io', query: "i=" + daYamlFilename});
}
if (location.protocol === 'https:' || document.location.protocol === 'https:'){
daSocket = io.connect('https://' + document.domain + '/observer', {path: '""" + ROOT + """ws/socket.io', query: "i=" + daYamlFilename});
}
if (typeof daSocket !== 'undefined') {
daSocket.on('connect', function() {
//console.log("Connected!");
daSocket.emit('observe', {uid: """ + json.dumps(uid) + """, i: daYamlFilename, userid: """ + json.dumps(str(userid)) + """});
daConnected = true;
});
daSocket.on('terminate', function() {
//console.log("Terminating socket");
daSocket.disconnect();
});
daSocket.on('disconnect', function() {
//console.log("Disconnected socket");
//daSocket = null;
});
daSocket.on('stopcontrolling', function(data) {
window.parent.daStopControlling(data.key);
});
daSocket.on('start_being_controlled', function(data) {
//console.log("Got start_being_controlled");
daConfirmed = true;
daPushChanges();
window.parent.daGotConfirmation(data.key);
});
daSocket.on('abortcontrolling', function(data) {
//console.log("Got abortcontrolling");
//daSendChanges = false;
//daConfirmed = false;
//daStopPushChanges();
window.parent.daAbortControlling(data.key);
});
daSocket.on('noconnection', function(data) {
//console.log("warning: no connection");
if (daNoConnectionCount++ > 2){
//console.log("error: no connection");
window.parent.daStopControlling(data.key);
}
});
daSocket.on('newpage', function(incoming) {
//console.log("Got newpage")
var data = incoming.obj;
$(daTargetDiv).html(data.body);
$(daTargetDiv).parent().removeClass();
$(daTargetDiv).parent().addClass(data.bodyclass);
daInitialize(1);
var tempDiv = document.createElement('div');
tempDiv.innerHTML = data.extra_scripts;
var scripts = tempDiv.getElementsByTagName('script');
for (var i = 0; i < scripts.length; i++){
if (scripts[i].src != ""){
daAddScriptToHead(scripts[i].src);
}
else{
daGlobalEval(scripts[i].innerHTML);
}
}
for (var i = 0; i < data.extra_css.length; i++){
$("head").append(data.extra_css[i]);
}
document.title = data.browser_title;
if ($("html").attr("lang") != data.lang){
$("html").attr("lang", data.lang);
}
daPushChanges();
});
daSocket.on('pushchanges', function(data) {
//console.log("Got pushchanges: " + JSON.stringify(data));
var valArray = Object();
var values = data.parameters;
for (var i = 0; i < values.length; i++) {
valArray[values[i].name] = values[i].value;
}
$("#daform").each(function(){
$(this).find(':input').each(function(){
var type = $(this).attr('type');
var id = $(this).attr('id');
var name = $(this).attr('name');
if (type == 'checkbox'){
if (name in valArray){
if (valArray[name] == 'True'){
if ($(this).prop('checked') != true){
$(this).prop('checked', true);
$(this).trigger('change');
}
}
else{
if ($(this).prop('checked') != false){
$(this).prop('checked', false);
$(this).trigger('change');
}
}
}
else{
if ($(this).prop('checked') != false){
$(this).prop('checked', false);
$(this).trigger('change');
}
}
}
else if (type == 'radio'){
if (name in valArray){
if (valArray[name] == $(this).val()){
if ($(this).prop('checked') != true){
$(this).prop('checked', true);
$(this).trigger('change');
}
}
else{
if ($(this).prop('checked') != false){
$(this).prop('checked', false);
$(this).trigger('change');
}
}
}
}
else if ($(this).data().hasOwnProperty('sliderMax')){
$(this).slider('setValue', parseInt(valArray[name]));
}
else{
if (name in valArray){
$(this).val(valArray[name]);
}
}
});
});
});
}
daObserverChangesInterval = setInterval(daPushChanges, """ + str(CHECKIN_INTERVAL) + """);
$(document).trigger('daPageLoad');
});
</script>
""" # noqa: W605
the_key = 'da:html:uid:' + str(uid) + ':i:' + str(i) + ':userid:' + str(userid)
html = r.get(the_key)
if html is not None:
obj = json.loads(html.decode())
else:
logmessage("observer: failed to load JSON from key " + the_key)
obj = {}
page_title = word('Observation')
output = standard_html_start(interview_language=obj.get('lang', 'en'), debug=DEBUG, bootstrap_theme=obj.get('bootstrap_theme', None))
output += obj.get('global_css', '') + "\n" + indent_by("".join(obj.get('extra_css', [])), 4)
output += '\n <title>' + page_title + '</title>\n </head>\n <body class="' + obj.get('bodyclass', 'dabody da-pad-for-navbar da-pad-for-footer') + '">\n <div id="dabody">\n '
output += obj.get('body', '')
output += " </div>\n </div>" + standard_scripts(interview_language=obj.get('lang', 'en')) + observation_script + "\n " + "".join(obj.get('extra_scripts', [])) + "\n </body>\n</html>"
response = make_response(output.encode('utf-8'), '200 OK')
response.headers['Content-type'] = 'text/html; charset=utf-8'
return response
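# Convert a Redis hash result (bytes keys and values) into a dict of ordinary strings.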
def decode_dict(the_dict):
out_dict = {}
for k, v in the_dict.items():
out_dict[k.decode()] = v.decode()
return out_dict
@app.route('/monitor', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'advocate'])
def monitor():
if not app.config['ENABLE_MONITOR']:
return ('File not found', 404)
setup_translation()
if request.method == 'GET' and needs_to_change_password():
return redirect(url_for('user.change_password', next=url_for('monitor')))
session['monitor'] = 1
if 'user_id' not in session:
session['user_id'] = current_user.id
phone_number_key = 'da:monitor:phonenumber:' + str(session['user_id'])
default_phone_number = r.get(phone_number_key)
if default_phone_number is None:
default_phone_number = ''
else:
default_phone_number = default_phone_number.decode()
sub_role_key = 'da:monitor:userrole:' + str(session['user_id'])
if r.exists(sub_role_key):
subscribed_roles = decode_dict(r.hgetall(sub_role_key))
r.expire(sub_role_key, 2592000)
else:
subscribed_roles = {}
key = 'da:monitor:available:' + str(current_user.id)
if r.exists(key):
daAvailableForChat = 'true'
else:
daAvailableForChat = 'false'
call_forwarding_on = 'false'
if twilio_config is not None:
forwarding_phone_number = twilio_config['name']['default'].get('number', None)
if forwarding_phone_number is not None:
call_forwarding_on = 'true'
script = "\n" + ' <script type="text/javascript" src="' + url_for('static', filename='app/socket.io.min.js', v=da_version) + '"></script>' + "\n" + """ <script type="text/javascript">
var daAudioContext = null;
var daSocket;
var daSoundBuffer = Object();
var daShowingNotif = false;
var daUpdatedSessions = Object();
var daUserid = """ + str(current_user.id) + """;
var daPhoneOnMessage = """ + json.dumps(word("The user can call you. Click to cancel.")) + """;
var daPhoneOffMessage = """ + json.dumps(word("Click if you want the user to be able to call you.")) + """;
var daSessions = Object();
var daAvailRoles = Object();
var daChatPartners = Object();
var daPhonePartners = Object();
var daNewPhonePartners = Object();
var daTermPhonePartners = Object();
var daUsePhone = """ + call_forwarding_on + """;
var daSubscribedRoles = """ + json.dumps(subscribed_roles) + """;
var daAvailableForChat = """ + daAvailableForChat + """;
var daPhoneNumber = """ + json.dumps(default_phone_number) + """;
var daFirstTime = 1;
var daUpdateMonitorInterval = null;
var daNotificationsEnabled = false;
var daControlling = Object();
var daBrowserTitle = """ + json.dumps(word('Monitor')) + """;
window.daGotConfirmation = function(key){
//console.log("Got confirmation in parent for key " + key);
// daControlling[key] = 2;
// var skey = key.replace(/(:|\.|\[|\]|,|=|\/)/g, '\\\\$1');
// $("#listelement" + skey).find("a").each(function(){
// if ($(this).data('name') == "stopcontrolling"){
// $(this).removeClass('dainvisible');
// console.log("Found it");
// }
// });
}
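// Swap the favicon between the standard icon and the chat alert icon to signal unread chat activity to the operator.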
function daFaviconRegular(){
var link = document.querySelector("link[rel*='shortcut icon']") || document.createElement('link');
link.type = 'image/x-icon';
link.rel = 'shortcut icon';
link.href = '""" + url_for('favicon', nocache="1") + """';
document.getElementsByTagName('head')[0].appendChild(link);
}
function daFaviconAlert(){
var link = document.querySelector("link[rel*='shortcut icon']") || document.createElement('link');
link.type = 'image/x-icon';
link.rel = 'shortcut icon';
link.href = '""" + url_for('static', filename='app/chat.ico', v=da_version) + """?nocache=1';
document.getElementsByTagName('head')[0].appendChild(link);
}
function daTopMessage(message){
var newDiv = document.createElement('div');
$(newDiv).addClass("datop-alert col-xs-10 col-sm-7 col-md-6 col-lg-5 dacol-centered");
$(newDiv).html(message)
$(newDiv).css("display", "none");
$(newDiv).appendTo($(daTargetDiv));
$(newDiv).slideDown();
setTimeout(function(){
$(newDiv).slideUp(300, function(){
$(newDiv).remove();
});
}, 2000);
}
window.daAbortControlling = function(key){
daTopMessage(""" + json.dumps(word("That screen is already being controlled by another operator")) + """);
daStopControlling(key);
}
window.daStopControlling = function(key){
//console.log("Got daStopControlling in parent for key " + key);
// if (daControlling.hasOwnProperty(key)){
// delete daControlling[key];
// }
var skey = key.replace(/(:|\.|\[|\]|,|=|\/)/g, '\\\\$1');
$("#listelement" + skey).find("a").each(function(){
if ($(this).data('name') == "stopcontrolling"){
$(this).click();
//console.log("Found it");
}
});
}
function daOnError(){
console.log('daOnError');
}
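// Download and decode a notification sound into daSoundBuffer[key], falling back to the second URL if the first cannot be decoded.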
function daLoadSoundBuffer(key, url_a, url_b){
//console.log("daLoadSoundBuffer");
var pos = 0;
if (daAudioContext == null){
return;
}
var request = new XMLHttpRequest();
request.open('GET', url_a, true);
request.responseType = 'arraybuffer';
request.onload = function(){
daAudioContext.decodeAudioData(request.response, function(buffer){
if (!buffer){
if (pos == 1){
console.error('daLoadSoundBuffer: error decoding file data');
return;
}
else {
pos = 1;
console.info('daLoadSoundBuffer: error decoding file data, trying next source');
request.open("GET", url_b, true);
return request.send();
}
}
daSoundBuffer[key] = buffer;
},
function(error){
if (pos == 1){
console.error('daLoadSoundBuffer: decodeAudioData error');
return;
}
else{
pos = 1;
console.info('daLoadSoundBuffer: decodeAudioData error, trying next source');
request.open("GET", url_b, true);
return request.send();
}
});
}
request.send();
}
function daPlaySound(key) {
//console.log("daPlaySound");
var buffer = daSoundBuffer[key];
if (!daAudioContext || !buffer){
return;
}
var source = daAudioContext.createBufferSource();
source.buffer = buffer;
source.connect(daAudioContext.destination);
source.start(0);
}
function daCheckNotifications(){
//console.log("daCheckNotifications");
if (daNotificationsEnabled){
return;
}
if (!("Notification" in window)) {
daNotificationsEnabled = false;
return;
}
if (Notification.permission === "granted") {
daNotificationsEnabled = true;
return;
}
if (Notification.permission !== 'denied') {
Notification.requestPermission(function (permission) {
if (permission === "granted") {
daNotificationsEnabled = true;
}
});
}
}
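// Alert the operator to activity in a session: play a sound, show the "new message above/below" banner when the session is scrolled out of view, and fire a desktop notification if permitted.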
function daNotifyOperator(key, mode, message) {
//console.log("daNotifyOperator: " + key + " " + mode + " " + message);
var skey = key.replace(/(:|\.|\[|\]|,|=|\/)/g, '\\\\$1');
if (mode == "chat"){
daPlaySound('newmessage');
}
else{
daPlaySound('newconversation');
}
if ($("#listelement" + skey).offset().top > $(window).scrollTop() + $(window).height()){
if (mode == "chat"){
$("#chat-message-below").html(""" + json.dumps(word("New message below")) + """);
}
else{
$("#chat-message-below").html(""" + json.dumps(word("New conversation below")) + """);
}
//$("#chat-message-below").data('key', key);
$("#chat-message-below").slideDown();
daShowingNotif = true;
daMarkAsUpdated(key);
}
else if ($("#listelement" + skey).offset().top + $("#listelement" + skey).height() < $(window).scrollTop() + 32){
if (mode == "chat"){
$("#chat-message-above").html(""" + json.dumps(word("New message above")) + """);
}
else{
$("#chat-message-above").html(""" + json.dumps(word("New conversation above")) + """);
}
//$("#chat-message-above").data('key', key);
$("#chat-message-above").slideDown();
daShowingNotif = true;
daMarkAsUpdated(key);
}
else{
//console.log("It is visible");
}
if (!daNotificationsEnabled){
//console.log("Browser will not enable notifications")
return;
}
if (!("Notification" in window)) {
return;
}
if (Notification.permission === "granted") {
var notification = new Notification(message);
}
else if (Notification.permission !== 'denied') {
Notification.requestPermission(function (permission) {
if (permission === "granted") {
var notification = new Notification(message);
daNotificationsEnabled = true;
}
});
}
}
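// Return true if the call forwarding number field is empty or contains a plausible E.164 phone number.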
function daPhoneNumberOk(){
//console.log("daPhoneNumberOk");
var phoneNumber = $("#daPhoneNumber").val();
if (phoneNumber == '' || phoneNumber.match(/^\+?[1-9]\d{1,14}$/)){
return true;
}
else{
return false;
}
}
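// Normalize the call forwarding number (strip separators, ensure a leading plus sign), validate it, and show or hide the per-session phone buttons accordingly.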
function daCheckPhone(){
//console.log("daCheckPhone");
$("#daPhoneNumber").val($("#daPhoneNumber").val().replace(/ \-/g, ''));
var the_number = $("#daPhoneNumber").val();
if (the_number != '' && the_number[0] != '+'){
$("#daPhoneNumber").val('+' + the_number);
}
if (daPhoneNumberOk()){
$("#daPhoneNumber").removeClass("is-invalid");
$("#daPhoneError").addClass("dainvisible");
daPhoneNumber = $("#daPhoneNumber").val();
if (daPhoneNumber == ''){
daPhoneNumber = null;
}
else{
$(".phone").removeClass("dainvisible");
}
$("#daPhoneSaved").removeClass("dainvisible");
setTimeout(function(){
$("#daPhoneSaved").addClass("dainvisible");
}, 2000);
}
else{
$("#daPhoneNumber").addClass("is-invalid");
$("#daPhoneError").removeClass("dainvisible");
daPhoneNumber = null;
$(".phone").addClass("dainvisible");
}
}
function daAllSessions(uid, yaml_filename){
//console.log("daAllSessions");
var prefix = 'da:session:uid:' + uid + ':i:' + yaml_filename + ':userid:';
var output = Array();
for (var key in daSessions){
if (daSessions.hasOwnProperty(key) && key.indexOf(prefix) == 0){
output.push(key);
}
}
return(output);
}
function daScrollChat(key){
var chatScroller = $(key).find('ul').first();
if (chatScroller.length){
var height = chatScroller[0].scrollHeight;
chatScroller.animate({scrollTop: height}, 800);
}
else{
console.log("daScrollChat: error")
}
}
function daScrollChatFast(key){
var chatScroller = $(key).find('ul').first();
if (chatScroller.length){
var height = chatScroller[0].scrollHeight;
//console.log("Scrolling to " + height + " where there are " + chatScroller[0].childElementCount + " children");
chatScroller.scrollTop(height);
}
else{
console.log("daScrollChatFast: error")
}
}
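// Send the monitor's current state (availability, phone number, subscribed roles, and phone partner changes) to the server over the websocket.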
function daDoUpdateMonitor(){
//console.log("daDoUpdateMonitor with " + daAvailableForChat);
if (daPhoneNumberOk()){
daPhoneNumber = $("#daPhoneNumber").val();
if (daPhoneNumber == ''){
daPhoneNumber = null;
}
}
else{
daPhoneNumber = null;
}
daSocket.emit('updatemonitor', {available_for_chat: daAvailableForChat, phone_number: daPhoneNumber, subscribed_roles: daSubscribedRoles, phone_partners_to_add: daNewPhonePartners, phone_partners_to_terminate: daTermPhonePartners});
}
function daUpdateMonitor(){
//console.log("daUpdateMonitor with " + daAvailableForChat);
if (daUpdateMonitorInterval != null){
clearInterval(daUpdateMonitorInterval);
}
daDoUpdateMonitor();
daUpdateMonitorInterval = setInterval(daDoUpdateMonitor, """ + str(CHECKIN_INTERVAL) + """);
//console.log("daUpdateMonitor");
}
function daIsHidden(ref){
if ($(ref).length){
if (($(ref).offset().top + $(ref).height() < $(window).scrollTop() + 32)){
return -1;
}
else if ($(ref).offset().top > $(window).scrollTop() + $(window).height()){
return 1;
}
else{
return 0;
}
}
else{
return 0;
}
}
function daMarkAsUpdated(key){
//console.log("daMarkAsUpdated with " + key);
var skey = key.replace(/(:|\.|\[|\]|,|=|\/)/g, '\\\\$1');
if (daIsHidden("#listelement" + skey)){
daUpdatedSessions["#listelement" + skey] = 1;
}
}
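// Enable the chat box for a session, flag it as having new activity when its input is not focused, and ask the server for the chat history.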
function daActivateChatArea(key){
//console.log("daActivateChatArea with " + key);
var skey = key.replace(/(:|\.|\[|\]|,|=|\/)/g, '\\\\$1');
if (!$("#chatarea" + skey).find('input').first().is(':focus')){
$("#listelement" + skey).addClass("da-new-message");
if (daBrowserTitle == document.title){
document.title = '* ' + daBrowserTitle;
daFaviconAlert();
}
}
daMarkAsUpdated(key);
$("#chatarea" + skey).removeClass('dainvisible');
$("#chatarea" + skey).find('input, button').prop("disabled", false);
$("#chatarea" + skey).find('ul').html('');
daSocket.emit('chat_log', {key: key});
}
function daDeActivateChatArea(key){
//console.log("daActivateChatArea with " + key);
var skey = key.replace(/(:|\.|\[|\]|,|=|\/)/g, '\\\\$1');
$("#chatarea" + skey).find('input, button').prop("disabled", true);
$("#listelement" + skey).removeClass("da-new-message");
if (document.title != daBrowserTitle){
document.title = daBrowserTitle;
daFaviconRegular();
}
}
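// Mark a session as offline in the list: disable its chat box, grey out its observation iframe, and add a button for removing the entry.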
function daUndrawSession(key){
//console.log("Undrawing...");
var skey = key.replace(/(:|\.|\[|\]|,|=|\/)/g, '\\\\$1');
var xButton = document.createElement('a');
var xButtonIcon = document.createElement('i');
$(xButton).addClass("dacorner-remove");
$(xButtonIcon).addClass("fas fa-times-circle");
$(xButtonIcon).appendTo($(xButton));
$("#listelement" + skey).addClass("list-group-item-danger");
$("#session" + skey).find("a").remove();
$("#session" + skey).find("span").first().html(""" + json.dumps(word("offline")) + """);
$("#session" + skey).find("span").first().removeClass('""" + app.config['BUTTON_STYLE'] + """info');
$("#session" + skey).find("span").first().addClass('""" + app.config['BUTTON_STYLE'] + """danger');
$(xButton).click(function(){
$("#listelement" + skey).slideUp(300, function(){
$("#listelement" + skey).remove();
daCheckIfEmpty();
});
});
$(xButton).appendTo($("#session" + skey));
$("#chatarea" + skey).find('input, button').prop("disabled", true);
var theIframe = $("#iframe" + skey).find('iframe')[0];
if (theIframe){
$(theIframe).contents().find('body').addClass("dainactive");
if (theIframe.contentWindow && theIframe.contentWindow.daTurnOffControl){
theIframe.contentWindow.daTurnOffControl();
}
}
if (daControlling.hasOwnProperty(key)){
delete daControlling[key];
}
delete daSessions[key];
}
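// Render a chat transcript received from the server into the session's chat area, optionally scrolling to the bottom.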
function daPublishChatLog(uid, yaml_filename, userid, mode, messages, scroll){
//console.log("daPublishChatLog with " + uid + " " + yaml_filename + " " + userid + " " + mode + " " + messages);
//console.log("daPublishChatLog: scroll is " + scroll);
var keys;
//if (mode == 'peer' || mode == 'peerhelp'){
// keys = daAllSessions(uid, yaml_filename);
//}
//else{
keys = ['da:session:uid:' + uid + ':i:' + yaml_filename + ':userid:' + userid];
//}
for (var i = 0; i < keys.length; ++i){
key = keys[i];
var skey = key.replace(/(:|\.|\[|\]|,|=|\/)/g, '\\\\$1');
var chatArea = $("#chatarea" + skey).find('ul').first();
if (messages.length > 0){
$(chatArea).removeClass('dainvisible');
}
for (var i = 0; i < messages.length; ++i){
var message = messages[i];
var newLi = document.createElement('li');
$(newLi).addClass("list-group-item");
if (message.is_self){
$(newLi).addClass("list-group-item-primary dalistright");
}
else{
$(newLi).addClass("list-group-item-secondary dalistleft");
}
$(newLi).html(message.message);
$(newLi).appendTo(chatArea);
}
if (messages.length > 0 && scroll){
daScrollChatFast("#chatarea" + skey);
}
}
}
function daCheckIfEmpty(){
if ($("#monitorsessions").find("li").length > 0){
$("#emptylist").addClass("dainvisible");
}
else{
$("#emptylist").removeClass("dainvisible");
}
}
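// Create or refresh the list entry for a session: status label, phone/block/join/observe/control buttons, the observation iframe, and the chat area.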
function daDrawSession(key, obj){
//console.log("daDrawSession with " + key);
var skey = key.replace(/(:|\.|\[|\]|,|=|\/)/g, '\\\\$1');
var the_html;
var wants_to_chat;
if (obj.chatstatus != 'off'){
wants_to_chat = true;
}
if (wants_to_chat){
the_html = obj.browser_title + ' — '
if (obj.hasOwnProperty('first_name')){
the_html += obj.first_name + ' ' + obj.last_name;
}
else{
the_html += """ + json.dumps(word("anonymous visitor") + ' ') + """ + obj.temp_user_id;
}
}
var theListElement;
var sessionDiv;
var theIframeContainer;
var theChatArea;
if ($("#session" + skey).length && !(key in daSessions)){
$("#listelement" + skey).removeClass("list-group-item-danger");
$("#iframe" + skey).find('iframe').first().contents().find('body').removeClass("dainactive");
}
daSessions[key] = 1;
if ($("#session" + skey).length){
theListElement = $("#listelement" + skey).first();
sessionDiv = $("#session" + skey).first();
//controlDiv = $("#control" + skey).first();
theIframeContainer = $("#iframe" + skey).first();
theChatArea = $("#chatarea" + skey).first();
$(sessionDiv).empty();
if (obj.chatstatus == 'on' && key in daChatPartners && $("#chatarea" + skey).find('button').first().prop("disabled") == true){
daActivateChatArea(key);
}
}
else{
var theListElement = document.createElement('li');
$(theListElement).addClass('list-group-item');
$(theListElement).attr('id', "listelement" + key);
var sessionDiv = document.createElement('div');
$(sessionDiv).attr('id', "session" + key);
$(sessionDiv).addClass('da-chat-session');
$(sessionDiv).addClass('p-1');
$(sessionDiv).appendTo($(theListElement));
$(theListElement).appendTo("#monitorsessions");
// controlDiv = document.createElement('div');
// $(controlDiv).attr('id', "control" + key);
// $(controlDiv).addClass("dachatcontrol dainvisible da-chat-session");
// $(controlDiv).appendTo($(theListElement));
theIframeContainer = document.createElement('div');
$(theIframeContainer).addClass("daobserver-container dainvisible");
$(theIframeContainer).attr('id', 'iframe' + key);
var theIframe = document.createElement('iframe');
$(theIframe).addClass("daobserver");
$(theIframe).attr('name', 'iframe' + key);
$(theIframe).appendTo($(theIframeContainer));
$(theIframeContainer).appendTo($(theListElement));
var theChatArea = document.createElement('div');
$(theChatArea).addClass('monitor-chat-area dainvisible');
$(theChatArea).html('<div class="row"><div class="col-md-12"><ul class="list-group dachatbox" id="daCorrespondence"><\/ul><\/div><\/div><form autocomplete="off"><div class="row"><div class="col-md-12"><div class="input-group"><input type="text" class="form-control daChatMessage" disabled=""><button role="button" class="btn """ + app.config['BUTTON_STYLE'] + """secondary daChatButton" type="button" disabled="">""" + word("Send") + """<\/button><\/div><\/div><\/div><\/form>');
$(theChatArea).attr('id', 'chatarea' + key);
var submitter = function(){
//console.log("I am the submitter and I am submitting " + key);
var input = $(theChatArea).find("input").first();
var message = input.val().trim();
if (message == null || message == ""){
//console.log("Message was blank");
return false;
}
daSocket.emit('chatmessage', {key: key, data: input.val()});
input.val('');
return false;
};
$(theChatArea).find("button").click(submitter);
$(theChatArea).find("input").bind('keypress keydown keyup', function(e){
var theCode = e.which || e.keyCode;
if(theCode == 13) { submitter(); e.preventDefault(); }
});
$(theChatArea).find("input").focus(function(){
$(theListElement).removeClass("da-new-message");
if (document.title != daBrowserTitle){
document.title = daBrowserTitle;
daFaviconRegular();
}
});
$(theChatArea).appendTo($(theListElement));
if (obj.chatstatus == 'on' && key in daChatPartners){
daActivateChatArea(key);
}
}
var theText = document.createElement('span');
$(theText).addClass('da-chat-title-label');
theText.innerHTML = the_html;
var statusLabel = document.createElement('span');
$(statusLabel).addClass("badge bg-info da-chat-status-label");
$(statusLabel).html(obj.chatstatus == 'observeonly' ? 'off' : obj.chatstatus);
$(statusLabel).appendTo($(sessionDiv));
if (daUsePhone){
var phoneButton = document.createElement('a');
var phoneIcon = document.createElement('i');
$(phoneIcon).addClass("fas fa-phone");
$(phoneIcon).appendTo($(phoneButton));
$(phoneButton).addClass("btn phone");
$(phoneButton).data('name', 'phone');
if (key in daPhonePartners){
$(phoneButton).addClass("phone-on """ + app.config['BUTTON_STYLE'] + """success");
$(phoneButton).attr('title', daPhoneOnMessage);
}
else{
$(phoneButton).addClass("phone-off """ + app.config['BUTTON_STYLE'] + """secondary");
$(phoneButton).attr('title', daPhoneOffMessage);
}
$(phoneButton).attr('tabindex', 0);
$(phoneButton).addClass('daobservebutton')
$(phoneButton).appendTo($(sessionDiv));
$(phoneButton).attr('href', '#');
if (daPhoneNumber == null){
$(phoneButton).addClass("dainvisible");
}
$(phoneButton).click(function(e){
e.preventDefault();
if ($(this).hasClass("phone-off") && daPhoneNumber != null){
$(this).removeClass("phone-off");
$(this).removeClass(""" + '"' + app.config['BUTTON_STYLE'] + """secondary");
$(this).addClass("phone-on");
$(this).addClass(""" + '"' + app.config['BUTTON_STYLE'] + """success");
$(this).attr('title', daPhoneOnMessage);
daPhonePartners[key] = 1;
daNewPhonePartners[key] = 1;
if (key in daTermPhonePartners){
delete daTermPhonePartners[key];
}
}
else{
$(this).removeClass("phone-on");
$(this).removeClass(""" + '"' + app.config['BUTTON_STYLE'] + """success");
$(this).addClass("phone-off");
$(this).addClass(""" + '"' + app.config['BUTTON_STYLE'] + """secondary");
$(this).attr('title', daPhoneOffMessage);
if (key in daPhonePartners){
delete daPhonePartners[key];
}
if (key in daNewPhonePartners){
delete daNewPhonePartners[key];
}
daTermPhonePartners[key] = 1;
}
daUpdateMonitor();
return false;
});
}
var unblockButton = document.createElement('a');
$(unblockButton).addClass("btn """ + app.config['BUTTON_STYLE'] + """info daobservebutton");
$(unblockButton).data('name', 'unblock');
if (!obj.blocked){
$(unblockButton).addClass("dainvisible");
}
$(unblockButton).html(""" + json.dumps(word("Unblock")) + """);
$(unblockButton).attr('href', '#');
$(unblockButton).appendTo($(sessionDiv));
var blockButton = document.createElement('a');
$(blockButton).addClass("btn """ + app.config['BUTTON_STYLE'] + """danger daobservebutton");
if (obj.blocked){
$(blockButton).addClass("dainvisible");
}
$(blockButton).html(""" + json.dumps(word("Block")) + """);
$(blockButton).attr('href', '#');
$(blockButton).data('name', 'block');
$(blockButton).appendTo($(sessionDiv));
$(blockButton).click(function(e){
$(unblockButton).removeClass("dainvisible");
$(this).addClass("dainvisible");
daDeActivateChatArea(key);
daSocket.emit('block', {key: key});
e.preventDefault();
return false;
});
$(unblockButton).click(function(e){
$(blockButton).removeClass("dainvisible");
$(this).addClass("dainvisible");
daSocket.emit('unblock', {key: key});
e.preventDefault();
return false;
});
var joinButton = document.createElement('a');
$(joinButton).addClass("btn """ + app.config['BUTTON_STYLE'] + """warning daobservebutton");
$(joinButton).html(""" + json.dumps(word("Join")) + """);
$(joinButton).attr('href', """ + json.dumps(url_for('visit_interview') + '?') + """ + $.param({i: obj.i, uid: obj.uid, userid: obj.userid}));
$(joinButton).data('name', 'join');
$(joinButton).attr('target', '_blank');
$(joinButton).appendTo($(sessionDiv));
if (wants_to_chat){
var openButton = document.createElement('a');
$(openButton).addClass("btn """ + app.config['BUTTON_STYLE'] + """primary daobservebutton");
$(openButton).attr('href', """ + json.dumps(url_for('observer') + '?') + """ + $.param({i: obj.i, uid: obj.uid, userid: obj.userid}));
//$(openButton).attr('href', 'about:blank');
$(openButton).attr('id', 'observe' + key);
$(openButton).attr('target', 'iframe' + key);
$(openButton).html(""" + json.dumps(word("Observe")) + """);
$(openButton).data('name', 'open');
$(openButton).appendTo($(sessionDiv));
var stopObservingButton = document.createElement('a');
$(stopObservingButton).addClass("btn """ + app.config['BUTTON_STYLE'] + """secondary daobservebutton dainvisible");
$(stopObservingButton).html(""" + json.dumps(word("Stop Observing")) + """);
$(stopObservingButton).attr('href', '#');
$(stopObservingButton).data('name', 'stopObserving');
$(stopObservingButton).appendTo($(sessionDiv));
var controlButton = document.createElement('a');
$(controlButton).addClass("btn """ + app.config['BUTTON_STYLE'] + """info daobservebutton");
$(controlButton).html(""" + json.dumps(word("Control")) + """);
$(controlButton).attr('href', '#');
$(controlButton).data('name', 'control');
$(controlButton).appendTo($(sessionDiv));
var stopControllingButton = document.createElement('a');
$(stopControllingButton).addClass("btn """ + app.config['BUTTON_STYLE'] + """secondary daobservebutton dainvisible");
$(stopControllingButton).html(""" + json.dumps(word("Stop Controlling")) + """);
$(stopControllingButton).attr('href', '#');
$(stopControllingButton).data('name', 'stopcontrolling');
$(stopControllingButton).appendTo($(sessionDiv));
$(controlButton).click(function(event){
event.preventDefault();
//console.log("Controlling...");
$(this).addClass("dainvisible");
$(stopControllingButton).removeClass("dainvisible");
$(stopObservingButton).addClass("dainvisible");
var theIframe = $("#iframe" + skey).find('iframe')[0];
if (theIframe != null && theIframe.contentWindow){
theIframe.contentWindow.daTurnOnControl();
}
else{
console.log("Cannot turn on control");
}
daControlling[key] = 1;
return false;
});
$(stopControllingButton).click(function(event){
//console.log("Got click on stopControllingButton");
event.preventDefault();
var theIframe = $("#iframe" + skey).find('iframe')[0];
if (theIframe != null && theIframe.contentWindow && theIframe.contentWindow.daTurnOffControl){
theIframe.contentWindow.daTurnOffControl();
}
else{
console.log("Cannot turn off control");
return false;
}
//console.log("Stop controlling...");
$(this).addClass("dainvisible");
$(controlButton).removeClass("dainvisible");
$(stopObservingButton).removeClass("dainvisible");
if (daControlling.hasOwnProperty(key)){
delete daControlling[key];
}
return false;
});
$(openButton).click(function(event){
//console.log("Observing..");
$(this).addClass("dainvisible");
$(stopObservingButton).removeClass("dainvisible");
$("#iframe" + skey).removeClass("dainvisible");
$(controlButton).removeClass("dainvisible");
return true;
});
$(stopObservingButton).click(function(e){
//console.log("Unobserving...");
$(this).addClass("dainvisible");
$(openButton).removeClass("dainvisible");
$(controlButton).addClass("dainvisible");
$(stopObservingButton).addClass("dainvisible");
$(stopControllingButton).addClass("dainvisible");
var theIframe = $("#iframe" + skey).find('iframe')[0];
if (daControlling.hasOwnProperty(key)){
delete daControlling[key];
if (theIframe != null && theIframe.contentWindow && theIframe.contentWindow.daTurnOffControl){
//console.log("Calling daTurnOffControl in iframe");
theIframe.contentWindow.daTurnOffControl();
}
}
if (theIframe != null && theIframe.contentWindow){
//console.log("Deleting the iframe");
theIframe.contentWindow.document.open();
theIframe.contentWindow.document.write("");
theIframe.contentWindow.document.close();
}
$("#iframe" + skey).slideUp(400, function(){
$(this).css("display", "").addClass("dainvisible");
});
e.preventDefault();
return false;
});
if ($(theIframeContainer).hasClass("dainvisible")){
$(openButton).removeClass("dainvisible");
$(stopObservingButton).addClass("dainvisible");
$(controlButton).addClass("dainvisible");
$(stopControllingButton).addClass("dainvisible");
if (daControlling.hasOwnProperty(key)){
delete daControlling[key];
}
}
else{
$(openButton).addClass("dainvisible");
if (daControlling.hasOwnProperty(key)){
$(stopObservingButton).addClass("dainvisible");
$(controlButton).addClass("dainvisible");
$(stopControllingButton).removeClass("dainvisible");
}
else{
$(stopObservingButton).removeClass("dainvisible");
$(controlButton).removeClass("dainvisible");
$(stopControllingButton).addClass("dainvisible");
}
}
}
$(theText).appendTo($(sessionDiv));
if (obj.chatstatus == 'on' && key in daChatPartners && $("#chatarea" + skey).hasClass('dainvisible')){
daActivateChatArea(key);
}
if ((obj.chatstatus != 'on' || !(key in daChatPartners)) && $("#chatarea" + skey).find('button').first().prop("disabled") == false){
daDeActivateChatArea(key);
}
else if (obj.blocked){
daDeActivateChatArea(key);
}
}
function daOnScrollResize(){
if (document.title != daBrowserTitle){
document.title = daBrowserTitle;
daFaviconRegular();
}
if (!daShowingNotif){
return true;
}
var obj = Array();
for (var key in daUpdatedSessions){
if (daUpdatedSessions.hasOwnProperty(key)){
obj.push(key);
}
}
var somethingAbove = false;
var somethingBelow = false;
var firstElement = -1;
var lastElement = -1;
for (var i = 0; i < obj.length; ++i){
var result = daIsHidden(obj[i]);
if (result == 0){
delete daUpdatedSessions[obj[i]];
}
else if (result < 0){
var top = $(obj[i]).offset().top;
somethingAbove = true;
if (firstElement == -1 || top < firstElement){
firstElement = top;
}
}
else if (result > 0){
var top = $(obj[i]).offset().top;
somethingBelow = true;
if (lastElement == -1 || top > lastElement){
lastElement = top;
}
}
}
if (($("#chat-message-above").is(":visible")) && !somethingAbove){
$("#chat-message-above").hide();
}
if (($("#chat-message-below").is(":visible")) && !somethingBelow){
$("#chat-message-below").hide();
}
if (!(somethingAbove || somethingBelow)){
daShowingNotif = false;
}
return true;
}
$(document).ready(function(){
//console.log("document ready");
try {
window.AudioContext = window.AudioContext || window.webkitAudioContext;
daAudioContext = new AudioContext();
}
catch(e) {
console.log('Web Audio API is not supported in this browser');
}
daLoadSoundBuffer('newmessage', '""" + url_for('static', filename='sounds/notification-click-on.mp3', v=da_version) + """', '""" + url_for('static', filename='sounds/notification-click-on.ogg', v=da_version) + """');
daLoadSoundBuffer('newconversation', '""" + url_for('static', filename='sounds/notification-stapler.mp3', v=da_version) + """', '""" + url_for('static', filename='sounds/notification-stapler.ogg', v=da_version) + """');
daLoadSoundBuffer('signinout', '""" + url_for('static', filename='sounds/notification-snap.mp3', v=da_version) + """', '""" + url_for('static', filename='sounds/notification-snap.ogg', v=da_version) + """');
if (location.protocol === 'http:' || document.location.protocol === 'http:'){
daSocket = io.connect('http://' + document.domain + '/monitor', {path: '""" + ROOT + """ws/socket.io'});
}
if (location.protocol === 'https:' || document.location.protocol === 'https:'){
daSocket = io.connect('https://' + document.domain + '/monitor', {path: '""" + ROOT + """ws/socket.io'});
}
//console.log("socket is " + daSocket)
if (typeof daSocket !== 'undefined') {
daSocket.on('connect', function() {
//console.log("Connected!");
daUpdateMonitor();
});
daSocket.on('terminate', function() {
console.log("monitor: terminating socket");
daSocket.disconnect();
});
daSocket.on('disconnect', function() {
//console.log("monitor: disconnected socket");
//daSocket = null;
});
daSocket.on('refreshsessions', function(data) {
daUpdateMonitor();
});
// daSocket.on('abortcontroller', function(data) {
// console.log("Got abortcontroller message for " + data.key);
// });
daSocket.on('chatready', function(data) {
var key = 'da:session:uid:' + data.uid + ':i:' + data.i + ':userid:' + data.userid
//console.log('chatready: ' + key);
daActivateChatArea(key);
daNotifyOperator(key, "chatready", """ + json.dumps(word("New chat connection from")) + """ + ' ' + data.name)
});
daSocket.on('chatstop', function(data) {
var key = 'da:session:uid:' + data.uid + ':i:' + data.i + ':userid:' + data.userid
//console.log('chatstop: ' + key);
if (key in daChatPartners){
delete daChatPartners[key];
}
daDeActivateChatArea(key);
});
daSocket.on('chat_log', function(arg) {
//console.log('chat_log: ' + arg.userid);
daPublishChatLog(arg.uid, arg.i, arg.userid, arg.mode, arg.data, arg.scroll);
});
daSocket.on('block', function(arg) {
//console.log("back from blocking " + arg.key);
daUpdateMonitor();
});
daSocket.on('unblock', function(arg) {
//console.log("back from unblocking " + arg.key);
daUpdateMonitor();
});
daSocket.on('chatmessage', function(data) {
//console.log("chatmessage");
var keys;
if (data.data.mode == 'peer' || data.data.mode == 'peerhelp'){
keys = daAllSessions(data.uid, data.i);
}
else{
keys = ['da:session:uid:' + data.uid + ':i:' + data.i + ':userid:' + data.userid];
}
for (var i = 0; i < keys.length; ++i){
key = keys[i];
var skey = key.replace(/(:|\.|\[|\]|,|=|\/)/g, '\\\\$1');
//console.log("Received chat message for #chatarea" + skey);
var chatArea = $("#chatarea" + skey).find('ul').first();
var newLi = document.createElement('li');
$(newLi).addClass("list-group-item");
if (data.data.is_self){
$(newLi).addClass("list-group-item-primary dalistright");
}
else{
$(newLi).addClass("list-group-item-secondary dalistleft");
}
$(newLi).html(data.data.message);
$(newLi).appendTo(chatArea);
daScrollChat("#chatarea" + skey);
if (data.data.is_self){
$("#listelement" + skey).removeClass("da-new-message");
if (document.title != daBrowserTitle){
document.title = daBrowserTitle;
daFaviconRegular();
}
}
else{
if (!$("#chatarea" + skey).find('input').first().is(':focus')){
$("#listelement" + skey).addClass("da-new-message");
if (daBrowserTitle == document.title){
document.title = '* ' + daBrowserTitle;
daFaviconAlert();
}
}
if (data.data.hasOwnProperty('temp_user_id')){
daNotifyOperator(key, "chat", """ + json.dumps(word("anonymous visitor")) + """ + ' ' + data.data.temp_user_id + ': ' + data.data.message);
}
else{
if (data.data.first_name && data.data.first_name != ''){
daNotifyOperator(key, "chat", data.data.first_name + ' ' + data.data.last_name + ': ' + data.data.message);
}
else{
daNotifyOperator(key, "chat", data.data.email + ': ' + data.data.message);
}
}
}
}
});
daSocket.on('sessionupdate', function(data) {
//console.log("Got session update: " + data.session.chatstatus);
daDrawSession(data.key, data.session);
daCheckIfEmpty();
});
daSocket.on('updatemonitor', function(data) {
//console.log("Got update monitor response");
//console.log("updatemonitor: chat partners are: " + data.chatPartners);
daChatPartners = data.chatPartners;
daNewPhonePartners = Object();
daTermPhonePartners = Object();
daPhonePartners = data.phonePartners;
var newSubscribedRoles = Object();
for (var key in data.subscribedRoles){
if (data.subscribedRoles.hasOwnProperty(key)){
newSubscribedRoles[key] = 1;
}
}
for (var i = 0; i < data.availRoles.length; ++i){
var key = data.availRoles[i];
var skey = key.replace(/(:|\.|\[|\]|,|=|\/| )/g, '\\\\$1');
if ($("#role" + skey).length == 0){
var div = document.createElement('div');
$(div).addClass("form-check form-check-inline");
var label = document.createElement('label');
$(label).addClass('form-check-label');
$(label).attr('for', "role" + key);
var input = document.createElement('input');
$(input).addClass('form-check-input');
var text = document.createTextNode(key);
$(input).attr('type', 'checkbox');
$(input).attr('id', "role" + key);
if (key in newSubscribedRoles){
$(input).prop('checked', true);
}
else{
$(input).prop('checked', false);
}
$(input).val(key);
$(text).appendTo($(label));
$(input).appendTo($(div));
$(label).appendTo($(div));
$(div).appendTo($("#monitorroles"));
$(input).change(function(){
var key = $(this).val();
//console.log("change to " + key);
if ($(this).is(":checked")) {
//console.log("it is checked");
daSubscribedRoles[key] = 1;
}
else{
//console.log("it is not checked");
if (key in daSubscribedRoles){
delete daSubscribedRoles[key];
}
}
daUpdateMonitor();
});
}
else{
var input = $("#role" + skey).first();
if (key in newSubscribedRoles){
$(input).prop('checked', true);
}
else{
$(input).prop('checked', false);
}
}
}
daSubscribedRoles = newSubscribedRoles;
newDaSessions = Object();
for (var key in data.sessions){
if (data.sessions.hasOwnProperty(key)){
var user_id = key.replace(/^.*:userid:/, '');
if (true || user_id != daUserid){
var obj = data.sessions[key];
newDaSessions[key] = obj;
daDrawSession(key, obj);
}
}
}
var toDelete = Array();
var numSessions = 0;
for (var key in daSessions){
if (daSessions.hasOwnProperty(key)){
numSessions++;
if (!(key in newDaSessions)){
toDelete.push(key);
}
}
}
for (var i = 0; i < toDelete.length; ++i){
var key = toDelete[i];
daUndrawSession(key);
}
if ($("#monitorsessions").find("li").length > 0){
$("#emptylist").addClass("dainvisible");
}
else{
$("#emptylist").removeClass("dainvisible");
}
});
}
if (daAvailableForChat){
$("#daNotAvailable").addClass("dainvisible");
daCheckNotifications();
}
else{
$("#daAvailable").addClass("dainvisible");
}
$("#daAvailable").click(function(event){
$("#daAvailable").addClass("dainvisible");
$("#daNotAvailable").removeClass("dainvisible");
daAvailableForChat = false;
//console.log("daAvailableForChat: " + daAvailableForChat);
daUpdateMonitor();
daPlaySound('signinout');
});
$("#daNotAvailable").click(function(event){
daCheckNotifications();
$("#daNotAvailable").addClass("dainvisible");
$("#daAvailable").removeClass("dainvisible");
daAvailableForChat = true;
//console.log("daAvailableForChat: " + daAvailableForChat);
daUpdateMonitor();
daPlaySound('signinout');
});
$( window ).bind('unload', function() {
if (typeof daSocket !== 'undefined'){
daSocket.emit('terminate');
}
});
if (daUsePhone){
$("#daPhoneInfo").removeClass("dainvisible");
$("#daPhoneNumber").val(daPhoneNumber);
$("#daPhoneNumber").change(daCheckPhone);
$("#daPhoneNumber").bind('keypress keydown keyup', function(e){
var theCode = e.which || e.keyCode;
if(theCode == 13) { $(this).blur(); e.preventDefault(); }
});
}
$(window).on('scroll', daOnScrollResize);
$(window).on('resize', daOnScrollResize);
$(".da-chat-notifier").click(function(e){
//var key = $(this).data('key');
var direction = 0;
if ($(this).attr('id') == "chat-message-above"){
direction = -1;
}
else{
direction = 1;
}
var target = -1;
var targetElement = null;
for (var key in daUpdatedSessions){
if (daUpdatedSessions.hasOwnProperty(key)){
var top = $(key).offset().top;
if (direction == -1){
if (target == -1 || top < target){
target = top;
targetElement = key;
}
}
else{
if (target == -1 || top > target){
target = top;
targetElement = key;
}
}
}
}
if (target >= 0){
$("html, body").animate({scrollTop: target - 60}, 500, function(){
$(targetElement).find("input").first().focus();
});
}
e.preventDefault();
return false;
});
});
</script>""" # noqa: W605
response = make_response(render_template('pages/monitor.html', version_warning=None, bodyclass='daadminbody', extra_js=Markup(script), tab_title=word('Monitor'), page_title=word('Monitor')), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
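# The /updatingpackages page displays a waiting screen whose JavaScript polls update_package_ajax
# every six seconds until the background package update reports success or failure, then shows the logs.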
@app.route('/updatingpackages', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'developer'])
def update_package_wait():
setup_translation()
if not (app.config['DEVELOPER_CAN_INSTALL'] or current_user.has_role('admin')):
return ('File not found', 404)
next_url = app.user_manager.make_safe_url_function(request.args.get('next', url_for('update_package')))
my_csrf = generate_csrf()
script = """
<script>
var daCheckinInterval = null;
var resultsAreIn = false;
var pollDelay = 0;
var pollFail = 0;
var pollPending = false;
function daRestartCallback(data){
//console.log("Restart result: " + data.success);
}
function daRestart(){
$.ajax({
type: 'POST',
url: """ + json.dumps(url_for('restart_ajax')) + """,
data: 'csrf_token=""" + my_csrf + """&action=restart',
success: daRestartCallback,
dataType: 'json'
});
return true;
}
function daBadCallback(data){
pollPending = false;
pollFail += 1;
}
function daUpdateCallback(data){
pollPending = false;
if (data.success){
if (data.status == 'finished'){
resultsAreIn = true;
if (data.ok){
$("#notification").html(""" + json.dumps(word("The package update did not report an error. The logs are below.")) + """);
$("#notification").removeClass("alert-info");
$("#notification").removeClass("alert-danger");
$("#notification").addClass("alert-success");
}
else{
$("#notification").html(""" + json.dumps(word("The package update reported an error. The logs are below.")) + """);
$("#notification").removeClass("alert-info");
$("#notification").removeClass("alert-success");
$("#notification").addClass("alert-danger");
}
$("#resultsContainer").show();
$("#resultsArea").html(data.summary);
if (daCheckinInterval != null){
clearInterval(daCheckinInterval);
}
//daRestart();
}
else if (data.status == 'failed' && !resultsAreIn){
resultsAreIn = true;
$("#notification").html(""" + json.dumps(word("There was an error updating the packages.")) + """);
$("#notification").removeClass("alert-info");
$("#notification").removeClass("alert-success");
$("#notification").addClass("alert-danger");
$("#resultsContainer").show();
if (data.error_message){
$("#resultsArea").html(data.error_message);
}
else if (data.summary){
$("#resultsArea").html(data.summary);
}
if (daCheckinInterval != null){
clearInterval(daCheckinInterval);
}
}
}
else if (!resultsAreIn){
$("#notification").html(""" + json.dumps(word("There was an error.")) + """);
$("#notification").removeClass("alert-info");
$("#notification").removeClass("alert-success");
$("#notification").addClass("alert-danger");
if (daCheckinInterval != null){
clearInterval(daCheckinInterval);
}
}
}
function daUpdate(){
if (pollDelay > 25 || pollFail > 8){
$("#notification").html(""" + json.dumps(word("Server did not respond to request for update.")) + """);
$("#notification").removeClass("alert-info");
$("#notification").removeClass("alert-success");
$("#notification").addClass("alert-danger");
if (daCheckinInterval != null){
clearInterval(daCheckinInterval);
}
return;
}
if (pollPending){
pollDelay += 1;
return;
}
if (resultsAreIn){
return;
}
pollDelay = 0;
pollPending = true;
$.ajax({
type: 'POST',
url: """ + json.dumps(url_for('update_package_ajax')) + """,
data: 'csrf_token=""" + my_csrf + """',
success: daUpdateCallback,
error: daBadCallback,
timeout: 10000,
dataType: 'json'
});
return true;
}
$( document ).ready(function() {
//console.log("page loaded");
daCheckinInterval = setInterval(daUpdate, 6000);
});
</script>"""
response = make_response(render_template('pages/update_package_wait.html', version_warning=None, bodyclass='daadminbody', extra_js=Markup(script), tab_title=word('Updating'), page_title=word('Updating'), next_page=next_url), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
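# AJAX endpoint polled by the updating page; reports whether the Celery task recorded in the
# session has finished, failed, or is still pending, along with a summary of the results.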
@app.route('/update_package_ajax', methods=['POST'])
@login_required
@roles_required(['admin', 'developer'])
def update_package_ajax():
if not (app.config['DEVELOPER_CAN_INSTALL'] or current_user.has_role('admin')):
return ('File not found', 404)
if 'taskwait' not in session or 'serverstarttime' not in session:
return jsonify(success=False)
setup_translation()
result = docassemble.webapp.worker.workerapp.AsyncResult(id=session['taskwait'])
if result.ready():
# if 'taskwait' in session:
# del session['taskwait']
the_result = result.get()
if isinstance(the_result, ReturnValue):
if the_result.ok:
# logmessage("update_package_ajax: success")
if (hasattr(the_result, 'restart') and not the_result.restart) or START_TIME > session['serverstarttime']:
return jsonify(success=True, status='finished', ok=the_result.ok, summary=summarize_results(the_result.results, the_result.logmessages))
return jsonify(success=True, status='waiting')
if hasattr(the_result, 'error_message'):
logmessage("update_package_ajax: failed return value is " + str(the_result.error_message))
return jsonify(success=True, status='failed', error_message=str(the_result.error_message))
if hasattr(the_result, 'results') and hasattr(the_result, 'logmessages'):
return jsonify(success=True, status='failed', summary=summarize_results(the_result.results, the_result.logmessages))
return jsonify(success=True, status='failed', error_message=str("No error message. Result is " + str(the_result)))
logmessage("update_package_ajax: failed return value is a " + str(type(the_result)))
logmessage("update_package_ajax: failed return value is " + str(the_result))
return jsonify(success=True, status='failed', error_message=str(the_result))
return jsonify(success=True, status='waiting')
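# Extract the Python package name from an uploaded zip file by locating the shallowest setup.py
# and parsing its NAME variable or the name= keyword passed to setup().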
def get_package_name_from_zip(zippath):
with zipfile.ZipFile(zippath, mode='r') as zf:
min_level = 999999
setup_py = None
for zinfo in zf.infolist():
parts = splitall(zinfo.filename)
if parts[-1] == 'setup.py':
if len(parts) < min_level:
setup_py = zinfo
min_level = len(parts)
if setup_py is None:
raise DAException("Not a Python package zip file")
with zf.open(setup_py) as f:
the_file = TextIOWrapper(f, encoding='utf8')
contents = the_file.read()
extracted = {}
for line in contents.splitlines():
m = re.search(r"^NAME *= *\(?'(.*)'", line)
if m:
extracted['name'] = m.group(1)
m = re.search(r'^NAME *= *\(?"(.*)"', line)
if m:
extracted['name'] = m.group(1)
m = re.search(r'^NAME *= *\[(.*)\]', line)
if m:
extracted['name'] = m.group(1)
if 'name' in extracted:
return extracted['name']
contents = re.sub(r'.*setup\(', '', contents, flags=re.DOTALL)
extracted = {}
for line in contents.splitlines():
m = re.search(r"^ *([a-z_]+) *= *\(?'(.*)'", line)
if m:
extracted[m.group(1)] = m.group(2)
m = re.search(r'^ *([a-z_]+) *= *\(?"(.*)"', line)
if m:
extracted[m.group(1)] = m.group(2)
m = re.search(r'^ *([a-z_]+) *= *\[(.*)\]', line)
if m:
extracted[m.group(1)] = m.group(2)
if 'name' not in extracted:
raise DAException("Could not find name of Python package")
return extracted['name']
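# Package Management page: handles installs and updates from a zip upload, a Git URL, or PyPI,
# queues a background update task, and redirects to the waiting page.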
@app.route('/updatepackage', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'developer'])
def update_package():
setup_translation()
if not app.config['ALLOW_UPDATES']:
return ('File not found', 404)
if not (app.config['DEVELOPER_CAN_INSTALL'] or current_user.has_role('admin')):
return ('File not found', 404)
if 'taskwait' in session:
del session['taskwait']
if 'serverstarttime' in session:
del session['serverstarttime']
# pip.utils.logging._log_state = threading.local()
# pip.utils.logging._log_state.indentation = 0
if request.method == 'GET' and app.config['USE_GITHUB'] and r.get('da:using_github:userid:' + str(current_user.id)) is not None:
storage = RedisCredStorage(oauth_app='github')
credentials = storage.get()
if not credentials or credentials.invalid:
state_string = random_string(16)
session['github_next'] = json.dumps({'state': state_string, 'path': 'update_package', 'arguments': request.args})
flow = get_github_flow()
uri = flow.step1_get_authorize_url(state=state_string)
return redirect(uri)
form = UpdatePackageForm(request.form)
form.gitbranch.choices = [('', "Not applicable")]
if form.gitbranch.data:
form.gitbranch.choices.append((form.gitbranch.data, form.gitbranch.data))
action = request.args.get('action', None)
target = request.args.get('package', None)
limitation = request.args.get('limitation', '')
branch = None
if action is not None and target is not None:
package_list, package_auth = get_package_info() # pylint: disable=unused-variable
the_package = None
for package in package_list:
if package.package.name == target:
the_package = package
break
if the_package is not None:
if action == 'uninstall' and the_package.can_uninstall:
uninstall_package(target)
elif action == 'update' and the_package.can_update:
existing_package = db.session.execute(select(Package).filter_by(name=target, active=True).order_by(Package.id.desc())).scalar()
if existing_package is not None:
if limitation and existing_package.limitation != limitation:
existing_package.limitation = limitation
db.session.commit()
if existing_package.type == 'git' and existing_package.giturl is not None:
if existing_package.gitbranch:
install_git_package(target, existing_package.giturl, existing_package.gitbranch)
else:
install_git_package(target, existing_package.giturl, get_master_branch(existing_package.giturl))
elif existing_package.type == 'pip':
if existing_package.name == 'docassemble.webapp' and existing_package.limitation and not limitation:
existing_package.limitation = None
db.session.commit()
install_pip_package(existing_package.name, existing_package.limitation)
result = docassemble.webapp.worker.update_packages.apply_async(link=docassemble.webapp.worker.reset_server.s(run_create=should_run_create(target)))
session['taskwait'] = result.id
session['serverstarttime'] = START_TIME
return redirect(url_for('update_package_wait'))
if request.method == 'POST' and form.validate_on_submit():
# use_pip_cache = form.use_cache.data
# pipe = r.pipeline()
# pipe.set('da:updatepackage:use_pip_cache', 1 if use_pip_cache else 0)
# pipe.expire('da:updatepackage:use_pip_cache', 120)
# pipe.execute()
if 'zipfile' in request.files and request.files['zipfile'].filename:
try:
the_file = request.files['zipfile']
filename = secure_filename(the_file.filename)
file_number = get_new_file_number(None, filename)
saved_file = SavedFile(file_number, extension='zip', fix=True, should_not_exist=True)
file_set_attributes(file_number, private=False, persistent=True)
zippath = saved_file.path
the_file.save(zippath)
saved_file.save()
saved_file.finalize()
pkgname = get_package_name_from_zip(zippath)
if user_can_edit_package(pkgname=pkgname):
install_zip_package(pkgname, file_number)
result = docassemble.webapp.worker.update_packages.apply_async(link=docassemble.webapp.worker.reset_server.s(run_create=should_run_create(pkgname)))
session['taskwait'] = result.id
session['serverstarttime'] = START_TIME
return redirect(url_for('update_package_wait'))
flash(word("You do not have permission to install this package."), 'error')
except Exception as errMess:
flash("Error of type " + str(type(errMess)) + " processing upload: " + str(errMess), "error")
else:
if form.giturl.data:
giturl = form.giturl.data.strip().rstrip('/')
branch = form.gitbranch.data.strip()
if not branch:
branch = get_master_branch(giturl)
m = re.search(r'#egg=(.*)', giturl)
if m:
packagename = re.sub(r'&.*', '', m.group(1))
giturl = re.sub(r'#.*', '', giturl)
else:
packagename = re.sub(r'/*$', '', giturl)
packagename = re.sub(r'^git\+', '', packagename)
packagename = re.sub(r'#.*', '', packagename)
packagename = re.sub(r'\.git$', '', packagename)
packagename = re.sub(r'.*/', '', packagename)
packagename = re.sub(r'^docassemble-', 'docassemble.', packagename)
if user_can_edit_package(giturl=giturl) and user_can_edit_package(pkgname=packagename):
install_git_package(packagename, giturl, branch)
result = docassemble.webapp.worker.update_packages.apply_async(link=docassemble.webapp.worker.reset_server.s(run_create=should_run_create(packagename)))
session['taskwait'] = result.id
session['serverstarttime'] = START_TIME
return redirect(url_for('update_package_wait'))
flash(word("You do not have permission to install this package."), 'error')
elif form.pippackage.data:
m = re.match(r'([^>=<]+)([>=<]+.+)', form.pippackage.data)
if m:
packagename = m.group(1)
limitation = m.group(2)
else:
packagename = form.pippackage.data
limitation = None
packagename = re.sub(r'[^A-Za-z0-9\_\-\.]', '', packagename)
if user_can_edit_package(pkgname=packagename):
install_pip_package(packagename, limitation)
result = docassemble.webapp.worker.update_packages.apply_async(link=docassemble.webapp.worker.reset_server.s(run_create=should_run_create(packagename)))
session['taskwait'] = result.id
session['serverstarttime'] = START_TIME
return redirect(url_for('update_package_wait'))
flash(word("You do not have permission to install this package."), 'error')
else:
flash(word('You need to supply a Git URL, upload a file, or supply the name of a package on PyPI.'), 'error')
package_list, package_auth = get_package_info()
form.pippackage.data = None
form.giturl.data = None
extra_js = """
<script>
var default_branch = """ + json.dumps(branch if branch else 'null') + """;
function get_branches(){
var elem = $("#gitbranch");
elem.empty();
var opt = $("<option><\/option>");
opt.attr("value", "").text("Not applicable");
elem.append(opt);
var github_url = $("#giturl").val();
if (!github_url){
return;
}
$.get(""" + json.dumps(url_for('get_git_branches')) + """, { url: github_url }, "json")
.done(function(data){
//console.log(data);
if (data.success){
var n = data.result.length;
if (n > 0){
var default_to_use = default_branch;
var to_try = [default_branch, """ + json.dumps(GITHUB_BRANCH) + """, 'master', 'main'];
outer:
for (var j = 0; j < 4; j++){
for (var i = 0; i < n; i++){
if (data.result[i].name == to_try[j]){
default_to_use = to_try[j];
break outer;
}
}
}
elem.empty();
for (var i = 0; i < n; i++){
opt = $("<option></option>");
opt.attr("value", data.result[i].name).text(data.result[i].name);
if (data.result[i].name == default_to_use){
opt.prop('selected', true);
}
$(elem).append(opt);
}
}
}
});
}
$( document ).ready(function() {
get_branches();
$("#giturl").on('change', get_branches);
});
$('#zipfile').on('change', function(){
var fileName = $(this).val();
fileName = fileName.replace(/.*\\\\/, '');
fileName = fileName.replace(/.*\\//, '');
$(this).next('.custom-file-label').html(fileName);
});
</script>""" # noqa: W605
python_version = daconfig.get('python version', word('Unknown'))
version = word("Current") + ': <span class="badge bg-primary">' + str(python_version) + '</span>'
dw_status = pypi_status('docassemble.webapp')
if daconfig.get('stable version', False):
if not dw_status['error'] and 'info' in dw_status and 'releases' in dw_status['info'] and isinstance(dw_status['info']['releases'], dict):
stable_version = packaging.version.parse('1.1')
latest_version = None
for version_number, version_info in dw_status['info']['releases'].items(): # pylint: disable=unused-variable
version_number_loose = packaging.version.parse(version_number)
if version_number_loose >= stable_version:
continue
if latest_version is None or version_number_loose > packaging.version.parse(latest_version):
latest_version = version_number
if latest_version != str(python_version):
version += ' ' + word("Available") + ': <span class="badge bg-success">' + latest_version + '</span>'
else:
if not dw_status['error'] and 'info' in dw_status and 'info' in dw_status['info'] and 'version' in dw_status['info']['info'] and dw_status['info']['info']['version'] != str(python_version):
version += ' ' + word("Available") + ': <span class="badge bg-success">' + dw_status['info']['info']['version'] + '</span>'
allowed_to_upgrade = current_user.has_role('admin') or user_can_edit_package(pkgname='docassemble.webapp')
if daconfig.get('stable version', False):
limitation = '<1.1'
else:
limitation = ''
response = make_response(render_template('pages/update_package.html', version_warning=version_warning, bodyclass='daadminbody', form=form, package_list=sorted(package_list, key=lambda y: (0 if y.package.name.startswith('docassemble') else 1, y.package.name.lower())), tab_title=word('Package Management'), page_title=word('Package Management'), extra_js=Markup(extra_js), version=Markup(version), allowed_to_upgrade=allowed_to_upgrade, limitation=limitation), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
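# Look up the default branch of a remote repository, falling back to GITHUB_BRANCH if the
# repository information cannot be retrieved.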
def get_master_branch(giturl):
try:
return get_repo_info(giturl).get('default_branch', GITHUB_BRANCH)
except:
return GITHUB_BRANCH
# @app.route('/testws', methods=['GET', 'POST'])
# def test_websocket():
# script = '<script type="text/javascript" src="' + url_for('static', filename='app/socket.io.min.js') + '"></script>' + """<script type="text/javascript">
# var daSocket;
# $(document).ready(function(){
# if (location.protocol === 'http:' || document.location.protocol === 'http:'){
# daSocket = io.connect("http://" + document.domain + "/wsinterview", {path: '/ws/socket.io'});
# }
# if (location.protocol === 'https:' || document.location.protocol === 'https:'){
# daSocket = io.connect("https://" + document.domain + "/wsinterview" + location.port, {path: '/ws/socket.io'});
# }
# if (typeof daSocket !== 'undefined') {
# daSocket.on('connect', function() {
# //console.log("Connected!");
# daSocket.emit('chat_log', {data: 1});
# });
# daSocket.on('mymessage', function(arg) {
# //console.log("Received " + arg.data);
# $("#daPushResult").html(arg.data);
# });
# daSocket.on('chatmessage', function(arg) {
# console.log("Received chat message " + arg.data);
# var newDiv = document.createElement('div');
# $(newDiv).html(arg.data.message);
# $("#daCorrespondence").append(newDiv);
# });
# }
# $("#daSend").click(daSender);
# });
# </script>"""
# return render_template('pages/socketserver.html', extra_js=Markup(script)), 200
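# Build a package from files in the Playground and, depending on the request, download it as a
# zip, publish it to PyPI, push it to a GitHub repository, or install it on the server.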
@app.route('/createplaygroundpackage', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'developer'])
def create_playground_package():
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
fix_package_folder()
playground_user = get_playground_user()
current_project = get_current_project()
form = CreatePlaygroundPackageForm(request.form)
current_package = request.args.get('package', None)
if current_package is not None:
current_package = werkzeug.utils.secure_filename(current_package)
do_pypi = request.args.get('pypi', False)
do_github = request.args.get('github', False)
if app.config['DEVELOPER_CAN_INSTALL'] or current_user.has_role('admin'):
do_install = request.args.get('install', False)
else:
do_install = False
branch = request.args.get('branch', None)
if branch is not None:
branch = branch.strip()
if branch in ('', 'None'):
branch = None
new_branch = request.args.get('new_branch', None)
if new_branch is not None and new_branch not in ('', 'None'):
branch = new_branch
sanitize_arguments(do_pypi, do_github, do_install, branch, new_branch)
if app.config['USE_GITHUB']:
github_auth = r.get('da:using_github:userid:' + str(current_user.id))
else:
github_auth = None
area = {}
area['playgroundpackages'] = SavedFile(playground_user.id, fix=True, section='playgroundpackages')
if os.path.isfile(os.path.join(directory_for(area['playgroundpackages'], current_project), 'docassemble.' + current_package)):
filename = os.path.join(directory_for(area['playgroundpackages'], current_project), 'docassemble.' + current_package)
info = {}
with open(filename, 'r', encoding='utf-8') as fp:
content = fp.read()
info = standardyaml.load(content, Loader=standardyaml.FullLoader)
else:
info = {}
if do_github:
if not app.config['USE_GITHUB']:
return ('File not found', 404)
if current_package is None:
logmessage('create_playground_package: package not specified')
return ('File not found', 404)
if not github_auth:
logmessage('create_playground_package: github button called when github auth not enabled.')
return ('File not found', 404)
github_auth = github_auth.decode()
if github_auth == '1':
github_auth_info = {'shared': True, 'orgs': True}
else:
github_auth_info = json.loads(github_auth)
github_package_name = 'docassemble-' + re.sub(r'^docassemble-', r'', current_package)
# github_package_name = re.sub(r'[^A-Za-z\_\-]', '', github_package_name)
if github_package_name in ('docassemble-base', 'docassemble-webapp', 'docassemble-demo'):
return ('File not found', 404)
commit_message = request.args.get('commit_message', 'a commit')
storage = RedisCredStorage(oauth_app='github')
credentials = storage.get()
if not credentials or credentials.invalid:
state_string = random_string(16)
session['github_next'] = json.dumps({'state': state_string, 'path': 'create_playground_package', 'arguments': request.args})
flow = get_github_flow()
uri = flow.step1_get_authorize_url(state=state_string)
return redirect(uri)
http = credentials.authorize(httplib2.Http())
resp, content = http.request("https://api.github.com/user", "GET")
if int(resp['status']) == 200:
user_info = json.loads(content.decode())
github_user_name = user_info.get('login', None)
github_email = user_info.get('email', None)
else:
raise DAError("create_playground_package: could not get information about GitHub User")
if github_email is None:
resp, content = http.request("https://api.github.com/user/emails", "GET")
if int(resp['status']) == 200:
email_info = json.loads(content.decode())
for item in email_info:
if item.get('email', None) and item.get('visibility', None) != 'private':
github_email = item['email']
if github_user_name is None or github_email is None:
raise DAError("create_playground_package: login and/or email not present in user info from GitHub")
github_url_from_file = info.get('github_url', None)
found = False
found_strong = False
commit_repository = None
resp, content = http.request("https://api.github.com/repos/" + str(github_user_name) + "/" + github_package_name, "GET")
if int(resp['status']) == 200:
repo_info = json.loads(content.decode('utf-8', 'ignore'))
commit_repository = repo_info
found = True
if github_url_from_file is None or github_url_from_file in [repo_info['html_url'], repo_info['ssh_url']]:
found_strong = True
if found_strong is False and github_auth_info['shared']:
repositories = get_user_repositories(http)
for repo_info in repositories:
if repo_info['name'] != github_package_name or (commit_repository is not None and commit_repository.get('html_url', None) is not None and commit_repository['html_url'] == repo_info['html_url']) or (commit_repository is not None and commit_repository.get('ssh_url', None) is not None and commit_repository['ssh_url'] == repo_info['ssh_url']):
continue
if found and github_url_from_file is not None and github_url_from_file not in [repo_info['html_url'], repo_info['ssh_url']]:
break
commit_repository = repo_info
found = True
if github_url_from_file is None or github_url_from_file in [repo_info['html_url'], repo_info['ssh_url']]:
found_strong = True
break
if found_strong is False and github_auth_info['orgs']:
orgs_info = get_orgs_info(http)
for org_info in orgs_info:
resp, content = http.request("https://api.github.com/repos/" + str(org_info['login']) + "/" + github_package_name, "GET")
if int(resp['status']) == 200:
repo_info = json.loads(content.decode('utf-8', 'ignore'))
if found and github_url_from_file is not None and github_url_from_file not in [repo_info['html_url'], repo_info['ssh_url']]:
break
commit_repository = repo_info
break
file_list = {}
the_directory = directory_for(area['playgroundpackages'], current_project)
file_list['playgroundpackages'] = sorted([re.sub(r'^docassemble.', r'', f) for f in os.listdir(the_directory) if os.path.isfile(os.path.join(the_directory, f)) and re.search(r'^[A-Za-z0-9]', f)])
the_choices = []
for file_option in file_list['playgroundpackages']:
the_choices.append((file_option, file_option))
form.name.choices = the_choices
if request.method == 'POST':
if form.validate():
current_package = form.name.data
# flash("form validated", 'success')
else:
the_error = ''
for error in form.name.errors:
the_error += str(error)
flash("form did not validate with " + str(form.name.data) + " " + str(the_error) + " among " + str(form.name.choices), 'error')
if current_package is not None:
pkgname = re.sub(r'^docassemble-', r'', current_package)
# if not user_can_edit_package(pkgname='docassemble.' + pkgname):
# flash(word('That package name is already in use by someone else. Please change the name.'), 'error')
# current_package = None
if current_package is not None and current_package not in file_list['playgroundpackages']:
flash(word('Sorry, that package name does not exist in the playground'), 'error')
current_package = None
if current_package is not None:
# section_sec = {'playgroundtemplate': 'template', 'playgroundstatic': 'static', 'playgroundsources': 'sources', 'playgroundmodules': 'modules'}
for sec in ('playground', 'playgroundtemplate', 'playgroundstatic', 'playgroundsources', 'playgroundmodules'):
area[sec] = SavedFile(playground_user.id, fix=True, section=sec)
the_directory = directory_for(area[sec], current_project)
if os.path.isdir(the_directory):
file_list[sec] = sorted([f for f in os.listdir(the_directory) if os.path.isfile(os.path.join(the_directory, f)) and re.search(r'^[A-Za-z0-9]', f)])
else:
file_list[sec] = []
if os.path.isfile(os.path.join(directory_for(area['playgroundpackages'], current_project), 'docassemble.' + current_package)):
filename = os.path.join(directory_for(area['playgroundpackages'], current_project), 'docassemble.' + current_package)
info = {}
with open(filename, 'r', encoding='utf-8') as fp:
content = fp.read()
info = standardyaml.load(content, Loader=standardyaml.FullLoader)
for field in ('dependencies', 'interview_files', 'template_files', 'module_files', 'static_files', 'sources_files'):
if field not in info:
info[field] = []
info['dependencies'] = list(x for x in map(lambda y: re.sub(r'[\>\<\=].*', '', y), info['dependencies']) if x not in ('docassemble', 'docassemble.base', 'docassemble.webapp'))
info['modtime'] = os.path.getmtime(filename)
author_info = {}
author_info['author name and email'] = name_of_user(playground_user, include_email=True)
author_info['author name'] = name_of_user(playground_user)
author_info['author email'] = playground_user.email
author_info['first name'] = playground_user.first_name
author_info['last name'] = playground_user.last_name
author_info['id'] = playground_user.id
if do_pypi:
if current_user.pypi_username is None or current_user.pypi_password is None or current_user.pypi_username == '' or current_user.pypi_password == '':
flash("Could not publish to PyPI because username and password were not defined")
return redirect(url_for('playground_packages', project=current_project, file=current_package))
if playground_user.timezone:
the_timezone = playground_user.timezone
else:
the_timezone = get_default_timezone()
fix_ml_files(author_info['id'], current_project)
had_error, logmessages = docassemble.webapp.files.publish_package(pkgname, info, author_info, current_project=current_project)
flash(logmessages, 'danger' if had_error else 'info')
if not do_install:
time.sleep(3.0)
return redirect(url_for('playground_packages', project=current_project, file=current_package))
if do_github:
if commit_repository is not None:
resp, content = http.request("https://api.github.com/repos/" + commit_repository['full_name'] + "/commits?per_page=1", "GET")
if int(resp['status']) == 200:
commit_list = json.loads(content.decode('utf-8', 'ignore'))
if len(commit_list) == 0:
first_time = True
is_empty = True
else:
first_time = False
is_empty = False
else:
first_time = True
is_empty = True
else:
first_time = True
is_empty = False
headers = {'Content-Type': 'application/json'}
the_license = 'mit' if re.search(r'MIT License', info.get('license', '')) else None
body = json.dumps({'name': github_package_name, 'description': info.get('description', None), 'homepage': info.get('url', None), 'license_template': the_license})
resp, content = http.request("https://api.github.com/user/repos", "POST", headers=headers, body=body)
if int(resp['status']) != 201:
raise DAError("create_playground_package: unable to create GitHub repository: status " + str(resp['status']) + " " + str(content))
resp, content = http.request("https://api.github.com/repos/" + str(github_user_name) + "/" + github_package_name, "GET")
if int(resp['status']) == 200:
commit_repository = json.loads(content.decode('utf-8', 'ignore'))
else:
raise DAError("create_playground_package: GitHub repository could not be found after creating it.")
if first_time:
logmessage("Not checking for stored commit code because no target repository exists")
pulled_already = False
else:
current_commit_file = os.path.join(directory_for(area['playgroundpackages'], current_project), '.' + github_package_name)
if os.path.isfile(current_commit_file):
with open(current_commit_file, 'r', encoding='utf-8') as fp:
commit_code = fp.read()
commit_code = commit_code.strip()
resp, content = http.request("https://api.github.com/repos/" + commit_repository['full_name'] + "/commits/" + commit_code, "GET")
if int(resp['status']) == 200:
logmessage("Stored commit code is valid")
pulled_already = True
else:
logmessage("Stored commit code is invalid")
pulled_already = False
else:
logmessage("Commit file not found")
pulled_already = False
directory = tempfile.mkdtemp(prefix='SavedFile')
(private_key_file, public_key_file) = get_ssh_keys(github_email)
os.chmod(private_key_file, stat.S_IRUSR | stat.S_IWUSR)
os.chmod(public_key_file, stat.S_IRUSR | stat.S_IWUSR)
ssh_script = tempfile.NamedTemporaryFile(mode='w', prefix="datemp", suffix='.sh', delete=False, encoding='utf-8')
ssh_script.write('#!/bin/bash\n\nssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o GlobalKnownHostsFile=/dev/null -i "' + str(private_key_file) + '" $1 $2 $3 $4 $5 $6')
ssh_script.close()
os.chmod(ssh_script.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
# git_prefix = "GIT_SSH_COMMAND='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o GlobalKnownHostsFile=/dev/null -i \"" + str(private_key_file) + "\"' "
git_prefix = "GIT_SSH=" + ssh_script.name + " "
git_env = dict(os.environ, GIT_SSH=ssh_script.name)
ssh_url = commit_repository.get('ssh_url', None)
# github_url = commit_repository.get('html_url', None)
commit_branch = commit_repository.get('default_branch', GITHUB_BRANCH)
if ssh_url is None:
raise DAError("create_playground_package: could not obtain ssh_url for package")
output = ''
# if branch:
# branch_option = '-b ' + str(branch) + ' '
# else:
# branch_option = '-b ' + commit_branch + ' '
tempbranch = 'playground' + random_string(5)
packagedir = os.path.join(directory, 'docassemble-' + str(pkgname))
the_user_name = str(playground_user.first_name) + " " + str(playground_user.last_name)
if the_user_name == ' ':
the_user_name = 'Anonymous User'
if is_empty:
os.makedirs(packagedir)
output += "Doing git init\n"
try:
output += subprocess.check_output(["git", "init"], cwd=packagedir, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git init. " + output)
with open(os.path.join(packagedir, 'README.md'), 'w', encoding='utf-8') as the_file:
the_file.write("")
output += "Doing git config user.email " + json.dumps(github_email) + "\n"
try:
output += subprocess.check_output(["git", "config", "user.email", json.dumps(github_email)], cwd=packagedir, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git config user.email. " + output)
output += "Doing git config user.name " + json.dumps(the_user_name) + "\n"
try:
output += subprocess.check_output(["git", "config", "user.name", json.dumps(the_user_name)], cwd=packagedir, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git config user.name. " + output)
output += "Doing git add README.MD\n"
try:
output += subprocess.check_output(["git", "add", "README.md"], cwd=packagedir, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git add README.md. " + output)
output += "Doing git commit -m \"first commit\"\n"
try:
output += subprocess.check_output(["git", "commit", "-m", "first commit"], cwd=packagedir, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git commit -m \"first commit\". " + output)
output += "Doing git branch -M " + commit_branch + "\n"
try:
output += subprocess.check_output(["git", "branch", "-M", commit_branch], cwd=packagedir, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git branch -M " + commit_branch + ". " + output)
output += "Doing git remote add origin " + ssh_url + "\n"
try:
output += subprocess.check_output(["git", "remote", "add", "origin", ssh_url], cwd=packagedir, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git remote add origin. " + output)
output += "Doing " + git_prefix + "git push -u origin " + '"' + commit_branch + '"' + "\n"
try:
output += subprocess.check_output(["git", "push", "-u", "origin ", commit_branch], cwd=packagedir, stderr=subprocess.STDOUT, env=git_env).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running first git push. " + output)
else:
output += "Doing " + git_prefix + "git clone " + ssh_url + "\n"
try:
output += subprocess.check_output(["git", "clone", ssh_url], cwd=directory, stderr=subprocess.STDOUT, env=git_env).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git clone. " + output)
if not os.path.isdir(packagedir):
raise DAError("create_playground_package: package directory did not exist. " + output)
if pulled_already:
output += "Doing git checkout " + commit_code + "\n"
try:
output += subprocess.check_output(["git", "checkout", commit_code], cwd=packagedir, stderr=subprocess.STDOUT, env=git_env).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
# raise DAError("create_playground_package: error running git checkout. " + output)
if playground_user.timezone:
the_timezone = playground_user.timezone
else:
the_timezone = get_default_timezone()
fix_ml_files(author_info['id'], current_project)
docassemble.webapp.files.make_package_dir(pkgname, info, author_info, directory=directory, current_project=current_project)
if branch:
the_branch = branch
else:
the_branch = commit_branch
output += "Going to use " + the_branch + " as the branch.\n"
if not is_empty:
output += "Doing git config user.email " + json.dumps(github_email) + "\n"
try:
output += subprocess.check_output(["git", "config", "user.email", json.dumps(github_email)], cwd=packagedir, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git config user.email. " + output)
output += "Doing git config user.name " + json.dumps(the_user_name) + "\n"
try:
output += subprocess.check_output(["git", "config", "user.name", json.dumps(the_user_name)], cwd=packagedir, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git config user.email. " + output)
output += "Trying git checkout " + the_branch + "\n"
try:
output += subprocess.check_output(["git", "checkout", the_branch], cwd=packagedir, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError:
output += the_branch + " is a new branch\n"
# force_branch_creation = True
branch = the_branch
output += "Doing git checkout -b " + tempbranch + "\n"
try:
output += subprocess.check_output(["git", "checkout", "-b", tempbranch], cwd=packagedir, stderr=subprocess.STDOUT, env=git_env).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git checkout. " + output)
output += "Doing git add .\n"
try:
output += subprocess.check_output(["git", "add", "."], cwd=packagedir, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git add. " + output)
output += "Doing git status\n"
try:
output += subprocess.check_output(["git", "status"], cwd=packagedir, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git status. " + output)
output += "Doing git commit -m " + json.dumps(str(commit_message)) + "\n"
try:
output += subprocess.check_output(["git", "commit", "-am", str(commit_message)], cwd=packagedir, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git commit. " + output)
output += "Trying git checkout " + the_branch + "\n"
try:
output += subprocess.check_output(["git", "checkout", the_branch], cwd=packagedir, stderr=subprocess.STDOUT, env=git_env).decode()
branch_exists = True
except subprocess.CalledProcessError:
branch_exists = False
if not branch_exists:
output += "Doing git checkout -b " + the_branch + "\n"
try:
output += subprocess.check_output(["git", "checkout", "-b", the_branch], cwd=packagedir, stderr=subprocess.STDOUT, env=git_env).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git checkout -b " + the_branch + ". " + output)
else:
output += "Doing git merge --squash " + tempbranch + "\n"
try:
output += subprocess.check_output(["git", "merge", "--squash", tempbranch], cwd=packagedir, stderr=subprocess.STDOUT, env=git_env).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git merge --squash " + tempbranch + ". " + output)
output += "Doing git commit\n"
try:
output += subprocess.check_output(["git", "commit", "-am", str(commit_message)], cwd=packagedir, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git commit -am " + str(commit_message) + ". " + output)
if branch:
output += "Doing " + git_prefix + "git push --set-upstream origin " + str(branch) + "\n"
try:
output += subprocess.check_output(["git", "push", "--set-upstream", "origin", str(branch)], cwd=packagedir, stderr=subprocess.STDOUT, env=git_env).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git push. " + output)
else:
output += "Doing " + git_prefix + "git push\n"
try:
output += subprocess.check_output(["git", "push"], cwd=packagedir, stderr=subprocess.STDOUT, env=git_env).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
raise DAError("create_playground_package: error running git push. " + output)
logmessage(output)
flash(word("Pushed commit to GitHub.") + "<br>" + re.sub(r'[\n\r]+', '<br>', output), 'info')
time.sleep(3.0)
shutil.rmtree(directory)
the_args = {'project': current_project, 'pull': '1', 'github_url': ssh_url, 'show_message': '0'}
do_pypi_also = true_or_false(request.args.get('pypi_also', False))
if app.config['DEVELOPER_CAN_INSTALL'] or current_user.has_role('admin'):
do_install_also = true_or_false(request.args.get('install_also', False))
else:
do_install_also = False
if do_pypi_also or do_install_also:
the_args['file'] = current_package
if do_pypi_also:
the_args['pypi_also'] = '1'
if do_install_also:
the_args['install_also'] = '1'
if branch:
the_args['branch'] = branch
return redirect(url_for('playground_packages', **the_args))
nice_name = 'docassemble-' + str(pkgname) + '.zip'
file_number = get_new_file_number(None, nice_name)
file_set_attributes(file_number, private=False, persistent=True)
saved_file = SavedFile(file_number, extension='zip', fix=True, should_not_exist=True)
if playground_user.timezone:
the_timezone = playground_user.timezone
else:
the_timezone = get_default_timezone()
fix_ml_files(author_info['id'], current_project)
zip_file = docassemble.webapp.files.make_package_zip(pkgname, info, author_info, the_timezone, current_project=current_project)
saved_file.copy_from(zip_file.name)
saved_file.finalize()
if do_install:
install_zip_package('docassemble.' + pkgname, file_number)
result = docassemble.webapp.worker.update_packages.apply_async(link=docassemble.webapp.worker.reset_server.s(run_create=should_run_create('docassemble.' + pkgname)))
session['taskwait'] = result.id
session['serverstarttime'] = START_TIME
return redirect(url_for('update_package_wait', next=url_for('playground_packages', project=current_project, file=current_package)))
# return redirect(url_for('playground_packages', file=current_package))
response = send_file(saved_file.path, mimetype='application/zip', as_attachment=True, download_name=nice_name)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
response = make_response(render_template('pages/create_playground_package.html', current_project=current_project, version_warning=version_warning, bodyclass='daadminbody', form=form, current_package=current_package, package_names=file_list['playgroundpackages'], tab_title=word('Playground Packages'), page_title=word('Playground Packages')), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
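# Generate a skeleton docassemble extension package (setup.py, LICENSE, README, data directories,
# and a sample interview) and return it to the browser as a zip file.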
@app.route('/createpackage', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'developer'])
def create_package():
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
form = CreatePackageForm(request.form)
if request.method == 'POST' and form.validate():
pkgname = re.sub(r'^docassemble-', r'', form.name.data)
initpy = """\
__import__('pkg_resources').declare_namespace(__name__)
"""
licensetext = """\
The MIT License (MIT)
"""
licensetext += 'Copyright (c) ' + str(datetime.datetime.now().year) + ' ' + str(name_of_user(current_user)) + """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
readme = '# docassemble.' + str(pkgname) + "\n\nA docassemble extension.\n\n## Author\n\n" + name_of_user(current_user, include_email=True) + "\n"
manifestin = """\
include README.md
"""
setupcfg = """\
[metadata]
description_file = README
"""
setuppy = """\
import os
import sys
from setuptools import setup, find_packages
from fnmatch import fnmatchcase
from distutils.util import convert_path
standard_exclude = ('*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', os.path.join('.', 'build'), os.path.join('.', 'dist'), 'EGG-INFO', '*.egg-info')
def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories):
out = {}
stack = [(convert_path(where), '', package)]
while stack:
where, prefix, package = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
break
if bad_name:
continue
if os.path.isfile(os.path.join(fn, '__init__.py')):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package))
else:
stack.append((fn, prefix + name + os.path.sep, package))
else:
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out
"""
setuppy += "setup(name='docassemble." + str(pkgname) + "',\n" + """\
version='0.0.1',
description=('A docassemble extension.'),
long_description=""" + repr(readme) + """,
long_description_content_type='text/markdown',
author=""" + repr(str(name_of_user(current_user))) + """,
author_email=""" + repr(str(current_user.email)) + """,
license='MIT',
url='https://docassemble.org',
packages=find_packages(),
namespace_packages = ['docassemble'],
zip_safe = False,
package_data=find_package_data(where=os.path.join('docassemble', '""" + str(pkgname) + """', ''), package='docassemble.""" + str(pkgname) + """'),
)
"""
questionfiletext = """\
---
metadata:
title: I am the title of the application
short title: Mobile title
description: |
Insert description of question file here.
authors:
- name: """ + str(current_user.first_name) + " " + str(current_user.last_name) + """
organization: """ + str(current_user.organization) + """
revision_date: """ + formatted_current_date() + """
---
mandatory: True
code: |
user_done
---
question: |
% if user_doing_well:
Good to hear it!
% else:
Sorry to hear that!
% endif
sets: user_done
buttons:
- Exit: exit
- Restart: restart
---
question: Are you doing well today?
yesno: user_doing_well
...
"""
templatereadme = """\
# Template directory
If you want to use templates for document assembly, put them in this directory.
"""
staticreadme = """\
# Static file directory
If you want to make files available in the web app, put them in
this directory.
"""
sourcesreadme = """\
# Sources directory
This directory is used to store word translation files,
machine learning training files, and other source files.
"""
objectfile = """\
# This is a Python module in which you can write your own Python code,
# if you want to.
#
# Include this module in a docassemble interview by writing:
# ---
# modules:
# - docassemble.""" + pkgname + """.objects
# ---
#
# Then you can do things like:
# ---
# objects:
# - favorite_fruit: Fruit
# ---
# mandatory: True
# question: |
# When I eat some ${ favorite_fruit.name },
# I think, "${ favorite_fruit.eat() }"
# ---
# question: What is the best fruit?
# fields:
# - Fruit Name: favorite_fruit.name
# ---
from docassemble.base.util import DAObject
class Fruit(DAObject):
def eat(self):
return "Yum, that " + self.name + " was good!"
"""
directory = tempfile.mkdtemp(prefix='SavedFile')
packagedir = os.path.join(directory, 'docassemble-' + str(pkgname))
questionsdir = os.path.join(packagedir, 'docassemble', str(pkgname), 'data', 'questions')
templatesdir = os.path.join(packagedir, 'docassemble', str(pkgname), 'data', 'templates')
staticdir = os.path.join(packagedir, 'docassemble', str(pkgname), 'data', 'static')
sourcesdir = os.path.join(packagedir, 'docassemble', str(pkgname), 'data', 'sources')
os.makedirs(questionsdir, exist_ok=True)
os.makedirs(templatesdir, exist_ok=True)
os.makedirs(staticdir, exist_ok=True)
os.makedirs(sourcesdir, exist_ok=True)
with open(os.path.join(packagedir, 'README.md'), 'w', encoding='utf-8') as the_file:
the_file.write(readme)
with open(os.path.join(packagedir, 'LICENSE'), 'w', encoding='utf-8') as the_file:
the_file.write(licensetext)
with open(os.path.join(packagedir, 'setup.py'), 'w', encoding='utf-8') as the_file:
the_file.write(setuppy)
with open(os.path.join(packagedir, 'setup.cfg'), 'w', encoding='utf-8') as the_file:
the_file.write(setupcfg)
with open(os.path.join(packagedir, 'MANIFEST.in'), 'w', encoding='utf-8') as the_file:
the_file.write(manifestin)
with open(os.path.join(packagedir, 'docassemble', '__init__.py'), 'w', encoding='utf-8') as the_file:
the_file.write(initpy)
with open(os.path.join(packagedir, 'docassemble', pkgname, '__init__.py'), 'w', encoding='utf-8') as the_file:
the_file.write('__version__ = "0.0.1"')
with open(os.path.join(packagedir, 'docassemble', pkgname, 'objects.py'), 'w', encoding='utf-8') as the_file:
the_file.write(objectfile)
with open(os.path.join(templatesdir, 'README.md'), 'w', encoding='utf-8') as the_file:
the_file.write(templatereadme)
with open(os.path.join(staticdir, 'README.md'), 'w', encoding='utf-8') as the_file:
the_file.write(staticreadme)
with open(os.path.join(sourcesdir, 'README.md'), 'w', encoding='utf-8') as the_file:
the_file.write(sourcesreadme)
with open(os.path.join(questionsdir, 'questions.yml'), 'w', encoding='utf-8') as the_file:
the_file.write(questionfiletext)
nice_name = 'docassemble-' + str(pkgname) + '.zip'
file_number = get_new_file_number(None, nice_name)
file_set_attributes(file_number, private=False, persistent=True)
saved_file = SavedFile(file_number, extension='zip', fix=True, should_not_exist=True)
zf = zipfile.ZipFile(saved_file.path, compression=zipfile.ZIP_DEFLATED, mode='w')
trimlength = len(directory) + 1
if current_user.timezone:
the_timezone = zoneinfo.ZoneInfo(current_user.timezone)
else:
the_timezone = zoneinfo.ZoneInfo(get_default_timezone())
for root, dirs, files in os.walk(packagedir): # pylint: disable=unused-variable
for the_file in files:
thefilename = os.path.join(root, the_file)
info = zipfile.ZipInfo(thefilename[trimlength:])
info.date_time = datetime.datetime.utcfromtimestamp(os.path.getmtime(thefilename)).replace(tzinfo=datetime.timezone.utc).astimezone(the_timezone).timetuple()
info.compress_type = zipfile.ZIP_DEFLATED
info.external_attr = 0o644 << 16
with open(thefilename, 'rb') as fp:
zf.writestr(info, fp.read())
# zf.write(thefilename, thefilename[trimlength:])
zf.close()
saved_file.save()
saved_file.finalize()
shutil.rmtree(directory)
response = send_file(saved_file.path, mimetype='application/zip', as_attachment=True, download_name=nice_name)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
flash(word("Package created"), 'success')
return response
response = make_response(render_template('pages/create_package.html', version_warning=version_warning, bodyclass='daadminbody', form=form, tab_title=word('Create Package'), page_title=word('Create Package')), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
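# Display a page that triggers a server restart through restart_ajax and then refreshes to the next URL.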
@app.route('/restart', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'developer'])
def restart_page():
setup_translation()
if not app.config['ALLOW_RESTARTING']:
return ('File not found', 404)
script = """
<script>
function daRestartCallback(data){
//console.log("Restart result: " + data.success);
}
function daRestart(){
$.ajax({
type: 'POST',
url: """ + json.dumps(url_for('restart_ajax')) + """,
data: 'csrf_token=""" + generate_csrf() + """&action=restart',
success: daRestartCallback,
dataType: 'json'
});
return true;
}
$( document ).ready(function() {
//console.log("restarting");
setTimeout(daRestart, 100);
});
</script>"""
next_url = app.user_manager.make_safe_url_function(request.args.get('next', url_for('interview_list', post_restart=1)))
extra_meta = """\n <meta http-equiv="refresh" content="8;URL='""" + next_url + """'">"""
response = make_response(render_template('pages/restart.html', version_warning=None, bodyclass='daadminbody', extra_meta=Markup(extra_meta), extra_js=Markup(script), tab_title=word('Restarting'), page_title=word('Restarting')), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
@app.route('/playground_poll', methods=['GET'])
@login_required
@roles_required(['admin', 'developer'])
def playground_poll():
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
script = """
<script>
function daPollCallback(data){
if (data.success){
window.location.replace(data.url);
}
}
function daPoll(){
$.ajax({
type: 'GET',
url: """ + json.dumps(url_for('playground_redirect_poll')) + """,
success: daPollCallback,
dataType: 'json'
});
return true;
}
$( document ).ready(function() {
//console.log("polling");
setInterval(daPoll, 4000);
});
</script>"""
response = make_response(render_template('pages/playground_poll.html', version_warning=None, bodyclass='daadminbody', extra_js=Markup(script), tab_title=word('Waiting'), page_title=word('Waiting')), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
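# Builds the oauth2client web-server flow for the Google Drive integration,
# using the 'googledrive' entry in the OAUTH_CREDENTIALS configuration.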
def get_gd_flow():
app_credentials = current_app.config['OAUTH_CREDENTIALS'].get('googledrive', {})
client_id = app_credentials.get('id', None)
client_secret = app_credentials.get('secret', None)
if client_id is None or client_secret is None:
raise DAError('Google Drive is not configured.')
flow = oauth2client.client.OAuth2WebServerFlow(
client_id=client_id,
client_secret=client_secret,
scope='https://www.googleapis.com/auth/drive',
redirect_uri=url_for('google_drive_callback', _external=True),
access_type='offline',
prompt='consent')
return flow
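# Returns the user whose Playground is being accessed: the user stored in the
# session under 'playground_user' if present, otherwise the current user.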
def get_playground_user():
if 'playground_user' in session:
user = db.session.execute(select(UserModel).filter_by(id=session['playground_user'])).scalar()
return user
return current_user
def set_playground_user(user_id):
if user_id == current_user.id:
if 'playground_user' in session:
del session['playground_user']
else:
session['playground_user'] = user_id
def get_gd_folder():
key = 'da:googledrive:mapping:userid:' + str(current_user.id)
folder = r.get(key)
if folder is not None:
return folder.decode()
return folder
def set_gd_folder(folder):
key = 'da:googledrive:mapping:userid:' + str(current_user.id)
if folder is None:
r.delete(key)
else:
set_od_folder(None)
r.set(key, folder)
def get_od_flow():
app_credentials = current_app.config['OAUTH_CREDENTIALS'].get('onedrive', {})
client_id = app_credentials.get('id', None)
client_secret = app_credentials.get('secret', None)
if client_id is None or client_secret is None:
raise DAError('OneDrive is not configured.')
flow = oauth2client.client.OAuth2WebServerFlow(
client_id=client_id,
client_secret=client_secret,
scope='files.readwrite.all user.read offline_access',
redirect_uri=url_for('onedrive_callback', _external=True),
response_type='code',
auth_uri='https://login.microsoftonline.com/common/oauth2/v2.0/authorize',
token_uri='https://login.microsoftonline.com/common/oauth2/v2.0/token')
return flow
def get_od_folder():
key = 'da:onedrive:mapping:userid:' + str(current_user.id)
folder = r.get(key)
if folder is not None:
return folder.decode()
return folder
def set_od_folder(folder):
key = 'da:onedrive:mapping:userid:' + str(current_user.id)
if folder is None:
r.delete(key)
else:
set_gd_folder(None)
r.set(key, folder)
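# Storage backend for oauth2client that keeps each user's OAuth credentials in
# Redis, keyed by integration name ('googledrive' or 'onedrive') and user ID,
# with a short-lived (5-second) lock key for the Storage locking protocol.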
class RedisCredStorage(oauth2client.client.Storage):
def __init__(self, oauth_app='googledrive'):
self.key = 'da:' + oauth_app + ':userid:' + str(current_user.id)
self.lockkey = 'da:' + oauth_app + ':lock:userid:' + str(current_user.id)
super().__init__()
def acquire_lock(self):
pipe = r.pipeline()
pipe.set(self.lockkey, 1)
pipe.expire(self.lockkey, 5)
pipe.execute()
def release_lock(self):
r.delete(self.lockkey)
def locked_get(self):
json_creds = r.get(self.key)
creds = None
if json_creds is not None:
json_creds = json_creds.decode()
try:
creds = oauth2client.client.Credentials.new_from_json(json_creds)
except:
logmessage("RedisCredStorage: could not read credentials from " + str(json_creds))
return creds
def locked_put(self, credentials):
r.set(self.key, credentials.to_json())
def locked_delete(self):
r.delete(self.key)
@app.route('/google_drive_callback', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'developer'])
def google_drive_callback():
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
for key in request.args:
logmessage("google_drive_callback: argument " + str(key) + ": " + str(request.args[key]))
if 'code' in request.args:
flow = get_gd_flow()
credentials = flow.step2_exchange(request.args['code'])
storage = RedisCredStorage(oauth_app='googledrive')
storage.put(credentials)
error = None
elif 'error' in request.args:
error = request.args['error']
else:
error = word("could not connect to Google Drive")
if error:
flash(word('There was a Google Drive error: ' + error), 'error')
return redirect(url_for('user.profile'))
flash(word('Connected to Google Drive'), 'success')
return redirect(url_for('google_drive_page'))
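# Renames a Playground project's folder within each section folder (static,
# templates, questions, modules, sources, packages) of the user's linked
# Google Drive folder. Returns False if Google Drive is not linked or the
# stored credentials are missing or invalid.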
def rename_gd_project(old_project, new_project):
the_folder = get_gd_folder()
if the_folder is None:
logmessage('rename_gd_project: folder not configured')
return False
storage = RedisCredStorage(oauth_app='googledrive')
credentials = storage.get()
if not credentials or credentials.invalid:
logmessage('rename_gd_project: credentials missing or expired')
return False
http = credentials.authorize(httplib2.Http())
service = apiclient.discovery.build('drive', 'v3', http=http)
response = service.files().get(fileId=the_folder, fields="mimeType, id, name, trashed").execute()
trashed = response.get('trashed', False)
the_mime_type = response.get('mimeType', None)
if trashed is True or the_mime_type != "application/vnd.google-apps.folder":
logmessage('rename_gd_project: folder did not exist')
return False
for section in ['static', 'templates', 'questions', 'modules', 'sources', 'packages']:
logmessage("rename_gd_project: section is " + section)
subdir = None
page_token = None
while True:
response = service.files().list(spaces="drive", pageToken=page_token, fields="nextPageToken, files(id, name)", q="mimeType='application/vnd.google-apps.folder' and trashed=false and name='" + str(section) + "' and '" + str(the_folder) + "' in parents").execute()
for the_file in response.get('files', []):
if 'id' in the_file:
subdir = the_file['id']
break
page_token = response.get('nextPageToken', None)
if subdir is not None or page_token is None:
break
if subdir is None:
logmessage('rename_gd_project: section ' + str(section) + ' could not be found')
continue
subsubdir = None
page_token = None
while True:
response = service.files().list(spaces="drive", pageToken=page_token, fields="nextPageToken, files(id, name)", q="mimeType='application/vnd.google-apps.folder' and trashed=false and name='" + str(old_project) + "' and '" + str(subdir) + "' in parents").execute()
for the_file in response.get('files', []):
if 'id' in the_file:
subsubdir = the_file['id']
break
page_token = response.get('nextPageToken', None)
if subsubdir is not None or page_token is None:
break
if subsubdir is None:
logmessage('rename_gd_project: project ' + str(old_project) + ' could not be found in ' + str(section))
continue
metadata = {'name': new_project}
service.files().update(fileId=subsubdir, body=metadata, fields='name').execute()
logmessage('rename_gd_project: folder ' + str(old_project) + ' renamed in section ' + str(section))
return True
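# Deletes a Playground project's folder from each section folder of the user's
# linked Google Drive folder.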
def trash_gd_project(old_project):
the_folder = get_gd_folder()
if the_folder is None:
logmessage('trash_gd_project: folder not configured')
return False
storage = RedisCredStorage(oauth_app='googledrive')
credentials = storage.get()
if not credentials or credentials.invalid:
logmessage('trash_gd_project: credentials missing or expired')
return False
http = credentials.authorize(httplib2.Http())
service = apiclient.discovery.build('drive', 'v3', http=http)
response = service.files().get(fileId=the_folder, fields="mimeType, id, name, trashed").execute()
trashed = response.get('trashed', False)
the_mime_type = response.get('mimeType', None)
if trashed is True or the_mime_type != "application/vnd.google-apps.folder":
logmessage('trash_gd_project: folder did not exist')
return False
for section in ['static', 'templates', 'questions', 'modules', 'sources', 'packages']:
subdir = None
page_token = None
while True:
response = service.files().list(spaces="drive", pageToken=page_token, fields="nextPageToken, files(id, name)", q="mimeType='application/vnd.google-apps.folder' and trashed=false and name='" + str(section) + "' and '" + str(the_folder) + "' in parents").execute()
for the_file in response.get('files', []):
if 'id' in the_file:
subdir = the_file['id']
break
page_token = response.get('nextPageToken', None)
if subdir is not None or page_token is None:
break
if subdir is None:
logmessage('trash_gd_project: section ' + str(section) + ' could not be found')
continue
subsubdir = None
page_token = None
while True:
            response = service.files().list(spaces="drive", pageToken=page_token, fields="nextPageToken, files(id, name)", q="mimeType='application/vnd.google-apps.folder' and trashed=false and name='" + str(old_project) + "' and '" + str(subdir) + "' in parents").execute()
for the_file in response.get('files', []):
if 'id' in the_file:
subsubdir = the_file['id']
break
page_token = response.get('nextPageToken', None)
if subsubdir is not None or page_token is None:
break
if subsubdir is None:
logmessage('trash_gd_project: project ' + str(old_project) + ' could not be found in ' + str(section))
continue
service.files().delete(fileId=subsubdir).execute()
logmessage('trash_gd_project: project ' + str(old_project) + ' deleted in section ' + str(section))
return True
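# Permanently deletes a single file from the given section (and, for
# non-default projects, the project subfolder) of the linked Google Drive folder.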
def trash_gd_file(section, filename, current_project):
if section == 'template':
section = 'templates'
the_folder = get_gd_folder()
if the_folder is None:
logmessage('trash_gd_file: folder not configured')
return False
storage = RedisCredStorage(oauth_app='googledrive')
credentials = storage.get()
if not credentials or credentials.invalid:
logmessage('trash_gd_file: credentials missing or expired')
return False
http = credentials.authorize(httplib2.Http())
service = apiclient.discovery.build('drive', 'v3', http=http)
response = service.files().get(fileId=the_folder, fields="mimeType, id, name, trashed").execute()
trashed = response.get('trashed', False)
the_mime_type = response.get('mimeType', None)
if trashed is True or the_mime_type != "application/vnd.google-apps.folder":
logmessage('trash_gd_file: folder did not exist')
return False
subdir = None
response = service.files().list(spaces="drive", fields="nextPageToken, files(id, name)", q="mimeType='application/vnd.google-apps.folder' and trashed=false and name='" + str(section) + "' and '" + str(the_folder) + "' in parents").execute()
for the_file in response.get('files', []):
if 'id' in the_file:
subdir = the_file['id']
break
if subdir is None:
logmessage('trash_gd_file: section ' + str(section) + ' could not be found')
return False
if current_project != 'default':
response = service.files().list(spaces="drive", fields="nextPageToken, files(id, name)", q="mimeType='application/vnd.google-apps.folder' and trashed=false and name='" + str(current_project) + "' and '" + str(subdir) + "' in parents").execute()
subdir = None
for the_file in response.get('files', []):
if 'id' in the_file:
subdir = the_file['id']
break
if subdir is None:
logmessage('trash_gd_file: project ' + str(current_project) + ' could not be found')
return False
id_of_filename = None
response = service.files().list(spaces="drive", fields="nextPageToken, files(id, name)", q="mimeType!='application/vnd.google-apps.folder' and name='" + str(filename) + "' and '" + str(subdir) + "' in parents").execute()
for the_file in response.get('files', []):
if 'id' in the_file:
id_of_filename = the_file['id']
break
if id_of_filename is None:
logmessage('trash_gd_file: file ' + str(filename) + ' could not be found in ' + str(section))
return False
service.files().delete(fileId=id_of_filename).execute()
logmessage('trash_gd_file: file ' + str(filename) + ' permanently deleted from ' + str(section))
return True
@app.route('/sync_with_google_drive', methods=['GET'])
@login_required
@roles_required(['admin', 'developer'])
def sync_with_google_drive():
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
current_project = get_current_project()
the_next = app.user_manager.make_safe_url_function(request.args.get('next', url_for('playground_page', project=current_project)))
auto_next = request.args.get('auto_next', None)
if app.config['USE_GOOGLE_DRIVE'] is False:
flash(word("Google Drive is not configured"), "error")
return redirect(the_next)
storage = RedisCredStorage(oauth_app='googledrive')
credentials = storage.get()
if not credentials or credentials.invalid:
flow = get_gd_flow()
uri = flow.step1_get_authorize_url()
return redirect(uri)
task = docassemble.webapp.worker.sync_with_google_drive.delay(current_user.id)
session['taskwait'] = task.id
if auto_next:
return redirect(url_for('gd_sync_wait', auto_next=auto_next))
return redirect(url_for('gd_sync_wait', next=the_next))
@app.route('/gdsyncing', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'developer'])
def gd_sync_wait():
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
current_project = get_current_project()
next_url = app.user_manager.make_safe_url_function(request.args.get('next', url_for('playground_page', project=current_project)))
    auto_next_url = request.args.get('auto_next', None)
    if auto_next_url is not None:
        auto_next_url = app.user_manager.make_safe_url_function(auto_next_url)
my_csrf = generate_csrf()
script = """
<script>
var daCheckinInterval = null;
var autoNext = """ + json.dumps(auto_next_url) + """;
var resultsAreIn = false;
function daRestartCallback(data){
//console.log("Restart result: " + data.success);
}
function daRestart(){
$.ajax({
type: 'POST',
url: """ + json.dumps(url_for('restart_ajax')) + """,
data: 'csrf_token=""" + my_csrf + """&action=restart',
success: daRestartCallback,
dataType: 'json'
});
return true;
}
function daSyncCallback(data){
if (data.success){
if (data.status == 'finished'){
resultsAreIn = true;
if (data.ok){
if (autoNext != null){
window.location.replace(autoNext);
}
$("#notification").html(""" + json.dumps(word("The synchronization was successful.")) + """);
$("#notification").removeClass("alert-info");
$("#notification").removeClass("alert-danger");
$("#notification").addClass("alert-success");
}
else{
$("#notification").html(""" + json.dumps(word("The synchronization was not successful.")) + """);
$("#notification").removeClass("alert-info");
$("#notification").removeClass("alert-success");
$("#notification").addClass("alert-danger");
}
$("#resultsContainer").show();
$("#resultsArea").html(data.summary);
if (daCheckinInterval != null){
clearInterval(daCheckinInterval);
}
if (data.restart){
daRestart();
}
}
else if (data.status == 'failed' && !resultsAreIn){
resultsAreIn = true;
$("#notification").html(""" + json.dumps(word("There was an error with the synchronization.")) + """);
$("#notification").removeClass("alert-info");
$("#notification").removeClass("alert-success");
$("#notification").addClass("alert-danger");
$("#resultsContainer").show();
if (data.error_message){
$("#resultsArea").html(data.error_message);
}
else if (data.summary){
$("#resultsArea").html(data.summary);
}
if (daCheckinInterval != null){
clearInterval(daCheckinInterval);
}
}
}
else if (!resultsAreIn){
$("#notification").html(""" + json.dumps(word("There was an error.")) + """);
$("#notification").removeClass("alert-info");
$("#notification").removeClass("alert-success");
$("#notification").addClass("alert-danger");
if (daCheckinInterval != null){
clearInterval(daCheckinInterval);
}
}
}
function daSync(){
if (resultsAreIn){
return;
}
$.ajax({
type: 'POST',
url: """ + json.dumps(url_for('checkin_sync_with_google_drive')) + """,
data: 'csrf_token=""" + my_csrf + """',
success: daSyncCallback,
dataType: 'json'
});
return true;
}
$( document ).ready(function() {
//console.log("page loaded");
daCheckinInterval = setInterval(daSync, 2000);
});
</script>"""
response = make_response(render_template('pages/gd_sync_wait.html', version_warning=None, bodyclass='daadminbody', extra_js=Markup(script), tab_title=word('Synchronizing'), page_title=word('Synchronizing'), next_page=next_url), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
@app.route('/onedrive_callback', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'developer'])
def onedrive_callback():
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
for key in request.args:
logmessage("onedrive_callback: argument " + str(key) + ": " + str(request.args[key]))
if 'code' in request.args:
flow = get_od_flow()
credentials = flow.step2_exchange(request.args['code'])
storage = RedisCredStorage(oauth_app='onedrive')
storage.put(credentials)
error = None
elif 'error' in request.args:
error = request.args['error']
if 'error_description' in request.args:
error += '; ' + str(request.args['error_description'])
else:
error = word("could not connect to OneDrive")
if error:
flash(word('There was a OneDrive error: ' + error), 'error')
return redirect(url_for('user.profile'))
flash(word('Connected to OneDrive'), 'success')
return redirect(url_for('onedrive_page'))
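# OneDrive counterpart of rename_gd_project: renames a Playground project's
# folder within each section folder of the linked OneDrive folder, using the
# Microsoft Graph API over the authorized httplib2 client.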
def rename_od_project(old_project, new_project):
the_folder = get_od_folder()
if the_folder is None:
logmessage('rename_od_project: folder not configured')
return False
storage = RedisCredStorage(oauth_app='onedrive')
credentials = storage.get()
if not credentials or credentials.invalid:
logmessage('rename_od_project: credentials missing or expired')
return False
http = credentials.authorize(httplib2.Http())
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + urllibquote(the_folder), "GET")
if int(resp['status']) != 200:
trashed = True
else:
info = json.loads(content.decode())
# logmessage("Found " + repr(info))
trashed = bool(info.get('deleted', None))
if trashed is True or 'folder' not in info:
logmessage('rename_od_project: folder did not exist')
return False
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + urllibquote(the_folder) + "/children?$select=id,name,deleted,folder", "GET")
subdir = {}
for section in ['static', 'templates', 'questions', 'modules', 'sources', 'packages']:
subdir[section] = None
while True:
if int(resp['status']) != 200:
logmessage('rename_od_project: could not obtain subfolders')
return False
info = json.loads(content.decode())
for item in info.get('value', []):
if item.get('deleted', None) or 'folder' not in item:
continue
if item['name'] in subdir:
subdir[item['name']] = item['id']
if "@odata.nextLink" not in info:
break
resp, content = http.request(info["@odata.nextLink"], "GET")
for section, the_subdir in subdir.items():
if the_subdir is None:
logmessage('rename_od_project: could not obtain subfolder for ' + str(section))
continue
subsubdir = None
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + str(the_subdir) + "/children?$select=id,name,deleted,folder", "GET")
while True:
if int(resp['status']) != 200:
logmessage('rename_od_project: could not obtain contents of subfolder for ' + str(section))
break
info = json.loads(content.decode())
for item in info.get('value', []):
if item.get('deleted', None) or 'folder' not in item:
continue
if item['name'] == old_project:
subsubdir = item['id']
break
if subsubdir is not None or "@odata.nextLink" not in info:
break
resp, content = http.request(info["@odata.nextLink"], "GET")
if subsubdir is None:
logmessage("rename_od_project: subdirectory " + str(old_project) + " not found")
else:
headers = {'Content-Type': 'application/json'}
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + str(subsubdir), "PATCH", headers=headers, body=json.dumps({'name': new_project}))
if int(resp['status']) != 200:
logmessage('rename_od_project: could not rename folder ' + str(old_project) + " in " + str(section) + " because " + repr(content))
continue
            logmessage('rename_od_project: project ' + str(old_project) + ' renamed in section ' + str(section))
return True
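# Deletes a Playground project's folder, including its contents, from each
# section folder of the linked OneDrive folder.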
def trash_od_project(old_project):
the_folder = get_od_folder()
if the_folder is None:
logmessage('trash_od_project: folder not configured')
return False
storage = RedisCredStorage(oauth_app='onedrive')
credentials = storage.get()
if not credentials or credentials.invalid:
logmessage('trash_od_project: credentials missing or expired')
return False
http = credentials.authorize(httplib2.Http())
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + urllibquote(the_folder), "GET")
if int(resp['status']) != 200:
trashed = True
else:
info = json.loads(content.decode())
# logmessage("Found " + repr(info))
trashed = bool(info.get('deleted', None))
if trashed is True or 'folder' not in info:
logmessage('trash_od_project: folder did not exist')
return False
subdir = {}
for section in ['static', 'templates', 'questions', 'modules', 'sources', 'packages']:
subdir[section] = None
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + urllibquote(the_folder) + "/children?$select=id,name,deleted,folder", "GET")
while True:
if int(resp['status']) != 200:
logmessage('trash_od_project: could not obtain subfolders')
return False
info = json.loads(content.decode())
for item in info['value']:
if item.get('deleted', None) or 'folder' not in item:
continue
if item['name'] in subdir:
subdir[item['name']] = item['id']
if "@odata.nextLink" not in info:
break
resp, content = http.request(info["@odata.nextLink"], "GET")
for section, the_subdir in subdir.items():
if the_subdir is None:
logmessage('trash_od_project: could not obtain subfolder for ' + str(section))
continue
subsubdir = None
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + str(the_subdir) + "/children?$select=id,name,deleted,folder", "GET")
while True:
if int(resp['status']) != 200:
logmessage('trash_od_project: could not obtain contents of subfolder for ' + str(section))
break
info = json.loads(content.decode())
for item in info['value']:
if item.get('deleted', None) or 'folder' not in item:
continue
if item['name'] == old_project:
subsubdir = item['id']
break
if subsubdir is not None or "@odata.nextLink" not in info:
break
resp, content = http.request(info["@odata.nextLink"], "GET")
if subsubdir is None:
logmessage("Could not find subdirectory " + old_project + " in section " + str(section))
else:
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + urllibquote(subsubdir) + "/children?$select=id", "GET")
to_delete = []
while True:
if int(resp['status']) != 200:
logmessage('trash_od_project: could not obtain contents of project folder')
return False
info = json.loads(content.decode())
for item in info.get('value', []):
if 'id' in item:
to_delete.append(item['id'])
if "@odata.nextLink" not in info:
break
resp, content = http.request(info["@odata.nextLink"], "GET")
for item_id in to_delete:
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + str(item_id), "DELETE")
if int(resp['status']) != 204:
logmessage('trash_od_project: could not delete file ' + str(item_id) + ". Result: " + repr(content))
return False
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + str(subsubdir), "DELETE")
if int(resp['status']) != 204:
logmessage('trash_od_project: could not delete project ' + str(old_project) + ". Result: " + repr(content))
return False
logmessage('trash_od_project: project ' + str(old_project) + ' trashed in section ' + str(section))
return True
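# Deletes a single file from the given section (and, for non-default projects,
# the project subfolder) of the linked OneDrive folder.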
def trash_od_file(section, filename, current_project):
if section == 'template':
section = 'templates'
the_folder = get_od_folder()
if the_folder is None:
logmessage('trash_od_file: folder not configured')
return False
storage = RedisCredStorage(oauth_app='onedrive')
credentials = storage.get()
if not credentials or credentials.invalid:
logmessage('trash_od_file: credentials missing or expired')
return False
http = credentials.authorize(httplib2.Http())
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + urllibquote(the_folder), "GET")
if int(resp['status']) != 200:
trashed = True
else:
info = json.loads(content.decode())
# logmessage("Found " + repr(info))
trashed = bool(info.get('deleted', None))
if trashed is True or 'folder' not in info:
logmessage('trash_od_file: folder did not exist')
return False
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + urllibquote(the_folder) + "/children?$select=id,name,deleted,folder", "GET")
subdir = None
while True:
if int(resp['status']) != 200:
logmessage('trash_od_file: could not obtain subfolders')
return False
info = json.loads(content.decode())
# logmessage("Found " + repr(info))
for item in info['value']:
if item.get('deleted', None) or 'folder' not in item:
continue
if item['name'] == section:
subdir = item['id']
break
if subdir is not None or "@odata.nextLink" not in info:
break
resp, content = http.request(info["@odata.nextLink"], "GET")
if subdir is None:
logmessage('trash_od_file: could not obtain subfolder')
return False
if current_project != 'default':
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + str(subdir) + "/children?$select=id,name,deleted,folder", "GET")
subdir = None
while True:
if int(resp['status']) != 200:
logmessage('trash_od_file: could not obtain subfolders to find project')
return False
info = json.loads(content.decode())
for item in info['value']:
if item.get('deleted', None) or 'folder' not in item:
continue
if item['name'] == current_project:
subdir = item['id']
break
if subdir is not None or "@odata.nextLink" not in info:
break
resp, content = http.request(info["@odata.nextLink"], "GET")
if subdir is None:
logmessage('trash_od_file: could not obtain subfolder')
return False
id_of_filename = None
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + str(subdir) + "/children?$select=id,name,deleted,folder", "GET")
while True:
if int(resp['status']) != 200:
logmessage('trash_od_file: could not obtain contents of subfolder')
return False
info = json.loads(content.decode())
# logmessage("Found " + repr(info))
for item in info['value']:
            if item.get('deleted', None) or 'folder' in item:
                continue
if item['name'] == filename:
id_of_filename = item['id']
break
if id_of_filename is not None or "@odata.nextLink" not in info:
break
resp, content = http.request(info["@odata.nextLink"], "GET")
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + str(id_of_filename), "DELETE")
if int(resp['status']) != 204:
logmessage('trash_od_file: could not delete ')
return False
logmessage('trash_od_file: file ' + str(filename) + ' trashed from ' + str(section))
return True
@app.route('/sync_with_onedrive', methods=['GET'])
@login_required
@roles_required(['admin', 'developer'])
def sync_with_onedrive():
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
# current_project = get_current_project()
the_next = app.user_manager.make_safe_url_function(request.args.get('next', url_for('playground_page', project=get_current_project())))
auto_next = request.args.get('auto_next', None)
if app.config['USE_ONEDRIVE'] is False:
flash(word("OneDrive is not configured"), "error")
return redirect(the_next)
storage = RedisCredStorage(oauth_app='onedrive')
credentials = storage.get()
if not credentials or credentials.invalid:
        flow = get_od_flow()
uri = flow.step1_get_authorize_url()
return redirect(uri)
task = docassemble.webapp.worker.sync_with_onedrive.delay(current_user.id)
session['taskwait'] = task.id
if auto_next:
return redirect(url_for('od_sync_wait', auto_next=auto_next))
return redirect(url_for('od_sync_wait', next=the_next))
@app.route('/odsyncing', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'developer'])
def od_sync_wait():
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
current_project = get_current_project()
next_url = app.user_manager.make_safe_url_function(request.args.get('next', url_for('playground_page', project=current_project)))
auto_next_url = request.args.get('auto_next', None)
if auto_next_url is not None:
auto_next_url = app.user_manager.make_safe_url_function(auto_next_url)
my_csrf = generate_csrf()
script = """
<script>
var daCheckinInterval = null;
var autoNext = """ + json.dumps(auto_next_url) + """;
var resultsAreIn = false;
function daRestartCallback(data){
if (autoNext != null){
setTimeout(function(){
window.location.replace(autoNext);
}, 1000);
}
//console.log("Restart result: " + data.success);
}
function daRestart(){
$.ajax({
type: 'POST',
url: """ + json.dumps(url_for('restart_ajax')) + """,
data: 'csrf_token=""" + my_csrf + """&action=restart',
success: daRestartCallback,
dataType: 'json'
});
return true;
}
function daSyncCallback(data){
if (data.success){
if (data.status == 'finished'){
resultsAreIn = true;
if (data.ok){
$("#notification").html(""" + json.dumps(word("The synchronization was successful.")) + """);
$("#notification").removeClass("alert-info");
$("#notification").removeClass("alert-danger");
$("#notification").addClass("alert-success");
}
else{
$("#notification").html(""" + json.dumps(word("The synchronization was not successful.")) + """);
$("#notification").removeClass("alert-info");
$("#notification").removeClass("alert-success");
$("#notification").addClass("alert-danger");
}
$("#resultsContainer").show();
$("#resultsArea").html(data.summary);
if (daCheckinInterval != null){
clearInterval(daCheckinInterval);
}
if (data.restart){
daRestart();
}
else{
if (autoNext != null){
window.location.replace(autoNext);
}
}
}
else if (data.status == 'failed' && !resultsAreIn){
resultsAreIn = true;
$("#notification").html(""" + json.dumps(word("There was an error with the synchronization.")) + """);
$("#notification").removeClass("alert-info");
$("#notification").removeClass("alert-success");
$("#notification").addClass("alert-danger");
$("#resultsContainer").show();
if (data.error_message){
$("#resultsArea").html(data.error_message);
}
else if (data.summary){
$("#resultsArea").html(data.summary);
}
if (daCheckinInterval != null){
clearInterval(daCheckinInterval);
}
}
}
else if (!resultsAreIn){
$("#notification").html(""" + json.dumps(word("There was an error.")) + """);
$("#notification").removeClass("alert-info");
$("#notification").removeClass("alert-success");
$("#notification").addClass("alert-danger");
if (daCheckinInterval != null){
clearInterval(daCheckinInterval);
}
}
}
function daSync(){
if (resultsAreIn){
return;
}
$.ajax({
type: 'POST',
url: """ + json.dumps(url_for('checkin_sync_with_onedrive')) + """,
data: 'csrf_token=""" + my_csrf + """',
success: daSyncCallback,
dataType: 'json'
});
return true;
}
$( document ).ready(function() {
//console.log("page loaded");
daCheckinInterval = setInterval(daSync, 2000);
});
</script>"""
response = make_response(render_template('pages/od_sync_wait.html', version_warning=None, bodyclass='daadminbody', extra_js=Markup(script), tab_title=word('Synchronizing'), page_title=word('Synchronizing'), next_page=next_url), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
# @app.route('/old_sync_with_google_drive', methods=['GET', 'POST'])
# @login_required
# @roles_required(['admin', 'developer'])
# def old_sync_with_google_drive():
# next = request.args.get('next', url_for('playground_page'))
# extra_meta = """\n <meta http-equiv="refresh" content="1; url='""" + url_for('do_sync_with_google_drive', next=next) + """'">"""
# return render_template('pages/google_sync.html', version_warning=None, bodyclass='daadminbody', extra_meta=Markup(extra_meta), tab_title=word('Synchronizing'), page_title=word('Synchronizing'))
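# Converts newline sequences in a synchronization summary to <br> tags so the
# text can be rendered as HTML in the results area of the sync-wait pages.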
def add_br(text):
return re.sub(r'[\n\r]+', "<br>", text)
@app.route('/checkin_sync_with_google_drive', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'developer'])
def checkin_sync_with_google_drive():
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
setup_translation()
if 'taskwait' not in session:
return jsonify(success=False)
result = docassemble.webapp.worker.workerapp.AsyncResult(id=session['taskwait'])
if result.ready():
if 'taskwait' in session:
del session['taskwait']
the_result = result.get()
if isinstance(the_result, ReturnValue):
if the_result.ok:
logmessage("checkin_sync_with_google_drive: success")
return jsonify(success=True, status='finished', ok=the_result.ok, summary=add_br(the_result.summary), restart=the_result.restart)
if hasattr(the_result, 'error'):
logmessage("checkin_sync_with_google_drive: failed return value is " + str(the_result.error))
return jsonify(success=True, status='failed', error_message=str(the_result.error), restart=False)
if hasattr(the_result, 'summary'):
return jsonify(success=True, status='failed', summary=add_br(the_result.summary), restart=False)
return jsonify(success=True, status='failed', error_message=str("No error message. Result is " + str(the_result)), restart=False)
logmessage("checkin_sync_with_google_drive: failed return value is a " + str(type(the_result)))
logmessage("checkin_sync_with_google_drive: failed return value is " + str(the_result))
return jsonify(success=True, status='failed', error_message=noquote(str(the_result)), restart=False)
return jsonify(success=True, status='waiting', restart=False)
@app.route('/checkin_sync_with_onedrive', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'developer'])
def checkin_sync_with_onedrive():
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
setup_translation()
if 'taskwait' not in session:
return jsonify(success=False)
result = docassemble.webapp.worker.workerapp.AsyncResult(id=session['taskwait'])
if result.ready():
if 'taskwait' in session:
del session['taskwait']
the_result = result.get()
if isinstance(the_result, ReturnValue):
if the_result.ok:
logmessage("checkin_sync_with_onedrive: success")
return jsonify(success=True, status='finished', ok=the_result.ok, summary=add_br(the_result.summary), restart=the_result.restart)
if hasattr(the_result, 'error'):
logmessage("checkin_sync_with_onedrive: failed return value is " + str(the_result.error))
return jsonify(success=True, status='failed', error_message=str(the_result.error), restart=False)
if hasattr(the_result, 'summary'):
return jsonify(success=True, status='failed', summary=add_br(the_result.summary), restart=False)
return jsonify(success=True, status='failed', error_message=str("No error message. Result is " + str(the_result)), restart=False)
logmessage("checkin_sync_with_onedrive: failed return value is a " + str(type(the_result)))
logmessage("checkin_sync_with_onedrive: failed return value is " + str(the_result))
return jsonify(success=True, status='failed', error_message=str(the_result), restart=False)
return jsonify(success=True, status='waiting', restart=False)
@app.route('/google_drive', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'developer'])
def google_drive_page():
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
if app.config['USE_GOOGLE_DRIVE'] is False:
flash(word("Google Drive is not configured"), "error")
return redirect(url_for('user.profile'))
form = GoogleDriveForm(request.form)
if request.method == 'POST' and form.cancel.data:
return redirect(url_for('user.profile'))
storage = RedisCredStorage(oauth_app='googledrive')
credentials = storage.get()
if not credentials or credentials.invalid:
flow = get_gd_flow()
uri = flow.step1_get_authorize_url()
# logmessage("google_drive_page: uri is " + str(uri))
return redirect(uri)
http = credentials.authorize(httplib2.Http())
try:
service = apiclient.discovery.build('drive', 'v3', http=http)
except:
set_gd_folder(None)
storage.release_lock()
storage.locked_delete()
flow = get_gd_flow()
uri = flow.step1_get_authorize_url()
return redirect(uri)
items = [{'id': '', 'name': word('-- Do not link --')}]
# items = []
page_token = None
while True:
try:
response = service.files().list(spaces="drive", pageToken=page_token, fields="nextPageToken, files(id, name, mimeType, shortcutDetails)", q="trashed=false and 'root' in parents and (mimeType = 'application/vnd.google-apps.folder' or (mimeType = 'application/vnd.google-apps.shortcut' and shortcutDetails.targetMimeType = 'application/vnd.google-apps.folder'))").execute()
except Exception as err:
logmessage("google_drive_page: " + err.__class__.__name__ + ": " + str(err))
set_gd_folder(None)
storage.release_lock()
storage.locked_delete()
flash(word('There was a Google Drive error: ' + err.__class__.__name__ + ": " + str(err)), 'error')
return redirect(url_for('google_drive_page'))
for the_file in response.get('files', []):
if the_file['mimeType'] == 'application/vnd.google-apps.shortcut':
the_file['id'] = the_file['shortcutDetails']['targetId']
items.append(the_file)
page_token = response.get('nextPageToken', None)
if page_token is None:
break
item_ids = [x['id'] for x in items if x['id'] != '']
if request.method == 'POST' and form.submit.data:
if form.folder.data == '':
set_gd_folder(None)
storage.locked_delete()
flash(word("Google Drive is not linked."), 'success')
elif form.folder.data in (-1, '-1'):
file_metadata = {
'name': 'docassemble',
'mimeType': 'application/vnd.google-apps.folder'
}
new_file = service.files().create(body=file_metadata,
fields='id').execute()
new_folder = new_file.get('id', None)
set_gd_folder(new_folder)
gd_fix_subdirs(service, new_folder)
if new_folder is not None:
active_folder = {'id': new_folder, 'name': 'docassemble'}
items.append(active_folder)
item_ids.append(new_folder)
flash(word("Your Playground is connected to your Google Drive."), 'success')
elif form.folder.data in item_ids:
flash(word("Your Playground is connected to your Google Drive."), 'success')
set_gd_folder(form.folder.data)
gd_fix_subdirs(service, form.folder.data)
else:
flash(word("The supplied folder " + str(form.folder.data) + "could not be found."), 'error')
set_gd_folder(None)
return redirect(url_for('user.profile'))
the_folder = get_gd_folder()
active_folder = None
if the_folder is not None:
try:
            response = service.files().get(fileId=the_folder, fields="mimeType, trashed, name").execute()
except:
set_gd_folder(None)
return redirect(url_for('google_drive_page'))
the_mime_type = response.get('mimeType', None)
trashed = response.get('trashed', False)
if trashed is False and the_mime_type == "application/vnd.google-apps.folder":
active_folder = {'id': the_folder, 'name': response.get('name', 'no name')}
if the_folder not in item_ids:
items.append(active_folder)
else:
set_gd_folder(None)
the_folder = None
flash(word("The mapping was reset because the folder does not appear to exist anymore."), 'error')
if the_folder is None:
for item in items:
if item['name'].lower() == 'docassemble':
active_folder = item
break
if active_folder is None:
active_folder = {'id': -1, 'name': 'docassemble'}
items.append(active_folder)
item_ids.append(-1)
if the_folder is not None:
gd_fix_subdirs(service, the_folder)
if the_folder is None:
the_folder = ''
    description = word('Select the folder from your Google Drive that you want to be synchronized with the Playground.')
if app.config['USE_ONEDRIVE'] is True and get_od_folder() is not None:
description += ' ' + word('Note that if you connect to a Google Drive folder, you will disable your connection to OneDrive.')
response = make_response(render_template('pages/googledrive.html', version_warning=version_warning, description=description, bodyclass='daadminbody', header=word('Google Drive'), tab_title=word('Google Drive'), items=items, the_folder=the_folder, page_title=word('Google Drive'), form=form), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
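# Ensures that the six Playground section subfolders (questions, static,
# sources, templates, modules, packages) exist inside the given Google Drive
# folder, creating any that are missing.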
def gd_fix_subdirs(service, the_folder):
subdirs = []
page_token = None
while True:
response = service.files().list(spaces="drive", pageToken=page_token, fields="nextPageToken, files(id, name)", q="mimeType='application/vnd.google-apps.folder' and trashed=false and '" + str(the_folder) + "' in parents").execute()
for the_file in response.get('files', []):
subdirs.append(the_file)
page_token = response.get('nextPageToken', None)
if page_token is None:
break
todo = set(['questions', 'static', 'sources', 'templates', 'modules', 'packages'])
done = set(x['name'] for x in subdirs if x['name'] in todo)
for key in todo - done:
file_metadata = {
'name': key,
'mimeType': 'application/vnd.google-apps.folder',
'parents': [the_folder]
}
service.files().create(body=file_metadata,
fields='id').execute()
@app.route('/onedrive', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'developer'])
def onedrive_page():
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
if app.config['USE_ONEDRIVE'] is False:
flash(word("OneDrive is not configured"), "error")
return redirect(url_for('user.profile'))
form = OneDriveForm(request.form)
if request.method == 'POST' and form.cancel.data:
return redirect(url_for('user.profile'))
storage = RedisCredStorage(oauth_app='onedrive')
credentials = storage.get()
if not credentials or credentials.invalid:
flow = get_od_flow()
uri = flow.step1_get_authorize_url()
logmessage("one_drive_page: uri is " + str(uri))
return redirect(uri)
items = [{'id': '', 'name': word('-- Do not link --')}]
http = credentials.authorize(httplib2.Http())
try:
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/root/children?$select=id,name,deleted,folder", "GET")
except:
set_od_folder(None)
storage.release_lock()
storage.locked_delete()
flow = get_od_flow()
uri = flow.step1_get_authorize_url()
logmessage("one_drive_page: uri is " + str(uri))
return redirect(uri)
while True:
if int(resp['status']) != 200:
flash("Error: could not connect to OneDrive; response code was " + str(resp['status']) + ". " + content.decode(), 'danger')
return redirect(url_for('user.profile'))
info = json.loads(content.decode())
for item in info['value']:
if 'folder' not in item:
continue
items.append({'id': item['id'], 'name': item['name']})
if "@odata.nextLink" not in info:
break
resp, content = http.request(info["@odata.nextLink"], "GET")
item_ids = [x['id'] for x in items if x['id'] != '']
if request.method == 'POST' and form.submit.data:
if form.folder.data == '':
set_od_folder(None)
storage.locked_delete()
flash(word("OneDrive is not linked."), 'success')
elif form.folder.data in (-1, '-1'):
headers = {'Content-Type': 'application/json'}
info = {}
info['name'] = 'docassemble'
info['folder'] = {}
info["@microsoft.graph.conflictBehavior"] = "fail"
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/root/children", "POST", headers=headers, body=json.dumps(info))
if int(resp['status']) == 201:
new_item = json.loads(content.decode())
set_od_folder(new_item['id'])
od_fix_subdirs(http, new_item['id'])
flash(word("Your Playground is connected to your OneDrive."), 'success')
else:
flash(word("Could not create folder. " + content.decode()), 'danger')
elif form.folder.data in item_ids:
set_od_folder(form.folder.data)
od_fix_subdirs(http, form.folder.data)
flash(word("Your Playground is connected to your OneDrive."), 'success')
else:
flash(word("The supplied folder " + str(form.folder.data) + "could not be found."), 'danger')
set_od_folder(None)
return redirect(url_for('user.profile'))
the_folder = get_od_folder()
active_folder = None
if the_folder is not None:
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + str(the_folder), "GET")
if int(resp['status']) != 200:
set_od_folder(None)
flash(word("The previously selected OneDrive folder does not exist.") + " " + str(the_folder) + " " + str(content) + " status: " + repr(resp['status']), "info")
return redirect(url_for('onedrive_page'))
info = json.loads(content.decode())
logmessage("Found " + repr(info))
if info.get('deleted', None):
set_od_folder(None)
flash(word("The previously selected OneDrive folder was deleted."), "info")
return redirect(url_for('onedrive_page'))
active_folder = {'id': the_folder, 'name': info.get('name', 'no name')}
if the_folder not in item_ids:
items.append(active_folder)
item_ids.append(the_folder)
if the_folder is None:
for item in items:
if item['name'].lower() == 'docassemble':
active_folder = item
break
if active_folder is None:
active_folder = {'id': -1, 'name': 'docassemble'}
items.append(active_folder)
item_ids.append(-1)
if the_folder is not None:
od_fix_subdirs(http, the_folder)
if the_folder is None:
the_folder = ''
description = word('Select the folder from your OneDrive that you want to be synchronized with the Playground.')
if app.config['USE_GOOGLE_DRIVE'] is True and get_gd_folder() is not None:
description += ' ' + word('Note that if you connect to a OneDrive folder, you will disable your connection to Google Drive.')
response = make_response(render_template('pages/onedrive.html', version_warning=version_warning, bodyclass='daadminbody', header=word('OneDrive'), tab_title=word('OneDrive'), items=items, the_folder=the_folder, page_title=word('OneDrive'), form=form, description=Markup(description)), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
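# Ensures that the six Playground section subfolders exist inside the given
# OneDrive folder, creating any that are missing via the Microsoft Graph API.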
def od_fix_subdirs(http, the_folder):
subdirs = set()
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + str(the_folder) + "/children?$select=id,name,deleted,folder", "GET")
while True:
if int(resp['status']) != 200:
raise DAError("od_fix_subdirs: could not get contents of folder")
info = json.loads(content.decode())
logmessage("Found " + repr(info))
for item in info['value']:
if 'folder' in item:
subdirs.add(item['name'])
if "@odata.nextLink" not in info:
break
resp, content = http.request(info["@odata.nextLink"], "GET")
todo = set(['questions', 'static', 'sources', 'templates', 'modules', 'packages'])
for folder_name in (todo - subdirs):
headers = {'Content-Type': 'application/json'}
data = {}
data['name'] = folder_name
data['folder'] = {}
data["@microsoft.graph.conflictBehavior"] = "rename"
resp, content = http.request("https://graph.microsoft.com/v1.0/me/drive/items/" + str(the_folder) + "/children", "POST", headers=headers, body=json.dumps(data))
if int(resp['status']) != 201:
raise DAError("od_fix_subdirs: could not create subfolder " + folder_name + ' in ' + str(the_folder) + '. ' + content.decode() + ' status: ' + str(resp['status']))
@app.route('/config', methods=['GET', 'POST'])
@login_required
@roles_required(['admin'])
def config_page():
setup_translation()
if not app.config['ALLOW_CONFIGURATION_EDITING']:
return ('File not found', 404)
form = ConfigForm(request.form)
content = None
ok = True
if request.method == 'POST':
if form.submit.data and form.config_content.data:
try:
standardyaml.load(form.config_content.data, Loader=standardyaml.FullLoader)
yml = ruamel.yaml.YAML()
yml.allow_duplicate_keys = False
yml.load(form.config_content.data)
except Exception as errMess:
ok = False
content = form.config_content.data
errMess = word("Configuration not updated. There was a syntax error in the configuration YAML.") + '<pre>' + str(errMess) + '</pre>'
flash(str(errMess), 'error')
logmessage('config_page: ' + str(errMess))
if ok:
if cloud is not None:
key = cloud.get_key('config.yml')
key.set_contents_from_string(form.config_content.data)
with open(daconfig['config file'], 'w', encoding='utf-8') as fp:
fp.write(form.config_content.data)
flash(word('The configuration file was saved.'), 'success')
# session['restart'] = 1
return redirect(url_for('restart_page'))
elif form.cancel.data:
flash(word('Configuration not updated.'), 'info')
return redirect(url_for('interview_list'))
else:
flash(word('Configuration not updated. There was an error.'), 'error')
return redirect(url_for('interview_list'))
if ok:
with open(daconfig['config file'], 'r', encoding='utf-8') as fp:
content = fp.read()
if content is None:
return ('File not found', 404)
(disk_total, disk_used, disk_free) = shutil.disk_usage(daconfig['config file']) # pylint: disable=unused-variable
if keymap:
kbOpt = 'keyMap: "' + keymap + '", cursorBlinkRate: 0, '
kbLoad = '<script src="' + url_for('static', filename="codemirror/keymap/" + keymap + ".js", v=da_version) + '"></script>\n '
else:
kbOpt = ''
kbLoad = ''
python_version = daconfig.get('python version', word('Unknown'))
system_version = daconfig.get('system version', word('Unknown'))
if python_version == system_version:
version = word("Version") + " " + str(python_version)
else:
version = word("Version") + " " + str(python_version) + ' (Python); ' + str(system_version) + ' (' + word('system') + ')'
response = make_response(render_template('pages/config.html', underlying_python_version=re.sub(r' \(.*', '', sys.version, flags=re.DOTALL), free_disk_space=humanize.naturalsize(disk_free), config_errors=docassemble.base.config.errors, config_messages=docassemble.base.config.env_messages, version_warning=version_warning, version=version, bodyclass='daadminbody', tab_title=word('Configuration'), page_title=word('Configuration'), extra_css=Markup('\n <link href="' + url_for('static', filename='codemirror/lib/codemirror.css', v=da_version) + '" rel="stylesheet">\n <link href="' + url_for('static', filename='codemirror/addon/search/matchesonscrollbar.css', v=da_version) + '" rel="stylesheet">\n <link href="' + url_for('static', filename='codemirror/addon/display/fullscreen.css', v=da_version) + '" rel="stylesheet">\n <link href="' + url_for('static', filename='codemirror/addon/scroll/simplescrollbars.css', v=da_version) + '" rel="stylesheet">\n <link href="' + url_for('static', filename='app/pygments.min.css', v=da_version) + '" rel="stylesheet">'), extra_js=Markup('\n <script src="' + url_for('static', filename="codemirror/lib/codemirror.js", v=da_version) + '"></script>\n <script src="' + url_for('static', filename="codemirror/addon/search/searchcursor.js", v=da_version) + '"></script>\n <script src="' + url_for('static', filename="codemirror/addon/scroll/annotatescrollbar.js", v=da_version) + '"></script>\n <script src="' + url_for('static', filename="codemirror/addon/search/matchesonscrollbar.js", v=da_version) + '"></script>\n <script src="' + url_for('static', filename="codemirror/addon/display/fullscreen.js", v=da_version) + '"></script>\n <script src="' + url_for('static', filename="codemirror/addon/edit/matchbrackets.js", v=da_version) + '"></script>\n <script src="' + url_for('static', filename="codemirror/mode/yaml/yaml.js", v=da_version) + '"></script>\n ' + kbLoad + '<script>\n daTextArea=document.getElementById("config_content");\n daTextArea.value = JSON.parse(atob("' + safeid(json.dumps(content)) + '"));\n var daCodeMirror = CodeMirror.fromTextArea(daTextArea, {mode: "yaml", ' + kbOpt + 'tabSize: 2, tabindex: 70, autofocus: true, lineNumbers: true, matchBrackets: true});\n daCodeMirror.setOption("extraKeys", { Tab: function(cm) { var spaces = Array(cm.getOption("indentUnit") + 1).join(" "); cm.replaceSelection(spaces); }, "F11": function(cm) { cm.setOption("fullScreen", !cm.getOption("fullScreen")); }, "Esc": function(cm) { if (cm.getOption("fullScreen")) cm.setOption("fullScreen", false); }});\n daCodeMirror.setOption("coverGutterNextToScrollbar", true);\n daCodeMirror.setOption("viewportMargin", Infinity);\n </script>'), form=form), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
@app.route('/view_source', methods=['GET'])
@login_required
@roles_required(['developer', 'admin'])
def view_source():
setup_translation()
source_path = request.args.get('i', None)
playground_user = get_playground_user()
current_project = get_current_project()
if source_path is None:
logmessage("view_source: no source path")
return ('File not found', 404)
try:
if re.search(r':', source_path):
source = docassemble.base.parse.interview_source_from_string(source_path)
else:
try:
source = docassemble.base.parse.interview_source_from_string('docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + source_path)
except:
source = docassemble.base.parse.interview_source_from_string(source_path)
except Exception as errmess:
logmessage("view_source: no source: " + str(errmess))
return ('File not found', 404)
header = source_path
response = make_response(render_template('pages/view_source.html', version_warning=None, bodyclass='daadminbody', tab_title="Source", page_title="Source", extra_css=Markup('\n <link href="' + url_for('static', filename='app/pygments.min.css') + '" rel="stylesheet">'), header=header, contents=Markup(highlight(source.content, YamlLexer(), HtmlFormatter(cssclass="bg-light highlight dahighlight dafullheight")))), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
@app.route('/playgroundstatic/<current_project>/<userid>/<path:filename>', methods=['GET'])
def playground_static(current_project, userid, filename):
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
# filename = re.sub(r'[^A-Za-z0-9\-\_\. ]', '', filename)
try:
attach = int(request.args.get('attach', 0))
except:
attach = 0
area = SavedFile(userid, fix=True, section='playgroundstatic')
the_directory = directory_for(area, current_project)
filename = filename.replace('/', os.path.sep)
path = os.path.join(the_directory, filename)
if os.path.join('..', '') in path:
return ('File not found', 404)
if os.path.isfile(path):
filename = os.path.basename(filename)
extension, mimetype = get_ext_and_mimetype(filename) # pylint: disable=unused-variable
response = send_file(path, mimetype=str(mimetype), download_name=filename)
if attach:
response.headers['Content-Disposition'] = 'attachment; filename=' + json.dumps(filename)
return response
return ('File not found', 404)
@app.route('/playgroundmodules/<current_project>/<userid>/<path:filename>', methods=['GET'])
@login_required
@roles_required(['developer', 'admin'])
def playground_modules(current_project, userid, filename):
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
setup_translation()
# filename = re.sub(r'[^A-Za-z0-9\-\_\. ]', '', filename)
try:
attach = int(request.args.get('attach', 0))
except:
attach = 0
area = SavedFile(userid, fix=True, section='playgroundmodules')
the_directory = directory_for(area, current_project)
filename = filename.replace('/', os.path.sep)
path = os.path.join(the_directory, filename)
if os.path.join('..', '') in path:
return ('File not found', 404)
if os.path.isfile(path):
filename = os.path.basename(filename)
extension, mimetype = get_ext_and_mimetype(filename) # pylint: disable=unused-variable
response = send_file(path, mimetype=str(mimetype), download_name=filename)
if attach:
response.headers['Content-Disposition'] = 'attachment; filename=' + json.dumps(filename)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
return ('File not found', 404)
@app.route('/playgroundsources/<current_project>/<userid>/<path:filename>', methods=['GET'])
@login_required
@roles_required(['developer', 'admin'])
def playground_sources(current_project, userid, filename):
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
setup_translation()
try:
attach = int(request.args.get('attach', 0))
except:
attach = 0
# filename = re.sub(r'[^A-Za-z0-9\-\_\(\)\. ]', '', filename)
filename = filename.replace('/', os.path.sep)
area = SavedFile(userid, fix=True, section='playgroundsources')
write_ml_source(area, userid, current_project, filename)
the_directory = directory_for(area, current_project)
path = os.path.join(the_directory, filename)
if os.path.join('..', '') in path:
return ('File not found', 404)
if os.path.isfile(path):
filename = os.path.basename(filename)
extension, mimetype = get_ext_and_mimetype(filename) # pylint: disable=unused-variable
response = send_file(path, mimetype=str(mimetype), download_name=filename)
if attach:
response.headers['Content-Disposition'] = 'attachment; filename=' + json.dumps(filename)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
return ('File not found', 404)
@app.route('/playgroundtemplate/<current_project>/<userid>/<path:filename>', methods=['GET'])
@login_required
@roles_required(['developer', 'admin'])
def playground_template(current_project, userid, filename):
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
# filename = re.sub(r'[^A-Za-z0-9\-\_\. ]', '', filename)
setup_translation()
try:
attach = int(request.args.get('attach', 0))
except:
attach = 0
area = SavedFile(userid, fix=True, section='playgroundtemplate')
the_directory = directory_for(area, current_project)
filename = filename.replace('/', os.path.sep)
path = os.path.join(the_directory, filename)
if os.path.join('..', '') in path:
return ('File not found', 404)
if os.path.isfile(path):
filename = os.path.basename(filename)
extension, mimetype = get_ext_and_mimetype(filename) # pylint: disable=unused-variable
response = send_file(path, mimetype=str(mimetype), download_name=filename)
if attach:
response.headers['Content-Disposition'] = 'attachment; filename=' + json.dumps(filename)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
return ('File not found', 404)
@app.route('/playgrounddownload/<current_project>/<userid>/<path:filename>', methods=['GET'])
@login_required
@roles_required(['developer', 'admin'])
def playground_download(current_project, userid, filename):
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
setup_translation()
# filename = re.sub(r'[^A-Za-z0-9\-\_\. ]', '', filename)
area = SavedFile(userid, fix=True, section='playground')
the_directory = directory_for(area, current_project)
filename = filename.replace('/', os.path.sep)
path = os.path.join(the_directory, filename)
if os.path.join('..', '') in path:
return ('File not found', 404)
if os.path.isfile(path):
filename = os.path.basename(filename)
extension, mimetype = get_ext_and_mimetype(path) # pylint: disable=unused-variable
response = send_file(path, mimetype=str(mimetype))
response.headers['Content-type'] = 'text/plain; charset=utf-8'
response.headers['Content-Disposition'] = 'attachment; filename=' + json.dumps(filename)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
return ('File not found', 404)
@app.route('/officefunctionfile', methods=['GET', 'POST'])
@cross_origin(origins='*', methods=['GET', 'POST', 'HEAD'], automatic_options=True)
def playground_office_functionfile():
g.embed = True
docassemble.base.functions.set_language(DEFAULT_LANGUAGE)
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
functionform = FunctionFileForm(request.form)
response = make_response(render_template('pages/officefunctionfile.html', current_project=get_current_project(), page_title=word("Docassemble Playground"), tab_title=word("Playground"), parent_origin=daconfig.get('office addin url', daconfig.get('url root', get_base_url())), form=functionform), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
@app.route('/officetaskpane', methods=['GET', 'POST'])
@cross_origin(origins='*', methods=['GET', 'POST', 'HEAD'], automatic_options=True)
def playground_office_taskpane():
g.embed = True
docassemble.base.functions.set_language(DEFAULT_LANGUAGE)
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
defaultDaServer = url_for('rootindex', _external=True)
response = make_response(render_template('pages/officeouter.html', page_title=word("Docassemble Playground"), tab_title=word("Playground"), defaultDaServer=defaultDaServer, extra_js=Markup("\n <script>" + indent_by(variables_js(office_mode=True), 9) + " </script>")), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
@app.route('/officeaddin', methods=['GET', 'POST'])
@cross_origin(origins='*', methods=['GET', 'POST', 'HEAD'], automatic_options=True)
@login_required
@roles_required(['developer', 'admin'])
def playground_office_addin():
g.embed = True
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
playground_user = get_playground_user()
current_project = get_current_project()
if request.args.get('fetchfiles', None):
playground = SavedFile(playground_user.id, fix=True, section='playground')
the_directory = directory_for(playground, current_project)
files = sorted([f for f in os.listdir(the_directory) if os.path.isfile(os.path.join(the_directory, f)) and re.search(r'^[A-Za-z0-9]', f)])
return jsonify(success=True, files=files)
pg_var_file = request.args.get('pgvars', None)
# logmessage("playground_office_addin: YAML file is " + str(pg_var_file))
use_html = request.args.get('html', False)
uploadform = AddinUploadForm(request.form)
if request.method == 'POST':
area = SavedFile(playground_user.id, fix=True, section='playgroundtemplate')
filename = secure_filename(uploadform.filename.data)
filename = re.sub(r'[^A-Za-z0-9\-\_\. ]+', '_', filename)
if filename == '':
return jsonify({'success': False})
content = str(uploadform.content.data)
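        # the uploaded content arrives with a prefix that ends in a comma (data-URL style); skip past it so only the base64 payload is decoded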
start_index = 0
char_index = 0
for char in content:
char_index += 1
if char == ',':
start_index = char_index
break
area.write_content(codecs.decode(bytearray(content[start_index:], encoding='utf-8'), 'base64'), filename=filename, binary=True)
area.finalize()
if use_html:
if pg_var_file is None:
pg_var_file = ''
else:
if pg_var_file is None or pg_var_file == '':
return jsonify({'success': True, 'variables_json': [], 'vocab_list': []})
if pg_var_file is not None:
playground = SavedFile(playground_user.id, fix=True, section='playground')
the_directory = directory_for(playground, current_project)
files = sorted([f for f in os.listdir(the_directory) if os.path.isfile(os.path.join(the_directory, f)) and re.search(r'^[A-Za-z0-9]', f)])
if pg_var_file in files:
# logmessage("playground_office_addin: file " + str(pg_var_file) + " was found")
interview_source = docassemble.base.parse.interview_source_from_string('docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + pg_var_file)
interview_source.set_testing(True)
else:
# logmessage("playground_office_addin: file " + str(pg_var_file) + " was not found")
if pg_var_file == '' and current_project == 'default':
pg_var_file = 'test.yml'
content = "modules:\n - docassemble.base.util\n---\nmandatory: True\nquestion: hi"
interview_source = docassemble.base.parse.InterviewSourceString(content=content, directory=the_directory, package="docassemble.playground" + str(playground_user.id) + project_name(current_project), path="docassemble.playground" + str(playground_user.id) + project_name(current_project) + ":" + pg_var_file, testing=True)
interview = interview_source.get_interview()
ensure_ml_file_exists(interview, pg_var_file, current_project)
the_current_info = current_info(yaml='docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + pg_var_file, req=request, action=None, device_id=request.cookies.get('ds', None))
docassemble.base.functions.this_thread.current_info = the_current_info
interview_status = docassemble.base.parse.InterviewStatus(current_info=the_current_info)
if use_html:
variables_html, vocab_list, vocab_dict = get_vars_in_use(interview, interview_status, debug_mode=False, show_messages=False, show_jinja_help=True, current_project=current_project)
return jsonify({'success': True, 'current_project': current_project, 'variables_html': variables_html, 'vocab_list': list(vocab_list), 'vocab_dict': vocab_dict})
variables_json, vocab_list, vocab_dict = get_vars_in_use(interview, interview_status, debug_mode=False, return_json=True, current_project=current_project)
return jsonify({'success': True, 'variables_json': variables_json, 'vocab_list': list(vocab_list)})
parent_origin = re.sub(r'^(https?://[^/]+)/.*', r'\1', daconfig.get('office addin url', get_base_url()))
response = make_response(render_template('pages/officeaddin.html', current_project=current_project, page_title=word("Docassemble Office Add-in"), tab_title=word("Office Add-in"), parent_origin=parent_origin, form=uploadform), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
def cloud_trash(use_gd, use_od, section, the_file, current_project):
if use_gd:
try:
trash_gd_file(section, the_file, current_project)
except Exception as the_err:
logmessage("cloud_trash: unable to delete file on Google Drive. " + str(the_err))
elif use_od:
try:
trash_od_file(section, the_file, current_project)
except Exception as the_err:
try:
logmessage("cloud_trash: unable to delete file on OneDrive. " + str(the_err))
except:
logmessage("cloud_trash: unable to delete file on OneDrive.")
@app.route('/playgroundfiles', methods=['GET', 'POST'])
@login_required
@roles_required(['developer', 'admin'])
def playground_files():
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
setup_translation()
playground_user = get_playground_user()
current_project = get_current_project()
use_gd = bool(app.config['USE_GOOGLE_DRIVE'] is True and get_gd_folder() is not None)
use_od = bool(use_gd is False and app.config['USE_ONEDRIVE'] is True and get_od_folder() is not None)
form = PlaygroundFilesForm(request.form)
formtwo = PlaygroundFilesEditForm(request.form)
is_ajax = bool('ajax' in request.form and int(request.form['ajax']))
section = werkzeug.utils.secure_filename(request.args.get('section', 'template'))
the_file = secure_filename_spaces_ok(request.args.get('file', ''))
scroll = False
if the_file != '':
scroll = True
if request.method == 'GET':
is_new = true_or_false(request.args.get('new', False))
else:
is_new = False
if is_new:
scroll = True
the_file = ''
if request.method == 'POST':
form_validated = bool((form.purpose.data == 'upload' and form.validate()) or (formtwo.purpose.data == 'edit' and formtwo.validate()))
if form_validated:
if form.section.data:
section = form.section.data
if formtwo.file_name.data:
the_file = formtwo.file_name.data
the_file = re.sub(r'[^A-Za-z0-9\-\_\. ]+', '_', the_file)
else:
form_validated = None
if section not in ("template", "static", "sources", "modules", "packages"):
section = "template"
pgarea = SavedFile(playground_user.id, fix=True, section='playground')
the_directory = directory_for(pgarea, current_project)
if current_project != 'default' and not os.path.isdir(the_directory):
current_project = set_current_project('default')
the_directory = directory_for(pgarea, current_project)
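    # work out which interview file in this project should be treated as the "active file" for the variables pulldown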
pulldown_files = sorted([f for f in os.listdir(the_directory) if os.path.isfile(os.path.join(the_directory, f)) and re.search(r'^[A-Za-z0-9]', f)])
current_variable_file = get_variable_file(current_project)
if current_variable_file is not None:
if current_variable_file in pulldown_files:
active_file = current_variable_file
else:
delete_variable_file(current_project)
active_file = None
else:
active_file = None
if active_file is None:
current_file = get_current_file(current_project, 'questions')
if current_file in pulldown_files:
active_file = current_file
elif len(pulldown_files) > 0:
delete_current_file(current_project, 'questions')
active_file = pulldown_files[0]
else:
delete_current_file(current_project, 'questions')
area = SavedFile(playground_user.id, fix=True, section='playground' + section)
the_directory = directory_for(area, current_project)
if request.args.get('delete', False):
# argument = re.sub(r'[^A-Za-z0-9\-\_\. ]', '', request.args.get('delete'))
argument = request.args.get('delete')
if argument:
the_directory = directory_for(area, current_project)
the_file = add_project(argument, current_project)
filename = os.path.join(the_directory, argument)
if os.path.exists(filename):
os.remove(filename)
area.finalize()
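                # bump the Redis version counters so cached copies of interviews from this Playground project are reloaded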
for key in r.keys('da:interviewsource:docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':*'):
r.incr(key.decode())
cloud_trash(use_gd, use_od, section, argument, current_project)
flash(word("Deleted file: ") + the_file, "success")
for key in r.keys('da:interviewsource:docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':*'):
r.incr(key.decode())
return redirect(url_for('playground_files', section=section, project=current_project))
flash(word("File not found: ") + argument, "error")
if request.args.get('convert', False):
# argument = re.sub(r'[^A-Za-z0-9\-\_\. ]', '', request.args.get('convert'))
argument = request.args.get('convert')
if argument:
filename = os.path.join(the_directory, argument)
if os.path.exists(filename):
to_file = os.path.splitext(argument)[0] + '.md'
to_path = os.path.join(the_directory, to_file)
if not os.path.exists(to_path):
extension, mimetype = get_ext_and_mimetype(argument)
if mimetype and mimetype in convertible_mimetypes:
the_format = convertible_mimetypes[mimetype]
elif extension and extension in convertible_extensions:
the_format = convertible_extensions[extension]
else:
flash(word("File format not understood: ") + argument, "error")
return redirect(url_for('playground_files', section=section, project=current_project))
result = word_to_markdown(filename, the_format)
if result is None:
flash(word("File could not be converted: ") + argument, "error")
return redirect(url_for('playground_files', section=section, project=current_project))
shutil.copyfile(result.name, to_path)
flash(word("Created new Markdown file called ") + to_file + word("."), "success")
area.finalize()
return redirect(url_for('playground_files', section=section, file=to_file, project=current_project))
else:
flash(word("File not found: ") + argument, "error")
if request.method == 'POST' and form_validated:
if 'uploadfile' in request.files:
the_files = request.files.getlist('uploadfile')
if the_files:
need_to_restart = False
for up_file in the_files:
try:
filename = werkzeug.utils.secure_filename(up_file.filename)
extension, mimetype = get_ext_and_mimetype(filename) # pylint: disable=unused-variable
if section == 'modules' and extension != 'py':
flash(word("Sorry, only .py files can be uploaded here. To upload other types of files, use other Folders."), 'error')
return redirect(url_for('playground_files', section=section, project=current_project))
filename = re.sub(r'[^A-Za-z0-9\-\_\. ]+', '_', filename)
the_file = filename
filename = os.path.join(the_directory, filename)
up_file.save(filename)
for key in r.keys('da:interviewsource:docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':*'):
r.incr(key.decode())
area.finalize()
if section == 'modules':
need_to_restart = True
except Exception as errMess:
flash("Error of type " + str(type(errMess)) + " processing upload: " + str(errMess), "error")
if need_to_restart:
flash(word('Since you uploaded a Python module, the server needs to restart in order to load your module.'), 'info')
return redirect(url_for('restart_page', next=url_for('playground_files', section=section, file=the_file, project=current_project)))
flash(word("Upload successful"), "success")
if formtwo.delete.data:
if the_file != '':
filename = os.path.join(the_directory, the_file)
if os.path.exists(filename):
os.remove(filename)
for key in r.keys('da:interviewsource:docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':*'):
r.incr(key.decode())
area.finalize()
flash(word("Deleted file: ") + the_file, "success")
return redirect(url_for('playground_files', section=section, project=current_project))
flash(word("File not found: ") + the_file, "error")
if formtwo.submit.data and formtwo.file_content.data:
if the_file != '':
if section == 'modules' and not re.search(r'\.py$', the_file):
the_file = re.sub(r'\..*', '', the_file) + '.py'
if formtwo.original_file_name.data and formtwo.original_file_name.data != the_file:
old_filename = os.path.join(the_directory, formtwo.original_file_name.data)
cloud_trash(use_gd, use_od, section, formtwo.original_file_name.data, current_project)
if os.path.isfile(old_filename):
os.remove(old_filename)
filename = os.path.join(the_directory, the_file)
with open(filename, 'w', encoding='utf-8') as fp:
fp.write(re.sub(r'\r\n', r'\n', formtwo.file_content.data))
the_time = formatted_current_time()
for key in r.keys('da:interviewsource:docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':*'):
r.incr(key.decode())
area.finalize()
if formtwo.active_file.data and formtwo.active_file.data != the_file:
# interview_file = os.path.join(pgarea.directory, formtwo.active_file.data)
r.incr('da:interviewsource:docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + formtwo.active_file.data)
# if os.path.isfile(interview_file):
# with open(interview_file, 'a'):
# os.utime(interview_file, None)
# pgarea.finalize()
flash_message = flash_as_html(str(the_file) + ' ' + word('was saved at') + ' ' + the_time + '.', message_type='success', is_ajax=is_ajax)
if section == 'modules':
# restart_all()
flash(word('Since you changed a Python module, the server needs to restart in order to load your module.'), 'info')
return redirect(url_for('restart_page', next=url_for('playground_files', section=section, file=the_file, project=current_project)))
if is_ajax:
return jsonify(success=True, flash_message=flash_message)
return redirect(url_for('playground_files', section=section, file=the_file, project=current_project))
flash(word('You need to type in a name for the file'), 'error')
if is_ajax and not form_validated:
errors = []
for fieldName, errorMessages in formtwo.errors.items():
for err in errorMessages:
errors.append({'fieldName': fieldName, 'err': err})
return jsonify(success=False, errors=errors)
files = sorted([f for f in os.listdir(the_directory) if os.path.isfile(os.path.join(the_directory, f)) and re.search(r'^[A-Za-z0-9]', f)])
editable_files = []
convertible_files = []
trainable_files = {}
mode = "yaml"
for a_file in files:
extension, mimetype = get_ext_and_mimetype(a_file)
if (mimetype and mimetype in ok_mimetypes) or (extension and extension in ok_extensions) or (mimetype and mimetype.startswith('text')):
if section == 'sources' and re.match(r'ml-.*\.json$', a_file):
trainable_files[a_file] = re.sub(r'^ml-|\.json$', '', a_file)
else:
editable_files.append({'name': a_file, 'modtime': os.path.getmtime(os.path.join(the_directory, a_file))})
assign_opacity(editable_files)
editable_file_listing = [x['name'] for x in editable_files]
for a_file in files:
extension, mimetype = get_ext_and_mimetype(a_file)
b_file = os.path.splitext(a_file)[0] + '.md'
if b_file not in editable_file_listing and ((mimetype and mimetype in convertible_mimetypes) or (extension and extension in convertible_extensions)):
convertible_files.append(a_file)
if the_file and not is_new and the_file not in editable_file_listing:
the_file = ''
if not the_file and not is_new:
current_file = get_current_file(current_project, section)
if current_file in editable_file_listing:
the_file = current_file
else:
delete_current_file(current_project, section)
if len(editable_files) > 0:
the_file = sorted(editable_files, key=lambda x: x['modtime'])[-1]['name']
else:
if section == 'modules':
the_file = 'test.py'
elif section == 'sources':
the_file = 'test.json'
else:
the_file = 'test.md'
if the_file in editable_file_listing:
set_current_file(current_project, section, the_file)
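    # choose a CodeMirror editing mode based on the selected file's MIME type or extension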
if the_file != '':
extension, mimetype = get_ext_and_mimetype(the_file)
if mimetype and mimetype in ok_mimetypes:
mode = ok_mimetypes[mimetype]
elif extension and extension in ok_extensions:
mode = ok_extensions[extension]
elif mimetype and mimetype.startswith('text'):
mode = 'null'
if mode != 'markdown':
active_file = None
if section == 'modules':
mode = 'python'
formtwo.original_file_name.data = the_file
formtwo.file_name.data = the_file
if the_file != '' and os.path.isfile(os.path.join(the_directory, the_file)):
filename = os.path.join(the_directory, the_file)
else:
filename = None
if filename is not None:
area.finalize()
with open(filename, 'r', encoding='utf-8') as fp:
try:
content = fp.read()
except:
filename = None
content = ''
elif formtwo.file_content.data:
content = re.sub(r'\r\n', r'\n', formtwo.file_content.data)
else:
content = ''
lowerdescription = None
description = None
if section == "template":
header = word("Templates")
        description = 'Add files here that you want to include in your interviews using <a target="_blank" href="https://docassemble.org/docs/documents.html#docx template file"><code>docx template file</code></a>, <a target="_blank" href="https://docassemble.org/docs/documents.html#pdf template file"><code>pdf template file</code></a>, <a target="_blank" href="https://docassemble.org/docs/documents.html#content file"><code>content file</code></a>, <a target="_blank" href="https://docassemble.org/docs/documents.html#initial yaml"><code>initial yaml</code></a>, <a target="_blank" href="https://docassemble.org/docs/documents.html#additional yaml"><code>additional yaml</code></a>, <a target="_blank" href="https://docassemble.org/docs/documents.html#template file"><code>template file</code></a>, <a target="_blank" href="https://docassemble.org/docs/documents.html#rtf template file"><code>rtf template file</code></a>, or <a target="_blank" href="https://docassemble.org/docs/documents.html#docx reference file"><code>docx reference file</code></a>.'
upload_header = word("Upload a template file")
list_header = word("Existing template files")
edit_header = word('Edit text files')
after_text = None
elif section == "static":
header = word("Static Files")
description = 'Add files here that you want to include in your interviews with <a target="_blank" href="https://docassemble.org/docs/initial.html#images"><code>images</code></a>, <a target="_blank" href="https://docassemble.org/docs/initial.html#image sets"><code>image sets</code></a>, <a target="_blank" href="https://docassemble.org/docs/markup.html#inserting%20images"><code>[FILE]</code></a> or <a target="_blank" href="https://docassemble.org/docs/functions.html#url_of"><code>url_of()</code></a>.'
upload_header = word("Upload a static file")
list_header = word("Existing static files")
edit_header = word('Edit text files')
after_text = None
elif section == "sources":
header = word("Source Files")
description = 'Add files here that you want to use as a data source in your interview code, such as word translation files and training data for machine learning. For Python source code, see the Modules folder.'
upload_header = word("Upload a source file")
list_header = word("Existing source files")
edit_header = word('Edit source files')
after_text = None
elif section == "modules":
header = word("Modules")
upload_header = word("Upload a Python module")
list_header = word("Existing module files")
edit_header = word('Edit module files')
description = 'You can use this page to add Python module files (.py files) that you want to include in your interviews using <a target="_blank" href="https://docassemble.org/docs/initial.html#modules"><code>modules</code></a> or <a target="_blank" href="https://docassemble.org/docs/initial.html#imports"><code>imports</code></a>.'
lowerdescription = Markup("""<p>To use this in an interview, write a <a target="_blank" href="https://docassemble.org/docs/initial.html#modules"><code>modules</code></a> block that refers to this module using Python's syntax for specifying a "relative import" of a module (i.e., prefix the module name with a period).</p>""" + highlight('---\nmodules:\n - .' + re.sub(r'\.py$', '', the_file) + '\n---', YamlLexer(), HtmlFormatter(cssclass='bg-light highlight dahighlight')) + """<p>If you wish to refer to this module from another package, you can use a fully qualified reference.</p>""" + highlight('---\nmodules:\n - ' + "docassemble.playground" + str(playground_user.id) + project_name(current_project) + "." + re.sub(r'\.py$', '', the_file) + '\n---', YamlLexer(), HtmlFormatter(cssclass='bg-light highlight dahighlight')))
after_text = None
if scroll:
extra_command = """
if ($("#file_name").val().length > 0){
daCodeMirror.focus();
}
else{
$("#file_name").focus()
}
scrollBottom();"""
else:
extra_command = ""
if keymap:
kbOpt = 'keyMap: "' + keymap + '", cursorBlinkRate: 0, '
kbLoad = '<script src="' + url_for('static', filename="codemirror/keymap/" + keymap + ".js", v=da_version) + '"></script>\n '
else:
kbOpt = ''
kbLoad = ''
extra_js = """
<script>
var daNotificationContainer = """ + json.dumps(NOTIFICATION_CONTAINER) + """;
var daNotificationMessage = """ + json.dumps(NOTIFICATION_MESSAGE) + """;
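      // minimal sprintf-style string formatter; saveCallback() below uses it to splice flash messages into the notification HTML template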
Object.defineProperty(String.prototype, "daSprintf", {
value: function () {
var args = Array.from(arguments),
i = 0;
function defaultNumber(iValue) {
return iValue != undefined && !isNaN(iValue) ? iValue : "0";
}
function defaultString(iValue) {
return iValue == undefined ? "" : "" + iValue;
}
return this.replace(
/%%|%([+\\-])?([^1-9])?(\\d+)?(\\.\\d+)?([deEfhHioQqs])/g,
function (match, sign, filler, scale, precision, type) {
var strOut, space, value;
var asNumber = false;
if (match == "%%") return "%";
if (i >= args.length) return match;
value = args[i];
while (Array.isArray(value)) {
args.splice(i, 1);
for (var j = i; value.length > 0; j++)
args.splice(j, 0, value.shift());
value = args[i];
}
i++;
if (filler == undefined) filler = " "; // default
if (scale == undefined && !isNaN(filler)) {
scale = filler;
filler = " ";
}
if (sign == undefined) sign = "sqQ".indexOf(type) >= 0 ? "+" : "-"; // default
if (scale == undefined) scale = 0; // default
if (precision == undefined) precision = ".0"; // default
scale = parseInt(scale);
precision = parseInt(precision.substr(1));
switch (type) {
case "d":
case "i":
// decimal integer
asNumber = true;
strOut = parseInt(defaultNumber(value));
if (precision > 0) strOut += "." + "0".repeat(precision);
break;
case "e":
case "E":
// float in exponential notation
asNumber = true;
strOut = parseFloat(defaultNumber(value));
if (precision == 0) strOut = strOut.toExponential();
else strOut = strOut.toExponential(precision);
if (type == "E") strOut = strOut.replace("e", "E");
break;
case "f":
// decimal float
asNumber = true;
strOut = parseFloat(defaultNumber(value));
if (precision != 0) strOut = strOut.toFixed(precision);
break;
case "o":
case "h":
case "H":
// Octal or Hexagesimal integer notation
strOut =
"\\\\" +
(type == "o" ? "0" : type) +
parseInt(defaultNumber(value)).toString(type == "o" ? 8 : 16);
break;
case "q":
// single quoted string
strOut = "'" + defaultString(value) + "'";
break;
case "Q":
// double quoted string
strOut = '"' + defaultString(value) + '"';
break;
default:
// string
strOut = defaultString(value);
break;
}
if (typeof strOut != "string") strOut = "" + strOut;
if ((space = strOut.length) < scale) {
if (asNumber) {
if (sign == "-") {
if (strOut.indexOf("-") < 0)
strOut = filler.repeat(scale - space) + strOut;
else
strOut =
"-" +
filler.repeat(scale - space) +
strOut.replace("-", "");
} else {
if (strOut.indexOf("-") < 0)
strOut = "+" + filler.repeat(scale - space - 1) + strOut;
else
strOut =
"-" +
filler.repeat(scale - space) +
strOut.replace("-", "");
}
} else {
if (sign == "-") strOut = filler.repeat(scale - space) + strOut;
else strOut = strOut + filler.repeat(scale - space);
}
} else if (asNumber && sign == "+" && strOut.indexOf("-") < 0)
strOut = "+" + strOut;
return strOut;
}
);
},
});
Object.defineProperty(window, "daSprintf", {
value: function (str, ...rest) {
if (typeof str == "string")
return String.prototype.daSprintf.apply(str, rest);
return "";
},
});
var daCodeMirror;
var daTextArea;
var vocab = [];
var currentFile = """ + json.dumps(the_file) + """;
var daIsNew = """ + ('true' if is_new else 'false') + """;
var existingFiles = """ + json.dumps(files) + """;
var daSection = """ + '"' + section + '";' + """
var attrs_showing = Object();
var currentProject = """ + json.dumps(current_project) + """;
""" + indent_by(variables_js(form='formtwo'), 6) + """
""" + indent_by(search_js(form='formtwo'), 6) + """
var daExpireSession = null;
function resetExpireSession(){
if (daExpireSession != null){
window.clearTimeout(daExpireSession);
}
daExpireSession = setTimeout(function(){
alert(""" + json.dumps(word("Your browser session has expired and you have been signed out. You will not be able to save your work. Please log in again.")) + """);
}, """ + str(999 * int(daconfig.get('session lifetime seconds', 43200))) + """);
}
function saveCallback(data){
if (!data.success){
var n = data.errors.length;
for (var i = 0; i < n; ++i){
$('input[name="' + data.errors[i].fieldName + '"]').parents('.input-group').addClass("da-group-has-error").after('<div class="da-has-error invalid-feedback">' + data.errors[i].err + '</div>');
}
return;
}
$('.da-has-error').remove();
$('.da-group-has-error').removeClass('da-group-has-error');
fetchVars(true);
if ($("#daflash").length){
$("#daflash").html(data.flash_message);
}
else{
$("#damain").prepend(daSprintf(daNotificationContainer, data.flash_message));
}
}
function scrollBottom(){
$("html, body").animate({
scrollTop: $("#editnav").offset().top - 53
}, "slow");
}
$( document ).ready(function() {
resetExpireSession();
$("#file_name").on('change', function(){
var newFileName = $(this).val();
if ((!daIsNew) && newFileName == currentFile){
return;
}
for (var i = 0; i < existingFiles.length; i++){
if (newFileName == existingFiles[i]){
alert(""" + json.dumps(word("Warning: a file by that name already exists. If you save, you will overwrite it.")) + """);
return;
}
}
return;
});
$("#dauploadbutton").click(function(event){
if ($("#uploadfile").val() == ""){
event.preventDefault();
return false;
}
});
daTextArea = document.getElementById("file_content");
daCodeMirror = CodeMirror.fromTextArea(daTextArea, {mode: """ + ('{name: "markdown", underscoresBreakWords: false}' if mode == 'markdown' else json.dumps(mode)) + """, """ + kbOpt + """tabSize: 2, tabindex: 580, autofocus: false, lineNumbers: true, matchBrackets: true, lineWrapping: """ + ('true' if daconfig.get('wrap lines in playground', True) else 'false') + """});
$(window).bind("beforeunload", function(){
daCodeMirror.save();
$("#formtwo").trigger("checkform.areYouSure");
});
$("#daDelete").click(function(event){
if (!confirm(""" + json.dumps(word("Are you sure that you want to delete this file?")) + """)){
event.preventDefault();
}
});
$("#formtwo").areYouSure(""" + json.dumps(json.dumps({'message': word("There are unsaved changes. Are you sure you wish to leave this page?")})) + """);
$("#formtwo").bind("submit", function(e){
daCodeMirror.save();
$("#formtwo").trigger("reinitialize.areYouSure");
if (daSection != 'modules' && !daIsNew){
var extraVariable = ''
if ($("#daVariables").length){
extraVariable = '&active_file=' + encodeURIComponent($("#daVariables").val());
}
$.ajax({
type: "POST",
url: """ + '"' + url_for('playground_files', project=current_project) + '"' + """,
data: $("#formtwo").serialize() + extraVariable + '&submit=Save&ajax=1',
success: function(data){
if (data.action && data.action == 'reload'){
location.reload(true);
}
resetExpireSession();
saveCallback(data);
setTimeout(function(){
$("#daflash .alert-success").hide(300, function(){
$(self).remove();
});
}, 3000);
},
dataType: 'json'
});
e.preventDefault();
return false;
}
return true;
});
daCodeMirror.setOption("extraKeys", { Tab: function(cm) { var spaces = Array(cm.getOption("indentUnit") + 1).join(" "); cm.replaceSelection(spaces); }, "F11": function(cm) { cm.setOption("fullScreen", !cm.getOption("fullScreen")); }, "Esc": function(cm) { if (cm.getOption("fullScreen")) cm.setOption("fullScreen", false); }});
daCodeMirror.setOption("coverGutterNextToScrollbar", true);
searchReady();
variablesReady();
fetchVars(false);""" + extra_command + """
});
searchReady();
$('#uploadfile').on('change', function(){
var fileName = $(this).val();
fileName = fileName.replace(/.*\\\\/, '');
fileName = fileName.replace(/.*\\//, '');
$(this).next('.custom-file-label').html(fileName);
});
</script>"""
if keymap:
kbOpt = 'keyMap: "' + keymap + '", cursorBlinkRate: 0, '
kbLoad = '<script src="' + url_for('static', filename="codemirror/keymap/" + keymap + ".js") + '"></script>\n '
else:
kbOpt = ''
kbLoad = ''
any_files = bool(len(editable_files) > 0)
back_button = Markup('<span class="navbar-brand navbar-nav dabackicon me-3"><a href="' + url_for('playground_page', project=current_project) + '" class="dabackbuttoncolor nav-link" title=' + json.dumps(word("Go back to the main Playground page")) + '><i class="fas fa-chevron-left"></i><span class="daback">' + word('Back') + '</span></a></span>')
cm_mode = ''
if mode == 'null':
modes = []
elif mode == 'htmlmixed':
modes = ['css', 'xml', 'htmlmixed']
else:
modes = [mode]
for the_mode in modes:
cm_mode += '\n <script src="' + url_for('static', filename="codemirror/mode/" + the_mode + "/" + ('damarkdown' if the_mode == 'markdown' else the_mode) + ".js", v=da_version) + '"></script>'
if current_user.id != playground_user.id:
header += " / " + playground_user.email
if current_project != 'default':
header += " / " + current_project
response = make_response(render_template('pages/playgroundfiles.html', current_project=current_project, version_warning=None, bodyclass='daadminbody', use_gd=use_gd, use_od=use_od, back_button=back_button, tab_title=header, page_title=header, extra_css=Markup('\n <link href="' + url_for('static', filename='app/playgroundbundle.css', v=da_version) + '" rel="stylesheet">'), extra_js=Markup('\n <script src="' + url_for('static', filename="app/playgroundbundle.js", v=da_version) + '"></script>\n ' + kbLoad + cm_mode + extra_js), header=header, upload_header=upload_header, list_header=list_header, edit_header=edit_header, description=Markup(description), lowerdescription=lowerdescription, form=form, files=sorted(files, key=lambda y: y.lower()), section=section, userid=playground_user.id, editable_files=sorted(editable_files, key=lambda y: y['name'].lower()), editable_file_listing=editable_file_listing, trainable_files=trainable_files, convertible_files=convertible_files, formtwo=formtwo, current_file=the_file, content=content, after_text=after_text, is_new=str(is_new), any_files=any_files, pulldown_files=sorted(pulldown_files, key=lambda y: y.lower()), active_file=active_file, playground_package='docassemble.playground' + str(playground_user.id) + project_name(current_project), own_playground=bool(playground_user.id == current_user.id)), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
@app.route('/pullplaygroundpackage', methods=['GET', 'POST'])
@login_required
@roles_required(['developer', 'admin'])
def pull_playground_package():
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
current_project = get_current_project()
form = PullPlaygroundPackage(request.form)
if request.method == 'POST':
if form.pull.data:
if form.github_url.data and form.pypi.data:
flash(word("You cannot pull from GitHub and PyPI at the same time. Please fill in one and leave the other blank."), 'error')
elif form.github_url.data:
return redirect(url_for('playground_packages', project=current_project, pull='1', github_url=re.sub(r'/*$', '', str(form.github_url.data).strip()), branch=form.github_branch.data))
elif form.pypi.data:
return redirect(url_for('playground_packages', project=current_project, pull='1', pypi=form.pypi.data))
if form.cancel.data:
return redirect(url_for('playground_packages', project=current_project))
elif 'github' in request.args:
form.github_url.data = re.sub(r'[^A-Za-z0-9\-\.\_\~\:\/\?\#\[\]\@\!\$\&\'\(\)\*\+\,\;\=\`]', '', request.args['github'])
elif 'pypi' in request.args:
form.pypi.data = re.sub(r'[^A-Za-z0-9\-\.\_\~\:\/\?\#\[\]\@\!\$\&\'\(\)\*\+\,\;\=\`]', '', request.args['pypi'])
form.github_branch.choices = []
description = word("Enter a URL of a GitHub repository containing an extension package. When you press Pull, the contents of that repository will be copied into the Playground, overwriting any files with the same names. Or, put in the name of a PyPI package and it will do the same with the package on PyPI.")
branch = request.args.get('branch')
extra_js = """
<script>
var default_branch = """ + json.dumps(branch if branch else GITHUB_BRANCH) + """;
function get_branches(){
var elem = $("#github_branch");
elem.empty();
var opt = $("<option><\/option>");
opt.attr("value", "").text("Not applicable");
elem.append(opt);
var github_url = $("#github_url").val();
if (!github_url){
return;
}
$.get(""" + json.dumps(url_for('get_git_branches')) + """, { url: github_url }, "json")
.done(function(data){
//console.log(data);
if (data.success){
var n = data.result.length;
if (n > 0){
var default_to_use = default_branch;
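                // prefer the requested branch, then the configured default branch, then master or main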
var to_try = [default_branch, """ + json.dumps(GITHUB_BRANCH) + """, 'master', 'main'];
outer:
for (var j = 0; j < 4; j++){
for (var i = 0; i < n; i++){
if (data.result[i].name == to_try[j]){
default_to_use = to_try[j];
break outer;
}
}
}
elem.empty();
for (var i = 0; i < n; i++){
opt = $("<option><\/option>");
opt.attr("value", data.result[i].name).text(data.result[i].name);
if (data.result[i].name == default_to_use){
opt.prop('selected', true);
}
$(elem).append(opt);
}
}
}
});
}
$( document ).ready(function() {
get_branches();
$("#github_url").on('change', get_branches);
});
</script>
""" # noqa: W605
response = make_response(render_template('pages/pull_playground_package.html',
current_project=current_project,
form=form,
description=description,
version_warning=version_warning,
bodyclass='daadminbody',
title=word("Pull GitHub or PyPI Package"),
tab_title=word("Pull"),
page_title=word("Pull"),
extra_js=Markup(extra_js)), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
def get_branches_of_repo(giturl):
repo_name = re.sub(r'/*$', '', giturl)
m = re.search(r'//(.+):[email protected]', repo_name)
if m:
access_token = m.group(1)
else:
access_token = None
repo_name = re.sub(r'^http.*github.com/', '', repo_name)
repo_name = re.sub(r'.*@github.com:', '', repo_name)
repo_name = re.sub(r'.git$', '', repo_name)
repo_name = re.sub(r'#egg=.*', '', repo_name)
if app.config['USE_GITHUB']:
github_auth = r.get('da:using_github:userid:' + str(current_user.id))
else:
github_auth = None
if github_auth and access_token is None:
storage = RedisCredStorage(oauth_app='github')
credentials = storage.get()
if not credentials or credentials.invalid:
http = httplib2.Http()
else:
http = credentials.authorize(httplib2.Http())
else:
http = httplib2.Http()
the_url = "https://api.github.com/repos/" + repo_name + '/branches'
branches = []
if access_token:
resp, content = http.request(the_url, "GET", headers={'Authorization': "token " + access_token})
else:
resp, content = http.request(the_url, "GET")
if int(resp['status']) == 200:
branches.extend(json.loads(content.decode()))
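        # follow the "next" links in GitHub's paginated response until all branches have been collected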
while True:
next_link = get_next_link(resp)
if next_link:
if access_token:
resp, content = http.request(next_link, "GET", headers={'Authorization': "token " + access_token})
else:
resp, content = http.request(next_link, "GET")
if int(resp['status']) != 200:
raise DAException(repo_name + " fetch failed")
branches.extend(json.loads(content.decode()))
else:
break
return branches
raise DAException(the_url + " fetch failed on first try; got " + str(resp['status']))
def get_repo_info(giturl):
giturl = re.sub(r'#.*', '', giturl)
repo_name = re.sub(r'/*$', '', giturl)
m = re.search(r'//(.+):[email protected]', repo_name)
if m:
access_token = m.group(1)
else:
access_token = None
repo_name = re.sub(r'^http.*github.com/', '', repo_name)
repo_name = re.sub(r'.*@github.com:', '', repo_name)
repo_name = re.sub(r'.git$', '', repo_name)
if app.config['USE_GITHUB']:
github_auth = r.get('da:using_github:userid:' + str(current_user.id))
else:
github_auth = None
if github_auth and access_token is None:
storage = RedisCredStorage(oauth_app='github')
credentials = storage.get()
if not credentials or credentials.invalid:
http = httplib2.Http()
else:
http = credentials.authorize(httplib2.Http())
else:
http = httplib2.Http()
the_url = "https://api.github.com/repos/" + repo_name
if access_token:
resp, content = http.request(the_url, "GET", headers={'Authorization': "token " + access_token})
else:
resp, content = http.request(the_url, "GET")
if int(resp['status']) == 200:
return json.loads(content.decode())
raise DAException(the_url + " fetch failed on first try; got " + str(resp['status']))
@app.route('/get_git_branches', methods=['GET'])
@login_required
@roles_required(['developer', 'admin'])
def get_git_branches():
if 'url' not in request.args:
return ('File not found', 404)
giturl = request.args['url'].strip()
try:
return jsonify({'success': True, 'result': get_branches_of_repo(giturl)})
except Exception as err:
return jsonify({'success': False, 'reason': str(err)})
def get_user_repositories(http):
repositories = []
resp, content = http.request("https://api.github.com/user/repos", "GET")
if int(resp['status']) == 200:
repositories.extend(json.loads(content.decode()))
while True:
next_link = get_next_link(resp)
if next_link:
resp, content = http.request(next_link, "GET")
if int(resp['status']) != 200:
raise DAError("get_user_repositories: could not get information from next URL")
repositories.extend(json.loads(content.decode()))
else:
break
else:
raise DAError("playground_packages: could not get information about repositories")
return repositories
def get_orgs_info(http):
orgs_info = []
resp, content = http.request("https://api.github.com/user/orgs", "GET")
if int(resp['status']) == 200:
orgs_info.extend(json.loads(content.decode()))
while True:
next_link = get_next_link(resp)
if next_link:
resp, content = http.request(next_link, "GET")
if int(resp['status']) != 200:
raise DAError("get_orgs_info: could not get additional information about organizations")
orgs_info.extend(json.loads(content.decode()))
else:
break
else:
raise DAError("get_orgs_info: failed to get orgs using https://api.github.com/user/orgs")
return orgs_info
def get_branch_info(http, full_name):
branch_info = []
resp, content = http.request("https://api.github.com/repos/" + str(full_name) + '/branches', "GET")
if int(resp['status']) == 200:
branch_info.extend(json.loads(content.decode()))
while True:
next_link = get_next_link(resp)
if next_link:
resp, content = http.request(next_link, "GET")
if int(resp['status']) != 200:
raise DAError("get_branch_info: could not get additional information from next URL")
branch_info.extend(json.loads(content))
else:
break
else:
logmessage("get_branch_info: could not get info from https://api.github.com/repos/" + str(full_name) + '/branches')
return branch_info
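# make sure every file in the Playground packages folders carries the "docassemble." prefix, renaming stray files and trashing the old names from synced cloud storage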
def fix_package_folder():
playground_user = get_playground_user()
use_gd = bool(app.config['USE_GOOGLE_DRIVE'] is True and get_gd_folder() is not None)
use_od = bool(use_gd is False and app.config['USE_ONEDRIVE'] is True and get_od_folder() is not None)
problem_exists = False
area = SavedFile(playground_user.id, fix=True, section='playgroundpackages')
for f in os.listdir(area.directory):
path = os.path.join(area.directory, f)
if os.path.isfile(path) and not f.startswith('docassemble.') and not f.startswith('.'):
os.rename(path, os.path.join(area.directory, 'docassemble.' + f))
cloud_trash(use_gd, use_od, 'packages', f, 'default')
problem_exists = True
if os.path.isdir(path) and not f.startswith('.'):
for e in os.listdir(path):
if os.path.isfile(os.path.join(path, e)) and not e.startswith('docassemble.') and not e.startswith('.'):
os.rename(os.path.join(path, e), os.path.join(path, 'docassemble.' + e))
cloud_trash(use_gd, use_od, 'packages', e, f)
problem_exists = True
if problem_exists:
area.finalize()
def secure_git_branchname(branch):
"""Makes an input branch name a valid git branch name, and also strips out
    things that would be interpolated in bash."""
# The rules of what's allowed in a git branch name are: https://git-scm.com/docs/git-check-ref-format
branch = unicodedata.normalize("NFKD", branch)
branch = branch.encode("ascii", "ignore").decode("ascii")
    branch = re.compile(r"[\u0000-\u0020]|(\")|(@{)|(\.\.)|[\u007F~ ^:?*$`[\\]|(//+)").sub("", branch)
branch = branch.strip("/")
# Can include a slash, but no slash-separated component can begin with a dot `.` or end with `.lock`
branch = "/".join([re.compile(r"\.lock$").sub("", component.lstrip('.')) for component in branch.split("/")])
branch = branch.rstrip(".")
if branch == "@":
branch = "_"
return branch
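# copy the contents of a GitHub repository or a PyPI package into the user's Playground folders for the given project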
def do_playground_pull(area, current_project, github_url=None, branch=None, pypi_package=None, can_publish_to_github=False, github_email=None, pull_only=False):
playground_user = get_playground_user()
area_sec = {'templates': 'playgroundtemplate', 'static': 'playgroundstatic', 'sources': 'playgroundsources', 'questions': 'playground'}
readme_text = ''
setup_py = ''
if branch in ('', 'None'):
branch = None
if branch:
branch = secure_git_branchname(branch)
branch_option = ['-b', branch]
else:
branch_option = []
need_to_restart = False
extracted = {}
data_files = {'templates': [], 'static': [], 'sources': [], 'interviews': [], 'modules': [], 'questions': []}
directory = tempfile.mkdtemp(prefix='SavedFile')
output = ''
pypi_url = daconfig.get('pypi url', 'https://pypi.python.org/pypi')
expected_name = 'unknown'
if github_url:
github_url = re.sub(r'[^A-Za-z0-9\-\.\_\~\:\/\#\[\]\@\$\+\,\=]', '', github_url)
repo_name = re.sub(r'/*$', '', github_url)
repo_name = re.sub(r'^http.*github.com/', '', repo_name)
repo_name = re.sub(r'.*@github.com:', '', repo_name)
repo_name = re.sub(r'.git$', '', repo_name)
if '[email protected]' not in github_url and can_publish_to_github and github_email:
github_url = f'[email protected]:{repo_name}.git'
expected_name = re.sub(r'.*/', '', github_url)
expected_name = re.sub(r'\.git', '', expected_name)
expected_name = re.sub(r'docassemble-', '', expected_name)
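            # clone over SSH with the user's key pair, passing a wrapper script to git through GIT_SSH so the right identity file is used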
(private_key_file, public_key_file) = get_ssh_keys(github_email)
os.chmod(private_key_file, stat.S_IRUSR | stat.S_IWUSR)
os.chmod(public_key_file, stat.S_IRUSR | stat.S_IWUSR)
ssh_script = tempfile.NamedTemporaryFile(mode='w', prefix="datemp", suffix='.sh', delete=False, encoding='utf-8')
            ssh_script.write('#!/bin/bash\n\nssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o GlobalKnownHostsFile=/dev/null -i "' + str(private_key_file) + '" $1 $2 $3 $4 $5 $6')
ssh_script.close()
os.chmod(ssh_script.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
# git_prefix = "GIT_SSH_COMMAND='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o GlobalKnownHostsFile=/dev/null -i \"" + str(private_key_file) + "\"' "
git_prefix = "GIT_SSH=" + ssh_script.name + " "
git_env = dict(os.environ, GIT_SSH=ssh_script.name)
output += "Doing " + git_prefix + "git clone " + " ".join(branch_option) + github_url + "\n"
try:
output += subprocess.check_output(["git", "clone"] + branch_option + [github_url], cwd=directory, stderr=subprocess.STDOUT, env=git_env).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
return {'action': "error", 'message': "error running git clone. " + output}
else:
if not github_url.startswith('http'):
github_url = f'https://github.com/{repo_name}'
expected_name = re.sub(r'.*/', '', github_url)
expected_name = re.sub(r'\.git', '', expected_name)
expected_name = re.sub(r'docassemble-', '', expected_name)
try:
if branch is not None:
logmessage("Doing git clone -b " + branch + " " + github_url)
output += subprocess.check_output(['git', 'clone', '-b', branch, github_url], cwd=directory, stderr=subprocess.STDOUT).decode()
else:
logmessage("Doing git clone " + github_url)
output += subprocess.check_output(['git', 'clone', github_url], cwd=directory, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
output += err.output.decode()
return {'action': "error", 'message': "error running git clone. " + output}
logmessage(output)
dirs_inside = [f for f in os.listdir(directory) if os.path.isdir(os.path.join(directory, f)) and re.search(r'^[A-Za-z0-9]', f)]
if len(dirs_inside) == 1:
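            # record the HEAD commit of the cloned repository in a hidden dot-file in the packages folder for this project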
commit_file = os.path.join(directory_for(area['playgroundpackages'], current_project), '.' + dirs_inside[0])
packagedir = os.path.join(directory, dirs_inside[0])
try:
current_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=packagedir, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
output = err.output.decode()
return {'action': "error", 'message': "error running git rev-parse. " + output}
with open(commit_file, 'w', encoding='utf-8') as fp:
fp.write(current_commit.strip())
logmessage("Wrote " + current_commit.strip() + " to " + commit_file)
else:
logmessage("Did not find a single directory inside repo")
if pull_only:
return {'action': 'pull_only'}
elif pypi_package:
pypi_package = re.sub(r'[^A-Za-z0-9\-\.\_\:\/\@\+\=]', '', pypi_package)
pypi_package = 'docassemble.' + re.sub(r'^docassemble\.', '', pypi_package)
package_file = tempfile.NamedTemporaryFile(suffix='.tar.gz')
try:
http = httplib2.Http()
resp, content = http.request(pypi_url + "/" + str(pypi_package) + "/json", "GET")
the_pypi_url = None
if int(resp['status']) == 200:
pypi_response = json.loads(content.decode())
for file_option in pypi_response['releases'][pypi_response['info']['version']]:
if file_option['packagetype'] == 'sdist':
the_pypi_url = file_option['url']
break
else:
return {'action': 'fail', 'message': word("The package you specified could not be downloaded from PyPI.")}
if the_pypi_url is None:
return {'action': 'fail', 'message': word("The package you specified could not be downloaded from PyPI as a tar.gz file.")}
except Exception as err:
return {'action': 'error', 'message': "error getting information about PyPI package. " + str(err)}
try:
urlretrieve(the_pypi_url, package_file.name)
except Exception as err:
return {'action': 'error', 'message': "error downloading PyPI package. " + str(err)}
try:
tar = tarfile.open(package_file.name)
tar.extractall(path=directory)
tar.close()
except Exception as err:
return {'action': 'error', 'message': "error unpacking PyPI package. " + str(err)}
package_file.close()
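    # walk the unpacked package, copying data files into the matching Playground sections and .py files into the modules folder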
initial_directories = len(splitall(directory)) + 1
for root, dirs, files in os.walk(directory):
at_top_level = bool('setup.py' in files and 'docassemble' in dirs)
for a_file in files:
orig_file = os.path.join(root, a_file)
# output += "Original file is " + orig_file + "\n"
thefilename = os.path.join(*splitall(orig_file)[initial_directories:]) # pylint: disable=no-value-for-parameter
(the_directory, filename) = os.path.split(thefilename)
if filename.startswith('#') or filename.endswith('~'):
continue
dirparts = splitall(the_directory)
if '.git' in dirparts:
continue
levels = re.findall(r'/', the_directory)
for sec in ('templates', 'static', 'sources', 'questions'):
if the_directory.endswith('data/' + sec) and filename != 'README.md':
data_files[sec].append(filename)
target_filename = os.path.join(directory_for(area[area_sec[sec]], current_project), filename)
# output += "Copying " + orig_file + "\n"
copy_if_different(orig_file, target_filename)
if filename == 'README.md' and at_top_level:
with open(orig_file, 'r', encoding='utf-8') as fp:
readme_text = fp.read()
if filename == 'setup.py' and at_top_level:
with open(orig_file, 'r', encoding='utf-8') as fp:
setup_py = fp.read()
elif len(levels) >= 1 and not at_top_level and filename.endswith('.py') and filename != '__init__.py' and 'tests' not in dirparts and 'data' not in dirparts:
data_files['modules'].append(filename)
target_filename = os.path.join(directory_for(area['playgroundmodules'], current_project), filename)
# output += "Copying " + orig_file + "\n"
if (not os.path.isfile(target_filename)) or filecmp.cmp(orig_file, target_filename) is False:
need_to_restart = True
copy_if_different(orig_file, target_filename)
# output += "setup.py is " + str(len(setup_py)) + " characters long\n"
setup_py = re.sub(r'.*setup\(', '', setup_py, flags=re.DOTALL)
for line in setup_py.splitlines():
m = re.search(r"^ *([a-z_]+) *= *\(?'(.*)'", line)
if m:
extracted[m.group(1)] = m.group(2)
m = re.search(r'^ *([a-z_]+) *= *\(?"(.*)"', line)
if m:
extracted[m.group(1)] = m.group(2)
m = re.search(r'^ *([a-z_]+) *= *\[(.*)\]', line)
if m:
the_list = []
for item in re.split(r', *', m.group(2)):
inner_item = re.sub(r"'$", '', item)
inner_item = re.sub(r"^'", '', inner_item)
inner_item = re.sub(r'"+$', '', inner_item)
inner_item = re.sub(r'^"+', '', inner_item)
the_list.append(inner_item)
extracted[m.group(1)] = the_list
info_dict = {'readme': readme_text, 'interview_files': data_files['questions'], 'sources_files': data_files['sources'], 'static_files': data_files['static'], 'module_files': data_files['modules'], 'template_files': data_files['templates'], 'dependencies': extracted.get('install_requires', []), 'description': extracted.get('description', ''), 'author_name': extracted.get('author', ''), 'author_email': extracted.get('author_email', ''), 'license': extracted.get('license', ''), 'url': extracted.get('url', ''), 'version': extracted.get('version', ''), 'github_url': github_url, 'github_branch': branch, 'pypi_package_name': pypi_package}
info_dict['dependencies'] = [x for x in map(lambda y: re.sub(r'[\>\<\=].*', '', y), info_dict['dependencies']) if x not in ('docassemble', 'docassemble.base', 'docassemble.webapp')]
# output += "info_dict is set\n"
package_name = re.sub(r'^docassemble\.', '', extracted.get('name', expected_name))
# if not user_can_edit_package(pkgname='docassemble.' + package_name):
# index = 1
# orig_package_name = package_name
# while index < 100 and not user_can_edit_package(pkgname='docassemble.' + package_name):
# index += 1
# package_name = orig_package_name + str(index)
with open(os.path.join(directory_for(area['playgroundpackages'], current_project), 'docassemble.' + package_name), 'w', encoding='utf-8') as fp:
the_yaml = standardyaml.safe_dump(info_dict, default_flow_style=False, default_style='|')
fp.write(str(the_yaml))
for sec in area:
area[sec].finalize()
for key in r.keys('da:interviewsource:docassemble.playground' + str(playground_user.id) + ':*'):
r.incr(key.decode())
return {'action': 'finished', 'need_to_restart': need_to_restart, 'package_name': package_name}
def get_github_username_and_email():
storage = RedisCredStorage(oauth_app='github')
credentials = storage.get()
if not credentials or credentials.invalid:
raise DAException('GitHub integration expired.')
http = credentials.authorize(httplib2.Http())
try:
resp, content = http.request("https://api.github.com/user", "GET")
except:
return None, None, None
if int(resp['status']) == 200:
info = json.loads(content.decode('utf-8', 'ignore'))
github_user_name = info.get('login', None)
github_author_name = info.get('name', None)
github_email = info.get('email', None)
else:
raise DAError("playground_packages: could not get information about GitHub User")
if github_email is None:
resp, content = http.request("https://api.github.com/user/emails", "GET")
if int(resp['status']) == 200:
info = json.loads(content.decode('utf-8', 'ignore'))
for item in info:
if item.get('email', None) and item.get('visibility', None) != 'private':
github_email = item['email']
if github_user_name is None or github_email is None:
raise DAError("playground_packages: login not present in user info from GitHub")
return github_user_name, github_email, github_author_name
@app.route('/playgroundpackages', methods=['GET', 'POST'])
@login_required
@roles_required(['developer', 'admin'])
def playground_packages():
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
setup_translation()
fix_package_folder()
playground_user = get_playground_user()
current_project = get_current_project()
form = PlaygroundPackagesForm(request.form)
fileform = PlaygroundUploadForm(request.form)
the_file = secure_filename_spaces_ok(request.args.get('file', ''))
# no_file_specified = bool(the_file == '')
scroll = False
allow_pypi = daconfig.get('pypi', False)
pypi_username = current_user.pypi_username
pypi_password = current_user.pypi_password
pypi_url = daconfig.get('pypi url', 'https://pypi.python.org/pypi')
can_publish_to_pypi = bool(allow_pypi is True and pypi_username is not None and pypi_password is not None and pypi_username != '' and pypi_password != '')
if app.config['USE_GITHUB']:
github_auth = r.get('da:using_github:userid:' + str(current_user.id))
if github_auth is not None:
github_auth = github_auth.decode()
if github_auth == '1':
github_auth_info = {'shared': True, 'orgs': True}
else:
github_auth_info = json.loads(github_auth)
can_publish_to_github = True
else:
can_publish_to_github = False
else:
can_publish_to_github = None
if can_publish_to_github and request.method == 'GET':
storage = RedisCredStorage(oauth_app='github')
credentials = storage.get()
if not credentials or credentials.invalid:
state_string = random_string(16)
session['github_next'] = json.dumps({'state': state_string, 'path': 'playground_packages', 'arguments': request.args})
flow = get_github_flow()
uri = flow.step1_get_authorize_url(state=state_string)
return redirect(uri)
show_message = true_or_false(request.args.get('show_message', True))
github_message = None
pypi_message = None
pypi_version = None
package_list, package_auth = get_package_info() # pylint: disable=unused-variable
package_names = sorted([package.package.name for package in package_list])
for default_package in ('docassemble', 'docassemble.base', 'docassemble.webapp'):
if default_package in package_names:
package_names.remove(default_package)
# if the_file:
# scroll = True
if request.method == 'GET':
is_new = true_or_false(request.args.get('new', False))
else:
is_new = False
if is_new:
# scroll = True
the_file = ''
area = {}
file_list = {}
section_name = {'playground': 'Interview files', 'playgroundpackages': 'Packages', 'playgroundtemplate': 'Template files', 'playgroundstatic': 'Static files', 'playgroundsources': 'Source files', 'playgroundmodules': 'Modules'}
section_sec = {'playgroundtemplate': 'template', 'playgroundstatic': 'static', 'playgroundsources': 'sources', 'playgroundmodules': 'modules'}
section_field = {'playground': form.interview_files, 'playgroundtemplate': form.template_files, 'playgroundstatic': form.static_files, 'playgroundsources': form.sources_files, 'playgroundmodules': form.module_files}
for sec in ('playground', 'playgroundpackages', 'playgroundtemplate', 'playgroundstatic', 'playgroundsources', 'playgroundmodules'):
area[sec] = SavedFile(playground_user.id, fix=True, section=sec)
the_directory = directory_for(area[sec], current_project)
if sec == 'playground' and current_project != 'default' and not os.path.isdir(the_directory):
current_project = set_current_project('default')
the_directory = directory_for(area[sec], current_project)
if os.path.isdir(the_directory):
file_list[sec] = sorted([f for f in os.listdir(the_directory) if os.path.isfile(os.path.join(the_directory, f)) and re.search(r'^[A-Za-z0-9]', f)])
else:
file_list[sec] = []
for sec, field in section_field.items():
the_list = []
for item in file_list[sec]:
the_list.append((item, item))
field.choices = the_list
the_list = []
for item in package_names:
the_list.append((item, item))
form.dependencies.choices = the_list
validated = False
form.github_branch.choices = []
if form.github_branch.data:
form.github_branch.choices.append((form.github_branch.data, form.github_branch.data))
else:
form.github_branch.choices.append(('', ''))
if request.method == 'POST' and not (app.config['DEVELOPER_CAN_INSTALL'] or current_user.has_role('admin')):
form.install_also.data = 'n'
form.install.data = ''
if request.method == 'POST' and 'uploadfile' not in request.files and form.validate():
the_file = form.file_name.data
validated = True
# else:
# the_error = ''
# for attrib in ('original_file_name', 'file_name', 'license', 'description', 'author_name', 'author_email', 'version', 'url', 'dependencies', 'interview_files', 'template_files', 'module_files', 'static_files', 'sources_files', 'readme', 'github_branch', 'commit_message', 'submit', 'download', 'install', 'pypi', 'github', 'cancel', 'delete'):
# the_field = getattr(form, attrib)
# for error in the_field.errors:
# the_error += str(error)
# raise DAError("Form did not validate with " + str(the_error))
the_file = re.sub(r'[^A-Za-z0-9\-\_\.]+', '-', the_file)
the_file = re.sub(r'^docassemble-', r'', the_file)
the_directory = directory_for(area['playgroundpackages'], current_project)
files = sorted([f for f in os.listdir(the_directory) if os.path.isfile(os.path.join(the_directory, f)) and re.search(r'^[A-Za-z0-9]', f)])
editable_files = []
for a_file in files:
editable_files.append({'name': re.sub(r'^docassemble.', r'', a_file), 'modtime': os.path.getmtime(os.path.join(the_directory, a_file))})
assign_opacity(editable_files)
editable_file_listing = [x['name'] for x in editable_files]
if request.method == 'GET' and not the_file and not is_new:
current_file = get_current_file(current_project, 'packages')
if not current_file.startswith('docassemble.'):
current_file = 'docassemble.' + current_file
set_current_file(current_project, 'packages', current_file)
if re.sub(r'^docassemble.', r'', current_file) in editable_file_listing:
the_file = re.sub(r'^docassemble.', r'', current_file)
else:
delete_current_file(current_project, 'packages')
if len(editable_files) > 0:
the_file = sorted(editable_files, key=lambda x: x['modtime'])[-1]['name']
else:
the_file = ''
# if the_file != '' and not user_can_edit_package(pkgname='docassemble.' + the_file):
# flash(word('Sorry, that package name,') + ' ' + the_file + word(', is already in use by someone else'), 'error')
# validated = False
if request.method == 'GET' and the_file in editable_file_listing:
set_current_file(current_project, 'packages', 'docassemble.' + the_file)
if the_file == '' and len(file_list['playgroundpackages']) and not is_new:
the_file = file_list['playgroundpackages'][0]
the_file = re.sub(r'^docassemble.', r'', the_file)
old_info = {}
branch_info = []
github_http = None
github_ssh = None
github_use_ssh = False
github_user_name = None
github_email = None
github_author_name = None
github_url_from_file = None
pypi_package_from_file = None
expected_name = 'unknown'
if request.method == 'GET' and the_file != '':
if the_file != '' and os.path.isfile(os.path.join(directory_for(area['playgroundpackages'], current_project), 'docassemble.' + the_file)):
filename = os.path.join(directory_for(area['playgroundpackages'], current_project), 'docassemble.' + the_file)
with open(filename, 'r', encoding='utf-8') as fp:
content = fp.read()
old_info = standardyaml.load(content, Loader=standardyaml.FullLoader)
if isinstance(old_info, dict):
github_url_from_file = old_info.get('github_url', None)
pypi_package_from_file = old_info.get('pypi_package_name', None)
for field in ('license', 'description', 'author_name', 'author_email', 'version', 'url', 'readme'):
if field in old_info:
form[field].data = old_info[field]
else:
form[field].data = ''
if 'dependencies' in old_info and isinstance(old_info['dependencies'], list) and len(old_info['dependencies']):
old_info['dependencies'] = list(map(lambda y: re.sub(r'[\>\<\=].*', '', y), old_info['dependencies']))
for item in ('docassemble', 'docassemble.base', 'docassemble.webapp'):
if item in old_info['dependencies']:
                            old_info['dependencies'].remove(item)
for field in ('dependencies', 'interview_files', 'template_files', 'module_files', 'static_files', 'sources_files'):
if field in old_info and isinstance(old_info[field], list) and len(old_info[field]):
form[field].data = old_info[field]
else:
raise DAException("YAML yielded " + repr(old_info) + " from " + repr(content))
else:
filename = None
if the_file != '' and can_publish_to_github and not is_new:
github_package_name = 'docassemble-' + the_file
try:
storage = RedisCredStorage(oauth_app='github')
credentials = storage.get()
if not credentials or credentials.invalid:
if form.github.data:
state_string = random_string(16)
session['github_next'] = json.dumps({'state': state_string, 'path': 'playground_packages', 'arguments': request.args})
flow = get_github_flow()
uri = flow.step1_get_authorize_url(state=state_string)
return redirect(uri)
raise DAException('GitHub integration expired.')
http = credentials.authorize(httplib2.Http())
resp, content = http.request("https://api.github.com/user", "GET")
if int(resp['status']) == 200:
info = json.loads(content.decode('utf-8', 'ignore'))
github_user_name = info.get('login', None)
github_author_name = info.get('name', None)
github_email = info.get('email', None)
else:
raise DAError("playground_packages: could not get information about GitHub User")
if github_email is None:
resp, content = http.request("https://api.github.com/user/emails", "GET")
if int(resp['status']) == 200:
info = json.loads(content.decode('utf-8', 'ignore'))
for item in info:
if item.get('email', None) and item.get('visibility', None) != 'private':
github_email = item['email']
if github_user_name is None or github_email is None:
raise DAError("playground_packages: login not present in user info from GitHub")
found = False
found_strong = False
resp, content = http.request("https://api.github.com/repos/" + str(github_user_name) + "/" + github_package_name, "GET")
if int(resp['status']) == 200:
repo_info = json.loads(content.decode('utf-8', 'ignore'))
github_http = repo_info['html_url']
github_ssh = repo_info['ssh_url']
if repo_info['private']:
github_use_ssh = True
github_message = word('This package is') + ' <a target="_blank" href="' + repo_info.get('html_url', 'about:blank') + '">' + word("published on GitHub") + '</a>.'
if github_author_name:
github_message += " " + word("The author is") + " " + github_author_name + "."
branch_info = get_branch_info(http, repo_info['full_name'])
found = True
if github_url_from_file is None or github_url_from_file in [github_ssh, github_http]:
found_strong = True
if found_strong is False and github_auth_info['shared']:
repositories = get_user_repositories(http)
for repo_info in repositories:
if repo_info['name'] != github_package_name or (github_http is not None and github_http == repo_info['html_url']) or (github_ssh is not None and github_ssh == repo_info['ssh_url']):
continue
if found and github_url_from_file is not None and github_url_from_file not in [repo_info['html_url'], repo_info['ssh_url']]:
break
github_http = repo_info['html_url']
github_ssh = repo_info['ssh_url']
if repo_info['private']:
github_use_ssh = True
github_message = word('This package is') + ' <a target="_blank" href="' + repo_info.get('html_url', 'about:blank') + '">' + word("published on GitHub") + '</a>.'
branch_info = get_branch_info(http, repo_info['full_name'])
found = True
if github_url_from_file is None or github_url_from_file in [github_ssh, github_http]:
found_strong = True
break
if found_strong is False and github_auth_info['orgs']:
orgs_info = get_orgs_info(http)
for org_info in orgs_info:
resp, content = http.request("https://api.github.com/repos/" + str(org_info['login']) + "/" + github_package_name, "GET")
if int(resp['status']) == 200:
repo_info = json.loads(content.decode('utf-8', 'ignore'))
if found and github_url_from_file is not None and github_url_from_file not in [repo_info['html_url'], repo_info['ssh_url']]:
break
github_http = repo_info['html_url']
github_ssh = repo_info['ssh_url']
if repo_info['private']:
github_use_ssh = True
github_message = word('This package is') + ' <a target="_blank" href="' + repo_info.get('html_url', 'about:blank') + '">' + word("published on GitHub") + '</a>.'
branch_info = get_branch_info(http, repo_info['full_name'])
found = True
if github_url_from_file is None or github_url_from_file in [github_ssh, github_http]:
found_strong = True
break
if found is False:
github_message = word('This package is not yet published on your GitHub account.')
except Exception as e:
logmessage('playground_packages: GitHub error. ' + str(e))
github_message = word('Unable to determine if the package is published on your GitHub account.')
if request.method == 'POST' and 'uploadfile' in request.files:
the_files = request.files.getlist('uploadfile')
need_to_restart = False
if current_user.timezone:
the_timezone = zoneinfo.ZoneInfo(current_user.timezone)
else:
the_timezone = zoneinfo.ZoneInfo(get_default_timezone())
if the_files:
for up_file in the_files:
# zip_filename = werkzeug.utils.secure_filename(up_file.filename)
zippath = tempfile.NamedTemporaryFile(mode="wb", suffix=".zip", prefix="datemp", delete=False)
up_file.save(zippath.name)
area_sec = {'templates': 'playgroundtemplate', 'static': 'playgroundstatic', 'sources': 'playgroundsources', 'questions': 'playground'}
zippath.close()
with zipfile.ZipFile(zippath.name, mode='r') as zf:
readme_text = ''
setup_py = ''
extracted = {}
data_files = {'templates': [], 'static': [], 'sources': [], 'interviews': [], 'modules': [], 'questions': []}
has_docassemble_dir = set()
has_setup_file = set()
for zinfo in zf.infolist():
if zinfo.is_dir():
if zinfo.filename.endswith('/docassemble/'):
has_docassemble_dir.add(re.sub(r'/docassemble/$', '', zinfo.filename))
if zinfo.filename == 'docassemble/':
has_docassemble_dir.add('')
elif zinfo.filename.endswith('/setup.py'):
(directory, filename) = os.path.split(zinfo.filename)
has_setup_file.add(directory)
elif zinfo.filename == 'setup.py':
has_setup_file.add('')
root_dir = None
for directory in has_docassemble_dir.union(has_setup_file):
if root_dir is None or len(directory) < len(root_dir):
root_dir = directory
if root_dir is None:
flash(word("The zip file did not contain a docassemble add-on package."), 'error')
return redirect(url_for('playground_packages', project=current_project, file=the_file))
for zinfo in zf.infolist():
# logmessage("Found a " + zinfo.filename)
if zinfo.filename.endswith('/'):
continue
(directory, filename) = os.path.split(zinfo.filename)
if filename.startswith('#') or filename.endswith('~'):
continue
dirparts = splitall(directory)
if '.git' in dirparts:
continue
levels = re.findall(r'/', directory)
time_tuple = zinfo.date_time
the_time = time.mktime(datetime.datetime(*time_tuple).timetuple())
for sec in ('templates', 'static', 'sources', 'questions'):
if directory.endswith('data/' + sec) and filename != 'README.md':
data_files[sec].append(filename)
target_filename = os.path.join(directory_for(area[area_sec[sec]], current_project), filename)
with zf.open(zinfo) as source_fp, open(target_filename, 'wb') as target_fp:
shutil.copyfileobj(source_fp, target_fp)
os.utime(target_filename, (the_time, the_time))
if filename == 'README.md' and directory == root_dir:
with zf.open(zinfo) as f:
the_file_obj = TextIOWrapper(f, encoding='utf8')
readme_text = the_file_obj.read()
if filename == 'setup.py' and directory == root_dir:
with zf.open(zinfo) as f:
the_file_obj = TextIOWrapper(f, encoding='utf8')
setup_py = the_file_obj.read()
elif len(levels) >= 1 and directory != root_dir and filename.endswith('.py') and filename != '__init__.py' and 'tests' not in dirparts and 'data' not in dirparts:
need_to_restart = True
data_files['modules'].append(filename)
target_filename = os.path.join(directory_for(area['playgroundmodules'], current_project), filename)
with zf.open(zinfo) as source_fp, open(target_filename, 'wb') as target_fp:
shutil.copyfileobj(source_fp, target_fp)
os.utime(target_filename, (the_time, the_time))
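                    # Parse keyword arguments out of the setup() call in setup.py with
                    # simple line-oriented regexes: quoted scalars (e.g. version='1.0.0')
                    # become strings in `extracted`, and bracketed lists (e.g.
                    # install_requires=['docassemble.base']) become lists of strings.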
setup_py = re.sub(r'.*setup\(', '', setup_py, flags=re.DOTALL)
for line in setup_py.splitlines():
m = re.search(r"^ *([a-z_]+) *= *\(?'(.*)'", line)
if m:
extracted[m.group(1)] = m.group(2)
m = re.search(r'^ *([a-z_]+) *= *\(?"(.*)"', line)
if m:
extracted[m.group(1)] = m.group(2)
m = re.search(r'^ *([a-z_]+) *= *\[(.*)\]', line)
if m:
the_list = []
for item in re.split(r', *', m.group(2)):
inner_item = re.sub(r"'$", '', item)
inner_item = re.sub(r"^'", '', inner_item)
inner_item = re.sub(r'"+$', '', inner_item)
inner_item = re.sub(r'^"+', '', inner_item)
the_list.append(inner_item)
extracted[m.group(1)] = the_list
info_dict = {'readme': readme_text, 'interview_files': data_files['questions'], 'sources_files': data_files['sources'], 'static_files': data_files['static'], 'module_files': data_files['modules'], 'template_files': data_files['templates'], 'dependencies': list(map(lambda y: re.sub(r'[\>\<\=].*', '', y), extracted.get('install_requires', []))), 'description': extracted.get('description', ''), 'author_name': extracted.get('author', ''), 'author_email': extracted.get('author_email', ''), 'license': extracted.get('license', ''), 'url': extracted.get('url', ''), 'version': extracted.get('version', '')}
info_dict['dependencies'] = [x for x in map(lambda y: re.sub(r'[\>\<\=].*', '', y), info_dict['dependencies']) if x not in ('docassemble', 'docassemble.base', 'docassemble.webapp')]
package_name = re.sub(r'^docassemble\.', '', extracted.get('name', expected_name))
with open(os.path.join(directory_for(area['playgroundpackages'], current_project), 'docassemble.' + package_name), 'w', encoding='utf-8') as fp:
the_yaml = standardyaml.safe_dump(info_dict, default_flow_style=False, default_style='|')
fp.write(str(the_yaml))
for key in r.keys('da:interviewsource:docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':*'):
r.incr(key.decode())
for the_area in area.values():
the_area.finalize()
the_file = package_name
zippath.close()
if show_message:
flash(word("The package was unpacked into the Playground."), 'success')
if need_to_restart:
return redirect(url_for('restart_page', next=url_for('playground_packages', project=current_project, file=the_file)))
return redirect(url_for('playground_packages', project=current_project, file=the_file))
if request.method == 'GET' and 'pull' in request.args and int(request.args['pull']) == 1 and ('github_url' in request.args or 'pypi' in request.args):
if can_publish_to_github and (github_user_name is None or github_email is None):
(github_user_name, github_email, github_author_name) = get_github_username_and_email()
github_url = request.args.get('github_url', None)
pypi_package = request.args.get('pypi', None)
branch = request.args.get('branch', None)
do_pypi_also = true_or_false(request.args.get('pypi_also', False))
if app.config['DEVELOPER_CAN_INSTALL'] or current_user.has_role('admin'):
do_install_also = true_or_false(request.args.get('install_also', False))
else:
do_install_also = False
result = do_playground_pull(area, current_project, github_url=github_url, branch=branch, pypi_package=pypi_package, can_publish_to_github=can_publish_to_github, github_email=github_email, pull_only=(do_pypi_also or do_install_also))
if result['action'] == 'error':
raise DAError("playground_packages: " + result['message'])
if result['action'] == 'fail':
flash(result['message'], 'error')
return redirect(url_for('playground_packages', project=current_project))
if result['action'] == 'pull_only':
the_args = {'package': the_file, 'project': current_project}
if do_pypi_also:
the_args['pypi'] = '1'
if do_install_also:
the_args['install'] = '1'
area['playgroundpackages'].finalize()
return redirect(url_for('create_playground_package', **the_args))
if result['action'] == 'finished':
the_file = result['package_name']
if show_message:
flash(word("The package was unpacked into the Playground."), 'success')
# shutil.rmtree(directory)
if result['need_to_restart']:
return redirect(url_for('restart_page', next=url_for('playground_packages', file=the_file, project=current_project)))
return redirect(url_for('playground_packages', project=current_project, file=the_file))
if request.method == 'POST' and validated and form.delete.data and the_file != '' and the_file == form.file_name.data and os.path.isfile(os.path.join(directory_for(area['playgroundpackages'], current_project), 'docassemble.' + the_file)):
os.remove(os.path.join(directory_for(area['playgroundpackages'], current_project), 'docassemble.' + the_file))
dotfile = os.path.join(directory_for(area['playgroundpackages'], current_project), '.docassemble-' + the_file)
if os.path.exists(dotfile):
os.remove(dotfile)
area['playgroundpackages'].finalize()
flash(word("Deleted package"), "success")
return redirect(url_for('playground_packages', project=current_project))
if not is_new:
pkgname = 'docassemble.' + the_file
if can_publish_to_pypi:
pypi_info = pypi_status(pkgname)
if pypi_info['error']:
pypi_message = word("Unable to determine if the package is published on PyPI.")
else:
if pypi_info['exists'] and 'info' in pypi_info['info']:
pypi_version = pypi_info['info']['info'].get('version', None)
pypi_message = word('This package is') + ' <a target="_blank" href="' + pypi_url + '/' + pkgname + '/' + pypi_version + '">' + word("published on PyPI") + '</a>.'
pypi_author = pypi_info['info']['info'].get('author', None)
if pypi_author:
pypi_message += " " + word("The author is") + " " + pypi_author + "."
if pypi_version != form['version'].data:
pypi_message += " " + word("The version on PyPI is") + " " + str(pypi_version) + ". " + word("Your version is") + " " + str(form['version'].data) + "."
else:
pypi_message = word('This package is not yet published on PyPI.')
if request.method == 'POST' and validated:
new_info = {}
for field in ('license', 'description', 'author_name', 'author_email', 'version', 'url', 'readme', 'dependencies', 'interview_files', 'template_files', 'module_files', 'static_files', 'sources_files'):
new_info[field] = form[field].data
# logmessage("found " + str(new_info))
if form.submit.data or form.download.data or form.install.data or form.pypi.data or form.github.data:
if the_file != '':
area['playgroundpackages'].finalize()
if form.original_file_name.data and form.original_file_name.data != the_file:
old_filename = os.path.join(directory_for(area['playgroundpackages'], current_project), 'docassemble.' + form.original_file_name.data)
if os.path.isfile(old_filename):
os.remove(old_filename)
if can_publish_to_pypi and form.pypi.data and pypi_version is not None:
if not new_info['version']:
new_info['version'] = pypi_version
while 'releases' in pypi_info['info'] and new_info['version'] in pypi_info['info']['releases'].keys():
versions = new_info['version'].split(".")
versions[-1] = str(int(versions[-1]) + 1)
new_info['version'] = ".".join(versions)
filename = os.path.join(directory_for(area['playgroundpackages'], current_project), 'docassemble.' + the_file)
if os.path.isfile(filename):
with open(filename, 'r', encoding='utf-8') as fp:
content = fp.read()
old_info = standardyaml.load(content, Loader=standardyaml.FullLoader)
for name in ('github_url', 'github_branch', 'pypi_package_name'):
if old_info.get(name, None):
new_info[name] = old_info[name]
with open(filename, 'w', encoding='utf-8') as fp:
the_yaml = standardyaml.safe_dump(new_info, default_flow_style=False, default_style='|')
fp.write(str(the_yaml))
area['playgroundpackages'].finalize()
if form.download.data:
return redirect(url_for('create_playground_package', package=the_file, project=current_project))
if form.install.data:
return redirect(url_for('create_playground_package', package=the_file, project=current_project, install='1'))
if form.pypi.data:
if form.install_also.data:
return redirect(url_for('create_playground_package', package=the_file, project=current_project, pypi='1', install='1'))
return redirect(url_for('create_playground_package', package=the_file, project=current_project, pypi='1'))
if form.github.data:
the_branch = form.github_branch.data
if the_branch == "<new>":
the_branch = re.sub(r'[^A-Za-z0-9\_\-]', r'', str(form.github_branch_new.data))
return redirect(url_for('create_playground_package', project=current_project, package=the_file, github='1', commit_message=form.commit_message.data, new_branch=str(the_branch), pypi_also=('1' if form.pypi_also.data else '0'), install_also=('1' if form.install_also.data else '0')))
return redirect(url_for('create_playground_package', project=current_project, package=the_file, github='1', commit_message=form.commit_message.data, branch=str(the_branch), pypi_also=('1' if form.pypi_also.data else '0'), install_also=('1' if form.install_also.data else '0')))
the_time = formatted_current_time()
if show_message:
flash(word('The package information was saved.'), 'success')
form.original_file_name.data = the_file
form.file_name.data = the_file
if the_file != '' and os.path.isfile(os.path.join(directory_for(area['playgroundpackages'], current_project), 'docassemble.' + the_file)):
filename = os.path.join(directory_for(area['playgroundpackages'], current_project), 'docassemble.' + the_file)
else:
filename = None
header = word("Packages")
upload_header = None
edit_header = None
description = Markup("""Describe your package and choose the files from your Playground that will go into it.""")
after_text = None
if scroll:
extra_command = " scrollBottom();"
else:
extra_command = ""
extra_command += upload_js() + """
$("#daCancelPyPI").click(function(event){
var daWhichButton = this;
$("#pypi_message_div").hide();
$(".btn-da").each(function(){
if (this != daWhichButton && $(this).attr('id') != 'daCancelGitHub' && $(this).is(":hidden")){
$(this).show();
}
});
$("#daPyPI").html(""" + json.dumps(word("PyPI")) + """);
$(this).hide();
event.preventDefault();
return false;
});
$("#daCancelGitHub").click(function(event){
var daWhichButton = this;
$("#commit_message_div").hide();
$(".btn-da").each(function(){
if (this != daWhichButton && $(this).attr('id') != 'daCancelPyPI' && $(this).is(":hidden")){
$(this).show();
}
});
$("#daGitHub").html(""" + json.dumps(word("GitHub")) + """);
$(this).hide();
event.preventDefault();
return false;
});
if ($("#github_branch option").length == 0){
$("#github_branch_div").hide();
}
$("#github_branch").on('change', function(event){
if ($(this).val() == '<new>'){
$("#new_branch_div").show();
}
else{
$("#new_branch_div").hide();
}
});
$("#daPyPI").click(function(event){
if (existingPypiVersion != null && existingPypiVersion == $("#version").val()){
alert(""" + json.dumps(word("You need to increment the version before publishing to PyPI.")) + """);
$('html, body').animate({
scrollTop: $("#version").offset().top-90,
scrollLeft: 0
});
$("#version").focus();
var tmpStr = $("#version").val();
$("#version").val('');
$("#version").val(tmpStr);
event.preventDefault();
return false;
}
var daWhichButton = this;
if ($("#pypi_message_div").is(":hidden")){
$("#pypi_message_div").show();
$(".btn-da").each(function(){
if (this != daWhichButton && $(this).is(":visible")){
$(this).hide();
}
});
$(this).html(""" + json.dumps(word("Publish")) + """);
$("#daCancelPyPI").show();
window.scrollTo(0, document.body.scrollHeight);
event.preventDefault();
return false;
}
});
$("#daGitHub").click(function(event){
var daWhichButton = this;
if ($("#commit_message").val().length == 0 || $("#commit_message_div").is(":hidden")){
if ($("#commit_message_div").is(":visible")){
$("#commit_message").addClass("is-invalid");
}
else{
$("#commit_message_div").show();
$(".btn-da").each(function(){
if (this != daWhichButton && $(this).is(":visible")){
$(this).hide();
}
});
$(this).html(""" + json.dumps(word("Commit")) + """);
$("#daCancelGitHub").show();
}
$("#commit_message").focus();
window.scrollTo(0, document.body.scrollHeight);
event.preventDefault();
return false;
}
if ($("#pypi_also").prop('checked') && existingPypiVersion != null && existingPypiVersion == $("#version").val()){
alert(""" + json.dumps(word("You need to increment the version before publishing to PyPI.")) + """);
$('html, body').animate({
scrollTop: $("#version").offset().top-90,
scrollLeft: 0
});
$("#version").focus();
var tmpStr = $("#version").val();
$("#version").val('');
$("#version").val(tmpStr);
event.preventDefault();
return false;
}
});
$(document).on('keydown', function(e){
if (e.which == 13){
var tag = $( document.activeElement ).prop("tagName");
if (tag != "TEXTAREA" && tag != "A" && tag != "LABEL" && tag != "BUTTON"){
e.preventDefault();
e.stopPropagation();
}
}
});"""
if keymap:
kbOpt = 'keyMap: "' + keymap + '", cursorBlinkRate: 0, '
kbLoad = '<script src="' + url_for('static', filename="codemirror/keymap/" + keymap + ".js", v=da_version) + '"></script>\n '
else:
kbOpt = ''
kbLoad = ''
any_files = len(editable_files) > 0
back_button = Markup('<span class="navbar-brand navbar-nav dabackicon me-3"><a href="' + url_for('playground_page', project=current_project) + '" class="dabackbuttoncolor nav-link" title=' + json.dumps(word("Go back to the main Playground page")) + '><i class="fas fa-chevron-left"></i><span class="daback">' + word('Back') + '</span></a></span>')
if can_publish_to_pypi:
if pypi_message is not None:
pypi_message = Markup(pypi_message)
else:
pypi_message = None
extra_js = '\n <script src="' + url_for('static', filename="app/playgroundbundle.js", v=da_version) + '"></script>\n '
extra_js += kbLoad
extra_js += """<script>
var existingPypiVersion = """ + json.dumps(pypi_version) + """;
var isNew = """ + json.dumps(is_new) + """;
var existingFiles = """ + json.dumps(files) + """;
var currentFile = """ + json.dumps(the_file) + """;
var daExpireSession = null;
function resetExpireSession(){
if (daExpireSession != null){
window.clearTimeout(daExpireSession);
}
daExpireSession = setTimeout(function(){
alert(""" + json.dumps(word("Your browser session has expired and you have been signed out. You will not be able to save your work. Please log in again.")) + """);
}, """ + str(999 * int(daconfig.get('session lifetime seconds', 43200))) + """);
}
function scrollBottom(){
$("html, body").animate({ scrollTop: $(document).height() }, "slow");
}
$( document ).ready(function() {
resetExpireSession();
$("#file_name").on('change', function(){
var newFileName = $(this).val();
if ((!isNew) && newFileName == currentFile){
return;
}
for (var i = 0; i < existingFiles.length; i++){
if (newFileName == existingFiles[i]){
alert(""" + json.dumps(word("Warning: a package definition by that name already exists. If you save, you will overwrite it.")) + """);
return;
}
}
return;
});
$("#daDelete").click(function(event){
          if (!confirm(""" + json.dumps(word("Are you sure that you want to delete this package?")) + """)){
event.preventDefault();
}
});
daTextArea = document.getElementById("readme");
var daCodeMirror = CodeMirror.fromTextArea(daTextArea, {mode: "markdown", """ + kbOpt + """tabSize: 2, tabindex: 70, autofocus: false, lineNumbers: true, matchBrackets: true, lineWrapping: """ + ('true' if daconfig.get('wrap lines in playground', True) else 'false') + """});
$(window).bind("beforeunload", function(){
daCodeMirror.save();
$("#form").trigger("checkform.areYouSure");
});
$("#form").areYouSure(""" + json.dumps({'message': word("There are unsaved changes. Are you sure you wish to leave this page?")}) + """);
$("#form").bind("submit", function(){
daCodeMirror.save();
$("#form").trigger("reinitialize.areYouSure");
return true;
});
daCodeMirror.setOption("extraKeys", { Tab: function(cm){ var spaces = Array(cm.getOption("indentUnit") + 1).join(" "); cm.replaceSelection(spaces); }, "F11": function(cm) { cm.setOption("fullScreen", !cm.getOption("fullScreen")); }, "Esc": function(cm) { if (cm.getOption("fullScreen")) cm.setOption("fullScreen", false); }});
daCodeMirror.setOption("coverGutterNextToScrollbar", true);""" + extra_command + """
});
</script>"""
if github_use_ssh:
the_github_url = github_ssh
else:
the_github_url = github_http
if the_github_url is None and github_url_from_file is not None:
the_github_url = github_url_from_file
if the_github_url is None:
the_pypi_package_name = pypi_package_from_file
else:
the_pypi_package_name = None
if github_message is not None and github_url_from_file is not None and github_url_from_file != github_http and github_url_from_file != github_ssh:
github_message += ' ' + word("This package was originally pulled from") + ' <a target="_blank" href="' + github_as_http(github_url_from_file) + '">' + word('a GitHub repository') + '</a>.'
if github_message is not None and old_info.get('github_branch', None) and (github_http or github_url_from_file):
html_url = github_http or github_url_from_file
commit_code = None
current_commit_file = os.path.join(directory_for(area['playgroundpackages'], current_project), '.' + github_package_name)
if os.path.isfile(current_commit_file):
with open(current_commit_file, 'r', encoding='utf-8') as fp:
commit_code = fp.read().strip()
if current_user.timezone:
the_timezone = zoneinfo.ZoneInfo(current_user.timezone)
else:
the_timezone = zoneinfo.ZoneInfo(get_default_timezone())
            commit_code_date = datetime.datetime.fromtimestamp(os.path.getmtime(current_commit_file), tz=datetime.timezone.utc).astimezone(the_timezone).strftime("%Y-%m-%d %H:%M:%S %Z")
if commit_code:
github_message += ' ' + word('The current branch is %s and the current commit is %s.') % ('<a target="_blank" href="' + html_url + '/tree/' + old_info['github_branch'] + '">' + old_info['github_branch'] + '</a>', '<a target="_blank" href="' + html_url + '/commit/' + commit_code + '"><code>' + commit_code[0:7] + '</code></a>') + ' ' + word('The commit was saved locally at %s.') % commit_code_date
else:
github_message += ' ' + word('The current branch is %s.') % ('<a target="_blank" href="' + html_url + '/tree/' + old_info['github_branch'] + '">' + old_info['github_branch'] + '</a>',)
if github_message is not None:
github_message = Markup(github_message)
branch = old_info.get('github_branch', None)
if branch is not None:
branch = branch.strip()
branch_choices = []
if len(branch_info) > 0:
branch_choices.append(("<new>", word("(New branch)")))
branch_names = set()
for br in branch_info:
branch_names.add(br['name'])
branch_choices.append((br['name'], br['name']))
if branch and branch in branch_names:
form.github_branch.data = branch
default_branch = branch
elif 'master' in branch_names:
form.github_branch.data = 'master'
default_branch = 'master'
elif 'main' in branch_names:
form.github_branch.data = 'main'
default_branch = 'main'
else:
default_branch = GITHUB_BRANCH
form.github_branch.choices = branch_choices
if form.author_name.data in ('', None) and current_user.first_name and current_user.last_name:
form.author_name.data = current_user.first_name + " " + current_user.last_name
if form.author_email.data in ('', None) and current_user.email:
form.author_email.data = current_user.email
if current_user.id != playground_user.id:
header += " / " + playground_user.email
if current_project != 'default':
header += " / " + current_project
response = make_response(render_template('pages/playgroundpackages.html', current_project=current_project, branch=default_branch, version_warning=None, bodyclass='daadminbody', can_publish_to_pypi=can_publish_to_pypi, pypi_message=pypi_message, can_publish_to_github=can_publish_to_github, github_message=github_message, github_url=the_github_url, pypi_package_name=the_pypi_package_name, back_button=back_button, tab_title=header, page_title=header, extra_css=Markup('\n <link href="' + url_for('static', filename='app/playgroundbundle.css', v=da_version) + '" rel="stylesheet">'), extra_js=Markup(extra_js), header=header, upload_header=upload_header, edit_header=edit_header, description=description, form=form, fileform=fileform, files=files, file_list=file_list, userid=playground_user.id, editable_files=sorted(editable_files, key=lambda y: y['name'].lower()), current_file=the_file, after_text=after_text, section_name=section_name, section_sec=section_sec, section_field=section_field, package_names=sorted(package_names, key=lambda y: y.lower()), any_files=any_files), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
def github_as_http(url):
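    """Convert a git SSH URL like git@github.com:owner/repo.git into an HTTPS URL.

    URLs that already start with 'http' are returned unchanged.
    """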
if url.startswith('http'):
return url
return re.sub(r'^[^@]+@([^:]+):(.*)\.git$', r'https://\1/\2', url)
def copy_if_different(source, destination):
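    """Copy source to destination only if destination is missing or its contents differ."""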
if (not os.path.isfile(destination)) or filecmp.cmp(source, destination) is False:
shutil.copyfile(source, destination)
def splitall(path):
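    """Split a filesystem path into a list of all of its components."""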
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path:
allparts.insert(0, parts[0])
break
if parts[1] == path:
allparts.insert(0, parts[1])
break
path = parts[0]
allparts.insert(0, parts[1])
return allparts
@app.route('/playground_redirect_poll', methods=['GET'])
@login_required
@roles_required(['developer', 'admin'])
def playground_redirect_poll():
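    """AJAX endpoint that reports whether a Playground run URL is ready.

    Looks up the per-user 'da:runplayground:<user id>' key in Redis; if it is
    set, deletes it and returns the URL as JSON so the browser can redirect.
    """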
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
playground_user = get_playground_user()
key = 'da:runplayground:' + str(playground_user.id)
the_url = r.get(key)
# logmessage("playground_redirect: key " + str(key) + " is " + str(the_url))
if the_url is not None:
the_url = the_url.decode()
r.delete(key)
return jsonify({'success': True, 'url': the_url})
return jsonify({'success': False, 'url': the_url})
@app.route('/playground_redirect', methods=['GET', 'POST'])
@login_required
@roles_required(['developer', 'admin'])
def playground_redirect():
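    """Wait for a Playground run URL to appear in Redis and redirect to it.

    Polls the per-user 'da:runplayground:<user id>' key about once per second
    for up to 15 seconds, returning 404 if no URL appears.
    """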
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
playground_user = get_playground_user()
key = 'da:runplayground:' + str(playground_user.id)
counter = 0
while counter < 15:
the_url = r.get(key)
# logmessage("playground_redirect: key " + str(key) + " is " + str(the_url))
if the_url is not None:
the_url = the_url.decode()
r.delete(key)
return redirect(the_url)
time.sleep(1)
counter += 1
return ('File not found', 404)
def upload_js():
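    """Return the JavaScript that wires the upload link to the hidden file input."""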
return """
$("#uploadlink").on('click', function(event){
$("#uploadlabel").click();
event.preventDefault();
return false;
});
$("#uploadlabel").on('click', function(event){
event.stopPropagation();
event.preventDefault();
$("#uploadfile").click();
return false;
});
$("#uploadfile").on('click', function(event){
event.stopPropagation();
});
$("#uploadfile").on('change', function(event){
$("#fileform").submit();
});"""
def search_js(form=None):
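    """Return the JavaScript implementing the search controls for the CodeMirror editor.

    The optional form argument is the id of the form containing the
    search_term input; it defaults to 'form'.
    """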
if form is None:
form = 'form'
return """
var origPosition = null;
var searchMatches = null;
function searchReady(){
$("#""" + form + """ input[name='search_term']").on("focus", function(event){
origPosition = daCodeMirror.getCursor('from');
});
$("#""" + form + """ input[name='search_term']").change(update_search);
$("#""" + form + """ input[name='search_term']").on("keydown", enter_search);
$("#""" + form + """ input[name='search_term']").on("keyup", update_search);
$("#daSearchPrevious").click(function(event){
var query = $("#""" + form + """ input[name='search_term']").val();
if (query.length == 0){
clear_matches();
daCodeMirror.setCursor(daCodeMirror.getCursor('from'));
$("#""" + form + """ input[name='search_term']").removeClass("da-search-error");
return;
}
origPosition = daCodeMirror.getCursor('from');
var sc = daCodeMirror.getSearchCursor(query, origPosition);
show_matches(query);
var found = sc.findPrevious();
if (found){
daCodeMirror.setSelection(sc.from(), sc.to());
scroll_to_selection();
$("#""" + form + """ input[name='search_term']").removeClass("da-search-error");
}
else{
var lastLine = daCodeMirror.lastLine()
var lastChar = daCodeMirror.lineInfo(lastLine).text.length
origPosition = { line: lastLine, ch: lastChar, xRel: 1 }
sc = daCodeMirror.getSearchCursor(query, origPosition);
show_matches(query);
var found = sc.findPrevious();
if (found){
daCodeMirror.setSelection(sc.from(), sc.to());
scroll_to_selection();
$("#""" + form + """ input[name='search_term']").removeClass("da-search-error");
}
else{
$("#""" + form + """ input[name='search_term']").addClass("da-search-error");
}
}
event.preventDefault();
return false;
});
$("#daSearchNext").click(function(event){
var query = $("#""" + form + """ input[name='search_term']").val();
if (query.length == 0){
clear_matches();
daCodeMirror.setCursor(daCodeMirror.getCursor('from'));
$("#""" + form + """ input[name='search_term']").removeClass("da-search-error");
return;
}
origPosition = daCodeMirror.getCursor('to');
var sc = daCodeMirror.getSearchCursor(query, origPosition);
show_matches(query);
var found = sc.findNext();
if (found){
daCodeMirror.setSelection(sc.from(), sc.to());
scroll_to_selection();
$("#""" + form + """ input[name='search_term']").removeClass("da-search-error");
}
else{
origPosition = { line: 0, ch: 0, xRel: 1 }
sc = daCodeMirror.getSearchCursor(query, origPosition);
show_matches(query);
var found = sc.findNext();
if (found){
daCodeMirror.setSelection(sc.from(), sc.to());
scroll_to_selection();
$("#""" + form + """ input[name='search_term']").removeClass("da-search-error");
}
else{
$("#""" + form + """ input[name='search_term']").addClass("da-search-error");
}
}
event.preventDefault();
return false;
});
}
function show_matches(query){
clear_matches();
if (query.length == 0){
daCodeMirror.setCursor(daCodeMirror.getCursor('from'));
$("#""" + form + """ input[name='search_term']").removeClass("da-search-error");
return;
}
searchMatches = daCodeMirror.showMatchesOnScrollbar(query);
}
function clear_matches(){
if (searchMatches != null){
try{
searchMatches.clear();
}
catch(err){}
}
}
function scroll_to_selection(){
daCodeMirror.scrollIntoView(daCodeMirror.getCursor('from'))
var t = daCodeMirror.charCoords(daCodeMirror.getCursor('from'), "local").top;
daCodeMirror.scrollTo(null, t);
}
function enter_search(event){
var theCode = event.which || event.keyCode;
if(theCode == 13) {
event.preventDefault();
$("#daSearchNext").click();
return false;
}
}
function update_search(event){
var query = $(this).val();
if (query.length == 0){
clear_matches();
daCodeMirror.setCursor(daCodeMirror.getCursor('from'));
$(this).removeClass("da-search-error");
return;
}
var theCode = event.which || event.keyCode;
if(theCode == 13) {
event.preventDefault();
return false;
}
var sc = daCodeMirror.getSearchCursor(query, origPosition);
show_matches(query);
var found = sc.findNext();
if (found){
daCodeMirror.setSelection(sc.from(), sc.to());
scroll_to_selection();
$(this).removeClass("da-search-error");
}
else{
origPosition = { line: 0, ch: 0, xRel: 1 }
sc = daCodeMirror.getSearchCursor(query, origPosition);
show_matches(query);
var found = sc.findNext();
if (found){
daCodeMirror.setSelection(sc.from(), sc.to());
scroll_to_selection();
$(this).removeClass("da-search-error");
}
else{
$(this).addClass("da-search-error");
}
}
}
"""
def variables_js(form=None, office_mode=False):
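    """Return the JavaScript behind the "Variables, etc." panel.

    Activates popovers and the variable and attribute click handlers; unless
    office_mode is set, it also wires variable insertion and searching into
    the CodeMirror editor and defines the AJAX refresh of the variables panel.
    """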
playground_user = get_playground_user()
output = """
function activatePopovers(){
var daPopoverTriggerList = [].slice.call(document.querySelectorAll('[data-bs-toggle="popover"]'));
var daPopoverList = daPopoverTriggerList.map(function (daPopoverTriggerEl) {
return new bootstrap.Popover(daPopoverTriggerEl, {trigger: "focus", html: true});
});
}
function activateVariables(){
$(".daparenthetical").on("click", function(event){
var reference = $(this).data("ref");
//console.log("reference is " + reference);
var target = $('[data-name="' + reference + '"]').first();
if (target.length > 0){
//console.log("target is " + target);
//console.log("scrolltop is now " + $('#daplaygroundcard').scrollTop());
//console.log("Scrolling to " + target.parent().parent().position().top);
$('#daplaygroundcard').animate({
scrollTop: target.parent().parent().position().top
}, 1000);
}
event.preventDefault();
});
$(".dashowmethods").on("click", function(event){
var target_id = $(this).data("showhide");
$("#" + target_id).slideToggle();
});
$(".dashowattributes").each(function(){
var basename = $(this).data('name');
if (attrs_showing.hasOwnProperty(basename)){
if (attrs_showing[basename]){
$('tr[data-parent="' + basename + '"]').show();
}
}
else{
attrs_showing[basename] = false;
}
});
$(".dashowattributes").on("click", function(event){
var basename = $(this).data('name');
attrs_showing[basename] = !attrs_showing[basename];
$('tr[data-parent="' + basename + '"]').each(function(){
$(this).toggle();
});
});"""
if office_mode:
return output + "\n}"
if form is None:
form = 'form'
output += """
$(".playground-variable").on("click", function(event){
daCodeMirror.replaceSelection($(this).data("insert"), "around");
daCodeMirror.focus();
});
$(".dasearchicon").on("click", function(event){
var query = $(this).data('name');
if (query == null || query.length == 0){
clear_matches();
daCodeMirror.setCursor(daCodeMirror.getCursor('from'));
return;
}
origPosition = daCodeMirror.getCursor('to');
$("#""" + form + """ input[name='search_term']").val(query);
var sc = daCodeMirror.getSearchCursor(query, origPosition);
show_matches(query);
var found = sc.findNext();
if (found){
daCodeMirror.setSelection(sc.from(), sc.to());
scroll_to_selection();
$("#form input[name='search_term']").removeClass('da-search-error');
}
else{
origPosition = { line: 0, ch: 0, xRel: 1 }
sc = daCodeMirror.getSearchCursor(query, origPosition);
show_matches(query);
var found = sc.findNext();
if (found){
daCodeMirror.setSelection(sc.from(), sc.to());
scroll_to_selection();
$("#""" + form + """ input[name='search_term']").removeClass('da-search-error');
}
else{
$("#""" + form + """ input[name='search_term']").addClass('da-search-error');
}
}
event.preventDefault();
return false;
});
}
var interviewBaseUrl = '""" + url_for('index', reset='1', cache='0', i='docassemble.playground' + str(playground_user.id) + ':.yml') + """';
var shareBaseUrl = '""" + url_for('index', i='docassemble.playground' + str(playground_user.id) + ':.yml') + """';
function updateRunLink(){
if (currentProject == 'default'){
$("#daRunButton").attr("href", interviewBaseUrl.replace(':.yml', ':' + $("#daVariables").val()));
$("a.da-example-share").attr("href", shareBaseUrl.replace(':.yml', ':' + $("#daVariables").val()));
}
else{
$("#daRunButton").attr("href", interviewBaseUrl.replace(':.yml', currentProject + ':' + $("#daVariables").val()));
$("a.da-example-share").attr("href", shareBaseUrl.replace(':.yml', currentProject + ':' + $("#daVariables").val()));
}
}
function fetchVars(changed){
daCodeMirror.save();
updateRunLink();
$.ajax({
type: "POST",
url: """ + '"' + url_for('playground_variables') + '"' + """ + '?project=' + currentProject,
data: 'csrf_token=' + $("#""" + form + """ input[name='csrf_token']").val() + '&variablefile=' + $("#daVariables").val() + '&ajax=1&changed=' + (changed ? 1 : 0),
success: function(data){
if (data.action && data.action == 'reload'){
location.reload(true);
}
if (data.vocab_list != null){
vocab = data.vocab_list;
}
if (data.current_project != null){
currentProject = data.current_project;
}
if (data.variables_html != null){
$("#daplaygroundtable").html(data.variables_html);
var daPopoverTriggerList = [].slice.call(document.querySelectorAll('[data-bs-toggle="popover"]'));
var daPopoverList = daPopoverTriggerList.map(function (daPopoverTriggerEl) {
return new bootstrap.Popover(daPopoverTriggerEl, {trigger: "focus", html: true});
});
activateVariables();
}
},
dataType: 'json'
});
$("#daVariables").blur();
}
function variablesReady(){
$("#daVariables").change(function(event){
fetchVars(true);
});
}
$( document ).ready(function() {
$(document).on('keydown', function(e){
if (e.which == 13){
var tag = $( document.activeElement ).prop("tagName");
if (tag == "INPUT"){
e.preventDefault();
e.stopPropagation();
$(".CodeMirror textarea").focus();
return false;
}
}
});
});
"""
return output
@app.route("/varsreport", methods=['GET'])
@login_required
@roles_required(['admin', 'developer'])
def variables_report():
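    """Return a JSON report of where each variable is used in a Playground interview.

    For every name in the interview's vocabulary, lists the questions that use
    it in a Mako template ('in mako'), mention it, or define it.
    """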
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
setup_translation()
playground_user = get_playground_user()
playground = SavedFile(playground_user.id, fix=True, section='playground')
the_file = request.args.get('file', None)
if the_file is not None:
the_file = secure_filename_spaces_ok(the_file)
current_project = werkzeug.utils.secure_filename(request.args.get('project', 'default'))
the_directory = directory_for(playground, current_project)
files = sorted([f for f in os.listdir(the_directory) if os.path.isfile(os.path.join(the_directory, f)) and re.search(r'^[A-Za-z0-9]', f)])
if len(files) == 0:
return jsonify(success=False, reason=1)
if the_file is None or the_file not in files:
return jsonify(success=False, reason=2)
interview_source = docassemble.base.parse.interview_source_from_string('docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + the_file)
interview_source.set_testing(True)
interview = interview_source.get_interview()
ensure_ml_file_exists(interview, the_file, current_project)
yaml_file = 'docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + the_file
the_current_info = current_info(yaml=yaml_file, req=request, action=None, device_id=request.cookies.get('ds', None))
docassemble.base.functions.this_thread.current_info = the_current_info
interview_status = docassemble.base.parse.InterviewStatus(current_info=the_current_info)
variables_html, vocab_list, vocab_dict = get_vars_in_use(interview, interview_status, debug_mode=False, current_project=current_project) # pylint: disable=unused-variable
results = []
result_dict = {}
for name in vocab_list:
if name in ('x', 'row_item', 'i', 'j', 'k', 'l', 'm', 'n') or name.startswith('x.') or name.startswith('x[') or name.startswith('row_item.'):
continue
result = {'name': name, 'questions': []}
results.append(result)
result_dict[name] = result
for question in interview.questions_list:
names_seen = {}
for the_type, the_set in (('in mako', question.mako_names), ('mentioned in', question.names_used), ('defined by', question.fields_used)):
for name in the_set:
the_name = name
subnames = [the_name]
while True:
                    if re.search(r'\[[^\]]*\]$', the_name):
                        the_name = re.sub(r'\[[^\]]*\]$', '', the_name)
elif '.' in the_name:
the_name = re.sub(r'\.[^\.]*$', '', the_name)
else:
break
subnames.append(the_name)
on_first = True
for subname in subnames:
if the_type == 'defined by' and not on_first:
the_type = 'mentioned in'
on_first = False
if subname not in result_dict:
continue
if subname not in names_seen:
names_seen[subname] = {'yaml_file': question.from_source.path, 'source_code': question.source_code.strip(), 'usage': []}
result_dict[subname]['questions'].append(names_seen[subname])
if the_type not in names_seen[subname]['usage']:
names_seen[subname]['usage'].append(the_type)
return jsonify(success=True, yaml_file=yaml_file, items=results)
@app.route('/playgroundvariables', methods=['POST'])
@login_required
@roles_required(['developer', 'admin'])
def playground_variables():
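    """AJAX endpoint that rebuilds the variables panel for a Playground file.

    Parses the selected YAML file (or the posted editor content) and returns
    the rendered variables HTML and the vocabulary list as JSON.
    """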
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
playground_user = get_playground_user()
current_project = get_current_project()
playground = SavedFile(playground_user.id, fix=True, section='playground')
the_directory = directory_for(playground, current_project)
files = sorted([f for f in os.listdir(the_directory) if os.path.isfile(os.path.join(the_directory, f)) and re.search(r'^[A-Za-z0-9]', f)])
if len(files) == 0:
return jsonify(success=False, reason=1)
post_data = request.form.copy()
if request.method == 'POST' and 'variablefile' in post_data:
active_file = post_data['variablefile']
if post_data['variablefile'] in files:
if 'changed' in post_data and int(post_data['changed']):
set_variable_file(current_project, active_file)
interview_source = docassemble.base.parse.interview_source_from_string('docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + active_file)
interview_source.set_testing(True)
else:
if active_file == '' and current_project == 'default':
active_file = 'test.yml'
content = ''
if 'playground_content' in post_data:
content = re.sub(r'\r\n', r'\n', post_data['playground_content'])
interview_source = docassemble.base.parse.InterviewSourceString(content=content, directory=the_directory, package="docassemble.playground" + str(playground_user.id) + project_name(current_project), path="docassemble.playground" + str(playground_user.id) + project_name(current_project) + ":" + active_file, testing=True)
interview = interview_source.get_interview()
ensure_ml_file_exists(interview, active_file, current_project)
the_current_info = current_info(yaml='docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + active_file, req=request, action=None, device_id=request.cookies.get('ds', None))
docassemble.base.functions.this_thread.current_info = the_current_info
interview_status = docassemble.base.parse.InterviewStatus(current_info=the_current_info)
variables_html, vocab_list, vocab_dict = get_vars_in_use(interview, interview_status, debug_mode=False, current_project=current_project) # pylint: disable=unused-variable
return jsonify(success=True, variables_html=variables_html, vocab_list=vocab_list, current_project=current_project)
return jsonify(success=False, reason=2)
def ensure_ml_file_exists(interview, yaml_file, current_project):
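    """Create an empty machine learning training file for the interview if one is needed.

    Only acts when the interview defines machine learning fields whose data
    store lives in the current user's Playground sources folder.
    """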
playground_user = get_playground_user()
if len(interview.mlfields) > 0:
if hasattr(interview, 'ml_store'):
parts = interview.ml_store.split(':')
if parts[0] != 'docassemble.playground' + str(playground_user.id) + current_project:
return
source_filename = re.sub(r'.*/', '', parts[1])
else:
source_filename = 'ml-' + re.sub(r'\.ya?ml$', '', yaml_file) + '.json'
# logmessage("Source filename is " + source_filename)
source_dir = SavedFile(playground_user.id, fix=False, section='playgroundsources')
source_directory = directory_for(source_dir, current_project)
if current_project != 'default':
source_filename = os.path.join(current_project, source_filename)
if source_filename not in source_dir.list_of_files():
# logmessage("Source filename does not exist yet")
source_dir.fix()
source_path = os.path.join(source_directory, source_filename)
with open(source_path, 'a', encoding='utf-8'):
os.utime(source_path, None)
source_dir.finalize()
def assign_opacity(files):
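    """Set an 'opacity' value between 0.2 and 1.0 on each file dict, based on modification time.

    The most recently modified file gets 1.0; progressively older files fade toward 0.2.
    """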
if len(files) == 1:
files[0]['opacity'] = 1.0
else:
indexno = 0.0
max_indexno = float(len(files) - 1)
for file_dict in sorted(files, key=lambda x: x['modtime']):
file_dict['opacity'] = round(0.2 + 0.8*(indexno/max_indexno), 2)
indexno += 1.0
@app.route('/playground_run', methods=['GET', 'POST'])
@login_required
@roles_required(['developer', 'admin'])
def playground_page_run():
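    """Store the run URL for a Playground file in Redis and return to the editor page.

    The short-lived 'da:runplayground:<user id>' key is later picked up by
    playground_redirect or playground_redirect_poll to open the interview.
    """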
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
playground_user = get_playground_user()
current_project = get_current_project()
the_file = secure_filename_spaces_ok(request.args.get('file'))
if the_file:
active_interview_string = 'docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + the_file
the_url = url_for('index', reset=1, i=active_interview_string)
key = 'da:runplayground:' + str(playground_user.id)
# logmessage("Setting key " + str(key) + " to " + str(the_url))
pipe = r.pipeline()
pipe.set(key, the_url)
pipe.expire(key, 25)
pipe.execute()
return redirect(url_for('playground_page', file=the_file, project=current_project))
return redirect(url_for('playground_page', project=current_project))
def get_list_of_projects(user_id):
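    """Return the names of the user's Playground projects (the playground subdirectories)."""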
playground = SavedFile(user_id, fix=False, section='playground')
return playground.list_of_dirs()
def rename_project(user_id, old_name, new_name):
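    """Rename a Playground project's directory in every Playground section."""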
fix_package_folder()
for sec in ('', 'sources', 'static', 'template', 'modules', 'packages'):
area = SavedFile(user_id, fix=True, section='playground' + sec)
if os.path.isdir(os.path.join(area.directory, old_name)):
os.rename(os.path.join(area.directory, old_name), os.path.join(area.directory, new_name))
area.finalize()
def create_project(user_id, new_name):
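    """Create the directories (with a .placeholder file) for a new Playground project in every section."""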
fix_package_folder()
for sec in ('', 'sources', 'static', 'template', 'modules', 'packages'):
area = SavedFile(user_id, fix=True, section='playground' + sec)
new_dir = os.path.join(area.directory, new_name)
if not os.path.isdir(new_dir):
os.makedirs(new_dir, exist_ok=True)
path = os.path.join(new_dir, '.placeholder')
with open(path, 'a', encoding='utf-8'):
os.utime(path, None)
area.finalize()
def delete_project(user_id, the_project_name):
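    """Delete a Playground project's directory from every Playground section."""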
fix_package_folder()
for sec in ('', 'sources', 'static', 'template', 'modules', 'packages'):
area = SavedFile(user_id, fix=True, section='playground' + sec)
area.delete_directory(the_project_name)
area.finalize()
@app.route('/playgroundproject', methods=['GET', 'POST'])
@login_required
@roles_required(['developer', 'admin'])
def playground_project():
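    """Render and process the page for managing Playground projects.

    Depending on the query string, shows the list of projects or handles
    renaming, creating, or deleting a project, keeping the Google Drive or
    OneDrive copy in sync when that integration is enabled.
    """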
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
use_gd = bool(app.config['USE_GOOGLE_DRIVE'] is True and get_gd_folder() is not None)
use_od = bool(use_gd is False and app.config['USE_ONEDRIVE'] is True and get_od_folder() is not None)
playground_user = get_playground_user()
current_project = get_current_project()
if request.args.get('rename'):
form = RenameProject(request.form)
mode = 'rename'
description = word("You are renaming the project called %s.") % (current_project, )
page_title = word("Rename project")
if request.method == 'POST' and form.validate():
if current_project == 'default':
flash(word("You cannot rename the default Playground project"), 'error')
else:
rename_project(playground_user.id, current_project, form.name.data)
if use_gd:
try:
rename_gd_project(current_project, form.name.data)
except Exception as the_err:
logmessage("playground_project: unable to rename project on Google Drive. " + str(the_err))
elif use_od:
try:
rename_od_project(current_project, form.name.data)
except Exception as the_err:
try:
logmessage("playground_project: unable to rename project on OneDrive. " + str(the_err))
except:
logmessage("playground_project: unable to rename project on OneDrive.")
current_project = set_current_project(form.name.data)
flash(word('Since you renamed a project, the server needs to restart in order to reload any modules.'), 'info')
return redirect(url_for('restart_page', next=url_for('playground_project', project=current_project)))
elif request.args.get('new'):
form = NewProject(request.form)
mode = 'new'
description = word("Enter the name of the new project you want to create.")
page_title = word("New project")
if request.method == 'POST' and form.validate():
if form.name.data == 'default' or form.name.data in get_list_of_projects(playground_user.id):
flash(word("The project name %s is not available.") % (form.name.data, ), "error")
else:
create_project(playground_user.id, form.name.data)
current_project = set_current_project(form.name.data)
mode = 'standard'
return redirect(url_for('playground_page', project=current_project))
elif request.args.get('delete'):
form = DeleteProject(request.form)
mode = 'delete'
description = word("WARNING! If you press Delete, the contents of the %s project will be permanently deleted.") % (current_project, )
page_title = word("Delete project")
if request.method == 'POST' and form.validate():
if current_project == 'default':
flash(word("The default project cannot be deleted."), "error")
else:
if use_gd:
try:
trash_gd_project(current_project)
except Exception as the_err:
logmessage("playground_project: unable to delete project on Google Drive. " + str(the_err))
elif use_od:
try:
trash_od_project(current_project)
except Exception as the_err:
try:
logmessage("playground_project: unable to delete project on OneDrive. " + str(the_err))
except:
logmessage("playground_project: unable to delete project on OneDrive.")
delete_project(playground_user.id, current_project)
flash(word("The project %s was deleted.") % (current_project,), "success")
current_project = set_current_project('default')
return redirect(url_for('playground_project', project=current_project))
else:
form = None
mode = 'standard'
page_title = word("Projects")
description = word("You can divide up your Playground into multiple separate areas, apart from your default Playground area. Each Project has its own question files and Folders.")
back_button = Markup('<span class="navbar-brand navbar-nav dabackicon me-3"><a href="' + url_for('playground_page', project=current_project) + '" class="dabackbuttoncolor nav-link" title=' + json.dumps(word("Go back to the main Playground page")) + '><i class="fas fa-chevron-left"></i><span class="daback">' + word('Back') + '</span></a></span>')
response = make_response(render_template('pages/manage_projects.html', version_warning=None, bodyclass='daadminbody', back_button=back_button, tab_title=word("Projects"), description=description, page_title=page_title, projects=get_list_of_projects(playground_user.id), current_project=current_project, mode=mode, form=form), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
def set_current_project(new_name):
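    """Store the name of the current Playground project in Redis (30-day expiration) and return it."""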
key = 'da:playground:project:' + str(current_user.id)
pipe = r.pipeline()
pipe.set(key, new_name)
pipe.expire(key, 2592000)
pipe.execute()
return new_name
def get_current_project():
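    """Return the current Playground project, preferring the 'project' URL parameter (which is also stored in Redis) over the value already in Redis, and defaulting to 'default'."""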
current_project = request.args.get('project', None)
if current_project is not None:
current_project = werkzeug.utils.secure_filename(current_project)
key = 'da:playground:project:' + str(current_user.id)
if current_project is None:
current_project = r.get(key)
if current_project is not None:
current_project = current_project.decode()
else:
pipe = r.pipeline()
pipe.set(key, current_project)
pipe.expire(key, 2592000)
pipe.execute()
if current_project is None:
return 'default'
return current_project
def set_current_file(current_project, section, new_name):
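    """Store in Redis (30-day expiration) the name of the file currently being edited in the given Playground section and project."""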
key = 'da:playground:project:' + str(current_user.id) + ':playground' + section + ':' + current_project
pipe = r.pipeline()
pipe.set(key, new_name)
pipe.expire(key, 2592000)
pipe.execute()
return new_name
def get_current_file(current_project, section):
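    """Return the name of the file last edited in the given Playground section and project, or '' if none is stored."""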
key = 'da:playground:project:' + str(current_user.id) + ':playground' + section + ':' + current_project
current_file = r.get(key)
if current_file is None:
return ''
return current_file.decode()
def delete_current_file(current_project, section):
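    """Forget the file last edited in the given Playground section and project."""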
key = 'da:playground:project:' + str(current_user.id) + ':playground' + section + ':' + current_project
r.delete(key)
def clear_current_playground_info():
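    """Delete all Redis keys that track the current user's Playground project and per-section files."""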
r.delete('da:playground:project:' + str(current_user.id))
to_delete = []
for key in r.keys('da:playground:project:' + str(current_user.id) + ':playground*'):
to_delete.append(key)
for key in to_delete:
r.delete(key)
def set_variable_file(current_project, variable_file):
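    """Store in Redis (30-day expiration) the name of the active 'variables' file for the given project."""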
key = 'da:playground:project:' + str(current_user.id) + ':' + current_project + ':variablefile'
pipe = r.pipeline()
pipe.set(key, variable_file)
pipe.expire(key, 2592000)
pipe.execute()
return variable_file
def get_variable_file(current_project):
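    """Return the name of the 'variables' file for the given project, or None if none is stored."""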
key = 'da:playground:project:' + str(current_user.id) + ':' + current_project + ':variablefile'
variable_file = r.get(key)
if variable_file is not None:
variable_file = variable_file.decode()
return variable_file
def delete_variable_file(current_project):
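    """Forget the 'variables' file for the given project."""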
key = 'da:playground:project:' + str(current_user.id) + ':' + current_project + ':variablefile'
r.delete(key)
def get_list_of_playgrounds():
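    """Return basic information (id, e-mail, name) about every active user with the admin or developer role, excluding disabled accounts."""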
user_list = []
for user in db.session.execute(select(UserModel.id, UserModel.social_id, UserModel.email, UserModel.first_name, UserModel.last_name).join(UserRoles, UserModel.id == UserRoles.user_id).join(Role, UserRoles.role_id == Role.id).where(and_(UserModel.active == True, or_(Role.name == 'admin', Role.name == 'developer'))).order_by(UserModel.id)): # noqa: E712 # pylint: disable=singleton-comparison
if user.social_id.startswith('disabled$'):
continue
user_info = {}
for attrib in ('id', 'email'):
user_info[attrib] = getattr(user, attrib)
name_string = ''
if user.first_name:
name_string += str(user.first_name) + " "
if user.last_name:
name_string += str(user.last_name)
user_info['name'] = name_string
user_list.append(user_info)
return user_list
@app.route('/playgroundselect', methods=['GET', 'POST'])
@login_required
@roles_required(['developer', 'admin'])
def playground_select():
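    """Display the Playgrounds of other developers and administrators and allow switching to one of them."""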
setup_translation()
if not (app.config['ENABLE_PLAYGROUND'] and app.config['ENABLE_SHARING_PLAYGROUNDS']):
return ('File not found', 404)
current_project = get_current_project()
if request.args.get('select'):
clear_current_playground_info()
set_playground_user(int(request.args['select']))
return redirect(url_for('playground_page', project='default'))
form = None
mode = 'standard'
page_title = word("All Playgrounds")
description = word("You can use the Playground of another user who has admin or developer privileges.")
back_button = Markup('<span class="navbar-brand navbar-nav dabackicon me-3"><a href="' + url_for('playground_page', project=current_project) + '" class="dabackbuttoncolor nav-link" title=' + json.dumps(word("Go back to the main Playground page")) + '><i class="fas fa-chevron-left"></i><span class="daback">' + word('Back') + '</span></a></span>')
response = make_response(render_template('pages/manage_playgrounds.html', version_warning=None, bodyclass='daadminbody', back_button=back_button, tab_title=word("All Playgrounds"), description=description, page_title=page_title, playgrounds=get_list_of_playgrounds(), current_project=current_project, mode=mode, form=form), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
@app.route("/pgcodecache", methods=['GET'])
@login_required
@roles_required(['developer', 'admin'])
def get_pg_var_cache():
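    """Return the contents of pg_code_cache as a downloadable YAML file (pgcodecache.yml)."""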
response = make_response(altyaml.dump_to_bytes(pg_code_cache), 200)
response.headers['Content-Disposition'] = 'attachment; filename=pgcodecache.yml'
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
response.headers['Content-Type'] = 'text/plain; charset=utf-8'
return response
@app.route('/playground', methods=['GET', 'POST'])
@login_required
@roles_required(['developer', 'admin'])
def playground_page():
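    """Display the Playground YAML editor and handle uploads, saves, runs, renames, and deletions of interview files."""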
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
playground_user = get_playground_user()
current_project = get_current_project()
if 'ajax' in request.form and int(request.form['ajax']):
is_ajax = True
use_gd = False
use_od = False
else:
is_ajax = False
use_gd = bool(app.config['USE_GOOGLE_DRIVE'] is True and get_gd_folder() is not None)
use_od = bool(use_gd is False and app.config['USE_ONEDRIVE'] is True and get_od_folder() is not None)
if request.method == 'GET' and needs_to_change_password():
return redirect(url_for('user.change_password', next=url_for('playground_page', project=current_project)))
fileform = PlaygroundUploadForm(request.form)
form = PlaygroundForm(request.form)
interview = None
the_file = secure_filename_spaces_ok(request.args.get('file', get_current_file(current_project, 'questions')))
valid_form = None
if request.method == 'POST':
valid_form = form.validate()
if request.method == 'GET':
is_new = true_or_false(request.args.get('new', False))
debug_mode = true_or_false(request.args.get('debug', False))
else:
debug_mode = False
is_new = bool(not valid_form and form.status.data == 'new')
if is_new:
the_file = ''
playground = SavedFile(playground_user.id, fix=True, section='playground')
the_directory = directory_for(playground, current_project)
if current_project != 'default' and not os.path.isdir(the_directory):
current_project = set_current_project('default')
the_directory = directory_for(playground, current_project)
if request.method == 'POST' and 'uploadfile' in request.files:
the_files = request.files.getlist('uploadfile')
if the_files:
for up_file in the_files:
try:
filename = secure_filename(up_file.filename)
extension, mimetype = get_ext_and_mimetype(filename) # pylint: disable=unused-variable
if extension not in ('yml', 'yaml'):
flash(word("Sorry, only YAML files can be uploaded here. To upload other types of files, use the Folders."), 'error')
return redirect(url_for('playground_page', project=current_project))
filename = re.sub(r'[^A-Za-z0-9\-\_\. ]+', '_', filename)
new_file = filename
filename = os.path.join(the_directory, filename)
up_file.save(filename)
try:
with open(filename, 'r', encoding='utf-8') as fp:
fp.read()
except:
os.remove(filename)
flash(word("There was a problem reading the YAML file you uploaded. Are you sure it is a YAML file? File was not saved."), 'error')
return redirect(url_for('playground_page', project=current_project))
playground.finalize()
r.incr('da:interviewsource:docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + new_file)
return redirect(url_for('playground_page', project=current_project, file=os.path.basename(filename)))
except Exception as errMess:
flash("Error of type " + str(type(errMess)) + " processing upload: " + str(errMess), "error")
return redirect(url_for('playground_page', project=current_project))
if request.method == 'POST' and (form.submit.data or form.run.data or form.delete.data):
if valid_form and form.playground_name.data:
the_file = secure_filename_spaces_ok(form.playground_name.data)
# the_file = re.sub(r'[^A-Za-z0-9\_\-\. ]', '', the_file)
if the_file != '':
if not re.search(r'\.ya?ml$', the_file):
the_file = re.sub(r'\..*', '', the_file) + '.yml'
filename = os.path.join(the_directory, the_file)
if not os.path.isfile(filename):
with open(filename, 'a', encoding='utf-8'):
os.utime(filename, None)
else:
# flash(word('You need to type in a name for the interview'), 'error')
is_new = True
else:
# flash(word('You need to type in a name for the interview'), 'error')
is_new = True
# the_file = re.sub(r'[^A-Za-z0-9\_\-\. ]', '', the_file)
files = sorted([{'name': f, 'modtime': os.path.getmtime(os.path.join(the_directory, f))} for f in os.listdir(the_directory) if os.path.isfile(os.path.join(the_directory, f)) and re.search(r'^[A-Za-z0-9].*[A-Za-z]$', f)], key=lambda x: x['name'])
file_listing = [x['name'] for x in files]
assign_opacity(files)
if valid_form is False:
content = form.playground_content.data
else:
content = ''
if the_file and not is_new and the_file not in file_listing:
if request.method == 'GET':
delete_current_file(current_project, 'questions')
return redirect(url_for('playground_page', project=current_project))
the_file = ''
is_default = False
if request.method == 'GET' and not the_file and not is_new:
current_file = get_current_file(current_project, 'questions')
if current_file in files:
the_file = current_file
else:
delete_current_file(current_project, 'questions')
if len(files) > 0:
the_file = sorted(files, key=lambda x: x['modtime'])[-1]['name']
elif current_project == 'default':
the_file = 'test.yml'
is_default = True
content = default_playground_yaml
else:
the_file = ''
is_default = False
content = ''
is_new = True
if the_file in file_listing:
set_current_file(current_project, 'questions', the_file)
active_file = the_file
current_variable_file = get_variable_file(current_project)
if current_variable_file is not None:
if current_variable_file in file_listing:
active_file = current_variable_file
else:
delete_variable_file(current_project)
if the_file != '':
filename = os.path.join(the_directory, the_file)
if (valid_form or is_default) and not os.path.isfile(filename):
with open(filename, 'w', encoding='utf-8') as fp:
fp.write(content)
playground.finalize()
console_messages = []
if request.method == 'POST' and the_file != '' and valid_form:
if form.delete.data:
filename_to_del = os.path.join(the_directory, form.playground_name.data)
if os.path.isfile(filename_to_del):
os.remove(filename_to_del)
flash(word('File deleted.'), 'info')
r.delete('da:interviewsource:docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + the_file)
if active_file != the_file:
r.incr('da:interviewsource:docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + active_file)
cloud_trash(use_gd, use_od, 'questions', form.playground_name.data, current_project)
playground.finalize()
current_variable_file = get_variable_file(current_project)
if current_variable_file in (the_file, form.playground_name.data):
delete_variable_file(current_project)
delete_current_file(current_project, 'questions')
return redirect(url_for('playground_page', project=current_project))
flash(word('File not deleted. There was an error.'), 'error')
if (form.submit.data or form.run.data):
if form.original_playground_name.data and form.original_playground_name.data != the_file:
old_filename = os.path.join(the_directory, form.original_playground_name.data)
if not is_ajax:
flash(word("Changed name of interview"), 'success')
cloud_trash(use_gd, use_od, 'questions', form.original_playground_name.data, current_project)
if os.path.isfile(old_filename):
os.remove(old_filename)
files = sorted([{'name': f, 'modtime': os.path.getmtime(os.path.join(the_directory, f))} for f in os.listdir(the_directory) if os.path.isfile(os.path.join(the_directory, f)) and re.search(r'^[A-Za-z0-9].*[A-Za-z]$', f)], key=lambda x: x['name'])
file_listing = [x['name'] for x in files]
assign_opacity(files)
if active_file == form.original_playground_name.data:
active_file = the_file
set_variable_file(current_project, active_file)
the_time = formatted_current_time()
should_save = True
the_content = re.sub(r'\r\n', r'\n', form.playground_content.data)
if os.path.isfile(filename):
with open(filename, 'r', encoding='utf-8') as fp:
orig_content = fp.read()
if orig_content == the_content:
# logmessage("No need to save")
should_save = False
if should_save:
with open(filename, 'w', encoding='utf-8') as fp:
fp.write(the_content)
if not form.submit.data and active_file != the_file:
active_file = the_file
set_variable_file(current_project, active_file)
this_interview_string = 'docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + the_file
active_interview_string = 'docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + active_file
r.incr('da:interviewsource:' + this_interview_string)
if the_file != active_file:
r.incr('da:interviewsource:' + active_interview_string)
playground.finalize()
docassemble.base.interview_cache.clear_cache(this_interview_string)
if active_interview_string != this_interview_string:
docassemble.base.interview_cache.clear_cache(active_interview_string)
if not form.submit.data:
the_url = url_for('index', reset=1, i=this_interview_string)
key = 'da:runplayground:' + str(playground_user.id)
# logmessage("Setting key " + str(key) + " to " + str(the_url))
pipe = r.pipeline()
pipe.set(key, the_url)
pipe.expire(key, 12)
pipe.execute()
try:
interview_source = docassemble.base.parse.interview_source_from_string(active_interview_string)
interview_source.set_testing(True)
interview = interview_source.get_interview()
ensure_ml_file_exists(interview, active_file, current_project)
the_current_info = current_info(yaml='docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + active_file, req=request, action=None, device_id=request.cookies.get('ds', None))
docassemble.base.functions.this_thread.current_info = the_current_info
interview_status = docassemble.base.parse.InterviewStatus(current_info=the_current_info)
variables_html, vocab_list, vocab_dict = get_vars_in_use(interview, interview_status, debug_mode=debug_mode, current_project=current_project) # pylint: disable=unused-variable
if form.submit.data:
flash_message = flash_as_html(word('Saved at') + ' ' + the_time + '.', 'success', is_ajax=is_ajax)
else:
flash_message = flash_as_html(word('Saved at') + ' ' + the_time + '. ' + word('Running in other tab.'), message_type='success', is_ajax=is_ajax)
if interview.issue.get('mandatory_id', False):
console_messages.append(word("Note: it is a best practice to tag every mandatory block with an id."))
if interview.issue.get('id_collision', False):
console_messages.append(word("Note: more than one block uses id") + " " + interview.issue['id_collision'])
except DAError:
variables_html = None
flash_message = flash_as_html(word('Saved at') + ' ' + the_time + '. ' + word('Problem detected.'), message_type='error', is_ajax=is_ajax)
if is_ajax:
return jsonify(variables_html=variables_html, vocab_list=vocab_list, flash_message=flash_message, current_project=current_project, console_messages=console_messages, active_file=active_file, active_interview_url=url_for('index', i=active_interview_string))
else:
flash(word('Playground not saved. There was an error.'), 'error')
interview_path = None
if valid_form is not False and the_file != '':
with open(filename, 'r', encoding='utf-8') as fp:
form.original_playground_name.data = the_file
form.playground_name.data = the_file
content = fp.read()
# if not form.playground_content.data:
# form.playground_content.data = content
if active_file != '':
is_fictitious = False
interview_path = 'docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + active_file
if is_default:
interview_source = docassemble.base.parse.InterviewSourceString(content=content, directory=the_directory, package="docassemble.playground" + str(playground_user.id) + project_name(current_project), path="docassemble.playground" + str(playground_user.id) + project_name(current_project) + ":" + active_file, testing=True)
else:
interview_source = docassemble.base.parse.interview_source_from_string(interview_path)
interview_source.set_testing(True)
else:
is_fictitious = True
if current_project == 'default':
active_file = 'test.yml'
else:
is_new = True
if form.playground_content.data:
content = re.sub(r'\r', '', form.playground_content.data)
interview_source = docassemble.base.parse.InterviewSourceString(content=content, directory=the_directory, package="docassemble.playground" + str(playground_user.id) + project_name(current_project), path="docassemble.playground" + str(playground_user.id) + project_name(current_project) + ":" + active_file, testing=True)
else:
interview_source = docassemble.base.parse.InterviewSourceString(content='', directory=the_directory, package="docassemble.playground" + str(playground_user.id) + project_name(current_project), path="docassemble.playground" + str(playground_user.id) + project_name(current_project) + ":" + active_file, testing=True)
interview = interview_source.get_interview()
if hasattr(interview, 'mandatory_id_issue') and interview.mandatory_id_issue:
console_messages.append(word("Note: it is a best practice to tag every mandatory block with an id."))
the_current_info = current_info(yaml='docassemble.playground' + str(playground_user.id) + project_name(current_project) + ':' + active_file, req=request, action=None, device_id=request.cookies.get('ds', None))
docassemble.base.functions.this_thread.current_info = the_current_info
interview_status = docassemble.base.parse.InterviewStatus(current_info=the_current_info)
variables_html, vocab_list, vocab_dict = get_vars_in_use(interview, interview_status, debug_mode=debug_mode, current_project=current_project)
pulldown_files = [x['name'] for x in files]
define_examples()
if is_fictitious or is_new or is_default:
new_active_file = word('(New file)')
if new_active_file not in pulldown_files:
pulldown_files.insert(0, new_active_file)
if is_fictitious:
active_file = new_active_file
ajax = """
var exampleData;
var originalFileName = """ + json.dumps(the_file) + """;
var isNew = """ + json.dumps(is_new) + """;
var validForm = """ + json.dumps(valid_form) + """;
var vocab = """ + json.dumps(vocab_list) + """;
var existingFiles = """ + json.dumps(file_listing) + """;
var currentProject = """ + json.dumps(current_project) + """;
var currentFile = """ + json.dumps(the_file) + """;
var attrs_showing = Object();
var daExpireSession = null;
var daNotificationContainer = """ + json.dumps(NOTIFICATION_CONTAINER) + """;
var daNotificationMessage = """ + json.dumps(NOTIFICATION_MESSAGE) + """;
Object.defineProperty(String.prototype, "daSprintf", {
value: function () {
var args = Array.from(arguments),
i = 0;
function defaultNumber(iValue) {
return iValue != undefined && !isNaN(iValue) ? iValue : "0";
}
function defaultString(iValue) {
return iValue == undefined ? "" : "" + iValue;
}
return this.replace(
/%%|%([+\\-])?([^1-9])?(\\d+)?(\\.\\d+)?([deEfhHioQqs])/g,
function (match, sign, filler, scale, precision, type) {
var strOut, space, value;
var asNumber = false;
if (match == "%%") return "%";
if (i >= args.length) return match;
value = args[i];
while (Array.isArray(value)) {
args.splice(i, 1);
for (var j = i; value.length > 0; j++)
args.splice(j, 0, value.shift());
value = args[i];
}
i++;
if (filler == undefined) filler = " "; // default
if (scale == undefined && !isNaN(filler)) {
scale = filler;
filler = " ";
}
if (sign == undefined) sign = "sqQ".indexOf(type) >= 0 ? "+" : "-"; // default
if (scale == undefined) scale = 0; // default
if (precision == undefined) precision = ".0"; // default
scale = parseInt(scale);
precision = parseInt(precision.substr(1));
switch (type) {
case "d":
case "i":
// decimal integer
asNumber = true;
strOut = parseInt(defaultNumber(value));
if (precision > 0) strOut += "." + "0".repeat(precision);
break;
case "e":
case "E":
// float in exponential notation
asNumber = true;
strOut = parseFloat(defaultNumber(value));
if (precision == 0) strOut = strOut.toExponential();
else strOut = strOut.toExponential(precision);
if (type == "E") strOut = strOut.replace("e", "E");
break;
case "f":
// decimal float
asNumber = true;
strOut = parseFloat(defaultNumber(value));
if (precision != 0) strOut = strOut.toFixed(precision);
break;
case "o":
case "h":
case "H":
// Octal or Hexagesimal integer notation
strOut =
"\\\\" +
(type == "o" ? "0" : type) +
parseInt(defaultNumber(value)).toString(type == "o" ? 8 : 16);
break;
case "q":
// single quoted string
strOut = "'" + defaultString(value) + "'";
break;
case "Q":
// double quoted string
strOut = '"' + defaultString(value) + '"';
break;
default:
// string
strOut = defaultString(value);
break;
}
if (typeof strOut != "string") strOut = "" + strOut;
if ((space = strOut.length) < scale) {
if (asNumber) {
if (sign == "-") {
if (strOut.indexOf("-") < 0)
strOut = filler.repeat(scale - space) + strOut;
else
strOut =
"-" +
filler.repeat(scale - space) +
strOut.replace("-", "");
} else {
if (strOut.indexOf("-") < 0)
strOut = "+" + filler.repeat(scale - space - 1) + strOut;
else
strOut =
"-" +
filler.repeat(scale - space) +
strOut.replace("-", "");
}
} else {
if (sign == "-") strOut = filler.repeat(scale - space) + strOut;
else strOut = strOut + filler.repeat(scale - space);
}
} else if (asNumber && sign == "+" && strOut.indexOf("-") < 0)
strOut = "+" + strOut;
return strOut;
}
);
},
});
Object.defineProperty(window, "daSprintf", {
value: function (str, ...rest) {
if (typeof str == "string")
return String.prototype.daSprintf.apply(str, rest);
return "";
},
});
function resetExpireSession(){
if (daExpireSession != null){
window.clearTimeout(daExpireSession);
}
daExpireSession = setTimeout(function(){
alert(""" + json.dumps(word("Your browser session has expired and you have been signed out. You will not be able to save your work. Please log in again.")) + """);
}, """ + str(999 * int(daconfig.get('session lifetime seconds', 43200))) + """);
}
""" + variables_js() + """
""" + search_js() + """
function activateExample(id, scroll){
var info = exampleData[id];
$("#da-example-source").html(info['html']);
$("#da-example-source-before").html(info['before_html']);
$("#da-example-source-after").html(info['after_html']);
$("#da-example-image-link").attr("href", info['interview']);
$("#da-example-image").attr("src", info['image']);
if (info['documentation'] != null){
$("#da-example-documentation-link").attr("href", info['documentation']);
$("#da-example-documentation-link").removeClass("da-example-hidden");
//$("#da-example-documentation-link").slideUp();
}
else{
$("#da-example-documentation-link").addClass("da-example-hidden");
//$("#da-example-documentation-link").slideDown();
}
$(".da-example-list").addClass("da-example-hidden");
$(".da-example-link").removeClass("da-example-active");
$(".da-example-link").removeClass("active");
$(".da-example-link").each(function(){
if ($(this).data("example") == id){
$(this).addClass("da-example-active");
$(this).addClass("active");
$(this).parents(".da-example-list").removeClass("da-example-hidden");
if (scroll){
setTimeout(function(){
//console.log($(this).parents("li").last()[0].offsetTop);
//console.log($(this).parents("li").last().parent()[0].offsetTop);
$(".da-example-active").parents("ul").last().scrollTop($(".da-example-active").parents("li").last()[0].offsetTop);
}, 0);
}
//$(this).parents(".da-example-list").slideDown();
}
});
$("#da-hide-full-example").addClass("dainvisible");
if (info['has_context']){
$("#da-show-full-example").removeClass("dainvisible");
}
else{
$("#da-show-full-example").addClass("dainvisible");
}
$("#da-example-source-before").addClass("dainvisible");
$("#da-example-source-after").addClass("dainvisible");
}
function daFetchVariableReportCallback(data){
var translations = """ + json.dumps({'in mako': word("in mako"), 'mentioned in': word("mentioned in"), 'defined by': word("defined by")}) + """;
var modal = $("#daVariablesReport .modal-body");
if (modal.length == 0){
console.log("No modal body on page");
return;
}
if (!data.success){
$(modal).html('<p>""" + word("Failed to load report") + """</p>');
return;
}
var yaml_file = data.yaml_file;
      console.log(yaml_file);
modal.empty();
var accordion = $('<div>');
accordion.addClass("accordion");
accordion.attr("id", "varsreport");
var n = data.items.length;
for (var i = 0; i < n; ++i){
var item = data.items[i];
if (item.questions.length){
var accordionItem = $('<div>');
accordionItem.addClass("accordion-item");
var accordionItemHeader = $('<h2>');
accordionItemHeader.addClass("accordion-header");
accordionItemHeader.attr("id", "accordionItemheader" + i);
accordionItemHeader.html('<button class="accordion-button collapsed" type="button" data-bs-toggle="collapse" data-bs-target="#collapse' + i + '" aria-expanded="false" aria-controls="collapse' + i + '">' + item.name + '</button>');
accordionItem.append(accordionItemHeader);
var collapse = $("<div>");
collapse.attr("id", "collapse" + i);
collapse.attr("aria-labelledby", "accordionItemheader" + i);
collapse.data("bs-parent", "#varsreport");
collapse.addClass("accordion-collapse");
collapse.addClass("collapse");
var accordionItemBody = $("<div>");
accordionItemBody.addClass("accordion-body");
var m = item.questions.length;
for (var j = 0; j < m; j++){
var h5 = $("<h5>");
h5.html(item.questions[j].usage.map(x => translations[x]).join(','));
var pre = $("<pre>");
pre.html(item.questions[j].source_code);
accordionItemBody.append(h5);
accordionItemBody.append(pre);
if (item.questions[j].yaml_file != yaml_file){
var p = $("<p>");
p.html(""" + json.dumps(word("from")) + """ + ' ' + item.questions[j].yaml_file);
accordionItemBody.append(p);
}
}
collapse.append(accordionItemBody);
accordionItem.append(collapse);
accordion.append(accordionItem);
}
}
modal.append(accordion);
}
function daFetchVariableReport(){
      var url = """ + json.dumps(url_for('variables_report', project=current_project)) + """ + "&file=" + currentFile;
$("#daVariablesReport .modal-body").html('<p>""" + word("Loading . . .") + """</p>');
$.ajax({
type: "GET",
url: url,
success: daFetchVariableReportCallback,
xhrFields: {
withCredentials: true
},
error: function(xhr, status, error){
$("#daVariablesReport .modal-body").html('<p>""" + word("Failed to load report") + """</p>');
}
});
}
function saveCallback(data){
if (data.action && data.action == 'reload'){
location.reload(true);
}
if ($("#daflash").length){
$("#daflash").html(data.flash_message);
}
else{
$("#damain").prepend(daSprintf(daNotificationContainer, data.flash_message));
}
if (data.vocab_list != null){
vocab = data.vocab_list;
}
if (data.current_project != null){
currentProject = data.current_project;
}
history.replaceState({}, "", """ + json.dumps(url_for('playground_page')) + """ + encodeURI('?project=' + currentProject + '&file=' + currentFile));
$("#daVariables").val(data.active_file);
$("#share-link").attr('href', data.active_interview_url);
if (data.variables_html != null){
$("#daplaygroundtable").html(data.variables_html);
activateVariables();
$("#form").trigger("reinitialize.areYouSure");
var daPopoverTriggerList = [].slice.call(document.querySelectorAll('[data-bs-toggle="popover"]'));
var daPopoverList = daPopoverTriggerList.map(function (daPopoverTriggerEl) {
return new bootstrap.Popover(daPopoverTriggerEl, {trigger: "focus", html: true});
});
}
daConsoleMessages = data.console_messages;
daShowConsoleMessages();
}
function daShowConsoleMessages(){
for (i=0; i < daConsoleMessages.length; ++i){
console.log(daConsoleMessages[i]);
}
}
function disableButtonsUntilCallback(){
$("button.dasubmitbutton").prop('disabled', true);
$("a.dasubmitbutton").addClass('dadisabled');
}
function enableButtons(){
$(".dasubmitbutton").prop('disabled', false);
$("a.dasubmitbutton").removeClass('dadisabled');
}
$( document ).ready(function() {
variablesReady();
searchReady();
resetExpireSession();
$("#playground_name").on('change', function(){
var newFileName = $(this).val();
if ((!isNew) && newFileName == currentFile){
return;
}
for (var i = 0; i < existingFiles.length; i++){
if (newFileName == existingFiles[i] || newFileName + '.yml' == existingFiles[i]){
alert(""" + json.dumps(word("Warning: a file by that name already exists. If you save, you will overwrite it.")) + """);
return;
}
}
return;
});
$("#daRun").click(function(event){
if (originalFileName != $("#playground_name").val() || $("#playground_name").val() == ''){
$("#form button[name='submit']").click();
event.preventDefault();
return false;
}
daCodeMirror.save();
disableButtonsUntilCallback();
$.ajax({
type: "POST",
url: """ + '"' + url_for('playground_page', project=current_project) + '"' + """,
data: $("#form").serialize() + '&run=Save+and+Run&ajax=1',
success: function(data){
if (data.action && data.action == 'reload'){
location.reload(true);
}
enableButtons();
resetExpireSession();
saveCallback(data);
},
dataType: 'json'
});
//event.preventDefault();
return true;
});
var thisWindow = window;
$("#daRunSyncGD").click(function(event){
daCodeMirror.save();
$("#form").trigger("checkform.areYouSure");
if ($('#form').hasClass('dirty') && !confirm(""" + json.dumps(word("There are unsaved changes. Are you sure you wish to leave this page?")) + """)){
event.preventDefault();
return false;
}
if ($("#playground_name").val() == ''){
$("#form button[name='submit']").click();
event.preventDefault();
return false;
}
setTimeout(function(){
thisWindow.location.replace('""" + url_for('sync_with_google_drive', project=current_project, auto_next=url_for('playground_page_run', file=the_file, project=current_project)) + """');
}, 100);
return true;
});
$("#daRunSyncOD").click(function(event){
daCodeMirror.save();
$("#form").trigger("checkform.areYouSure");
if ($('#form').hasClass('dirty') && !confirm(""" + json.dumps(word("There are unsaved changes. Are you sure you wish to leave this page?")) + """)){
event.preventDefault();
return false;
}
if ($("#playground_name").val() == ''){
$("#form button[name='submit']").click();
event.preventDefault();
return false;
}
setTimeout(function(){
thisWindow.location.replace('""" + url_for('sync_with_onedrive', project=current_project, auto_next=url_for('playground_page_run', file=the_file, project=current_project)) + """');
}, 100);
return true;
});
$("#form button[name='submit']").click(function(event){
daCodeMirror.save();
if (validForm == false || isNew == true || originalFileName != $("#playground_name").val() || $("#playground_name").val().trim() == ""){
return true;
}
disableButtonsUntilCallback();
$.ajax({
type: "POST",
url: """ + '"' + url_for('playground_page', project=current_project) + '"' + """,
data: $("#form").serialize() + '&submit=Save&ajax=1',
success: function(data){
if (data.action && data.action == 'reload'){
location.reload(true);
}
enableButtons();
resetExpireSession();
saveCallback(data);
setTimeout(function(){
$("#daflash .alert-success").hide(300, function(){
$(self).remove();
});
}, 3000);
},
dataType: 'json'
});
event.preventDefault();
return false;
});
$(".da-example-link").on("click", function(){
var id = $(this).data("example");
activateExample(id, false);
});
$(".da-example-copy").on("click", function(event){
if (daCodeMirror.somethingSelected()){
daCodeMirror.replaceSelection("");
}
var id = $(".da-example-active").data("example");
var curPos = daCodeMirror.getCursor();
var notFound = 1;
var insertLine = daCodeMirror.lastLine();
daCodeMirror.eachLine(curPos.line, insertLine, function(line){
if (notFound){
if (line.text.substring(0, 3) == "---" || line.text.substring(0, 3) == "..."){
insertLine = daCodeMirror.getLineNumber(line)
//console.log("Found break at line number " + insertLine)
notFound = 0;
}
}
});
if (notFound){
daCodeMirror.setSelection({'line': insertLine, 'ch': null});
daCodeMirror.replaceSelection("\\n---\\n" + exampleData[id]['source'] + "\\n", "around");
}
else{
daCodeMirror.setSelection({'line': insertLine, 'ch': 0});
daCodeMirror.replaceSelection("---\\n" + exampleData[id]['source'] + "\\n", "around");
}
daCodeMirror.focus();
event.preventDefault();
return false;
});
$(".da-example-heading").on("click", function(){
var list = $(this).parent().children("ul").first();
if (list != null){
if (!list.hasClass("da-example-hidden")){
return;
}
$(".da-example-list").addClass("da-example-hidden");
//$(".da-example-list").slideUp();
var new_link = $(this).parent().find("a.da-example-link").first();
if (new_link.length){
var id = new_link.data("example");
activateExample(id, true);
}
}
});
activatePopovers();
$("#da-show-full-example").on("click", function(){
var id = $(".da-example-active").data("example");
var info = exampleData[id];
$(this).addClass("dainvisible");
$("#da-hide-full-example").removeClass("dainvisible");
$("#da-example-source-before").removeClass("dainvisible");
$("#da-example-source-after").removeClass("dainvisible");
});
$("#da-hide-full-example").on("click", function(){
var id = $(".da-example-active").data("example");
var info = exampleData[id];
$(this).addClass("dainvisible");
$("#da-show-full-example").removeClass("dainvisible");
$("#da-example-source-before").addClass("dainvisible");
$("#da-example-source-after").addClass("dainvisible");
});
if ($("#playground_name").val().length > 0){
daCodeMirror.focus();
}
else{
$("#playground_name").focus()
}
activateVariables();
updateRunLink();
origPosition = daCodeMirror.getCursor();
daShowConsoleMessages();
if (currentFile != ''){
history.replaceState({}, "", """ + json.dumps(url_for('playground_page')) + """ + encodeURI('?project=' + currentProject + '&file=' + currentFile));
}
});
"""
any_files = len(files) > 0
cm_setup = """
<script>
var word_re = /[\w$]+/
$( document ).ready(function(){
CodeMirror.registerHelper("hint", "yaml", function(editor, options){
var cur = editor.getCursor(), curLine = editor.getLine(cur.line);
var end = cur.ch, start = end;
while (start && word_re.test(curLine.charAt(start - 1))) --start;
var curWord = start != end && curLine.slice(start, end);
var list = [];
if (curWord){
var n = vocab.length;
for (var i = 0; i < n; ++i){
if (vocab[i].indexOf(curWord) == 0){
list.push(vocab[i]);
}
}
}
return {list: list, from: CodeMirror.Pos(cur.line, start), to: CodeMirror.Pos(cur.line, end)};
});""" + upload_js() + """
});
</script>""" # noqa: W605
if keymap:
kbOpt = 'keyMap: "' + keymap + '", cursorBlinkRate: 0, '
kbLoad = '<script src="' + url_for('static', filename="codemirror/keymap/" + keymap + ".js", v=da_version) + '"></script>\n '
else:
kbOpt = ''
kbLoad = ''
page_title = word("Playground")
if current_user.id != playground_user.id:
page_title += " / " + playground_user.email
if current_project != 'default':
page_title += " / " + current_project
extra_js = '<script src="' + url_for('static', filename="app/playgroundbundle.js", v=da_version) + '"></script>\n ' + kbLoad + cm_setup + '<script>\n var daConsoleMessages = ' + json.dumps(console_messages) + ';\n $("#daDelete").click(function(event){if (originalFileName != $("#playground_name").val() || $("#playground_name").val() == \'\'){ $("#form button[name=\'submit\']").click(); event.preventDefault(); return false; } if(!confirm("' + word("Are you sure that you want to delete this playground file?") + '")){event.preventDefault();}});\n daTextArea = document.getElementById("playground_content");\n var daCodeMirror = CodeMirror.fromTextArea(daTextArea, {specialChars: /[\\u00a0\\u0000-\\u001f\\u007f-\\u009f\\u00ad\\u061c\\u200b-\\u200f\\u2028\\u2029\\ufeff]/, mode: "' + ('yamlmixed' if daconfig.get('test yamlmixed mode') else 'yamlmixed') + '", ' + kbOpt + 'tabSize: 2, tabindex: 70, autofocus: false, lineNumbers: true, matchBrackets: true, lineWrapping: ' + ('true' if daconfig.get('wrap lines in playground', True) else 'false') + '});\n $(window).bind("beforeunload", function(){daCodeMirror.save(); $("#form").trigger("checkform.areYouSure");});\n $("#form").areYouSure(' + json.dumps({'message': word("There are unsaved changes. Are you sure you wish to leave this page?")}) + ');\n $("#form").bind("submit", function(){daCodeMirror.save(); $("#form").trigger("reinitialize.areYouSure"); return true;});\n daCodeMirror.setSize(null, null);\n daCodeMirror.setOption("extraKeys", { Tab: function(cm) { var spaces = Array(cm.getOption("indentUnit") + 1).join(" "); cm.replaceSelection(spaces); }, "Ctrl-Space": "autocomplete", "F11": function(cm) { cm.setOption("fullScreen", !cm.getOption("fullScreen")); }, "Esc": function(cm) { if (cm.getOption("fullScreen")) cm.setOption("fullScreen", false); }});\n daCodeMirror.setOption("coverGutterNextToScrollbar", true);\n' + indent_by(ajax, 6) + '\n'
if pg_ex['encoded_data_dict'] is not None:
extra_js += ' exampleData = JSON.parse(atob("' + pg_ex['encoded_data_dict'] + '"));\n activateExample("' + str(pg_ex['pg_first_id'][0]) + '", false);\n'
extra_js += ' $("#my-form").trigger("reinitialize.areYouSure");\n $("#daVariablesReport").on("shown.bs.modal", function () { daFetchVariableReport(); })\n </script>'
response = make_response(render_template('pages/playground.html', projects=get_list_of_projects(playground_user.id), current_project=current_project, version_warning=None, bodyclass='daadminbody', use_gd=use_gd, use_od=use_od, userid=playground_user.id, page_title=Markup(page_title), tab_title=word("Playground"), extra_css=Markup('\n <link href="' + url_for('static', filename='app/playgroundbundle.css', v=da_version) + '" rel="stylesheet">'), extra_js=Markup(extra_js), form=form, fileform=fileform, files=sorted(files, key=lambda y: y['name'].lower()), any_files=any_files, pulldown_files=sorted(pulldown_files, key=lambda y: y.lower()), current_file=the_file, active_file=active_file, content=content, variables_html=Markup(variables_html), example_html=pg_ex['encoded_example_html'], interview_path=interview_path, is_new=str(is_new), valid_form=str(valid_form), own_playground=bool(playground_user.id == current_user.id)), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
@app.errorhandler(404)
def page_not_found_error(the_error): # pylint: disable=unused-argument
return render_template('pages/404.html'), 404
@app.errorhandler(Exception)
def server_error(the_error):
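    """Handle uncaught exceptions: log the error, send an error notification, and render an error page or run the interview's configured error action."""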
setup_translation()
if hasattr(the_error, 'interview') and the_error.interview.debug and hasattr(the_error, 'interview_status'):
the_history = get_history(the_error.interview, the_error.interview_status)
else:
the_history = None
the_vars = None
if isinstance(the_error, (DAError, DANotFoundError, DAInvalidFilename)):
errmess = str(the_error)
the_trace = None
logmessage(errmess)
elif isinstance(the_error, TemplateError):
errmess = str(the_error)
if hasattr(the_error, 'name') and the_error.name is not None:
errmess += "\nName: " + str(the_error.name)
if hasattr(the_error, 'filename') and the_error.filename is not None:
errmess += "\nFilename: " + str(the_error.filename)
if hasattr(the_error, 'docx_context'):
errmess += "\n\nContext:\n" + "\n".join(map(lambda x: " " + x, the_error.docx_context))
the_trace = traceback.format_exc()
try:
logmessage(errmess)
except:
logmessage("Could not log the error message")
else:
try:
errmess = str(type(the_error).__name__) + ": " + str(the_error)
except:
errmess = str(type(the_error).__name__)
if hasattr(the_error, 'traceback'):
the_trace = the_error.traceback
else:
the_trace = traceback.format_exc()
if hasattr(docassemble.base.functions.this_thread, 'misc') and 'current_field' in docassemble.base.functions.this_thread.misc:
errmess += "\nIn field index number " + str(docassemble.base.functions.this_thread.misc['current_field'])
if hasattr(the_error, 'da_line_with_error'):
errmess += "\nIn line: " + str(the_error.da_line_with_error)
logmessage(the_trace)
if isinstance(the_error, DAError):
error_code = the_error.error_code
if isinstance(the_error, DANotFoundError):
error_code = 404
elif isinstance(the_error, werkzeug.exceptions.HTTPException):
error_code = the_error.code
else:
error_code = 501
if hasattr(the_error, 'user_dict'):
the_vars = the_error.user_dict
if hasattr(the_error, 'interview'):
special_error_markdown = the_error.interview.consolidated_metadata.get('error help', None)
if isinstance(special_error_markdown, dict):
language = docassemble.base.functions.get_language()
if language in special_error_markdown:
special_error_markdown = special_error_markdown[language]
elif '*' in special_error_markdown:
special_error_markdown = special_error_markdown['*']
elif DEFAULT_LANGUAGE in special_error_markdown:
special_error_markdown = special_error_markdown[DEFAULT_LANGUAGE]
else:
special_error_markdown = None
else:
special_error_markdown = None
if special_error_markdown is None:
special_error_markdown = daconfig.get('error help', None)
if special_error_markdown is not None:
special_error_html = docassemble.base.util.markdown_to_html(special_error_markdown)
else:
special_error_html = None
flask_logtext = []
if os.path.exists(LOGFILE):
with open(LOGFILE, encoding='utf-8') as the_file:
for line in the_file:
if re.match('Exception', line):
flask_logtext = []
flask_logtext.append(line)
orig_errmess = errmess
errmess = noquote(errmess)
if re.search(r'\n', errmess):
errmess = '<pre>' + errmess + '</pre>'
else:
errmess = '<blockquote class="blockquote">' + errmess + '</blockquote>'
script = """
<script>
var daGlobalEval = eval;
var daMessageLog = JSON.parse(atob(""" + json.dumps(safeid(json.dumps(docassemble.base.functions.get_message_log()))) + """));
var daNotificationMessage = """ + json.dumps(NOTIFICATION_MESSAGE) + """;
if (!String.prototype.daSprintf){
Object.defineProperty(String.prototype, "daSprintf", {
value: function () {
var args = Array.from(arguments),
i = 0;
function defaultNumber(iValue) {
return iValue != undefined && !isNaN(iValue) ? iValue : "0";
}
function defaultString(iValue) {
return iValue == undefined ? "" : "" + iValue;
}
return this.replace(
/%%|%([+\\-])?([^1-9])?(\\d+)?(\\.\\d+)?([deEfhHioQqs])/g,
function (match, sign, filler, scale, precision, type) {
var strOut, space, value;
var asNumber = false;
if (match == "%%") return "%";
if (i >= args.length) return match;
value = args[i];
while (Array.isArray(value)) {
args.splice(i, 1);
for (var j = i; value.length > 0; j++)
args.splice(j, 0, value.shift());
value = args[i];
}
i++;
if (filler == undefined) filler = " "; // default
if (scale == undefined && !isNaN(filler)) {
scale = filler;
filler = " ";
}
if (sign == undefined) sign = "sqQ".indexOf(type) >= 0 ? "+" : "-"; // default
if (scale == undefined) scale = 0; // default
if (precision == undefined) precision = ".0"; // default
scale = parseInt(scale);
precision = parseInt(precision.substr(1));
switch (type) {
case "d":
case "i":
// decimal integer
asNumber = true;
strOut = parseInt(defaultNumber(value));
if (precision > 0) strOut += "." + "0".repeat(precision);
break;
case "e":
case "E":
// float in exponential notation
asNumber = true;
strOut = parseFloat(defaultNumber(value));
if (precision == 0) strOut = strOut.toExponential();
else strOut = strOut.toExponential(precision);
if (type == "E") strOut = strOut.replace("e", "E");
break;
case "f":
// decimal float
asNumber = true;
strOut = parseFloat(defaultNumber(value));
if (precision != 0) strOut = strOut.toFixed(precision);
break;
case "o":
case "h":
case "H":
// Octal or Hexagesimal integer notation
strOut =
"\\\\" +
(type == "o" ? "0" : type) +
parseInt(defaultNumber(value)).toString(type == "o" ? 8 : 16);
break;
case "q":
// single quoted string
strOut = "'" + defaultString(value) + "'";
break;
case "Q":
// double quoted string
strOut = '"' + defaultString(value) + '"';
break;
default:
// string
strOut = defaultString(value);
break;
}
if (typeof strOut != "string") strOut = "" + strOut;
if ((space = strOut.length) < scale) {
if (asNumber) {
if (sign == "-") {
if (strOut.indexOf("-") < 0)
strOut = filler.repeat(scale - space) + strOut;
else
strOut =
"-" +
filler.repeat(scale - space) +
strOut.replace("-", "");
} else {
if (strOut.indexOf("-") < 0)
strOut = "+" + filler.repeat(scale - space - 1) + strOut;
else
strOut =
"-" +
filler.repeat(scale - space) +
strOut.replace("-", "");
}
} else {
if (sign == "-") strOut = filler.repeat(scale - space) + strOut;
else strOut = strOut + filler.repeat(scale - space);
}
} else if (asNumber && sign == "+" && strOut.indexOf("-") < 0)
strOut = "+" + strOut;
return strOut;
}
);
},
});
Object.defineProperty(window, "daSprintf", {
value: function (str, ...rest) {
if (typeof str == "string")
return String.prototype.daSprintf.apply(str, rest);
return "";
},
});
}
function flash(message, priority){
if (priority == null){
priority = 'info'
}
if (!$("#daflash").length){
$("body").append(""" + json.dumps(NOTIFICATION_CONTAINER % ('',)) + """);
}
$("#daflash").append(daSprintf(daNotificationMessage, priority, message));
if (priority == 'success'){
setTimeout(function(){
$("#daflash .alert-success").hide(300, function(){
$(self).remove();
});
}, 3000);
}
}
var da_flash = flash;
function daShowNotifications(){
var n = daMessageLog.length;
for (var i = 0; i < n; i++){
var message = daMessageLog[i];
if (message.priority == 'console'){
console.log(message.message);
}
else if (message.priority == 'javascript'){
daGlobalEval(message.message);
}
else if (message.priority == 'success' || message.priority == 'warning' || message.priority == 'danger' || message.priority == 'secondary' || message.priority == 'tertiary' || message.priority == 'info' || message.priority == 'dark' || message.priority == 'light' || message.priority == 'primary'){
da_flash(message.message, message.priority);
}
else{
da_flash(message.message, 'info');
}
}
}
$( document ).ready(function() {
$("#da-retry").on('click', function(e){
location.reload();
e.preventDefault();
return false;
});
daShowNotifications();
});
</script>""" # noqa: W605
error_notification(the_error, message=errmess, history=the_history, trace=the_trace, the_request=request, the_vars=the_vars)
if (request.path.endswith('/interview') or request.path.endswith('/start') or request.path.endswith('/run')) and docassemble.base.functions.interview_path() is not None:
try:
release_lock(docassemble.base.functions.this_thread.current_info['session'], docassemble.base.functions.this_thread.current_info['yaml_filename'])
except:
pass
if 'in error' not in session and docassemble.base.functions.this_thread.interview is not None and 'error action' in docassemble.base.functions.this_thread.interview.consolidated_metadata:
session['in error'] = True
return index(action_argument={'action': docassemble.base.functions.this_thread.interview.consolidated_metadata['error action'], 'arguments': {'error_message': orig_errmess, 'error_history': the_history, 'error_trace': the_trace}}, refer=['error'])
show_debug = not bool((not DEBUG) and isinstance(the_error, (DAError, DAInvalidFilename)))
if int(int(error_code)/100) == 4:
show_debug = False
if error_code == 404:
the_template = 'pages/404.html'
else:
the_template = 'pages/501.html'
try:
yaml_filename = docassemble.base.functions.interview_path()
except:
yaml_filename = None
show_retry = request.path.endswith('/interview') or request.path.endswith('/start') or request.path.endswith('/run')
return render_template(the_template, verbose=daconfig.get('verbose error messages', True), version_warning=None, tab_title=word("Error"), page_title=word("Error"), error=errmess, historytext=str(the_history), logtext=str(the_trace), extra_js=Markup(script), special_error=special_error_html, show_debug=show_debug, yaml_filename=yaml_filename, show_retry=show_retry), error_code
@app.route('/bundle.css', methods=['GET'])
def css_bundle():
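    """Serve the combined CSS used by the main interview interface as a single stylesheet."""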
base_path = Path(importlib.resources.files('docassemble.webapp'), 'static')
output = ''
for parts in [['bootstrap-fileinput', 'css', 'fileinput.min.css'], ['labelauty', 'source', 'jquery-labelauty.min.css'], ['bootstrap-combobox', 'css', 'bootstrap-combobox.min.css'], ['bootstrap-slider', 'dist', 'css', 'bootstrap-slider.min.css'], ['app', 'app.min.css']]:
with open(os.path.join(base_path, *parts), encoding='utf-8') as fp:
output += fp.read()
output += "\n"
return Response(output, mimetype='text/css')
@app.route('/playgroundbundle.css', methods=['GET'])
def playground_css_bundle():
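    """Serve the combined CSS used by the Playground editor (CodeMirror, Pygments, fileinput) as a single stylesheet."""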
base_path = Path(importlib.resources.files('docassemble.webapp'), 'static')
output = ''
for parts in [['codemirror', 'lib', 'codemirror.css'], ['codemirror', 'addon', 'search', 'matchesonscrollbar.css'], ['codemirror', 'addon', 'display', 'fullscreen.css'], ['codemirror', 'addon', 'scroll', 'simplescrollbars.css'], ['codemirror', 'addon', 'hint', 'show-hint.css'], ['app', 'pygments.min.css'], ['bootstrap-fileinput', 'css', 'fileinput.min.css']]:
with open(os.path.join(base_path, *parts), encoding='utf-8') as fp:
output += fp.read()
output += "\n"
return Response(output, mimetype='text/css')
@app.route('/bundle.js', methods=['GET'])
def js_bundle():
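    """Serve the combined JavaScript used by the main interview interface as a single script."""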
base_path = Path(importlib.resources.files('docassemble.webapp'), 'static')
output = ''
for parts in [['app', 'jquery.min.js'], ['app', 'jquery.validate.min.js'], ['app', 'additional-methods.min.js'], ['app', 'jquery.visible.min.js'], ['bootstrap', 'js', 'bootstrap.bundle.min.js'], ['bootstrap-slider', 'dist', 'bootstrap-slider.min.js'], ['labelauty', 'source', 'jquery-labelauty.min.js'], ['bootstrap-fileinput', 'js', 'plugins', 'piexif.min.js'], ['bootstrap-fileinput', 'js', 'fileinput.min.js'], ['bootstrap-fileinput', 'themes', 'fas', 'theme.min.js'], ['app', 'app.min.js'], ['bootstrap-combobox', 'js', 'bootstrap-combobox.min.js'], ['app', 'socket.io.min.js']]:
with open(os.path.join(base_path, *parts), encoding='utf-8') as fp:
output += fp.read()
output += "\n"
return Response(output, mimetype='application/javascript')
@app.route('/playgroundbundle.js', methods=['GET'])
def playground_js_bundle():
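    """Serve the combined JavaScript used by the Playground editor (CodeMirror and its add-ons) as a single script."""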
base_path = Path(importlib.resources.files('docassemble.webapp'), 'static')
output = ''
for parts in [['areyousure', 'jquery.are-you-sure.js'], ['codemirror', 'lib', 'codemirror.js'], ['codemirror', 'addon', 'search', 'searchcursor.js'], ['codemirror', 'addon', 'scroll', 'annotatescrollbar.js'], ['codemirror', 'addon', 'search', 'matchesonscrollbar.js'], ['codemirror', 'addon', 'display', 'fullscreen.js'], ['codemirror', 'addon', 'edit', 'matchbrackets.js'], ['codemirror', 'addon', 'hint', 'show-hint.js'], ['codemirror', 'mode', 'yaml', 'yaml.js'], ['codemirror', 'mode', 'python', 'python.js'], ['yamlmixed', 'yamlmixed.js'], ['codemirror', 'mode', 'markdown', 'markdown.js'], ['bootstrap-fileinput', 'js', 'plugins', 'piexif.min.js'], ['bootstrap-fileinput', 'js', 'fileinput.min.js'], ['bootstrap-fileinput', 'themes', 'fas', 'theme.min.js']]:
with open(os.path.join(base_path, *parts), encoding='utf-8') as fp:
output += fp.read()
output += "\n"
return Response(output, mimetype='application/javascript')
@app.route('/adminbundle.js', methods=['GET'])
def js_admin_bundle():
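    """Serve the combined JavaScript (jQuery and Bootstrap) used by administrative pages."""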
base_path = Path(importlib.resources.files('docassemble.webapp'), 'static')
output = ''
for parts in [['app', 'jquery.min.js'], ['bootstrap', 'js', 'bootstrap.bundle.min.js']]:
with open(os.path.join(base_path, *parts), encoding='utf-8') as fp:
output += fp.read()
output += "\n"
return Response(output, mimetype='application/javascript')
@app.route('/bundlewrapjquery.js', methods=['GET'])
def js_bundle_wrap():
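    """Serve the JavaScript bundle, without jQuery itself, wrapped in an immediately-invoked function that receives jQuery as $."""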
base_path = Path(importlib.resources.files('docassemble.webapp'), 'static')
output = '(function($) {'
for parts in [['app', 'jquery.validate.min.js'], ['app', 'additional-methods.min.js'], ['app', 'jquery.visible.js'], ['bootstrap', 'js', 'bootstrap.bundle.min.js'], ['bootstrap-slider', 'dist', 'bootstrap-slider.min.js'], ['bootstrap-fileinput', 'js', 'plugins', 'piexif.min.js'], ['bootstrap-fileinput', 'js', 'fileinput.min.js'], ['bootstrap-fileinput', 'themes', 'fas', 'theme.min.js'], ['app', 'app.min.js'], ['labelauty', 'source', 'jquery-labelauty.min.js'], ['bootstrap-combobox', 'js', 'bootstrap-combobox.min.js'], ['app', 'socket.io.min.js']]:
with open(os.path.join(base_path, *parts), encoding='utf-8') as fp:
output += fp.read()
output += "\n"
output += '})(jQuery);'
return Response(output, mimetype='application/javascript')
@app.route('/bundlenojquery.js', methods=['GET'])
def js_bundle_no_query():
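    """Serve the JavaScript bundle without bundling jQuery itself and without any wrapper."""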
base_path = Path(importlib.resources.files('docassemble.webapp'), 'static')
output = ''
for parts in [['app', 'jquery.validate.min.js'], ['app', 'additional-methods.min.js'], ['app', 'jquery.visible.min.js'], ['bootstrap', 'js', 'bootstrap.bundle.min.js'], ['bootstrap-slider', 'dist', 'bootstrap-slider.min.js'], ['bootstrap-fileinput', 'js', 'plugins', 'piexif.min.js'], ['bootstrap-fileinput', 'js', 'fileinput.min.js'], ['bootstrap-fileinput', 'themes', 'fas', 'theme.min.js'], ['app', 'app.min.js'], ['labelauty', 'source', 'jquery-labelauty.min.js'], ['bootstrap-combobox', 'js', 'bootstrap-combobox.min.js'], ['app', 'socket.io.min.js']]:
with open(os.path.join(base_path, *parts), encoding='utf-8') as fp:
output += fp.read()
output += "\n"
output += ''
return Response(output, mimetype='application/javascript')
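# Serves a file from the data/static directory of an installed package, rejecting
# path traversal; ?attachment=1 causes the file to be sent as a download.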
@app.route('/packagestatic/<package>/<path:filename>', methods=['GET'])
def package_static(package, filename):
try:
attach = int(request.args.get('attachment', 0))
    except (TypeError, ValueError):
attach = 0
if '../' in filename:
return ('File not found', 404)
if package == 'fonts':
return redirect(url_for('static', filename='bootstrap/fonts/' + filename, v=da_version))
try:
filename = re.sub(r'^\.+', '', filename)
filename = re.sub(r'\/\.+', '/', filename)
the_file = docassemble.base.functions.package_data_filename(str(package) + ':data/static/' + str(filename))
except:
return ('File not found', 404)
if the_file is None:
return ('File not found', 404)
if not os.path.isfile(the_file):
return ('File not found', 404)
extension, mimetype = get_ext_and_mimetype(the_file) # pylint: disable=unused-variable
response = send_file(the_file, mimetype=str(mimetype), download_name=filename)
if attach:
filename = os.path.basename(filename)
response.headers['Content-Disposition'] = 'attachment; filename=' + json.dumps(filename)
return response
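# Sends a single log file as an attachment, either from the local log directory or,
# when a separate log server is configured, fetched over HTTP from that server.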
@app.route('/logfile/<filename>', methods=['GET'])
@login_required
@roles_required(['admin', 'developer'])
def logfile(filename):
if LOGSERVER is None:
the_file = os.path.join(LOG_DIRECTORY, filename)
if not os.path.isfile(the_file):
return ('File not found', 404)
else:
h = httplib2.Http()
resp, content = h.request("http://" + LOGSERVER + ':8082', "GET") # pylint: disable=unused-variable
try:
the_file, headers = urlretrieve("http://" + LOGSERVER + ':8082/' + urllibquote(filename)) # pylint: disable=unused-variable
except:
return ('File not found', 404)
response = send_file(the_file, as_attachment=True, mimetype='text/plain', download_name=filename, max_age=0)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
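# Log viewer: lists the available log files, shows the last 30 lines of the selected
# file (optionally filtered by a regular expression), and, when ?zip=1 is given and
# there is no separate log server, returns all local logs as a ZIP archive.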
@app.route('/logs', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'developer'])
def logs():
setup_translation()
if not app.config['ALLOW_LOG_VIEWING']:
return ('File not found', 404)
form = LogForm(request.form)
use_zip = true_or_false(request.args.get('zip', None))
if LOGSERVER is None and use_zip:
timezone = get_default_timezone()
zip_archive = tempfile.NamedTemporaryFile(mode="wb", prefix="datemp", suffix=".zip", delete=False)
zf = zipfile.ZipFile(zip_archive, compression=zipfile.ZIP_DEFLATED, mode='w')
for f in os.listdir(LOG_DIRECTORY):
zip_path = os.path.join(LOG_DIRECTORY, f)
if f.startswith('.') or not os.path.isfile(zip_path):
continue
info = zipfile.ZipInfo(f)
info.compress_type = zipfile.ZIP_DEFLATED
info.external_attr = 0o644 << 16
info.date_time = datetime.datetime.utcfromtimestamp(os.path.getmtime(zip_path)).replace(tzinfo=datetime.timezone.utc).astimezone(zoneinfo.ZoneInfo(timezone)).timetuple()
with open(zip_path, 'rb') as fp:
zf.writestr(info, fp.read())
zf.close()
zip_file_name = re.sub(r'[^A-Za-z0-9_]+', '', app.config['APP_NAME']) + '_logs.zip'
response = send_file(zip_archive.name, mimetype='application/zip', as_attachment=True, download_name=zip_file_name)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
the_file = request.args.get('file', None)
if the_file is not None:
the_file = secure_filename_spaces_ok(the_file)
default_filter_string = request.args.get('q', '')
if request.method == 'POST' and form.file_name.data:
the_file = form.file_name.data
if the_file is not None and (the_file.startswith('.') or the_file.startswith('/') or the_file == ''):
the_file = None
if the_file is not None:
the_file = secure_filename_spaces_ok(the_file)
total_bytes = 0
if LOGSERVER is None:
call_sync()
files = []
for f in os.listdir(LOG_DIRECTORY):
path = os.path.join(LOG_DIRECTORY, f)
if not os.path.isfile(path):
continue
files.append(f)
total_bytes += os.path.getsize(path)
files = sorted(files)
total_bytes = humanize.naturalsize(total_bytes)
if the_file is None and len(files):
if 'docassemble.log' in files:
the_file = 'docassemble.log'
else:
the_file = files[0]
if the_file is not None:
filename = os.path.join(LOG_DIRECTORY, the_file)
else:
h = httplib2.Http()
resp, content = h.request("http://" + LOGSERVER + ':8082', "GET")
if int(resp['status']) >= 200 and int(resp['status']) < 300:
files = [f for f in content.decode().split("\n") if f != '' and f is not None]
else:
return ('File not found', 404)
if len(files) > 0:
if the_file is None:
the_file = files[0]
filename, headers = urlretrieve("http://" + LOGSERVER + ':8082/' + urllibquote(the_file)) # pylint: disable=unused-variable
if len(files) > 0 and not os.path.isfile(filename):
flash(word("The file you requested does not exist."), 'error')
if len(files) > 0:
the_file = files[0]
filename = os.path.join(LOG_DIRECTORY, files[0])
if len(files) > 0:
if request.method == 'POST' and form.submit.data and form.filter_string.data:
default_filter_string = form.filter_string.data
try:
reg_exp = re.compile(default_filter_string)
            except re.error:
flash(word("The regular expression you provided could not be parsed."), 'error')
default_filter_string = ''
if default_filter_string == '':
try:
                with open(filename, encoding='utf-8') as log_fp:
                    lines = tailer.tail(log_fp, 30)
except:
lines = [word('Unable to read log file; please download.')]
else:
temp_file = tempfile.NamedTemporaryFile(mode='a+', encoding='utf-8')
with open(filename, 'r', encoding='utf-8') as fp:
for line in fp:
if reg_exp.search(line):
temp_file.write(line)
temp_file.seek(0)
try:
lines = tailer.tail(temp_file, 30)
except:
lines = [word('Unable to read log file; please download.')]
temp_file.close()
        content = "\n".join(lines)
else:
content = "No log files available"
show_download_all = bool(LOGSERVER is None)
response = make_response(render_template('pages/logs.html', version_warning=version_warning, bodyclass='daadminbody', tab_title=word("Logs"), page_title=word("Logs"), form=form, files=files, current_file=the_file, content=content, default_filter_string=default_filter_string, show_download_all=show_download_all, total_bytes=total_bytes), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
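# Allows a logged-in user to email the site administrators to request developer privileges.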
@app.route('/reqdev', methods=['GET', 'POST'])
@login_required
def request_developer():
setup_translation()
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
form = RequestDeveloperForm(request.form)
recipients = []
if request.method == 'POST':
for user in db.session.execute(select(UserModel.id, UserModel.email).join(UserRoles, UserModel.id == UserRoles.user_id).join(Role, UserRoles.role_id == Role.id).where(and_(UserModel.active == True, Role.name == 'admin'))): # noqa: E712 # pylint: disable=singleton-comparison
if user.email not in recipients:
recipients.append(user.email)
body = "User " + str(current_user.email) + " (" + str(current_user.id) + ") has requested developer privileges.\n\n"
if form.reason.data:
body += "Reason given: " + str(form.reason.data) + "\n\n"
body += "Go to " + url_for('edit_user_profile_page', user_id=current_user.id, _external=True) + " to change the user's privileges."
msg = Message("Request for developer account from " + str(current_user.email), recipients=recipients, body=body)
if len(recipients) == 0:
flash(word('No administrators could be found.'), 'error')
else:
try:
da_send_mail(msg)
flash(word('Your request was submitted.'), 'success')
except:
flash(word('We were unable to submit your request.'), 'error')
return redirect(url_for('user.profile'))
return render_template('users/request_developer.html', version_warning=None, bodyclass='daadminbody', tab_title=word("Developer Access"), page_title=word("Developer Access"), form=form)
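# Reduces a raw DOCX template expression to its leading Python variable name.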
def docx_variable_fix(variable):
variable = re.sub(r'\\', '', variable)
variable = re.sub(r'^([A-Za-z\_][A-Za-z\_0-9]*).*', r'\1', variable)
return variable
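# Formats a PDF field default value for embedding in generated YAML, switching to a
# literal block when the value contains characters with special meaning in YAML.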
def sanitize(default):
default = re.sub(r'\n?\r\n?', "\n", str(default))
if re.search(r'[\#\!\?\:\n\r\"\'\[\]\{\}]+', default):
return "|\n" + docassemble.base.functions.indent(default, by=10)
return default
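# Extracts the fillable fields from an uploaded PDF, DOCX, or Markdown template and
# returns them either as a YAML attachment block or as JSON.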
def read_fields(filename, orig_file_name, input_format, output_format):
if output_format == 'yaml':
if input_format == 'pdf':
fields = docassemble.base.pdftk.read_fields(filename)
fields_seen = set()
if fields is None:
raise DAException(word("Error: no fields could be found in the file"))
fields_output = "---\nquestion: " + word("Here is your document.") + "\nevent: " + 'some_event' + "\nattachment:" + "\n - name: " + os.path.splitext(orig_file_name)[0] + "\n filename: " + os.path.splitext(orig_file_name)[0] + "\n pdf template file: " + re.sub(r'[^A-Za-z0-9\-\_\. ]+', '_', orig_file_name) + "\n fields:\n"
for field, default, pageno, rect, field_type, export_value in fields:
if field not in fields_seen:
fields_output += ' - "' + str(field) + '": ' + sanitize(default) + "\n"
fields_seen.add(field)
fields_output += "---"
return fields_output
if input_format in ('docx', 'markdown'):
if input_format == 'docx':
result_file = word_to_markdown(filename, 'docx')
if result_file is None:
raise DAException(word("Error: no fields could be found in the file"))
with open(result_file.name, 'r', encoding='utf-8') as fp:
result = fp.read()
elif input_format == 'markdown':
with open(filename, 'r', encoding='utf-8') as fp:
result = fp.read()
fields = set()
for variable in re.findall(r'{{[pr] \s*([^\}\s]+)\s*}}', result):
fields.add(docx_variable_fix(variable))
for variable in re.findall(r'{{\s*([^\}\s]+)\s*}}', result):
fields.add(docx_variable_fix(variable))
for variable in re.findall(r'{%[a-z]* for [A-Za-z\_][A-Za-z0-9\_]* in *([^\} ]+) *%}', result):
fields.add(docx_variable_fix(variable))
if len(fields) == 0:
raise DAException(word("Error: no fields could be found in the file"))
fields_output = "---\nquestion: " + word("Here is your document.") + "\nevent: " + 'some_event' + "\nattachment:" + "\n - name: " + os.path.splitext(orig_file_name)[0] + "\n filename: " + os.path.splitext(orig_file_name)[0] + "\n docx template file: " + re.sub(r'[^A-Za-z0-9\-\_\. ]+', '_', orig_file_name) + "\n fields:\n"
for field in fields:
fields_output += ' "' + field + '": ' + "Something\n"
fields_output += "---"
return fields_output
if output_format == 'json':
if input_format == 'pdf':
default_text = word("something")
output = {'fields': [], 'default_values': {}, 'types': {}, 'locations': {}, 'export_values': {}}
fields = docassemble.base.pdftk.read_fields(filename)
if fields is not None:
fields_seen = set()
for field, default, pageno, rect, field_type, export_value in fields:
real_default = str(default)
if real_default == default_text:
real_default = ''
if field not in fields_seen:
output['fields'].append(str(field))
output['default_values'][field] = real_default
output['types'][field] = re.sub(r"'", r'', str(field_type))
output['locations'][field] = {'page': int(pageno), 'box': rect}
output['export_values'][field] = export_value
return json.dumps(output, sort_keys=True, indent=2)
if input_format in ('docx', 'markdown'):
if input_format == 'docx':
result_file = word_to_markdown(filename, 'docx')
if result_file is None:
return json.dumps({'fields': []}, indent=2)
with open(result_file.name, 'r', encoding='utf-8') as fp:
result = fp.read()
elif input_format == 'markdown':
with open(filename, 'r', encoding='utf-8') as fp:
result = fp.read()
fields = set()
for variable in re.findall(r'{{ *([^\} ]+) *}}', result):
fields.add(docx_variable_fix(variable))
for variable in re.findall(r'{%[a-z]* for [A-Za-z\_][A-Za-z0-9\_]* in *([^\} ]+) *%}', result):
fields.add(docx_variable_fix(variable))
return json.dumps({'fields': list(fields)}, sort_keys=True, indent=2)
return None
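# Utilities page: translates the system phrases into another language (as YAML, XLSX,
# or XLIFF, optionally using the Google Translate API), extracts fields from an
# uploaded PDF or DOCX file, and generates the Office add-in manifest.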
@app.route('/utilities', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'developer'])
def utilities():
setup_translation()
form = Utilities(request.form)
fields_output = None
word_box = None
uses_null = False
file_type = None
if request.method == 'GET' and needs_to_change_password():
return redirect(url_for('user.change_password', next=url_for('utilities')))
if request.method == 'POST':
if 'language' in request.form:
language = request.form['language']
result = {}
result[language] = {}
existing = docassemble.base.functions.word_collection.get(language, {})
if 'api key' in daconfig['google'] and daconfig['google']['api key']:
try:
service = googleapiclient.discovery.build('translate', 'v2',
developerKey=daconfig['google']['api key'])
use_google_translate = True
except:
logmessage("utilities: attempt to call Google Translate failed")
use_google_translate = False
else:
use_google_translate = False
words_to_translate = []
for the_word in base_words:
if the_word in existing and existing[the_word] is not None:
result[language][the_word] = existing[the_word]
continue
words_to_translate.append(the_word)
chunk_limit = daconfig.get('google translate words at a time', 20)
chunks = []
interim_list = []
while len(words_to_translate) > 0:
the_word = words_to_translate.pop(0)
interim_list.append(the_word)
if len(interim_list) >= chunk_limit:
chunks.append(interim_list)
interim_list = []
if len(interim_list) > 0:
chunks.append(interim_list)
for chunk in chunks:
if use_google_translate:
try:
resp = service.translations().list(
source='en',
target=language,
q=chunk
).execute()
except Exception as errstr:
logmessage("utilities: translation failed: " + str(errstr))
resp = None
if isinstance(resp, dict) and 'translations' in resp and isinstance(resp['translations'], list) and len(resp['translations']) == len(chunk):
for the_index, the_chunk in enumerate(chunk):
if isinstance(resp['translations'][the_index], dict) and 'translatedText' in resp['translations'][the_index]:
                                result[language][the_chunk] = re.sub(r'&#39;', r"'", str(resp['translations'][the_index]['translatedText']))
else:
result[language][the_chunk] = 'XYZNULLXYZ'
uses_null = True
else:
for the_word in chunk:
result[language][the_word] = 'XYZNULLXYZ'
uses_null = True
else:
for the_word in chunk:
result[language][the_word] = 'XYZNULLXYZ'
uses_null = True
if form.systemfiletype.data == 'YAML':
word_box = altyamlstring.dump_to_string(result)
word_box = re.sub(r'"XYZNULLXYZ"', r'null', word_box)
elif form.systemfiletype.data == 'XLSX':
temp_file = tempfile.NamedTemporaryFile(suffix='.xlsx', delete=False)
xlsx_filename = language + "-words.xlsx"
workbook = xlsxwriter.Workbook(temp_file.name)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': 1, 'num_format': '@'})
text = workbook.add_format({'num_format': '@'})
text.set_align('top')
wrapping = workbook.add_format({'num_format': '@'})
wrapping.set_align('top')
wrapping.set_text_wrap()
# wrapping.set_locked(False)
numb = workbook.add_format()
numb.set_align('top')
worksheet.write('A1', 'orig_lang', bold)
worksheet.write('B1', 'tr_lang', bold)
worksheet.write('C1', 'orig_text', bold)
worksheet.write('D1', 'tr_text', bold)
worksheet.set_column(0, 0, 10)
worksheet.set_column(1, 1, 10)
worksheet.set_column(2, 2, 55)
worksheet.set_column(3, 3, 55)
row = 1
for key, val in result[language].items():
worksheet.write_string(row, 0, 'en', text)
worksheet.write_string(row, 1, language, text)
worksheet.write_string(row, 2, key, wrapping)
worksheet.write_string(row, 3, val, wrapping)
row += 1
workbook.close()
response = send_file(temp_file.name, mimetype='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', as_attachment=True, download_name=xlsx_filename)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
elif form.systemfiletype.data == 'XLIFF 1.2':
temp_file = tempfile.NamedTemporaryFile(suffix='.xlf', delete=False)
xliff_filename = language + "-words.xlf"
xliff = ET.Element('xliff')
xliff.set('xmlns', 'urn:oasis:names:tc:xliff:document:1.2')
xliff.set('version', '1.2')
the_file = ET.SubElement(xliff, 'file')
the_file.set('source-language', 'en')
the_file.set('target-language', language)
the_file.set('datatype', 'plaintext')
the_file.set('original', 'self')
the_file.set('id', 'f1')
the_file.set('xml:space', 'preserve')
body = ET.SubElement(the_file, 'body')
indexno = 1
for key, val in result[language].items():
trans_unit = ET.SubElement(body, 'trans-unit')
trans_unit.set('id', str(indexno))
trans_unit.set('xml:space', 'preserve')
source = ET.SubElement(trans_unit, 'source')
source.set('xml:space', 'preserve')
target = ET.SubElement(trans_unit, 'target')
target.set('xml:space', 'preserve')
source.text = key
target.text = val
indexno += 1
temp_file.write(ET.tostring(xliff))
temp_file.close()
response = send_file(temp_file.name, mimetype='application/xml', as_attachment=True, download_name=xliff_filename)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
elif form.systemfiletype.data == 'XLIFF 2.0':
temp_file = tempfile.NamedTemporaryFile(suffix='.xlf', delete=False)
xliff_filename = language + "-words.xlf"
xliff = ET.Element('xliff')
xliff.set('xmlns', 'urn:oasis:names:tc:xliff:document:2.0')
xliff.set('version', '2.0')
xliff.set('srcLang', 'en')
xliff.set('trgLang', language)
the_file = ET.SubElement(xliff, 'file')
the_file.set('id', 'f1')
the_file.set('original', 'self')
the_file.set('xml:space', 'preserve')
unit = ET.SubElement(the_file, 'unit')
unit.set('id', "docassemble_phrases")
indexno = 1
for key, val in result[language].items():
segment = ET.SubElement(unit, 'segment')
segment.set('id', str(indexno))
segment.set('xml:space', 'preserve')
source = ET.SubElement(segment, 'source')
source.set('xml:space', 'preserve')
target = ET.SubElement(segment, 'target')
target.set('xml:space', 'preserve')
source.text = key
target.text = val
indexno += 1
temp_file.write(ET.tostring(xliff))
temp_file.close()
response = send_file(temp_file.name, mimetype='application/xml', as_attachment=True, download_name=xliff_filename)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
if 'pdfdocxfile' in request.files and request.files['pdfdocxfile'].filename:
filename = secure_filename(request.files['pdfdocxfile'].filename)
extension, mimetype = get_ext_and_mimetype(filename) # pylint: disable=unused-variable
if mimetype == 'application/pdf':
file_type = 'pdf'
pdf_file = tempfile.NamedTemporaryFile(mode="wb", suffix=".pdf", delete=True)
the_file = request.files['pdfdocxfile']
the_file.save(pdf_file.name)
try:
fields_output = read_fields(pdf_file.name, the_file.filename, 'pdf', 'yaml')
except Exception as err:
fields_output = str(err)
pdf_file.close()
elif mimetype == 'application/vnd.openxmlformats-officedocument.wordprocessingml.document':
file_type = 'docx'
docx_file = tempfile.NamedTemporaryFile(mode="wb", suffix=".docx", delete=True)
the_file = request.files['pdfdocxfile']
the_file.save(docx_file.name)
try:
fields_output = read_fields(docx_file.name, the_file.filename, 'docx', 'yaml')
except Exception as err:
fields_output = str(err)
docx_file.close()
if form.officeaddin_submit.data:
resp = make_response(render_template('pages/officemanifest.xml', office_app_version=form.officeaddin_version.data, guid=str(uuid.uuid4())))
resp.headers['Content-type'] = 'text/xml; charset=utf-8'
resp.headers['Content-Disposition'] = 'attachment; filename="manifest.xml"'
resp.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return resp
extra_js = """
<script>
$('#pdfdocxfile').on('change', function(){
var fileName = $(this).val();
fileName = fileName.replace(/.*\\\\/, '');
fileName = fileName.replace(/.*\\//, '');
$(this).next('.custom-file-label').html(fileName);
});
</script>"""
form.systemfiletype.choices = [('YAML', 'YAML'), ('XLSX', 'XLSX'), ('XLIFF 1.2', 'XLIFF 1.2'), ('XLIFF 2.0', 'XLIFF 2.0')]
form.systemfiletype.data = 'YAML'
form.filetype.choices = [('XLSX', 'XLSX'), ('XLIFF 1.2', 'XLIFF 1.2'), ('XLIFF 2.0', 'XLIFF 2.0')]
form.filetype.data = 'XLSX'
response = make_response(render_template('pages/utilities.html', extra_js=Markup(extra_js), version_warning=version_warning, bodyclass='daadminbody', tab_title=word("Utilities"), page_title=word("Utilities"), form=form, fields=fields_output, word_box=word_box, uses_null=uses_null, file_type=file_type, interview_placeholder=word("E.g., docassemble.demo:data/questions/questions.yml"), language_placeholder=word("E.g., es, fr, it")), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
# @app.route('/save', methods=['GET', 'POST'])
# def save_for_later():
# if current_user.is_authenticated and not current_user.is_anonymous:
# return render_template('pages/save_for_later.html', interview=sdf)
# secret = request.cookies.get('secret', None)
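# After a password reset, reissues the 'secret' cookie from the value stashed in the
# session and redirects to the login page.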
@app.route('/after_reset', methods=['GET', 'POST'])
def after_reset():
# logmessage("after_reset")
response = redirect(url_for('user.login'))
if 'newsecret' in session:
# logmessage("after_reset: fixing cookie")
response.set_cookie('secret', session['newsecret'], httponly=True, secure=app.config['SESSION_COOKIE_SECURE'], samesite=app.config['SESSION_COOKIE_SAMESITE'])
del session['newsecret']
return response
# @app.before_request
# def reset_thread_local():
# docassemble.base.functions.reset_thread_local()
# @app.after_request
# def remove_temporary_files(response):
# docassemble.base.functions.close_files()
# return response
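# Returns True (and flashes a warning) when an admin with a locally-managed password
# has been flagged in Redis as having an insecure password; the flag is cleared once seen.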
def needs_to_change_password():
if not current_user.has_role('admin'):
return False
if not (current_user.social_id and current_user.social_id.startswith('local')):
return False
if r.get('da:insecure_password_present') is not None:
r.delete('da:insecure_password_present')
session.pop('_flashes', None)
flash(word("Your password is insecure and needs to be changed"), "warning")
return True
return False
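# Builds the fully qualified machine-learning group id from package, file, and group id;
# for the '_global' package the group id is returned unchanged.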
def fix_group_id(the_package, the_file, the_group_id):
if the_package == '_global':
group_id_to_use = the_group_id
else:
group_id_to_use = the_package
if re.search(r'^data/', the_file):
group_id_to_use += ':' + the_file
else:
group_id_to_use += ':data/sources/ml-' + the_file + '.json'
group_id_to_use += ':' + the_group_id
return group_id_to_use
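# If the database contains no training data for the interview's machine-learning store,
# loads the initial training set from the package's JSON source file.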
def ensure_training_loaded(interview):
# parts = yaml_filename.split(':')
# if len(parts) != 2:
# logmessage("ensure_training_loaded: could not read yaml_filename " + str(yaml_filename))
# return
# source_filename = parts[0] + ':data/sources/ml-' + re.sub(r'\.ya?ml$', '', re.sub(r'.*/', '', parts[1])) + '.json'
# logmessage("Source filename is " + source_filename)
source_filename = interview.get_ml_store()
parts = source_filename.split(':')
if len(parts) == 3 and parts[0].startswith('docassemble.') and re.match(r'data/sources/.*\.json$', parts[1]):
the_file = docassemble.base.functions.package_data_filename(source_filename)
if the_file is not None:
record = db.session.execute(select(MachineLearning.group_id).where(MachineLearning.group_id.like(source_filename + ':%'))).first()
if record is None:
if os.path.isfile(the_file):
with open(the_file, 'r', encoding='utf-8') as fp:
content = fp.read()
if len(content) > 0:
try:
href = json.loads(content)
if isinstance(href, dict):
nowtime = datetime.datetime.utcnow()
for group_id, train_list in href.items():
if isinstance(train_list, list):
for entry in train_list:
if 'independent' in entry:
depend = entry.get('dependent', None)
if depend is not None:
new_entry = MachineLearning(group_id=source_filename + ':' + group_id, independent=codecs.encode(pickle.dumps(entry['independent']), 'base64').decode(), dependent=codecs.encode(pickle.dumps(depend), 'base64').decode(), modtime=nowtime, create_time=nowtime, active=True, key=entry.get('key', None))
else:
new_entry = MachineLearning(group_id=source_filename + ':' + group_id, independent=codecs.encode(pickle.dumps(entry['independent']), 'base64').decode(), modtime=nowtime, create_time=nowtime, active=False, key=entry.get('key', None))
db.session.add(new_entry)
db.session.commit()
else:
logmessage("ensure_training_loaded: source filename " + source_filename + " not used because it did not contain a dict")
except:
logmessage("ensure_training_loaded: source filename " + source_filename + " not used because it did not contain valid JSON")
else:
logmessage("ensure_training_loaded: source filename " + source_filename + " not used because its content was empty")
else:
logmessage("ensure_training_loaded: source filename " + source_filename + " not used because it did not exist")
else:
logmessage("ensure_training_loaded: source filename " + source_filename + " not used because training data existed")
else:
logmessage("ensure_training_loaded: source filename " + source_filename + " did not exist")
else:
logmessage("ensure_training_loaded: source filename " + source_filename + " was not part of a package")
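# Attempts to load the cached interview corresponding to a package and file name,
# trying the usual data/questions locations; returns None if none can be loaded.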
def get_corresponding_interview(the_package, the_file):
# logmessage("get_corresponding_interview: " + the_package + " " + the_file)
interview = None
if re.match(r'docassemble.playground[0-9]+', the_package):
separator = ':'
else:
separator = ':data/questions/'
for interview_file in (the_package + separator + the_file + '.yml', the_package + separator + the_file + '.yaml', the_package + separator + 'examples/' + the_file + '.yml'):
# logmessage("Looking for " + interview_file)
try:
interview = docassemble.base.interview_cache.get_interview(interview_file)
break
except:
# logmessage("There was an exception looking for " + interview_file + ": " + str(the_err))
continue
return interview
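# Returns the package-qualified location of the machine-learning sources file
# (e.g. package:data/sources/ml-<file>.json) used to prefix group ids.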
def ml_prefix(the_package, the_file):
the_prefix = the_package
if re.search(r'^data/', the_file):
the_prefix += ':' + the_file
else:
the_prefix += ':data/sources/ml-' + the_file + '.json'
return the_prefix
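# Training interface for machine-learning models: browse packages, files, and group ids,
# import or download training data as JSON, and label or delete individual observations.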
@app.route('/train', methods=['GET', 'POST'])
@login_required
@roles_required(['admin', 'developer', 'trainer'])
def train():
setup_translation()
the_package = request.args.get('package', None)
if the_package is not None:
if the_package.startswith('_'):
the_package = '_' + werkzeug.utils.secure_filename(the_package)
else:
the_package = werkzeug.utils.secure_filename(the_package)
the_file = request.args.get('file', None)
if the_file is not None:
if the_file.startswith('_'):
the_file = '_' + secure_filename_spaces_ok(the_file)
else:
the_file = secure_filename_spaces_ok(the_file)
the_group_id = request.args.get('group_id', None)
show_all = int(request.args.get('show_all', 0))
form = TrainingForm(request.form)
uploadform = TrainingUploadForm(request.form)
if request.method == 'POST' and the_package is not None and the_file is not None:
if the_package == '_global':
the_prefix = ''
else:
the_prefix = ml_prefix(the_package, the_file)
json_file = None
if the_package != '_global' and uploadform.usepackage.data == 'yes':
the_file = docassemble.base.functions.package_data_filename(the_prefix)
if the_file is None or not os.path.isfile(the_file):
flash(word("Error reading JSON file from package. File did not exist."), 'error')
return redirect(url_for('train', package=the_package, file=the_file, group_id=the_group_id, show_all=show_all))
json_file = open(the_file, 'r', encoding='utf-8')
if uploadform.usepackage.data == 'no' and 'jsonfile' in request.files and request.files['jsonfile'].filename:
json_file = tempfile.NamedTemporaryFile(prefix="datemp", suffix=".json")
request.files['jsonfile'].save(json_file.name)
json_file.seek(0)
if json_file is not None:
try:
href = json.load(json_file)
            except:
                json_file.close()
                flash(word("Error reading JSON file. Not a valid JSON file."), 'error')
                return redirect(url_for('train', package=the_package, file=the_file, group_id=the_group_id, show_all=show_all))
json_file.close()
if not isinstance(href, dict):
flash(word("Error reading JSON file. The JSON file needs to contain a dictionary at the root level."), 'error')
return redirect(url_for('train', package=the_package, file=the_file, group_id=the_group_id, show_all=show_all))
nowtime = datetime.datetime.utcnow()
for group_id, train_list in href.items():
if not isinstance(train_list, list):
logmessage("train: could not import part of JSON file. Items in dictionary must be lists.")
continue
if uploadform.importtype.data == 'replace':
db.session.execute(sqldelete(MachineLearning).filter_by(group_id=the_prefix + ':' + group_id))
db.session.commit()
for entry in train_list:
if 'independent' in entry:
depend = entry.get('dependent', None)
if depend is not None:
new_entry = MachineLearning(group_id=the_prefix + ':' + group_id, independent=codecs.encode(pickle.dumps(entry['independent']), 'base64').decode(), dependent=codecs.encode(pickle.dumps(depend), 'base64').decode(), modtime=nowtime, create_time=nowtime, active=True, key=entry.get('key', None))
else:
new_entry = MachineLearning(group_id=the_prefix + ':' + group_id, independent=codecs.encode(pickle.dumps(entry['independent']), 'base64').decode(), modtime=nowtime, create_time=nowtime, active=False, key=entry.get('key', None))
db.session.add(new_entry)
elif uploadform.importtype.data == 'merge':
indep_in_use = set()
for record in db.session.execute(select(MachineLearning).filter_by(group_id=the_prefix + ':' + group_id)).scalars():
indep = fix_pickle_obj(codecs.decode(bytearray(record.independent, encoding='utf-8'), 'base64'))
if indep is not None:
indep_in_use.add(indep)
for entry in train_list:
if 'independent' in entry and entry['independent'] not in indep_in_use:
depend = entry.get('dependent', None)
if depend is not None:
new_entry = MachineLearning(group_id=the_prefix + ':' + group_id, independent=codecs.encode(pickle.dumps(entry['independent']), 'base64').decode(), dependent=codecs.encode(pickle.dumps(depend), 'base64').decode(), modtime=nowtime, create_time=nowtime, active=True, key=entry.get('key', None))
else:
new_entry = MachineLearning(group_id=the_prefix + ':' + group_id, independent=codecs.encode(pickle.dumps(entry['independent']), 'base64').decode(), modtime=nowtime, create_time=nowtime, active=False, key=entry.get('key', None))
db.session.add(new_entry)
db.session.commit()
flash(word("Training data were successfully imported."), 'success')
return redirect(url_for('train', package=the_package, file=the_file, group_id=the_group_id, show_all=show_all))
if form.cancel.data:
return redirect(url_for('train', package=the_package, file=the_file, show_all=show_all))
if form.submit.data:
group_id_to_use = fix_group_id(the_package, the_file, the_group_id)
post_data = request.form.copy()
deleted = set()
for key, val in post_data.items():
m = re.match(r'delete([0-9]+)', key)
if not m:
continue
entry_id = int(m.group(1))
model = docassemble.base.util.SimpleTextMachineLearner(group_id=group_id_to_use)
model.delete_by_id(entry_id)
deleted.add('dependent' + m.group(1))
for key in deleted:
if key in post_data:
del post_data[key]
for key, val in post_data.items():
m = re.match(r'dependent([0-9]+)', key)
if not m:
continue
orig_key = 'original' + m.group(1)
if orig_key in post_data and post_data[orig_key] != val and val != '':
entry_id = int(m.group(1))
model = docassemble.base.util.SimpleTextMachineLearner(group_id=group_id_to_use)
model.set_dependent_by_id(entry_id, val)
if post_data.get('newindependent', '') != '':
model = docassemble.base.util.SimpleTextMachineLearner(group_id=group_id_to_use)
if post_data.get('newdependent', '') != '':
model.add_to_training_set(post_data['newindependent'], post_data['newdependent'])
else:
model.save_for_classification(post_data['newindependent'])
return redirect(url_for('train', package=the_package, file=the_file, group_id=the_group_id, show_all=show_all))
if show_all:
show_all = 1
show_cond = MachineLearning.id != None # noqa: E711 # pylint: disable=singleton-comparison
else:
show_all = 0
show_cond = MachineLearning.dependent == None # noqa: E711 # pylint: disable=singleton-comparison
package_list = {}
file_list = {}
group_id_list = {}
entry_list = []
if current_user.has_role('admin', 'developer'):
playground_package = 'docassemble.playground' + str(current_user.id)
else:
playground_package = None
if the_package is None:
for record in db.session.execute(select(MachineLearning.group_id, db.func.count(MachineLearning.id).label('cnt')).where(show_cond).group_by(MachineLearning.group_id)):
group_id = record.group_id
parts = group_id.split(':')
if is_package_ml(parts):
if parts[0] not in package_list:
package_list[parts[0]] = 0
package_list[parts[0]] += record.cnt
else:
if '_global' not in package_list:
package_list['_global'] = 0
package_list['_global'] += record.cnt
if not show_all:
for record in db.session.execute(select(MachineLearning.group_id).group_by(MachineLearning.group_id)):
parts = record.group_id.split(':')
if is_package_ml(parts):
if parts[0] not in package_list:
package_list[parts[0]] = 0
if '_global' not in package_list:
package_list['_global'] = 0
if playground_package and playground_package not in package_list:
package_list[playground_package] = 0
package_list = [(x, package_list[x]) for x in sorted(package_list)]
response = make_response(render_template('pages/train.html', version_warning=version_warning, bodyclass='daadminbody', tab_title=word("Train"), page_title=word("Train machine learning models"), the_package=the_package, the_file=the_file, the_group_id=the_group_id, package_list=package_list, file_list=file_list, group_id_list=group_id_list, entry_list=entry_list, show_all=show_all, show_package_list=True, playground_package=playground_package), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
if playground_package and the_package == playground_package:
the_package_display = word("My Playground")
else:
the_package_display = the_package
if the_file is None:
file_list = {}
for record in db.session.execute(select(MachineLearning.group_id, db.func.count(MachineLearning.id).label('cnt')).where(and_(MachineLearning.group_id.like(the_package + ':%'), show_cond)).group_by(MachineLearning.group_id)):
parts = record.group_id.split(':')
# logmessage("Group id is " + str(parts))
if not is_package_ml(parts):
continue
if re.match(r'data/sources/ml-.*\.json$', parts[1]):
parts[1] = re.sub(r'^data/sources/ml-|\.json$', '', parts[1])
if parts[1] not in file_list:
file_list[parts[1]] = 0
file_list[parts[1]] += record.cnt
if not show_all:
for record in db.session.execute(select(MachineLearning.group_id).where(MachineLearning.group_id.like(the_package + ':%')).group_by(MachineLearning.group_id)):
parts = record.group_id.split(':')
# logmessage("Other group id is " + str(parts))
if not is_package_ml(parts):
continue
if re.match(r'data/sources/ml-.*\.json$', parts[1]):
parts[1] = re.sub(r'^data/sources/ml-|\.json$', '', parts[1])
if parts[1] not in file_list:
file_list[parts[1]] = 0
if playground_package and the_package == playground_package:
area = SavedFile(current_user.id, fix=False, section='playgroundsources')
for filename in area.list_of_files():
# logmessage("hey file is " + str(filename))
if re.match(r'ml-.*\.json$', filename):
short_file_name = re.sub(r'^ml-|\.json$', '', filename)
if short_file_name not in file_list:
file_list[short_file_name] = 0
file_list = [(x, file_list[x]) for x in sorted(file_list)]
response = make_response(render_template('pages/train.html', version_warning=version_warning, bodyclass='daadminbody', tab_title=word("Train"), page_title=word("Train machine learning models"), the_package=the_package, the_package_display=the_package_display, the_file=the_file, the_group_id=the_group_id, package_list=package_list, file_list=file_list, group_id_list=group_id_list, entry_list=entry_list, show_all=show_all, show_file_list=True), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
if the_group_id is None:
the_prefix = ml_prefix(the_package, the_file)
the_package_file = docassemble.base.functions.package_data_filename(the_prefix)
package_file_available = bool(the_package_file is not None and os.path.isfile(the_package_file))
if 'download' in request.args and request.args['download']:
output = {}
if the_package == '_global':
json_filename = 'ml-global.json'
for record in db.session.execute(select(MachineLearning.id, MachineLearning.group_id, MachineLearning.independent, MachineLearning.dependent, MachineLearning.key)):
if is_package_ml(record.group_id.split(':')):
continue
if record.group_id not in output:
output[record.group_id] = []
if record.dependent is None:
the_dependent = None
else:
the_dependent = fix_pickle_obj(codecs.decode(bytearray(record.dependent, encoding='utf-8'), 'base64'))
the_independent = fix_pickle_obj(codecs.decode(bytearray(record.independent, encoding='utf-8'), 'base64'))
try:
str(the_independent) + "" # pylint: disable=expression-not-assigned
str(the_dependent) + "" # pylint: disable=expression-not-assigned
except Exception as e:
logmessage("Bad record: id " + str(record.id) + " where error was " + str(e))
continue
the_entry = {'independent': fix_pickle_obj(codecs.decode(bytearray(record.independent, encoding='utf-8'), 'base64')), 'dependent': the_dependent}
if record.key is not None:
the_entry['key'] = record.key
output[record.group_id].append(the_entry)
else:
json_filename = 'ml-' + the_file + '.json'
prefix = ml_prefix(the_package, the_file)
for record in db.session.execute(select(MachineLearning.group_id, MachineLearning.independent, MachineLearning.dependent, MachineLearning.key).where(MachineLearning.group_id.like(prefix + ':%'))):
parts = record.group_id.split(':')
if not is_package_ml(parts):
continue
if parts[2] not in output:
output[parts[2]] = []
if record.dependent is None:
the_dependent = None
else:
the_dependent = fix_pickle_obj(codecs.decode(bytearray(record.dependent, encoding='utf-8'), 'base64'))
the_entry = {'independent': fix_pickle_obj(codecs.decode(bytearray(record.independent, encoding='utf-8'), 'base64')), 'dependent': the_dependent}
if record.key is not None:
the_entry['key'] = record.key
output[parts[2]].append(the_entry)
if len(output) > 0:
the_json_file = tempfile.NamedTemporaryFile(mode='w', prefix="datemp", suffix=".json", delete=False, encoding='utf-8')
                json.dump(output, the_json_file, sort_keys=True, indent=2)
                the_json_file.close()
                response = send_file(the_json_file.name, mimetype='application/json', as_attachment=True, download_name=json_filename)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
flash(word("No data existed in training set. JSON file not created."), "error")
return redirect(url_for('train', package=the_package, file=the_file, show_all=show_all))
if the_package == '_global':
for record in db.session.execute(select(MachineLearning.group_id, db.func.count(MachineLearning.id).label('cnt')).where(show_cond).group_by(MachineLearning.group_id)):
if is_package_ml(record.group_id.split(':')):
continue
if record.group_id not in group_id_list:
group_id_list[record.group_id] = 0
group_id_list[record.group_id] += record.cnt
if not show_all:
for record in db.session.execute(select(MachineLearning.group_id).group_by(MachineLearning.group_id)):
if is_package_ml(record.group_id.split(':')):
continue
if record.group_id not in group_id_list:
group_id_list[record.group_id] = 0
else:
the_prefix = ml_prefix(the_package, the_file)
# logmessage("My prefix is " + the_prefix)
for record in db.session.execute(select(MachineLearning.group_id, db.func.count(MachineLearning.id).label('cnt')).where(and_(MachineLearning.group_id.like(the_prefix + ':%'), show_cond)).group_by(MachineLearning.group_id)):
parts = record.group_id.split(':')
if not is_package_ml(parts):
continue
if parts[2] not in group_id_list:
group_id_list[parts[2]] = 0
group_id_list[parts[2]] += record.cnt
if not show_all:
for record in db.session.execute(select(MachineLearning.group_id).where(MachineLearning.group_id.like(the_prefix + ':%')).group_by(MachineLearning.group_id)):
parts = record.group_id.split(':')
if not is_package_ml(parts):
continue
if parts[2] not in group_id_list:
group_id_list[parts[2]] = 0
if the_package != '_global' and not re.search(r'^data/', the_file):
interview = get_corresponding_interview(the_package, the_file)
if interview is not None and len(interview.mlfields):
for saveas in interview.mlfields:
if 'ml_group' in interview.mlfields[saveas] and not interview.mlfields[saveas]['ml_group'].uses_mako:
the_saveas = interview.mlfields[saveas]['ml_group'].original_text
else:
the_saveas = saveas
if not re.search(r':', the_saveas):
if the_saveas not in group_id_list:
group_id_list[the_saveas] = 0
group_id_list = [(x, group_id_list[x]) for x in sorted(group_id_list)]
extra_js = """
<script>
$( document ).ready(function() {
$("#showimport").click(function(e){
$("#showimport").hide();
$("#hideimport").show();
$("#importcontrols").show('fast');
e.preventDefault();
return false;
});
$("#hideimport").click(function(e){
$("#showimport").show();
$("#hideimport").hide();
$("#importcontrols").hide('fast');
e.preventDefault();
return false;
});
$("input[type=radio][name=usepackage]").on('change', function(e) {
if ($(this).val() == 'no'){
$("#uploadinput").show();
}
else{
$("#uploadinput").hide();
}
e.preventDefault();
return false;
});
});
</script>"""
response = make_response(render_template('pages/train.html', extra_js=Markup(extra_js), version_warning=version_warning, bodyclass='daadminbody', tab_title=word("Train"), page_title=word("Train machine learning models"), the_package=the_package, the_package_display=the_package_display, the_file=the_file, the_group_id=the_group_id, package_list=package_list, file_list=file_list, group_id_list=group_id_list, entry_list=entry_list, show_all=show_all, show_group_id_list=True, package_file_available=package_file_available, the_package_location=the_prefix, uploadform=uploadform), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
group_id_to_use = fix_group_id(the_package, the_file, the_group_id)
model = docassemble.base.util.SimpleTextMachineLearner(group_id=group_id_to_use)
for record in db.session.execute(select(MachineLearning.id, MachineLearning.group_id, MachineLearning.key, MachineLearning.info, MachineLearning.independent, MachineLearning.dependent, MachineLearning.create_time, MachineLearning.modtime, MachineLearning.active).where(and_(MachineLearning.group_id == group_id_to_use, show_cond))):
        new_entry = {'id': record.id, 'group_id': record.group_id, 'key': record.key, 'independent': fix_pickle_obj(codecs.decode(bytearray(record.independent, encoding='utf-8'), 'base64')) if record.independent is not None else None, 'dependent': fix_pickle_obj(codecs.decode(bytearray(record.dependent, encoding='utf-8'), 'base64')) if record.dependent is not None else None, 'info': fix_pickle_obj(codecs.decode(bytearray(record.info, encoding='utf-8'), 'base64')) if record.info is not None else None, 'create_type': record.create_time, 'modtime': record.modtime, 'active': record.active}
if new_entry['dependent'] is None and new_entry['active'] is True:
new_entry['active'] = False
if isinstance(new_entry['independent'], (DADict, dict)):
new_entry['independent_display'] = '<div class="damldatacontainer">' + '<br>'.join(['<span class="damldatakey">' + str(key) + '</span>: <span class="damldatavalue">' + str(val) + ' (' + str(val.__class__.__name__) + ')</span>' for key, val in new_entry['independent'].items()]) + '</div>'
new_entry['type'] = 'data'
else:
new_entry['type'] = 'text'
if new_entry['dependent'] is None:
new_entry['predictions'] = model.predict(new_entry['independent'], probabilities=True)
if len(new_entry['predictions']) == 0:
new_entry['predictions'] = None
elif len(new_entry['predictions']) > 10:
new_entry['predictions'] = new_entry['predictions'][0:10]
if new_entry['predictions'] is not None:
new_entry['predictions'] = [(prediction, '%d%%' % (100.0*probability)) for prediction, probability in new_entry['predictions']]
else:
new_entry['predictions'] = None
if new_entry['info'] is not None:
if isinstance(new_entry['info'], DAFile):
image_file_list = [new_entry['info']]
elif isinstance(new_entry['info'], DAFileList):
image_file_list = new_entry['info']
else:
logmessage("train: info is not a DAFile or DAFileList")
continue
new_entry['image_files'] = []
for image_file in image_file_list:
if not isinstance(image_file, DAFile):
logmessage("train: file is not a DAFile")
continue
if not image_file.ok:
logmessage("train: file does not have a number")
continue
if image_file.extension not in ('pdf', 'png', 'jpg', 'jpeg', 'gif'):
logmessage("train: file did not have a recognizable image type")
continue
doc_url = get_url_from_file_reference(image_file)
if image_file.extension == 'pdf':
image_url = get_url_from_file_reference(image_file, size="screen", page=1, ext='pdf')
else:
image_url = doc_url
new_entry['image_files'].append({'doc_url': doc_url, 'image_url': image_url})
entry_list.append(new_entry)
if len(entry_list) == 0:
record = db.session.execute(select(MachineLearning.independent).where(and_(MachineLearning.group_id == group_id_to_use, MachineLearning.independent != None))).first() # noqa: E711 # pylint: disable=singleton-comparison
if record is not None:
sample_indep = fix_pickle_obj(codecs.decode(bytearray(record.independent, encoding='utf-8'), 'base64'))
else:
sample_indep = None
else:
sample_indep = entry_list[0]['independent']
is_data = isinstance(sample_indep, (DADict, dict))
choices = {}
for record in db.session.execute(select(MachineLearning.dependent, db.func.count(MachineLearning.id).label('cnt')).where(and_(MachineLearning.group_id == group_id_to_use)).group_by(MachineLearning.dependent)):
# logmessage("There is a choice")
if record.dependent is None:
continue
key = fix_pickle_obj(codecs.decode(bytearray(record.dependent, encoding='utf-8'), 'base64'))
if key is not None:
choices[key] = record.cnt
if len(choices) > 0:
# logmessage("There are choices")
choices = [(x, choices[x]) for x in sorted(choices, key=operator.itemgetter(0), reverse=False)]
else:
# logmessage("There are no choices")
choices = None
extra_js = """
<script>
$( document ).ready(function(){
$("button.prediction").click(function(){
if (!($("#dependent" + $(this).data("id-number")).prop('disabled'))){
$("#dependent" + $(this).data("id-number")).val($(this).data("prediction"));
}
});
$("select.trainer").change(function(){
var the_number = $(this).data("id-number");
if (the_number == "newdependent"){
$("#newdependent").val($(this).val());
}
else{
$("#dependent" + the_number).val($(this).val());
}
});
$("div.dadelete-observation input").change(function(){
var the_number = $(this).data("id-number");
if ($(this).is(':checked')){
$("#dependent" + the_number).prop('disabled', true);
$("#selector" + the_number).prop('disabled', true);
}
else{
$("#dependent" + the_number).prop('disabled', false);
$("#selector" + the_number).prop('disabled', false);
}
});
});
</script>"""
response = make_response(render_template('pages/train.html', extra_js=Markup(extra_js), form=form, version_warning=version_warning, bodyclass='daadminbody', tab_title=word("Train"), page_title=word("Train machine learning models"), the_package=the_package, the_package_display=the_package_display, the_file=the_file, the_group_id=the_group_id, entry_list=entry_list, choices=choices, show_all=show_all, show_entry_list=True, is_data=is_data), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
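# Recursively converts a docassemble.base.DA query expression (Condition, Group, and
# Column objects) into the equivalent SQLAlchemy filter clause.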
def user_interviews_filter(obj):
if isinstance(obj, docassemble.base.DA.Condition):
leftside = user_interviews_filter(obj.leftside)
rightside = user_interviews_filter(obj.rightside)
if obj.operator == 'and':
return leftside & rightside
if obj.operator == 'xor':
return leftside ^ rightside
if obj.operator == 'or':
return leftside | rightside
if obj.operator == 'not':
return not_(leftside)
if obj.operator == 'le':
return leftside <= rightside
if obj.operator == 'ge':
return leftside >= rightside
if obj.operator == 'gt':
return leftside > rightside
if obj.operator == 'lt':
return leftside < rightside
if obj.operator == 'eq':
return leftside == rightside
if obj.operator == 'ne':
return leftside != rightside
if obj.operator == 'like':
return leftside.like(rightside)
if obj.operator == 'in':
return leftside.in_(rightside)
raise DAException("Operator not recognized")
if isinstance(obj, docassemble.base.DA.Group):
items = [user_interviews_filter(item) for item in obj.items]
if obj.group_type == 'and':
return and_(*items)
if obj.group_type == 'or':
return or_(*items)
raise DAException("Group type not recognized")
if isinstance(obj, docassemble.base.DA.Column):
if obj.name == 'indexno':
return UserDict.indexno
if obj.name == 'modtime':
return UserDict.modtime
if obj.name == 'filename':
return UserDictKeys.filename
if obj.name == 'key':
return UserDictKeys.key
if obj.name == 'encrypted':
return UserDict.encrypted
if obj.name == 'user_id':
return UserDictKeys.user_id
if obj.name == 'email':
return UserModel.email
if obj.name == 'first_name':
return UserModel.first_name
if obj.name == 'last_name':
return UserModel.last_name
if obj.name == 'country':
return UserModel.country
if obj.name == 'subdivisionfirst':
return UserModel.subdivisionfirst
if obj.name == 'subdivisionsecond':
return UserModel.subdivisionsecond
if obj.name == 'subdivisionthird':
return UserModel.subdivisionthird
if obj.name == 'organization':
return UserModel.organization
if obj.name == 'timezone':
return UserModel.timezone
if obj.name == 'language':
return UserModel.language
if obj.name == 'last_login':
return UserModel.last_login
raise DAException("Column " + repr(obj.name) + " not available")
return obj
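# Lists or deletes stored interview sessions, paginating by PAGINATION_LIMIT; viewing
# or deleting other users' sessions requires the admin/advocate role or the
# access_sessions/edit_sessions permission.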
def user_interviews(user_id=None, secret=None, exclude_invalid=True, action=None, filename=None, session=None, tag=None, include_dict=True, delete_shared=False, admin=False, start_id=None, temp_user_id=None, query=None, minimal=False): # pylint: disable=redefined-outer-name
# logmessage("user_interviews: user_id is " + str(user_id) + " and secret is " + str(secret))
if minimal is False:
if session is not None and user_id is None and temp_user_id is None and current_user.is_authenticated and not current_user.has_role_or_permission('admin', 'advocate', permissions=['access_sessions']):
user_id = current_user.id
elif user_id is None and (current_user.is_anonymous or not current_user.has_role_or_permission('admin', 'advocate', permissions=['access_sessions'])):
raise DAException('user_interviews: you do not have sufficient privileges to access information about other users')
if user_id is not None and admin is False and not (current_user.is_authenticated and (current_user.same_as(user_id) or current_user.has_role_or_permission('admin', 'advocate', permissions=['access_sessions']))):
raise DAException('user_interviews: you do not have sufficient privileges to access information about other users')
if action is not None and admin is False and not current_user.has_role_or_permission('admin', 'advocate', permissions=['edit_sessions']):
if user_id is None:
raise DAException("user_interviews: no user_id provided")
the_user = get_person(int(user_id), {})
if the_user is None:
raise DAException("user_interviews: user_id " + str(user_id) + " not valid")
if query is not None:
the_query = user_interviews_filter(query)
if action == 'delete_all':
sessions_to_delete = set()
if tag or query is not None:
start_id = None
while True:
(the_list, start_id) = user_interviews(user_id=user_id, secret=secret, filename=filename, session=session, tag=tag, include_dict=False, exclude_invalid=False, start_id=start_id, temp_user_id=temp_user_id, query=query, minimal=True)
for interview_info in the_list:
sessions_to_delete.add((interview_info['session'], interview_info['filename'], interview_info['user_id'], interview_info['temp_user_id']))
if start_id is None:
break
else:
where_clause = []
if temp_user_id is not None:
where_clause.append(UserDictKeys.temp_user_id == temp_user_id)
elif user_id is not None:
where_clause.append(UserDictKeys.user_id == user_id)
if filename is not None:
where_clause.append(UserDictKeys.filename == filename)
if session is not None:
where_clause.append(UserDictKeys.key == session)
interview_query = db.session.execute(select(UserDictKeys.filename, UserDictKeys.key, UserDictKeys.user_id, UserDictKeys.temp_user_id).where(*where_clause).group_by(UserDictKeys.filename, UserDictKeys.key, UserDictKeys.user_id, UserDictKeys.temp_user_id))
for interview_info in interview_query:
sessions_to_delete.add((interview_info.key, interview_info.filename, interview_info.user_id, interview_info.temp_user_id))
if user_id is not None:
if filename is None:
interview_query = db.session.execute(select(UserDict.filename, UserDict.key).where(UserDict.user_id == user_id).group_by(UserDict.filename, UserDict.key))
else:
interview_query = db.session.execute(select(UserDict.filename, UserDict.key).where(UserDict.user_id == user_id, UserDict.filename == filename).group_by(UserDict.filename, UserDict.key))
for interview_info in interview_query:
sessions_to_delete.add((interview_info.key, interview_info.filename, user_id, None))
logmessage("Deleting " + str(len(sessions_to_delete)) + " interviews")
if len(sessions_to_delete) > 0:
for session_id, yaml_filename, the_user_id, the_temp_user_id in sessions_to_delete:
manual_checkout(manual_session_id=session_id, manual_filename=yaml_filename, user_id=the_user_id, delete_session=True, temp_user_id=the_temp_user_id)
# obtain_lock(session_id, yaml_filename)
if the_user_id is None or delete_shared:
reset_user_dict(session_id, yaml_filename, user_id=the_user_id, temp_user_id=the_temp_user_id, force=True)
else:
reset_user_dict(session_id, yaml_filename, user_id=the_user_id, temp_user_id=the_temp_user_id)
# release_lock(session_id, yaml_filename)
return len(sessions_to_delete)
if action == 'delete':
if filename is None or session is None:
raise DAException("user_interviews: filename and session must be provided in order to delete interview")
manual_checkout(manual_session_id=session, manual_filename=filename, user_id=user_id, temp_user_id=temp_user_id, delete_session=True)
# obtain_lock(session, filename)
reset_user_dict(session, filename, user_id=user_id, temp_user_id=temp_user_id, force=delete_shared)
# release_lock(session, filename)
return True
if minimal:
the_timezone = None
elif admin is False and current_user and current_user.is_authenticated and current_user.timezone:
the_timezone = zoneinfo.ZoneInfo(current_user.timezone)
else:
the_timezone = zoneinfo.ZoneInfo(get_default_timezone())
interviews_length = 0
interviews = []
while True:
there_are_more = False
if temp_user_id is not None:
query_elements = [UserDict.indexno, UserDictKeys.user_id, UserDictKeys.temp_user_id, UserDictKeys.filename, UserDictKeys.key, UserModel.email]
subq_filter_elements = [UserDictKeys.temp_user_id == temp_user_id]
if include_dict:
query_elements.extend([UserDict.dictionary, UserDict.encrypted])
else:
query_elements.append(UserDict.modtime)
if filename is not None:
subq_filter_elements.append(UserDictKeys.filename == filename)
if session is not None:
subq_filter_elements.append(UserDictKeys.key == session)
if start_id is not None:
subq_filter_elements.append(UserDict.indexno > start_id)
subq = select(UserDictKeys.filename, UserDictKeys.key, db.func.max(UserDict.indexno).label('indexno')).join(UserDict, and_(UserDictKeys.filename == UserDict.filename, UserDictKeys.key == UserDict.key))
if len(subq_filter_elements) > 0:
subq = subq.where(and_(*subq_filter_elements))
subq = subq.group_by(UserDictKeys.filename, UserDictKeys.key).subquery()
interview_query = select(*query_elements).select_from(subq.join(UserDict, subq.c.indexno == UserDict.indexno).join(UserDictKeys, and_(UserDict.filename == UserDictKeys.filename, UserDict.key == UserDictKeys.key, UserDictKeys.temp_user_id == temp_user_id)).outerjoin(UserModel, 0 == 1)) # pylint: disable=comparison-of-constants
if query is not None:
interview_query = interview_query.where(the_query)
interview_query = interview_query.order_by(UserDict.indexno)
elif user_id is not None:
query_elements = [UserDict.indexno, UserDictKeys.user_id, UserDictKeys.temp_user_id, UserDictKeys.filename, UserDictKeys.key, UserModel.email]
subq_filter_elements = [UserDictKeys.user_id == user_id]
if include_dict:
query_elements.extend([UserDict.dictionary, UserDict.encrypted])
else:
query_elements.append(UserDict.modtime)
if filename is not None:
subq_filter_elements.append(UserDictKeys.filename == filename)
if session is not None:
subq_filter_elements.append(UserDictKeys.key == session)
if start_id is not None:
subq_filter_elements.append(UserDict.indexno > start_id)
subq = select(UserDictKeys.filename, UserDictKeys.key, db.func.max(UserDict.indexno).label('indexno')).join(UserDict, and_(UserDictKeys.filename == UserDict.filename, UserDictKeys.key == UserDict.key))
if len(subq_filter_elements) > 0:
subq = subq.where(and_(*subq_filter_elements))
subq = subq.group_by(UserDictKeys.filename, UserDictKeys.key).subquery()
interview_query = select(*query_elements).select_from(subq.join(UserDict, subq.c.indexno == UserDict.indexno).join(UserDictKeys, and_(UserDict.filename == UserDictKeys.filename, UserDict.key == UserDictKeys.key, UserDictKeys.user_id == user_id)).join(UserModel, UserDictKeys.user_id == UserModel.id))
if query is not None:
interview_query = interview_query.where(the_query)
interview_query = interview_query.order_by(UserDict.indexno)
else:
query_elements = [UserDict.indexno, UserDictKeys.user_id, UserDictKeys.temp_user_id, UserDict.filename, UserDict.key, UserModel.email]
subq_filter_elements = []
if include_dict:
query_elements.extend([UserDict.dictionary, UserDict.encrypted])
else:
query_elements.append(UserDict.modtime)
if filename is not None:
subq_filter_elements.append(UserDict.filename == filename)
if session is not None:
subq_filter_elements.append(UserDict.key == session)
if start_id is not None:
subq_filter_elements.append(UserDict.indexno > start_id)
subq = select(UserDict.filename, UserDict.key, db.func.max(UserDict.indexno).label('indexno'))
if len(subq_filter_elements) > 0:
subq = subq.where(and_(*subq_filter_elements))
subq = subq.group_by(UserDict.filename, UserDict.key).subquery()
interview_query = select(*query_elements).select_from(subq.join(UserDict, subq.c.indexno == UserDict.indexno).join(UserDictKeys, and_(UserDict.filename == UserDictKeys.filename, UserDict.key == UserDictKeys.key)).outerjoin(UserModel, and_(UserDictKeys.user_id == UserModel.id, UserModel.active == True))) # noqa: E712 # pylint: disable=singleton-comparison
if query is not None:
interview_query = interview_query.where(the_query)
interview_query = interview_query.order_by(UserDict.indexno)
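# Fetch one row more than the page size so it is possible to tell whether another page follows.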
interview_query = interview_query.limit(PAGINATION_LIMIT_PLUS_ONE)
stored_info = []
results_in_query = 0
for interview_info in db.session.execute(interview_query):
results_in_query += 1
if results_in_query == PAGINATION_LIMIT_PLUS_ONE:
there_are_more = True
break
# logmessage("filename is " + str(interview_info.filename) + " " + str(interview_info.key))
if session is not None and interview_info.key != session:
continue
if include_dict and interview_info.dictionary is None:
continue
if include_dict:
stored_info.append({'filename': interview_info.filename,
'encrypted': interview_info.encrypted,
'dictionary': interview_info.dictionary,
'key': interview_info.key,
'email': interview_info.email,
'user_id': interview_info.user_id,
'temp_user_id': interview_info.temp_user_id,
'indexno': interview_info.indexno})
else:
stored_info.append({'filename': interview_info.filename,
'modtime': interview_info.modtime,
'key': interview_info.key,
'email': interview_info.email,
'user_id': interview_info.user_id,
'temp_user_id': interview_info.temp_user_id,
'indexno': interview_info.indexno})
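# Second pass: decode titles and dictionaries and build the output entries, advancing
# start_id past each row consumed so a later call can resume where this page ended.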
for interview_info in stored_info:
if interviews_length == PAGINATION_LIMIT:
there_are_more = True
break
start_id = interview_info['indexno']
if minimal:
interviews.append({'filename': interview_info['filename'], 'session': interview_info['key'], 'user_id': interview_info['user_id'], 'temp_user_id': interview_info['temp_user_id']})
interviews_length += 1
continue
interview_title = {}
is_valid = True
interview_valid = True
try:
interview = docassemble.base.interview_cache.get_interview(interview_info['filename'])
except:
if exclude_invalid:
continue
logmessage("user_interviews: unable to load interview file " + interview_info['filename'])
interview_title['full'] = word('Error: interview not found')
interview_valid = False
is_valid = False
# logmessage("Found old interview with title " + interview_title)
if include_dict:
if interview_info['encrypted']:
try:
dictionary = decrypt_dictionary(interview_info['dictionary'], secret)
except Exception as the_err:
if exclude_invalid:
continue
try:
logmessage("user_interviews: unable to decrypt dictionary. " + str(the_err.__class__.__name__) + ": " + str(the_err))
except:
logmessage("user_interviews: unable to decrypt dictionary. " + str(the_err.__class__.__name__))
dictionary = fresh_dictionary()
dictionary['_internal']['starttime'] = None
dictionary['_internal']['modtime'] = None
is_valid = False
else:
try:
dictionary = unpack_dictionary(interview_info['dictionary'])
except Exception as the_err:
if exclude_invalid:
continue
try:
logmessage("user_interviews: unable to unpack dictionary. " + str(the_err.__class__.__name__) + ": " + str(the_err))
except:
logmessage("user_interviews: unable to unpack dictionary. " + str(the_err.__class__.__name__))
dictionary = fresh_dictionary()
dictionary['_internal']['starttime'] = None
dictionary['_internal']['modtime'] = None
is_valid = False
if not isinstance(dictionary, dict):
logmessage("user_interviews: found a dictionary that was not a dictionary")
continue
if is_valid:
if include_dict:
interview_title = interview.get_title(dictionary)
tags = interview.get_tags(dictionary)
else:
interview_title = interview.get_title({'_internal': {}})
tags = interview.get_tags({'_internal': {}})
metadata = copy.deepcopy(interview.consolidated_metadata)
elif interview_valid:
interview_title = interview.get_title({'_internal': {}})
metadata = copy.deepcopy(interview.consolidated_metadata)
if include_dict:
tags = interview.get_tags(dictionary)
if 'full' not in interview_title:
interview_title['full'] = word("Interview answers cannot be decrypted")
else:
interview_title['full'] += ' - ' + word('interview answers cannot be decrypted')
else:
tags = interview.get_tags({'_internal': {}})
if 'full' not in interview_title:
interview_title['full'] = word('Unknown')
else:
interview_title['full'] = word('Error: interview not found and answers could not be decrypted')
metadata = {}
tags = set()
if include_dict:
if dictionary['_internal']['starttime']:
utc_starttime = dictionary['_internal']['starttime']
starttime = nice_date_from_utc(dictionary['_internal']['starttime'], timezone=the_timezone)
else:
utc_starttime = None
starttime = ''
if dictionary['_internal']['modtime']:
utc_modtime = dictionary['_internal']['modtime']
modtime = nice_date_from_utc(dictionary['_internal']['modtime'], timezone=the_timezone)
else:
utc_modtime = None
modtime = ''
else:
utc_starttime = None
starttime = ''
utc_modtime = interview_info['modtime']
modtime = nice_date_from_utc(interview_info['modtime'], timezone=the_timezone)
if tag is not None and tag not in tags:
continue
out = {'filename': interview_info['filename'], 'session': interview_info['key'], 'modtime': modtime, 'starttime': starttime, 'utc_modtime': utc_modtime, 'utc_starttime': utc_starttime, 'title': interview_title.get('full', word('Untitled')), 'subtitle': interview_title.get('sub', None), 'valid': is_valid, 'metadata': metadata, 'tags': tags, 'email': interview_info['email'], 'user_id': interview_info['user_id'], 'temp_user_id': interview_info['temp_user_id']}
if include_dict:
out['dict'] = dictionary
out['encrypted'] = interview_info['encrypted']
interviews.append(out)
interviews_length += 1
if interviews_length == PAGINATION_LIMIT or results_in_query < PAGINATION_LIMIT_PLUS_ONE:
break
if there_are_more:
return (interviews, start_id)
return (interviews, None)
@app.route('/interviews', methods=['GET', 'POST'])
@login_required
def interview_list():
setup_translation()
form = InterviewsListForm(request.form)
is_json = bool(('json' in request.form and as_int(request.form['json'])) or ('json' in request.args and as_int(request.args['json'])))
if 'lang' in request.form:
session['language'] = request.form['lang']
docassemble.base.functions.set_language(session['language'])
tag = request.args.get('tag', None)
if request.method == 'POST':
tag = form.tags.data
if tag is not None:
tag = werkzeug.utils.secure_filename(tag)
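# If the encryption secret changed during login, reissue the 'secret' cookie with the new value and redirect so subsequent requests decrypt with it.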
if 'newsecret' in session:
# logmessage("interview_list: fixing cookie")
the_args = {}
if is_json:
the_args['json'] = '1'
if tag:
the_args['tag'] = tag
if 'from_login' in request.args:
the_args['from_login'] = request.args['from_login']
if 'post_restart' in request.args:
the_args['post_restart'] = request.args['post_restart']
if 'resume' in request.args:
the_args['resume'] = request.args['resume']
response = redirect(url_for('interview_list', **the_args))
response.set_cookie('secret', session['newsecret'], httponly=True, secure=app.config['SESSION_COOKIE_SECURE'], samesite=app.config['SESSION_COOKIE_SAMESITE'])
del session['newsecret']
return response
if request.method == 'GET' and needs_to_change_password():
return redirect(url_for('user.change_password', next=url_for('interview_list')))
secret = request.cookies.get('secret', None)
if secret is not None:
secret = str(secret)
# logmessage("interview_list: secret is " + repr(secret))
if request.method == 'POST':
if form.delete_all.data:
num_deleted = user_interviews(user_id=current_user.id, secret=secret, action='delete_all', tag=tag)
if num_deleted > 0:
flash(word("Deleted interviews"), 'success')
if is_json:
return redirect(url_for('interview_list', json='1'))
return redirect(url_for('interview_list'))
if form.delete.data:
yaml_file = form.i.data
session_id = form.session.data
if yaml_file is not None and session_id is not None:
user_interviews(user_id=current_user.id, secret=secret, action='delete', session=session_id, filename=yaml_file)
flash(word("Deleted interview"), 'success')
if is_json:
return redirect(url_for('interview_list', json='1'))
return redirect(url_for('interview_list'))
# if daconfig.get('resume interview after login', False) and 'i' in session and 'uid' in session and (request.args.get('from_login', False) or (re.search(r'user/(register|sign-in)', str(request.referrer)) and 'next=' not in str(request.referrer))):
# if is_json:
# return redirect(url_for('index', i=session['i'], json='1'))
# else:
# return redirect(url_for('index', i=session['i']))
if request.args.get('from_login', False) or (re.search(r'user/(register|sign-in)', str(request.referrer)) and 'next=' not in str(request.referrer)):
next_page = app.user_manager.make_safe_url_function(request.args.get('next', page_after_login()))
if next_page is None:
logmessage("Invalid page " + str(next_page))
next_page = 'interview_list'
if next_page not in ('interview_list', 'interviews'):
return redirect(get_url_from_file_reference(next_page))
if daconfig.get('session list interview', None) is not None:
if is_json:
return redirect(url_for('index', i=daconfig.get('session list interview'), from_list='1', json='1'))
return redirect(url_for('index', i=daconfig.get('session list interview'), from_list='1'))
exclude_invalid = not current_user.has_role('admin', 'developer')
resume_interview = request.args.get('resume', None)
if resume_interview is None and daconfig.get('auto resume interview', None) is not None and (request.args.get('from_login', False) or (re.search(r'user/(register|sign-in)', str(request.referrer)) and 'next=' not in str(request.referrer))):
resume_interview = daconfig['auto resume interview']
device_id = request.cookies.get('ds', None)
if device_id is None:
device_id = random_string(16)
the_current_info = current_info(yaml=None, req=request, interface='web', session_info=None, secret=secret, device_id=device_id)
docassemble.base.functions.this_thread.current_info = the_current_info
if resume_interview is not None:
(interviews, start_id) = user_interviews(user_id=current_user.id, secret=secret, exclude_invalid=True, filename=resume_interview, include_dict=True)
if len(interviews) > 0:
return redirect(url_for('index', i=interviews[0]['filename'], session=interviews[0]['session'], from_list='1'))
return redirect(url_for('index', i=resume_interview, from_list='1'))
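# next_id is an opaque cursor (a safeid-encoded indexno) after which the next page of sessions begins.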
next_id_code = request.args.get('next_id', None)
if next_id_code:
try:
start_id = int(from_safeid(next_id_code))
assert start_id >= 0
show_back = True
except:
start_id = None
show_back = False
else:
start_id = None
show_back = False
result = user_interviews(user_id=current_user.id, secret=secret, exclude_invalid=exclude_invalid, tag=tag, start_id=start_id)
if result is None:
raise DAException("interview_list: could not obtain list of interviews")
(interviews, start_id) = result
if start_id is None:
next_id = None
else:
next_id = safeid(str(start_id))
if is_json:
for interview in interviews:
if 'dict' in interview:
del interview['dict']
if 'tags' in interview:
interview['tags'] = sorted(interview['tags'])
return jsonify(action="interviews", interviews=interviews, next_id=next_id)
script = """
<script>
$(".dadeletebutton").on('click', function(event){
console.log("Doing click");
var yamlFilename = $("<input>")
.attr("type", "hidden")
.attr("name", "i").val($(this).data('i'));
$("#daform").append($(yamlFilename));
var session = $("<input>")
.attr("type", "hidden")
.attr("name", "session").val($(this).data('session'));
$("#daform").append($(session));
return true;
});
$("#delete_all").on('click', function(event){
if (confirm(""" + json.dumps(word("Are you sure you want to delete all saved interviews?")) + """)){
return true;
}
event.preventDefault();
return false;
});
</script>"""
if re.search(r'user/register', str(request.referrer)) and len(interviews) == 1:
return redirect(url_for('index', i=interviews[0]['filename'], session=interviews[0]['session'], from_list=1))
tags_used = set()
for interview in interviews:
for the_tag in interview['tags']:
if the_tag != tag:
tags_used.add(the_tag)
# interview_page_title = word(daconfig.get('interview page title', 'Interviews'))
# title = word(daconfig.get('interview page heading', 'Resume an interview'))
argu = {'version_warning': version_warning, 'tags_used': sorted(tags_used) if len(tags_used) > 0 else None, 'numinterviews': len([y for y in interviews if not y['metadata'].get('hidden', False)]), 'interviews': sorted(interviews, key=valid_date_key), 'tag': tag, 'next_id': next_id, 'show_back': show_back, 'form': form, 'page_js': Markup(script)}
if 'interview page template' in daconfig and daconfig['interview page template']:
the_page = docassemble.base.functions.package_template_filename(daconfig['interview page template'])
if the_page is None:
raise DAError("Could not find start page template " + daconfig['start page template'])
with open(the_page, 'r', encoding='utf-8') as fp:
template_string = fp.read()
response = make_response(render_template_string(template_string, **argu), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
else:
response = make_response(render_template('pages/interviews.html', **argu), 200)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
def valid_date_key(x):
if x['dict']['_internal']['starttime'] is None:
return datetime.datetime.now()
return x['dict']['_internal']['starttime']
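# When a login or password change yields a new encryption secret, re-encrypt the user's
# stored answers under it and stash the new secret in the session so the 'secret' cookie
# can be updated on the next page load.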
def fix_secret(user=None, to_convert=None):
# logmessage("fix_secret starting")
if user is None:
user = current_user
password = request.form.get('password', request.form.get('new_password', None))
if password is not None:
secret = str(request.cookies.get('secret', None))
newsecret = pad_to_16(MD5Hash(data=password).hexdigest())
if secret == 'None' or secret != newsecret:
# logmessage("fix_secret: calling substitute_secret with " + str(secret) + ' and ' + str(newsecret))
# logmessage("fix_secret: setting newsecret session")
session['newsecret'] = substitute_secret(str(secret), newsecret, user=user, to_convert=to_convert)
# else:
# logmessage("fix_secret: secrets are the same")
else:
logmessage("fix_secret: password not in request")
def login_or_register(sender, user, source, **extra): # pylint: disable=unused-argument
# logmessage("login or register!")
if 'i' in session: # TEMPORARY
get_session(session['i'])
to_convert = []
if 'tempuser' in session:
to_convert.extend(sub_temp_user_dict_key(session['tempuser'], user.id))
if 'sessions' in session:
for filename, info in session['sessions'].items():
if (filename, info['uid']) not in to_convert:
to_convert.append((filename, info['uid']))
save_user_dict_key(info['uid'], filename, priors=True, user=user)
update_session(filename, key_logged=True)
fix_secret(user=user, to_convert=to_convert)
sub_temp_other(user)
if not (source == 'register' and daconfig.get('confirm registration', False)):
session['user_id'] = user.id
if user.language:
session['language'] = user.language
docassemble.base.functions.set_language(user.language)
def update_last_login(user):
user.last_login = datetime.datetime.utcnow()
db.session.commit()
@user_logged_in.connect_via(app)
def _on_user_login(sender, user, **extra):
# logmessage("on user login")
update_last_login(user)
login_or_register(sender, user, 'login', **extra)
# flash(word('You have signed in successfully.'), 'success')
@user_changed_password.connect_via(app)
def _on_password_change(sender, user, **extra): # pylint: disable=unused-argument
# logmessage("on password change")
fix_secret(user=user)
# @user_reset_password.connect_via(app)
# def _on_password_reset(sender, user, **extra):
# # logmessage("on password reset")
# fix_secret(user=user)
@user_registered.connect_via(app)
def on_register_hook(sender, user, **extra):
# why did I not just import it globally?
# from docassemble.webapp.users.models import Role
user_invite = extra.get('user_invite', None)
this_user_role = None
if user_invite is not None:
this_user_role = db.session.execute(select(Role).filter_by(id=user_invite.role_id)).scalar()
if this_user_role is None:
this_user_role = db.session.execute(select(Role).filter_by(name='user')).scalar()
roles_to_remove = []
for role in user.roles:
roles_to_remove.append(role)
for role in roles_to_remove:
user.roles.remove(role)
user.roles.append(this_user_role)
db.session.commit()
update_last_login(user)
login_or_register(sender, user, 'register', **extra)
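# Twilio posts fax status updates here; the callback parameters are cached in Redis under the fax SID for 24 hours so the status can be looked up later.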
@app.route("/fax_callback", methods=['POST'])
@csrf.exempt
def fax_callback():
if twilio_config is None:
logmessage("fax_callback: Twilio not enabled")
return ('', 204)
post_data = request.form.copy()
if 'FaxSid' not in post_data or 'AccountSid' not in post_data:
logmessage("fax_callback: FaxSid and/or AccountSid missing")
return ('', 204)
tconfig = None
for config_name, config_info in twilio_config['name'].items(): # pylint: disable=unused-variable
if 'account sid' in config_info and config_info['account sid'] == post_data['AccountSid']:
tconfig = config_info
if tconfig is None:
logmessage("fax_callback: account sid of fax callback did not match any account sid in the Twilio configuration")
if 'fax' not in tconfig or tconfig['fax'] in (False, None):
logmessage("fax_callback: fax feature not enabled")
return ('', 204)
params = {}
for param in ('FaxSid', 'From', 'To', 'RemoteStationId', 'FaxStatus', 'ApiVersion', 'OriginalMediaUrl', 'NumPages', 'MediaUrl', 'ErrorCode', 'ErrorMessage'):
params[param] = post_data.get(param, None)
the_key = 'da:faxcallback:sid:' + post_data['FaxSid']
pipe = r.pipeline()
pipe.set(the_key, json.dumps(params))
pipe.expire(the_key, 86400)
pipe.execute()
return ('', 204)
@app.route("/clicksend_fax_callback", methods=['POST'])
@csrf.exempt
def clicksend_fax_callback():
if clicksend_config is None or fax_provider != 'clicksend':
logmessage("clicksend_fax_callback: Clicksend not enabled")
return ('', 204)
post_data = request.form.copy()
if 'message_id' not in post_data:
logmessage("clicksend_fax_callback: message_id missing")
return ('', 204)
the_key = 'da:faxcallback:sid:' + post_data['message_id']
the_json = r.get(the_key)
try:
params = json.loads(the_json)
except:
logmessage("clicksend_fax_callback: existing fax record could not be found")
return ('', 204)
for param in ('timestamp_send', 'timestamp', 'message_id', 'status', 'status_code', 'status_text', 'error_code', 'error_text', 'custom_string', 'user_id', 'subaccount_id', 'message_type'):
params[param] = post_data.get(param, None)
pipe = r.pipeline()
pipe.set(the_key, json.dumps(params))
pipe.expire(the_key, 86400)
pipe.execute()
return ('', 204)
@app.route("/telnyx_fax_callback", methods=['POST'])
@csrf.exempt
def telnyx_fax_callback():
if telnyx_config is None:
logmessage("telnyx_fax_callback: Telnyx not enabled")
return ('', 204)
data = request.get_json(silent=True)
try:
the_id = data['data']['payload']['fax_id']
except:
logmessage("telnyx_fax_callback: fax_id not found")
return ('', 204)
the_key = 'da:faxcallback:sid:' + str(the_id)
the_json = r.get(the_key)
try:
params = json.loads(the_json)
except:
logmessage("telnyx_fax_callback: existing fax record could not be found")
return ('', 204)
try:
params['status'] = data['data']['payload']['status']
if params['status'] == 'failed' and 'failure_reason' in data['data']['payload']:
params['status'] += ': ' + data['data']['payload']['failure_reason']
logmessage("telnyx_fax_callback: failure because " + data['data']['payload']['failure_reason'])
except:
logmessage("telnyx_fax_callback: could not find status")
try:
params['latest_update_time'] = data['data']['occurred_at']
except:
logmessage("telnyx_fax_callback: could not update latest_update_time")
if 'status' in params and params['status'] == 'delivered':
try:
params['page_count'] = data['data']['payload']['page_count']
except:
logmessage("telnyx_fax_callback: could not update page_count")
pipe = r.pipeline()
pipe.set(the_key, json.dumps(params))
pipe.expire(the_key, 86400)
pipe.execute()
return ('', 204)
@app.route("/voice", methods=['POST', 'GET'])
@csrf.exempt
def voice():
docassemble.base.functions.set_language(DEFAULT_LANGUAGE)
resp = twilio.twiml.voice_response.VoiceResponse()
if twilio_config is None:
logmessage("voice: ignoring call to voice because Twilio not enabled")
return Response(str(resp), mimetype='text/xml')
if 'voice' not in twilio_config['name']['default'] or twilio_config['name']['default']['voice'] in (False, None):
logmessage("voice: ignoring call to voice because voice feature not enabled")
return Response(str(resp), mimetype='text/xml')
if "AccountSid" not in request.form or request.form["AccountSid"] != twilio_config['name']['default'].get('account sid', None):
logmessage("voice: request to voice did not authenticate")
return Response(str(resp), mimetype='text/xml')
for item in request.form:
logmessage("voice: item " + str(item) + " is " + str(request.form[item]))
with resp.gather(action=url_for("digits_endpoint"), finishOnKey='#', method="POST", timeout=10, numDigits=5) as gg:
gg.say(word("Please enter the four digit code, followed by the pound sign."))
# twilio_config = daconfig.get('twilio', None)
# if twilio_config is None:
# logmessage("Could not get twilio configuration")
# return
# twilio_caller_id = twilio_config.get('number', None)
# if "To" in request.form and request.form["To"] != '':
# dial = resp.dial(callerId=twilio_caller_id)
# if phone_pattern.match(request.form["To"]):
# dial.number(request.form["To"])
# else:
# dial.client(request.form["To"])
# else:
# resp.say("Thanks for calling!")
return Response(str(resp), mimetype='text/xml')
@app.route("/digits", methods=['POST', 'GET'])
@csrf.exempt
def digits_endpoint():
docassemble.base.functions.set_language(DEFAULT_LANGUAGE)
resp = twilio.twiml.voice_response.VoiceResponse()
if twilio_config is None:
logmessage("digits: ignoring call to digits because Twilio not enabled")
return Response(str(resp), mimetype='text/xml')
if "AccountSid" not in request.form or request.form["AccountSid"] != twilio_config['name']['default'].get('account sid', None):
logmessage("digits: request to digits did not authenticate")
return Response(str(resp), mimetype='text/xml')
if "Digits" in request.form:
the_digits = re.sub(r'[^0-9]', '', request.form["Digits"])
logmessage("digits: got " + str(the_digits))
phone_number = r.get('da:callforward:' + str(the_digits))
if phone_number is None:
resp.say(word("I am sorry. The code you entered is invalid or expired. Goodbye."))
resp.hangup()
else:
phone_number = phone_number.decode()
resp.dial(number=phone_number)
r.delete('da:callforward:' + str(the_digits))
else:
logmessage("digits: no digits received")
resp.say(word("No access code was entered."))
resp.hangup()
return Response(str(resp), mimetype='text/xml')
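# Simulate an inbound SMS from phone_number, run it through do_sms without saving the
# session, and return the text of the first reply (or None if there is no reply).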
def sms_body(phone_number, body='question', config='default'):
if twilio_config is None:
raise DAError("sms_body: Twilio not enabled")
if config not in twilio_config['name']:
raise DAError("sms_body: specified config value, " + str(config) + ", not in Twilio configuration")
tconfig = twilio_config['name'][config]
if 'sms' not in tconfig or tconfig['sms'] in (False, None, 0):
raise DAError("sms_body: sms feature is not enabled in Twilio configuration")
if 'account sid' not in tconfig:
raise DAError("sms_body: account sid not in Twilio configuration")
if 'number' not in tconfig:
raise DAError("sms_body: phone number not in Twilio configuration")
if 'doing_sms' in session:
raise DAError("Cannot call sms_body from within sms_body")
form = {'To': tconfig['number'], 'From': phone_number, 'Body': body, 'AccountSid': tconfig['account sid']}
base_url = url_for('rootindex', _external=True)
url_root = base_url
tbackup = docassemble.base.functions.backup_thread_variables()
sbackup = backup_session()
session['doing_sms'] = True
resp = do_sms(form, base_url, url_root, save=False)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
if resp is None or len(resp.verbs) == 0 or len(resp.verbs[0].verbs) == 0:
return None
return resp.verbs[0].verbs[0].body
def favicon_file(filename, alt=None):
the_dir = docassemble.base.functions.package_data_filename(daconfig.get('favicon', 'docassemble.webapp:data/static/favicon'))
if the_dir is None or not os.path.isdir(the_dir):
logmessage("favicon_file: could not find favicon directory")
return ('File not found', 404)
the_file = os.path.join(the_dir, filename)
if not os.path.isfile(the_file):
if alt is not None:
the_file = os.path.join(the_dir, alt)
if not os.path.isfile(the_file):
return ('File not found', 404)
if filename == 'site.webmanifest':
mimetype = 'application/manifest+json'
else:
extension, mimetype = get_ext_and_mimetype(the_file) # pylint: disable=unused-variable
response = send_file(the_file, mimetype=mimetype, download_name=filename)
return response
def test_favicon_file(filename, alt=None):
the_dir = docassemble.base.functions.package_data_filename(daconfig.get('favicon', 'docassemble.webapp:data/static/favicon'))
if the_dir is None or not os.path.isdir(the_dir):
return False
the_file = os.path.join(the_dir, filename)
if not os.path.isfile(the_file):
if alt is not None:
the_file = os.path.join(the_dir, alt)
if not os.path.isfile(the_file):
return False
return True
@app.route("/favicon.ico", methods=['GET'])
def favicon():
return favicon_file('favicon.ico')
@app.route("/apple-touch-icon.png", methods=['GET'])
def apple_touch_icon():
return favicon_file('apple-touch-icon.png')
@app.route("/favicon-32x32.png", methods=['GET'])
def favicon_md():
return favicon_file('favicon-32x32.png')
@app.route("/favicon-16x16.png", methods=['GET'])
def favicon_sm():
return favicon_file('favicon-16x16.png')
@app.route("/site.webmanifest", methods=['GET'])
def favicon_site_webmanifest():
return favicon_file('site.webmanifest', alt='manifest.json')
@app.route("/manifest.json", methods=['GET'])
def favicon_manifest_json():
return favicon_file('manifest.json', alt='site.webmanifest')
@app.route("/safari-pinned-tab.svg", methods=['GET'])
def favicon_safari_pinned_tab():
return favicon_file('safari-pinned-tab.svg')
@app.route("/android-chrome-192x192.png", methods=['GET'])
def favicon_android_md():
return favicon_file('android-chrome-192x192.png')
@app.route("/android-chrome-512x512.png", methods=['GET'])
def favicon_android_lg():
return favicon_file('android-chrome-512x512.png')
@app.route("/mstile-150x150.png", methods=['GET'])
def favicon_mstile():
return favicon_file('mstile-150x150.png')
@app.route("/browserconfig.xml", methods=['GET'])
def favicon_browserconfig():
return favicon_file('browserconfig.xml')
@app.route("/robots.txt", methods=['GET'])
def robots():
if 'robots' not in daconfig and daconfig.get('allow robots', False):
response = make_response("User-agent: *\nDisallow:", 200)
response.mimetype = "text/plain"
return response
the_file = docassemble.base.functions.package_data_filename(daconfig.get('robots', 'docassemble.webapp:data/static/robots.txt'))
if the_file is None:
return ('File not found', 404)
if not os.path.isfile(the_file):
return ('File not found', 404)
response = send_file(the_file, mimetype='text/plain', download_name='robots.txt')
return response
@app.route("/sms", methods=['POST'])
@csrf.exempt
def sms():
# logmessage("Received: " + str(request.form))
form = request.form
base_url = url_for('rootindex', _external=True)
url_root = base_url
resp = do_sms(form, base_url, url_root)
return Response(str(resp), mimetype='text/xml')
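# do_sms drives the SMS interface to an interview. Per-caller session state (interview
# filename, session key, secret, temporary user) is pickled into Redis under a key built
# from the caller's and server's numbers; each incoming message either answers the pending
# question or is treated as a command (question, back, exit, ?, menuN, linkN).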
def do_sms(form, base_url, url_root, config='default', save=True):
docassemble.base.functions.set_language(DEFAULT_LANGUAGE)
resp = twilio.twiml.messaging_response.MessagingResponse()
special_messages = []
if twilio_config is None:
logmessage("do_sms: ignoring message to sms because Twilio not enabled")
return resp
if "AccountSid" not in form or form["AccountSid"] not in twilio_config['account sid']:
logmessage("do_sms: request to sms did not authenticate")
return resp
if "To" not in form:
logmessage("do_sms: request to sms ignored because phone number not provided")
return resp
if form["To"].startswith('whatsapp:'):
actual_number = re.sub(r'^whatsapp:', '', form["To"])
if actual_number not in twilio_config['whatsapp number']:
logmessage("do_sms: request to whatsapp ignored because recipient number " + str(form['To']) + " not in configuration")
return resp
tconfig = twilio_config['whatsapp number'][actual_number]
else:
if form["To"] not in twilio_config['number']:
logmessage("do_sms: request to sms ignored because recipient number " + str(form['To']) + " not in configuration")
return resp
tconfig = twilio_config['number'][form["To"]]
if 'sms' not in tconfig or tconfig['sms'] in (False, None, 0):
logmessage("do_sms: ignoring message to sms because SMS not enabled")
return resp
if "From" not in form or not re.search(r'[0-9]', form["From"]):
logmessage("do_sms: request to sms ignored because unable to determine caller ID")
return resp
if "Body" not in form:
logmessage("do_sms: request to sms ignored because message had no content")
return resp
inp = form['Body'].strip()
# logmessage("do_sms: received >" + inp + "<")
key = 'da:sms:client:' + form["From"] + ':server:' + tconfig['number']
action = None
action_performed = False
for try_num in (0, 1): # pylint: disable=unused-variable
sess_contents = r.get(key)
if sess_contents is None:
# logmessage("do_sms: received input '" + str(inp) + "' from new user")
yaml_filename = tconfig.get('default interview', default_yaml_filename)
if 'dispatch' in tconfig and isinstance(tconfig['dispatch'], dict):
if inp.lower() in tconfig['dispatch']:
yaml_filename = tconfig['dispatch'][inp.lower()]
# logmessage("do_sms: using interview from dispatch: " + str(yaml_filename))
if yaml_filename is None:
# logmessage("do_sms: request to sms ignored because no interview could be determined")
return resp
if (not DEBUG) and (yaml_filename.startswith('docassemble.base') or yaml_filename.startswith('docassemble.demo')):
raise DAException("do_sms: not authorized to run interviews in docassemble.base or docassemble.demo")
secret = random_string(16)
uid = get_unique_name(yaml_filename, secret)
new_temp_user = TempUser()
db.session.add(new_temp_user)
db.session.commit()
sess_info = {'yaml_filename': yaml_filename, 'uid': uid, 'secret': secret, 'number': form["From"], 'encrypted': True, 'tempuser': new_temp_user.id, 'user_id': None, 'session_uid': random_string(10)}
r.set(key, pickle.dumps(sess_info))
accepting_input = False
else:
try:
sess_info = fix_pickle_obj(sess_contents)
except:
logmessage("do_sms: unable to decode session information")
return resp
accepting_input = True
if 'session_uid' not in sess_info:
sess_info['session_uid'] = random_string(10)
if inp.lower() in (word('exit'), word('quit')):
logmessage("do_sms: exiting")
if save:
reset_user_dict(sess_info['uid'], sess_info['yaml_filename'], temp_user_id=sess_info['tempuser'])
r.delete(key)
return resp
user = None
if sess_info['user_id'] is not None:
user = load_user(sess_info['user_id'])
if user is None:
ci = {'user': {'is_anonymous': True, 'is_authenticated': False, 'email': None, 'theid': sess_info['tempuser'], 'the_user_id': 't' + str(sess_info['tempuser']), 'roles': ['user'], 'firstname': 'SMS', 'lastname': 'User', 'nickname': None, 'country': None, 'subdivisionfirst': None, 'subdivisionsecond': None, 'subdivisionthird': None, 'organization': None, 'timezone': None, 'location': None, 'session_uid': sess_info['session_uid'], 'device_id': form["From"]}, 'session': sess_info['uid'], 'secret': sess_info['secret'], 'yaml_filename': sess_info['yaml_filename'], 'interface': 'sms', 'url': base_url, 'url_root': url_root, 'encrypted': sess_info['encrypted'], 'headers': {}, 'clientip': None, 'method': None, 'skip': {}, 'sms_sender': form["From"]}
else:
ci = {'user': {'is_anonymous': False, 'is_authenticated': True, 'email': user.email, 'theid': user.id, 'the_user_id': user.id, 'roles': user.roles, 'firstname': user.first_name, 'lastname': user.last_name, 'nickname': user.nickname, 'country': user.country, 'subdivisionfirst': user.subdivisionfirst, 'subdivisionsecond': user.subdivisionsecond, 'subdivisionthird': user.subdivisionthird, 'organization': user.organization, 'timezone': user.timezone, 'location': None, 'session_uid': sess_info['session_uid'], 'device_id': form["From"]}, 'session': sess_info['uid'], 'secret': sess_info['secret'], 'yaml_filename': sess_info['yaml_filename'], 'interface': 'sms', 'url': base_url, 'url_root': url_root, 'encrypted': sess_info['encrypted'], 'headers': {}, 'clientip': None, 'method': None, 'skip': {}}
if action is not None:
logmessage("do_sms: setting action to " + str(action))
ci.update(action)
docassemble.base.functions.this_thread.current_info = ci
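# Lock the session and load the answer dictionary; if it has disappeared, clear the cached session info and start over.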
obtain_lock(sess_info['uid'], sess_info['yaml_filename'])
steps, user_dict, is_encrypted = fetch_user_dict(sess_info['uid'], sess_info['yaml_filename'], secret=sess_info['secret'])
if user_dict is None:
r.delete(key)
continue
break
encrypted = sess_info['encrypted']
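# Keep the stored answers' encryption in sync with multi_user: multi-user sessions are stored unencrypted, single-user sessions encrypted.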
while True:
if user_dict.get('multi_user', False) is True and encrypted is True:
encrypted = False
update_session(sess_info['yaml_filename'], encrypted=encrypted, uid=sess_info['uid'])
is_encrypted = encrypted
r.set(key, pickle.dumps(sess_info))
if save:
decrypt_session(sess_info['secret'], user_code=sess_info['uid'], filename=sess_info['yaml_filename'])
if user_dict.get('multi_user', False) is False and encrypted is False:
encrypted = True
update_session(sess_info['yaml_filename'], encrypted=encrypted, uid=sess_info['uid'])
is_encrypted = encrypted
r.set(key, pickle.dumps(sess_info))
if save:
encrypt_session(sess_info['secret'], user_code=sess_info['uid'], filename=sess_info['yaml_filename'])
interview = docassemble.base.interview_cache.get_interview(sess_info['yaml_filename'])
if 'skip' not in user_dict['_internal']:
user_dict['_internal']['skip'] = {}
# if 'smsgather' in user_dict['_internal']:
# # logmessage("do_sms: need to gather smsgather " + user_dict['_internal']['smsgather'])
# sms_variable = user_dict['_internal']['smsgather']
# else:
# sms_variable = None
# if action is not None:
# action_manual = True
# else:
# action_manual = False
ci['encrypted'] = is_encrypted
interview_status = docassemble.base.parse.InterviewStatus(current_info=ci)
interview.assemble(user_dict, interview_status)
logmessage("do_sms: back from assemble 1; had been seeking variable " + str(interview_status.sought))
logmessage("do_sms: question is " + interview_status.question.name)
if action is not None:
logmessage('do_sms: question is now ' + interview_status.question.name + ' because action')
sess_info['question'] = interview_status.question.name
r.set(key, pickle.dumps(sess_info))
elif 'question' in sess_info and sess_info['question'] != interview_status.question.name:
if inp not in (word('?'), word('back'), word('question'), word('exit')):
logmessage("do_sms: blanking the input because question changed from " + str(sess_info['question']) + " to " + str(interview_status.question.name))
sess_info['question'] = interview_status.question.name
inp = 'question'
r.set(key, pickle.dumps(sess_info))
# logmessage("do_sms: inp is " + inp.lower() + " and steps is " + str(steps) + " and can go back is " + str(interview_status.can_go_back))
m = re.search(r'^(' + word('menu') + '|' + word('link') + ')([0-9]+)', inp.lower())
if m:
# logmessage("Got " + inp)
arguments = {}
selection_type = m.group(1)
selection_number = int(m.group(2)) - 1
links = []
menu_items = []
sms_info = as_sms(interview_status, user_dict, links=links, menu_items=menu_items)
target_url = None
if selection_type == word('menu') and selection_number < len(menu_items):
(target_url, label) = menu_items[selection_number] # pylint: disable=unused-variable
if selection_type == word('link') and selection_number < len(links):
(target_url, label) = links[selection_number] # pylint: disable=unused-variable
if target_url is not None:
uri_params = re.sub(r'^[\?]*\?', r'', target_url)
for statement in re.split(r'&', uri_params):
parts = re.split(r'=', statement)
arguments[parts[0]] = parts[1]
if 'action' in arguments:
# logmessage(myb64unquote(urllibunquote(arguments['action'])))
action = json.loads(myb64unquote(urllibunquote(arguments['action'])))
# logmessage("Action is " + str(action))
action_performed = True
accepting_input = False
inp = ''
continue
break
if inp.lower() == word('back'):
if 'skip' in user_dict['_internal'] and len(user_dict['_internal']['skip']):
max_entry = -1
for the_entry in user_dict['_internal']['skip'].keys():
if the_entry > max_entry:
max_entry = the_entry
if max_entry in user_dict['_internal']['skip']:
del user_dict['_internal']['skip'][max_entry]
if 'command_cache' in user_dict['_internal'] and max_entry in user_dict['_internal']['command_cache']:
del user_dict['_internal']['command_cache'][max_entry]
save_user_dict(sess_info['uid'], user_dict, sess_info['yaml_filename'], secret=sess_info['secret'], encrypt=encrypted, changed=False, steps=steps)
accepting_input = False
inp = ''
continue
if steps > 1 and interview_status.can_go_back:
steps, user_dict, is_encrypted = fetch_previous_user_dict(sess_info['uid'], sess_info['yaml_filename'], secret=sess_info['secret'])
ci['encrypted'] = is_encrypted
if 'question' in sess_info:
del sess_info['question']
r.set(key, pickle.dumps(sess_info))
accepting_input = False
inp = ''
continue
break
break
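# Localized words accepted as negative and affirmative answers.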
false_list = [word('no'), word('n'), word('false'), word('f')]
true_list = [word('yes'), word('y'), word('true'), word('t')]
inp_lower = inp.lower()
skip_it = False
changed = False
if accepting_input:
if inp_lower == word('?'):
sms_info = as_sms(interview_status, user_dict)
message = ''
if sms_info['help'] is None:
message += word('Sorry, no help is available for this question.')
else:
message += sms_info['help']
message += "\n" + word("To read the question again, type question.")
resp.message(message)
release_lock(sess_info['uid'], sess_info['yaml_filename'])
return resp
if inp_lower == word('question'):
accepting_input = False
user_entered_skip = bool(inp_lower == word('skip'))
if accepting_input:
saveas = None
uses_util = False
uncheck_others = False
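# For 'fields' questions, answers are collected one field per message; fields already
# answered are recorded in user_dict['_internal']['skip'] and their assignments are held
# in the command cache until the whole question has been answered.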
if len(interview_status.question.fields) > 0:
question = interview_status.question
if question.question_type == "fields":
field = None
next_field = None
for the_field in interview_status.get_field_list():
if hasattr(the_field, 'datatype') and the_field.datatype in ('html', 'note', 'script', 'css'):
continue
if interview_status.is_empty_mc(the_field):
continue
if the_field.number in user_dict['_internal']['skip']:
continue
if field is None:
field = the_field
elif next_field is None:
next_field = the_field
else:
break
if field is None:
logmessage("do_sms: unclear what field is necessary!")
# if 'smsgather' in user_dict['_internal']:
# del user_dict['_internal']['smsgather']
field = interview_status.question.fields[0]
next_field = None
saveas = myb64unquote(field.saveas)
else:
if hasattr(interview_status.question.fields[0], 'saveas'):
saveas = myb64unquote(interview_status.question.fields[0].saveas)
logmessage("do_sms: variable to set is " + str(saveas))
else:
saveas = None
field = interview_status.question.fields[0]
next_field = None
if question.question_type == "settrue":
if inp_lower == word('ok'):
data = 'True'
else:
data = None
elif question.question_type == 'signature':
filename = 'canvas.png'
extension = 'png'
mimetype = 'image/png'
temp_image_file = tempfile.NamedTemporaryFile(suffix='.' + extension)
image = Image.new("RGBA", (200, 50))
image.save(temp_image_file.name, 'PNG')
(file_number, extension, mimetype) = save_numbered_file(filename, temp_image_file.name, yaml_file_name=sess_info['yaml_filename'], uid=sess_info['uid'])
saveas_tr = sub_indices(saveas, user_dict)
if inp_lower == word('x'):
the_string = saveas + " = docassemble.base.util.DAFile('" + saveas_tr + "', filename='" + str(filename) + "', number=" + str(file_number) + ", mimetype='" + str(mimetype) + "', extension='" + str(extension) + "')"
try:
exec('import docassemble.base.util', user_dict)
exec(the_string, user_dict)
if not changed:
steps += 1
user_dict['_internal']['steps'] = steps
changed = True
except Exception as errMess:
logmessage("do_sms: error: " + str(errMess))
special_messages.append(word("Error") + ": " + str(errMess))
skip_it = True
data = repr('')
else:
data = None
elif hasattr(field, 'datatype') and field.datatype in ("ml", "mlarea"):
try:
exec("import docassemble.base.util", user_dict)
except Exception as errMess:
special_messages.append("Error: " + str(errMess))
if 'ml_train' in interview_status.extras and field.number in interview_status.extras['ml_train']:
if not interview_status.extras['ml_train'][field.number]:
use_for_training = 'False'
else:
use_for_training = 'True'
else:
use_for_training = 'True'
if 'ml_group' in interview_status.extras and field.number in interview_status.extras['ml_group']:
data = 'docassemble.base.util.DAModel(' + repr(saveas) + ', group_id=' + repr(interview_status.extras['ml_group'][field.number]) + ', text=' + repr(inp) + ', store=' + repr(interview.get_ml_store()) + ', use_for_training=' + use_for_training + ')'
else:
data = 'docassemble.base.util.DAModel(' + repr(saveas) + ', text=' + repr(inp) + ', store=' + repr(interview.get_ml_store()) + ', use_for_training=' + use_for_training + ')'
elif hasattr(field, 'datatype') and field.datatype in ("file", "files", "camera", "user", "environment", "camcorder", "microphone"):
if user_entered_skip and not interview_status.extras['required'][field.number]:
skip_it = True
data = repr('')
elif user_entered_skip:
data = None
special_messages.append(word("You must attach a file."))
else:
files_to_process = []
num_media = int(form.get('NumMedia', '0'))
fileindex = 0
while True:
if field.datatype == "file" and fileindex > 0:
break
if fileindex >= num_media or 'MediaUrl' + str(fileindex) not in form:
break
# logmessage("mime type is" + form.get('MediaContentType' + str(fileindex), 'Unknown'))
mimetype = form.get('MediaContentType' + str(fileindex), 'image/jpeg')
extension = re.sub(r'\.', r'', mimetypes.guess_extension(mimetype))
if extension == 'jpe':
extension = 'jpg'
# original_extension = extension
# if extension == 'gif':
# extension == 'png'
# mimetype = 'image/png'
filename = 'file' + '.' + extension
file_number = get_new_file_number(sess_info['uid'], filename, yaml_file_name=sess_info['yaml_filename'])
saved_file = SavedFile(file_number, extension=extension, fix=True, should_not_exist=True)
the_url = form['MediaUrl' + str(fileindex)]
# logmessage("Fetching from >" + the_url + "<")
saved_file.fetch_url(the_url)
process_file(saved_file, saved_file.path, mimetype, extension)
files_to_process.append((filename, file_number, mimetype, extension))
fileindex += 1
if len(files_to_process) > 0:
elements = []
indexno = 0
saveas_tr = sub_indices(saveas, user_dict)
for (filename, file_number, mimetype, extension) in files_to_process:
elements.append("docassemble.base.util.DAFile(" + repr(saveas_tr + "[" + str(indexno) + "]") + ", filename=" + repr(filename) + ", number=" + str(file_number) + ", mimetype=" + repr(mimetype) + ", extension=" + repr(extension) + ")")
indexno += 1
the_string = saveas + " = docassemble.base.util.DAFileList(" + repr(saveas_tr) + ", elements=[" + ", ".join(elements) + "])"
try:
exec('import docassemble.base.util', user_dict)
exec(the_string, user_dict)
if not changed:
steps += 1
user_dict['_internal']['steps'] = steps
changed = True
except Exception as errMess:
logmessage("do_sms: error: " + str(errMess))
special_messages.append(word("Error") + ": " + str(errMess))
skip_it = True
data = repr('')
else:
data = None
if interview_status.extras['required'][field.number]:
special_messages.append(word("You must attach a file."))
elif question.question_type == "yesno" or (hasattr(field, 'datatype') and (hasattr(field, 'datatype') and field.datatype == 'boolean' and (hasattr(field, 'sign') and field.sign > 0))):
if inp_lower in true_list:
data = 'True'
if question.question_type == "fields" and hasattr(field, 'uncheckothers') and field.uncheckothers is not False:
uncheck_others = field
elif inp_lower in false_list:
data = 'False'
else:
data = None
elif question.question_type == "yesnomaybe" or (hasattr(field, 'datatype') and (field.datatype == 'threestate' and (hasattr(field, 'sign') and field.sign > 0))):
if inp_lower in true_list:
data = 'True'
if question.question_type == "fields" and hasattr(field, 'uncheckothers') and field.uncheckothers is not False:
uncheck_others = field
elif inp_lower in false_list:
data = 'False'
else:
data = 'None'
elif question.question_type == "noyes" or (hasattr(field, 'datatype') and (field.datatype in ('noyes', 'noyeswide') or (field.datatype == 'boolean' and (hasattr(field, 'sign') and field.sign < 0)))):
if inp_lower in true_list:
data = 'False'
elif inp_lower in false_list:
data = 'True'
if question.question_type == "fields" and hasattr(field, 'uncheckothers') and field.uncheckothers is not False:
uncheck_others = field
else:
data = None
elif question.question_type in ('noyesmaybe', 'noyeswidemaybe') or (hasattr(field, 'datatype') and field.datatype == 'threestate' and (hasattr(field, 'sign') and field.sign < 0)):
if inp_lower in true_list:
data = 'False'
elif inp_lower in false_list:
data = 'True'
if question.question_type == "fields" and hasattr(field, 'uncheckothers') and field.uncheckothers is not False:
uncheck_others = field
else:
data = 'None'
elif question.question_type == 'multiple_choice' or hasattr(field, 'choicetype') or (hasattr(field, 'datatype') and field.datatype in ('object', 'object_radio', 'multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes')) or (hasattr(field, 'inputtype') and field.inputtype == 'radio'):
cdata, choice_list = get_choices_with_abb(interview_status, field, user_dict)
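# cdata['abblower'] maps the lower-cased abbreviations shown to the user onto choice values; replies are matched by prefix against them.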
data = None
if hasattr(field, 'datatype') and field.datatype in ('multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes') and saveas is not None:
if 'command_cache' not in user_dict['_internal']:
user_dict['_internal']['command_cache'] = {}
if field.number not in user_dict['_internal']['command_cache']:
user_dict['_internal']['command_cache'][field.number] = []
docassemble.base.parse.ensure_object_exists(saveas, field.datatype, user_dict, commands=user_dict['_internal']['command_cache'][field.number])
saveas = saveas + '.gathered'
data = 'True'
if (user_entered_skip or (inp_lower == word('none') and hasattr(field, 'datatype') and field.datatype in ('multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes'))) and ((hasattr(field, 'disableothers') and field.disableothers) or (hasattr(field, 'datatype') and field.datatype in ('multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes')) or not (interview_status.extras['required'][field.number] or (question.question_type == 'multiple_choice' and hasattr(field, 'saveas')))):
logmessage("do_sms: skip accepted")
# user typed 'skip,' or, where checkboxes, 'none.' Also:
# field is skippable, either because it has disableothers, or it is a checkbox field, or
# it is not required. Multiple choice fields with saveas are considered required.
if hasattr(field, 'datatype'):
if field.datatype in ('object', 'object_radio'):
skip_it = True
data = repr('')
if field.datatype in ('multiselect', 'object_multiselect', 'checkboxes', 'object_checkboxes'):
for choice in choice_list:
if choice[1] is None:
continue
user_dict['_internal']['command_cache'][field.number].append(choice[1] + ' = False')
elif (question.question_type == 'multiple_choice' and hasattr(field, 'saveas')) or hasattr(field, 'choicetype'):
if user_entered_skip:
skip_it = True
data = repr('')
else:
logmessage("do_sms: setting skip_it to True")
skip_it = True
data = repr('')
elif field.datatype == 'integer':
data = '0'
elif field.datatype in ('number', 'float', 'currency', 'range'):
data = '0.0'
else:
data = repr('')
else:
data = repr('')
else:
# There is a real value here
if hasattr(field, 'datatype') and field.datatype in ('object_multiselect', 'object_checkboxes'):
true_values = set()
for selection in re.split(r' *[,;] *', inp_lower):
for potential_abb, value in cdata['abblower'].items():
if selection and selection.startswith(potential_abb):
for choice in choice_list:
if value == choice[0]:
true_values.add(choice[2])
the_saveas = myb64unquote(field.saveas)
logmessage("do_sms: the_saveas is " + repr(the_saveas))
for choice in choice_list:
if choice[2] is None:
continue
if choice[2] in true_values:
logmessage("do_sms: " + choice[2] + " is in true_values")
the_string = 'if ' + choice[2] + ' not in ' + the_saveas + '.elements:\n ' + the_saveas + '.append(' + choice[2] + ')'
else:
the_string = 'if ' + choice[2] + ' in ' + the_saveas + '.elements:\n ' + the_saveas + '.remove(' + choice[2] + ')'
user_dict['_internal']['command_cache'][field.number].append(the_string)
elif hasattr(field, 'datatype') and field.datatype in ('multiselect', 'checkboxes'):
true_values = set()
for selection in re.split(r' *[,;] *', inp_lower):
for potential_abb, value in cdata['abblower'].items():
if selection and selection.startswith(potential_abb):
for choice in choice_list:
if value == choice[0]:
true_values.add(choice[1])
for choice in choice_list:
if choice[1] is None:
continue
if choice[1] in true_values:
the_string = choice[1] + ' = True'
else:
the_string = choice[1] + ' = False'
user_dict['_internal']['command_cache'][field.number].append(the_string)
else:
# regular multiple choice
# logmessage("do_sms: user selected " + inp_lower + " and data is " + str(cdata))
for potential_abb, value in cdata['abblower'].items():
if inp_lower.startswith(potential_abb):
# logmessage("do_sms: user selected " + value)
for choice in choice_list:
# logmessage("do_sms: considering " + choice[0])
if value == choice[0]:
# logmessage("do_sms: found a match")
saveas = choice[1]
if hasattr(field, 'datatype') and field.datatype in ('object', 'object_radio'):
data = choice[2]
else:
data = repr(choice[2])
break
break
elif hasattr(field, 'datatype') and field.datatype == 'integer':
if user_entered_skip and not interview_status.extras['required'][field.number]:
data = repr('')
skip_it = True
else:
data = re.sub(r'[^0-9\.\-]', '', inp)
try:
the_value = eval("int(" + repr(data) + ")")
data = "int(" + repr(data) + ")"
except:
special_messages.append('"' + inp + '" ' + word("is not a whole number."))
data = None
elif hasattr(field, 'datatype') and field.datatype in ('date', 'datetime'):
if user_entered_skip and not interview_status.extras['required'][field.number]:
data = repr('')
skip_it = True
else:
try:
dateutil.parser.parse(inp)
data = "docassemble.base.util.as_datetime(" + repr(inp) + ")"
uses_util = True
except Exception as the_err:
logmessage("do_sms: date validation error was " + str(the_err))
if field.datatype == 'date':
special_messages.append('"' + inp + '" ' + word("is not a valid date."))
else:
special_messages.append('"' + inp + '" ' + word("is not a valid date and time."))
data = None
elif hasattr(field, 'datatype') and field.datatype == 'time':
if user_entered_skip and not interview_status.extras['required'][field.number]:
data = repr('')
skip_it = True
else:
try:
dateutil.parser.parse(inp)
data = "docassemble.base.util.as_datetime(" + repr(inp) + ").time()"
uses_util = True
except Exception as the_err:
logmessage("do_sms: time validation error was " + str(the_err))
special_messages.append('"' + inp + '" ' + word("is not a valid time."))
data = None
elif hasattr(field, 'datatype') and field.datatype == 'range':
if user_entered_skip and not interview_status.extras['required'][field.number]:
data = repr('')
skip_it = True
else:
data = re.sub(r'[^0-9\-\.]', '', inp)
try:
the_value = eval("float(" + repr(data) + ")", user_dict)
if the_value > int(interview_status.extras['max'][field.number]) or the_value < int(interview_status.extras['min'][field.number]):
special_messages.append('"' + inp + '" ' + word("is not within the range."))
data = None
except:
data = None
elif hasattr(field, 'datatype') and field.datatype in ('number', 'float', 'currency'):
if user_entered_skip and not interview_status.extras['required'][field.number]:
data = '0.0'
skip_it = True
else:
data = re.sub(r'[^0-9\.\-]', '', inp)
try:
the_value = eval("float(" + json.dumps(data) + ")", user_dict)
data = "float(" + json.dumps(data) + ")"
except:
special_messages.append('"' + inp + '" ' + word("is not a valid number."))
data = None
else:
if user_entered_skip:
if interview_status.extras['required'][field.number]:
data = repr(inp)
else:
data = repr('')
skip_it = True
else:
data = repr(inp)
else:
data = None
if data is None:
logmessage("do_sms: could not process input: " + inp)
special_messages.append(word("I do not understand what you mean by") + ' "' + inp + '."')
else:
if uses_util:
exec("import docassemble.base.util", user_dict)
if uncheck_others:
for other_field in interview_status.get_field_list():
if hasattr(other_field, 'datatype') and other_field.datatype == 'boolean' and other_field is not uncheck_others and 'command_cache' in user_dict['_internal'] and other_field.number in user_dict['_internal']['command_cache']:
for command_index in range(len(user_dict['_internal']['command_cache'][other_field.number])):
if other_field.sign > 0:
user_dict['_internal']['command_cache'][other_field.number][command_index] = re.sub(r'= True$', '= False', user_dict['_internal']['command_cache'][other_field.number][command_index])
else:
user_dict['_internal']['command_cache'][other_field.number][command_index] = re.sub(r'= False$', '= True', user_dict['_internal']['command_cache'][other_field.number][command_index])
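# Apply the answer: if more fields remain, cache the assignment; otherwise run any cached
# assignments for earlier fields and then execute this field's assignment in the user's
# answer namespace (user_dict).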
the_string = saveas + ' = ' + data
try:
if not skip_it:
if hasattr(field, 'disableothers') and field.disableothers and hasattr(field, 'saveas'):
logmessage("do_sms: disabling others")
next_field = None
if next_field is not None:
if 'command_cache' not in user_dict['_internal']:
user_dict['_internal']['command_cache'] = {}
if field.number not in user_dict['_internal']['command_cache']:
user_dict['_internal']['command_cache'][field.number] = []
user_dict['_internal']['command_cache'][field.number].append(the_string)
logmessage("do_sms: storing in command cache: " + str(the_string))
else:
for the_field in interview_status.get_field_list():
if interview_status.is_empty_mc(the_field):
logmessage("do_sms: a field is empty")
the_saveas = myb64unquote(the_field.saveas)
if 'command_cache' not in user_dict['_internal']:
user_dict['_internal']['command_cache'] = {}
if the_field.number not in user_dict['_internal']['command_cache']:
user_dict['_internal']['command_cache'][the_field.number] = []
if hasattr(the_field, 'datatype'):
if the_field.datatype in ('object_multiselect', 'object_checkboxes'):
docassemble.base.parse.ensure_object_exists(the_saveas, the_field.datatype, user_dict, commands=user_dict['_internal']['command_cache'][the_field.number])
user_dict['_internal']['command_cache'][the_field.number].append(the_saveas + '.clear()')
user_dict['_internal']['command_cache'][the_field.number].append(the_saveas + '.gathered = True')
elif the_field.datatype in ('object', 'object_radio'):
try:
eval(the_saveas, user_dict)
except:
user_dict['_internal']['command_cache'][the_field.number].append(the_saveas + ' = None')
elif the_field.datatype in ('multiselect', 'checkboxes'):
docassemble.base.parse.ensure_object_exists(the_saveas, the_field.datatype, user_dict, commands=user_dict['_internal']['command_cache'][the_field.number])
user_dict['_internal']['command_cache'][the_field.number].append(the_saveas + '.gathered = True')
else:
user_dict['_internal']['command_cache'][the_field.number].append(the_saveas + ' = None')
else:
user_dict['_internal']['command_cache'][the_field.number].append(the_saveas + ' = None')
if 'command_cache' in user_dict['_internal']:
for field_num in sorted(user_dict['_internal']['command_cache'].keys()):
for pre_string in user_dict['_internal']['command_cache'][field_num]:
logmessage("do_sms: doing command cache: " + pre_string)
exec(pre_string, user_dict)
logmessage("do_sms: doing regular: " + the_string)
exec(the_string, user_dict)
if not changed:
steps += 1
user_dict['_internal']['steps'] = steps
changed = True
if next_field is None:
if skip_it:
# Run the commands that we have been storing up
if 'command_cache' in user_dict['_internal']:
for field_num in sorted(user_dict['_internal']['command_cache'].keys()):
for pre_string in user_dict['_internal']['command_cache'][field_num]:
logmessage("do_sms: doing command cache: " + pre_string)
exec(pre_string, user_dict)
if not changed:
steps += 1
user_dict['_internal']['steps'] = steps
changed = True
logmessage("do_sms: next_field is None")
if 'skip' in user_dict['_internal']:
user_dict['_internal']['skip'].clear()
if 'command_cache' in user_dict['_internal']:
user_dict['_internal']['command_cache'].clear()
# if 'sms_variable' in interview_status.current_info:
# del interview_status.current_info['sms_variable']
else:
logmessage("do_sms: next_field is not None")
user_dict['_internal']['skip'][field.number] = True
# user_dict['_internal']['smsgather'] = interview_status.sought
# if 'smsgather' in user_dict['_internal'] and user_dict['_internal']['smsgather'] == saveas:
# # logmessage("do_sms: deleting " + user_dict['_internal']['smsgather'])
# del user_dict['_internal']['smsgather']
except Exception as the_err:
logmessage("do_sms: failure to set variable with " + the_string)
logmessage("do_sms: error was " + str(the_err))
release_lock(sess_info['uid'], sess_info['yaml_filename'])
# if 'uid' in session:
# del session['uid']
return resp
if changed and next_field is None and question.name not in user_dict['_internal']['answers']:
logmessage("do_sms: setting internal answers for " + str(question.name))
question.mark_as_answered(user_dict)
interview.assemble(user_dict, interview_status)
logmessage("do_sms: back from assemble 2; had been seeking variable " + str(interview_status.sought))
logmessage("do_sms: question is now " + str(interview_status.question.name))
sess_info['question'] = interview_status.question.name
r.set(key, pickle.dumps(sess_info))
else:
logmessage("do_sms: not accepting input.")
if interview_status.question.question_type in ("restart", "exit", "logout", "exit_logout", "new_session"):
logmessage("do_sms: exiting because of restart or exit")
if save:
obtain_lock(sess_info['uid'], sess_info['yaml_filename'])
reset_user_dict(sess_info['uid'], sess_info['yaml_filename'], temp_user_id=sess_info['tempuser'])
release_lock(sess_info['uid'], sess_info['yaml_filename'])
r.delete(key)
if interview_status.question.question_type in ('restart', 'new_session'):
sess_info = {'yaml_filename': sess_info['yaml_filename'], 'uid': get_unique_name(sess_info['yaml_filename'], sess_info['secret']), 'secret': sess_info['secret'], 'number': form["From"], 'encrypted': True, 'tempuser': sess_info['tempuser'], 'user_id': None}
r.set(key, pickle.dumps(sess_info))
form = {'To': form['To'], 'From': form['From'], 'AccountSid': form['AccountSid'], 'Body': word('question')}
return do_sms(form, base_url, url_root, config=config, save=True)
else:
if not interview_status.can_go_back:
user_dict['_internal']['steps_offset'] = steps
# I had commented this out in do_sms(), but not in index()
# user_dict['_internal']['answers'] = {}
# Why do this?
if (not interview_status.followed_mc) and len(user_dict['_internal']['answers']):
user_dict['_internal']['answers'].clear()
# if interview_status.question.name and interview_status.question.name in user_dict['_internal']['answers']:
# del user_dict['_internal']['answers'][interview_status.question.name]
# logmessage("do_sms: " + as_sms(interview_status, user_dict))
# twilio_client = TwilioRestClient(tconfig['account sid'], tconfig['auth token'])
# message = twilio_client.messages.create(to=form["From"], from_=form["To"], body=as_sms(interview_status, user_dict))
logmessage("do_sms: calling as_sms")
sms_info = as_sms(interview_status, user_dict)
qoutput = sms_info['question']
if sms_info['next'] is not None:
logmessage("do_sms: next variable is " + sms_info['next'])
if interview_status.sought is None:
logmessage("do_sms: sought variable is None")
# user_dict['_internal']['smsgather'] = interview_status.sought
if (accepting_input or changed or action_performed or sms_info['next'] is not None) and save:
save_user_dict(sess_info['uid'], user_dict, sess_info['yaml_filename'], secret=sess_info['secret'], encrypt=encrypted, changed=changed, steps=steps)
for special_message in special_messages:
qoutput = re.sub(r'XXXXMESSAGE_AREAXXXX', "\n" + special_message + 'XXXXMESSAGE_AREAXXXX', qoutput)
qoutput = re.sub(r'XXXXMESSAGE_AREAXXXX', '', qoutput)
if user_dict.get('multi_user', False) is True and encrypted is True:
encrypted = False
update_session(sess_info['yaml_filename'], encrypted=encrypted, uid=sess_info['uid'])
is_encrypted = encrypted
r.set(key, pickle.dumps(sess_info))
if save:
decrypt_session(sess_info['secret'], user_code=sess_info['uid'], filename=sess_info['yaml_filename'])
if user_dict.get('multi_user', False) is False and encrypted is False:
encrypted = True
update_session(sess_info['yaml_filename'], encrypted=encrypted, uid=sess_info['uid'])
is_encrypted = encrypted
r.set(key, pickle.dumps(sess_info))
if save:
encrypt_session(sess_info['secret'], user_code=sess_info['uid'], filename=sess_info['yaml_filename'])
if len(interview_status.attachments) > 0:
if tconfig.get("mms attachments", True):
with resp.message(qoutput) as m:
media_count = 0
for attachment in interview_status.attachments:
if media_count >= 9:
break
for doc_format in attachment['formats_to_use']:
if media_count >= 9:
break
if doc_format != 'pdf':
continue
url = url_for('serve_stored_file', _external=True, uid=sess_info['uid'], number=attachment['file'][doc_format], filename=attachment['filename'], extension=docassemble.base.parse.extension_of_doc_format[doc_format])
m.media(url)
media_count += 1
else:
for attachment in interview_status.attachments:
for doc_format in attachment['formats_to_use']:
if doc_format not in ('pdf', 'rtf', 'docx'):
continue
qoutput += "\n" + url_for('serve_stored_file', _external=True, uid=sess_info['uid'], number=attachment['file'][doc_format], filename=attachment['filename'], extension=docassemble.base.parse.extension_of_doc_format[doc_format])
resp.message(qoutput)
else:
resp.message(qoutput)
release_lock(sess_info['uid'], sess_info['yaml_filename'])
return resp
def get_api_key():
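    """Extract the API key from the request, checking in turn the key query parameter, the JSON or form body, the X-API-Key cookie, the X-API-Key header, and an Authorization: Bearer header."""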
api_key = request.args.get('key', None)
if api_key is None and request.method in ('POST', 'PUT', 'PATCH'):
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
if 'key' in post_data:
api_key = post_data['key']
if api_key is None and 'X-API-Key' in request.cookies:
api_key = request.cookies['X-API-Key']
if api_key is None and 'X-API-Key' in request.headers:
api_key = request.headers['X-API-Key']
if api_key is None and 'Authorization' in request.headers:
m = re.search(r'^bearer (.*)', request.headers['Authorization'], flags=re.IGNORECASE)
if m:
api_key = m.group(1).strip()
return api_key
def api_verify(roles=None, permissions=None):
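    """Authenticate an API request.

    Looks up the API key in redis, enforces any IP address or referer
    constraints attached to the key, logs in the owning user, and verifies
    that the user has one of the given roles or permissions.  Returns True
    on success and False otherwise.
    """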
api_key = get_api_key()
if api_key is None:
logmessage("api_verify: no API key provided")
return False
api_key = encrypt_api_key(api_key, app.secret_key)
rkeys = r.keys('da:apikey:userid:*:key:' + api_key + ':info')
if len(rkeys) == 0:
logmessage("api_verify: API key not found")
return False
try:
info = json.loads(r.get(rkeys[0].decode()).decode())
except:
logmessage("api_verify: API information could not be unpacked")
return False
m = re.match(r'da:apikey:userid:([0-9]+):key:' + re.escape(api_key) + ':info', rkeys[0].decode())
if not m:
logmessage("api_verify: user id could not be extracted")
return False
user_id = m.group(1)
if not isinstance(info, dict):
logmessage("api_verify: API information was in the wrong format")
return False
if len(info['constraints']) > 0:
clientip = get_requester_ip(request)
if info['method'] == 'ip' and clientip not in info['constraints']:
logmessage("api_verify: IP address " + str(clientip) + " did not match")
return False
if info['method'] == 'referer':
if not request.referrer:
the_referer = request.headers.get('Origin', None)
if not the_referer:
logmessage("api_verify: could not authorize based on referer because no referer provided")
return False
else:
the_referer = request.referrer
matched = False
for constraint in info['constraints']:
constraint = re.sub(r'^[\*]+|[\*]+$', '', constraint)
constraint = re.escape(constraint)
constraint = re.sub(r'\\\*+', '.*', constraint)
the_referer = re.sub(r'\?.*', '', the_referer)
the_referer = re.sub(r'^https?://([^/]*)/', r'\1', the_referer)
if re.search(constraint, the_referer):
matched = True
break
if not matched:
logmessage("api_verify: authorization failure referer " + str(the_referer) + " could not be matched")
return False
user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).where(UserModel.id == user_id)).scalar()
if user is None or user.social_id.startswith('disabled$'):
logmessage("api_verify: user does not exist")
return False
if not user.active:
logmessage("api_verify: user is no longer active")
return False
login_user(user, remember=False)
update_last_login(user)
if current_user.has_role('admin') and 'permissions' in info and len(info['permissions']) > 0:
current_user.limited_api = True
current_user.limits = info['permissions']
ok_permission = False
if permissions:
for permission in permissions:
if current_user.can_do(permission):
ok_permission = True
break
if current_user.limited_api and not ok_permission:
logmessage("api_verify: user did not have correct privileges for resource")
return False
if roles and not ok_permission:
ok_role = False
for role in roles:
if current_user.has_role(role):
ok_role = True
break
if not ok_role:
logmessage("api_verify: user did not have correct privileges for resource")
return False
docassemble.base.functions.this_thread.current_info = current_info(req=request, interface='api', device_id=request.cookies.get('ds', None), session_uid=current_user.email)
return True
def jsonify_with_status(data, code):
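    """Return a JSON response with the given HTTP status code."""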
resp = jsonify(data)
resp.status_code = code
return resp
def true_or_false(text):
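    """Interpret a request parameter as a boolean, treating None, False, 0, '0', 'false', and 'f' (case-insensitively) as False and everything else as True."""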
if text in (False, None) or text == 0 or str(text).lower().strip() in ('0', 'false', 'f'):
return False
return True
def get_user_list(include_inactive=False, start_id=None):
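    """Return a tuple of (list of user info dictionaries, next start_id for pagination).

    Requires the admin or advocate privilege, or the access_user_info or
    create_user permission.  The second element of the tuple is None when
    there are no further pages of results.
    """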
if not (current_user.is_authenticated and current_user.has_role_or_permission('admin', 'advocate', permissions=['access_user_info', 'create_user'])):
raise DAException("You do not have sufficient privileges to access information about other users")
user_length = 0
user_list = []
while True:
there_are_more = False
filter_list = []
if start_id is not None:
filter_list.append(UserModel.id > start_id)
if not include_inactive:
filter_list.append(UserModel.active == True) # noqa: E712 # pylint: disable=singleton-comparison
the_users = select(UserModel).options(db.joinedload(UserModel.roles))
if len(filter_list) > 0:
the_users = the_users.where(*filter_list)
the_users = the_users.order_by(UserModel.id).limit(PAGINATION_LIMIT_PLUS_ONE)
results_in_query = 0
for user in db.session.execute(the_users).unique().scalars():
results_in_query += 1
if results_in_query == PAGINATION_LIMIT_PLUS_ONE:
there_are_more = True
break
start_id = user.id
if user.social_id.startswith('disabled$'):
continue
if user_length == PAGINATION_LIMIT:
there_are_more = True
break
user_info = {}
user_info['privileges'] = []
for role in user.roles:
user_info['privileges'].append(role.name)
for attrib in ('id', 'email', 'first_name', 'last_name', 'country', 'subdivisionfirst', 'subdivisionsecond', 'subdivisionthird', 'organization', 'timezone', 'language'):
user_info[attrib] = getattr(user, attrib)
if include_inactive:
user_info['active'] = getattr(user, 'active')
user_list.append(user_info)
user_length += 1
if user_length == PAGINATION_LIMIT or results_in_query < PAGINATION_LIMIT_PLUS_ONE:
break
if not there_are_more:
start_id = None
return (user_list, start_id)
@app.route('/translation_file', methods=['POST'])
@login_required
@roles_required(['admin', 'developer'])
def translation_file():
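    """Generate a translation file (XLSX or XLIFF) for an interview, pre-filling translated text found in the interview's existing translation files."""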
setup_translation()
form = Utilities(request.form)
yaml_filename = form.interview.data
if yaml_filename is None or not re.search(r'\S', yaml_filename):
flash(word("You must provide an interview filename"), 'error')
return redirect(url_for('utilities'))
tr_lang = form.tr_language.data
if tr_lang is None or not re.search(r'\S', tr_lang):
flash(word("You must provide a language"), 'error')
return redirect(url_for('utilities'))
try:
interview_source = docassemble.base.parse.interview_source_from_string(yaml_filename)
except DAError:
flash(word("Invalid interview"), 'error')
return redirect(url_for('utilities'))
interview_source.update()
interview_source.translating = True
interview = interview_source.get_interview()
tr_cache = {}
if len(interview.translations) > 0:
for item in interview.translations:
if item.lower().endswith(".xlsx"):
the_xlsx_file = docassemble.base.functions.package_data_filename(item)
if not os.path.isfile(the_xlsx_file):
continue
df = pandas.read_excel(the_xlsx_file, na_values=['NaN', '-NaN', '#NA', '#N/A'], keep_default_na=False)
invalid = False
for column_name in ('interview', 'question_id', 'index_num', 'hash', 'orig_lang', 'tr_lang', 'orig_text', 'tr_text'):
if column_name not in df.columns:
invalid = True
break
if invalid:
continue
for indexno in df.index:
try:
assert df['interview'][indexno]
assert df['question_id'][indexno]
assert df['index_num'][indexno] >= 0
assert df['hash'][indexno]
assert df['orig_lang'][indexno]
assert df['tr_lang'][indexno]
assert df['orig_text'][indexno] != ''
assert df['tr_text'][indexno] != ''
if isinstance(df['orig_text'][indexno], float):
assert not math.isnan(df['orig_text'][indexno])
if isinstance(df['tr_text'][indexno], float):
assert not math.isnan(df['tr_text'][indexno])
except:
continue
the_dict = {'interview': str(df['interview'][indexno]), 'question_id': str(df['question_id'][indexno]), 'index_num': df['index_num'][indexno], 'hash': str(df['hash'][indexno]), 'orig_lang': str(df['orig_lang'][indexno]), 'tr_lang': str(df['tr_lang'][indexno]), 'orig_text': str(df['orig_text'][indexno]), 'tr_text': str(df['tr_text'][indexno])}
if df['orig_text'][indexno] not in tr_cache:
tr_cache[df['orig_text'][indexno]] = {}
if df['orig_lang'][indexno] not in tr_cache[df['orig_text'][indexno]]:
tr_cache[df['orig_text'][indexno]][df['orig_lang'][indexno]] = {}
tr_cache[df['orig_text'][indexno]][df['orig_lang'][indexno]][df['tr_lang'][indexno]] = the_dict
elif item.lower().endswith(".xlf") or item.lower().endswith(".xliff"):
the_xlf_file = docassemble.base.functions.package_data_filename(item)
if not os.path.isfile(the_xlf_file):
continue
tree = ET.parse(the_xlf_file)
root = tree.getroot()
indexno = 1
if root.attrib['version'] == "1.2":
for the_file in root.iter('{urn:oasis:names:tc:xliff:document:1.2}file'):
source_lang = the_file.attrib.get('source-language', 'en')
target_lang = the_file.attrib.get('target-language', 'en')
source_filename = the_file.attrib.get('original', yaml_filename)
for transunit in the_file.iter('{urn:oasis:names:tc:xliff:document:1.2}trans-unit'):
orig_text = ''
tr_text = ''
for source in transunit.iter('{urn:oasis:names:tc:xliff:document:1.2}source'):
if source.text:
orig_text += source.text
for mrk in source:
orig_text += mrk.text
if mrk.tail:
orig_text += mrk.tail
for target in transunit.iter('{urn:oasis:names:tc:xliff:document:1.2}target'):
if target.text:
tr_text += target.text
for mrk in target:
tr_text += mrk.text
if mrk.tail:
tr_text += mrk.tail
if orig_text == '' or tr_text == '':
continue
the_dict = {'interview': source_filename, 'question_id': 'Unknown' + str(indexno), 'index_num': transunit.attrib.get('id', str(indexno)), 'hash': hashlib.md5(orig_text.encode('utf-8')).hexdigest(), 'orig_lang': source_lang, 'tr_lang': target_lang, 'orig_text': orig_text, 'tr_text': tr_text}
if orig_text not in tr_cache:
tr_cache[orig_text] = {}
if source_lang not in tr_cache[orig_text]:
tr_cache[orig_text][source_lang] = {}
tr_cache[orig_text][source_lang][target_lang] = the_dict
indexno += 1
elif root.attrib['version'] == "2.0":
source_lang = root.attrib['srcLang']
target_lang = root.attrib['trgLang']
for the_file in root.iter('{urn:oasis:names:tc:xliff:document:2.0}file'):
source_filename = the_file.attrib.get('original', yaml_filename)
for unit in the_file.iter('{urn:oasis:names:tc:xliff:document:2.0}unit'):
question_id = unit.attrib.get('id', 'Unknown' + str(indexno))
for segment in unit.iter('{urn:oasis:names:tc:xliff:document:2.0}segment'):
orig_text = ''
tr_text = ''
                                for source in segment.iter('{urn:oasis:names:tc:xliff:document:2.0}source'):
if source.text:
orig_text += source.text
for mrk in source:
orig_text += mrk.text
if mrk.tail:
orig_text += mrk.tail
                                for target in segment.iter('{urn:oasis:names:tc:xliff:document:2.0}target'):
if target.text:
tr_text += target.text
for mrk in target:
tr_text += mrk.text
if mrk.tail:
tr_text += mrk.tail
if orig_text == '' or tr_text == '':
continue
the_dict = {'interview': source_filename, 'question_id': question_id, 'index_num': segment.attrib.get('id', str(indexno)), 'hash': hashlib.md5(orig_text.encode('utf-8')).hexdigest(), 'orig_lang': source_lang, 'tr_lang': target_lang, 'orig_text': orig_text, 'tr_text': tr_text}
if orig_text not in tr_cache:
tr_cache[orig_text] = {}
if source_lang not in tr_cache[orig_text]:
tr_cache[orig_text][source_lang] = {}
tr_cache[orig_text][source_lang][target_lang] = the_dict
indexno += 1
if form.filetype.data == 'XLSX':
temp_file = tempfile.NamedTemporaryFile(suffix='.xlsx', delete=False)
xlsx_filename = docassemble.base.functions.space_to_underscore(os.path.splitext(os.path.basename(re.sub(r'.*:', '', yaml_filename)))[0]) + "_" + tr_lang + ".xlsx"
workbook = xlsxwriter.Workbook(temp_file.name)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': 1})
text = workbook.add_format()
text.set_align('top')
fixedcell = workbook.add_format()
fixedcell.set_align('top')
fixedcell.set_text_wrap()
fixedunlockedcell = workbook.add_format()
fixedunlockedcell.set_align('top')
fixedunlockedcell.set_text_wrap()
# fixedunlockedcell.set_locked(False)
fixed = workbook.add_format()
fixedone = workbook.add_format()
fixedone.set_bold()
fixedone.set_font_color('green')
fixedtwo = workbook.add_format()
fixedtwo.set_bold()
fixedtwo.set_font_color('blue')
fixedunlocked = workbook.add_format()
fixedunlockedone = workbook.add_format()
fixedunlockedone.set_bold()
fixedunlockedone.set_font_color('green')
fixedunlockedtwo = workbook.add_format()
fixedunlockedtwo.set_bold()
fixedunlockedtwo.set_font_color('blue')
wholefixed = workbook.add_format()
wholefixed.set_align('top')
wholefixed.set_text_wrap()
wholefixedone = workbook.add_format()
wholefixedone.set_bold()
wholefixedone.set_font_color('green')
wholefixedone.set_align('top')
wholefixedone.set_text_wrap()
wholefixedtwo = workbook.add_format()
wholefixedtwo.set_bold()
wholefixedtwo.set_font_color('blue')
wholefixedtwo.set_align('top')
wholefixedtwo.set_text_wrap()
wholefixedunlocked = workbook.add_format()
wholefixedunlocked.set_align('top')
wholefixedunlocked.set_text_wrap()
# wholefixedunlocked.set_locked(False)
wholefixedunlockedone = workbook.add_format()
wholefixedunlockedone.set_bold()
wholefixedunlockedone.set_font_color('green')
wholefixedunlockedone.set_align('top')
wholefixedunlockedone.set_text_wrap()
# wholefixedunlockedone.set_locked(False)
wholefixedunlockedtwo = workbook.add_format()
wholefixedunlockedtwo.set_bold()
wholefixedunlockedtwo.set_font_color('blue')
wholefixedunlockedtwo.set_align('top')
wholefixedunlockedtwo.set_text_wrap()
# wholefixedunlockedtwo.set_locked(False)
numb = workbook.add_format()
numb.set_align('top')
worksheet.write('A1', 'interview', bold)
worksheet.write('B1', 'question_id', bold)
worksheet.write('C1', 'index_num', bold)
worksheet.write('D1', 'hash', bold)
worksheet.write('E1', 'orig_lang', bold)
worksheet.write('F1', 'tr_lang', bold)
worksheet.write('G1', 'orig_text', bold)
worksheet.write('H1', 'tr_text', bold)
# options = {
# 'objects': False,
# 'scenarios': False,
# 'format_cells': False,
# 'format_columns': False,
# 'format_rows': False,
# 'insert_columns': False,
# 'insert_rows': True,
# 'insert_hyperlinks': False,
# 'delete_columns': False,
# 'delete_rows': True,
# 'select_locked_cells': True,
# 'sort': True,
# 'autofilter': True,
# 'pivot_tables': False,
# 'select_unlocked_cells': True,
# }
# worksheet.protect('', options)
worksheet.set_column(0, 0, 25)
worksheet.set_column(1, 1, 15)
worksheet.set_column(2, 2, 12)
        worksheet.set_column(6, 7, 75)
row = 1
seen = []
for question in interview.all_questions:
if not hasattr(question, 'translations'):
continue
language = question.language
if language == '*':
language = question.from_source.get_language()
if language == '*':
language = interview.default_language
if language == tr_lang:
continue
indexno = 0
if hasattr(question, 'id'):
question_id = question.id
else:
question_id = question.name
for item in question.translations:
if item in seen:
continue
if item in tr_cache and language in tr_cache[item] and tr_lang in tr_cache[item][language]:
tr_text = str(tr_cache[item][language][tr_lang]['tr_text'])
else:
tr_text = ''
worksheet.write_string(row, 0, question.from_source.get_name(), text)
worksheet.write_string(row, 1, question_id, text)
worksheet.write_number(row, 2, indexno, numb)
worksheet.write_string(row, 3, hashlib.md5(item.encode('utf-8')).hexdigest(), text)
worksheet.write_string(row, 4, language, text)
worksheet.write_string(row, 5, tr_lang, text)
mako = mako_parts(item)
if len(mako) == 0:
worksheet.write_string(row, 6, '', wholefixed)
elif len(mako) == 1:
if mako[0][1] == 0:
worksheet.write_string(row, 6, item, wholefixed)
elif mako[0][1] == 1:
worksheet.write_string(row, 6, item, wholefixedone)
elif mako[0][1] == 2:
worksheet.write_string(row, 6, item, wholefixedtwo)
else:
parts = [row, 6]
for part in mako:
if part[1] == 0:
parts.extend([fixed, part[0]])
elif part[1] == 1:
parts.extend([fixedone, part[0]])
elif part[1] == 2:
parts.extend([fixedtwo, part[0]])
parts.append(fixedcell)
worksheet.write_rich_string(*parts)
mako = mako_parts(tr_text)
if len(mako) == 0:
worksheet.write_string(row, 7, '', wholefixedunlocked)
elif len(mako) == 1:
if mako[0][1] == 0:
worksheet.write_string(row, 7, tr_text, wholefixedunlocked)
elif mako[0][1] == 1:
worksheet.write_string(row, 7, tr_text, wholefixedunlockedone)
elif mako[0][1] == 2:
worksheet.write_string(row, 7, tr_text, wholefixedunlockedtwo)
else:
parts = [row, 7]
for part in mako:
if part[1] == 0:
parts.extend([fixedunlocked, part[0]])
elif part[1] == 1:
parts.extend([fixedunlockedone, part[0]])
elif part[1] == 2:
parts.extend([fixedunlockedtwo, part[0]])
parts.append(fixedunlockedcell)
worksheet.write_rich_string(*parts)
num_lines = item.count('\n')
# if num_lines > 25:
# num_lines = 25
if num_lines > 0:
worksheet.set_row(row, 15*(num_lines + 1))
indexno += 1
row += 1
seen.append(item)
for item, cache_item in tr_cache.items():
if item in seen or language not in cache_item or tr_lang not in cache_item[language]:
continue
worksheet.write_string(row, 0, cache_item[language][tr_lang]['interview'], text)
worksheet.write_string(row, 1, cache_item[language][tr_lang]['question_id'], text)
worksheet.write_number(row, 2, 1000 + cache_item[language][tr_lang]['index_num'], numb)
worksheet.write_string(row, 3, cache_item[language][tr_lang]['hash'], text)
worksheet.write_string(row, 4, cache_item[language][tr_lang]['orig_lang'], text)
worksheet.write_string(row, 5, cache_item[language][tr_lang]['tr_lang'], text)
mako = mako_parts(cache_item[language][tr_lang]['orig_text'])
if len(mako) == 1:
if mako[0][1] == 0:
worksheet.write_string(row, 6, cache_item[language][tr_lang]['orig_text'], wholefixed)
elif mako[0][1] == 1:
worksheet.write_string(row, 6, cache_item[language][tr_lang]['orig_text'], wholefixedone)
elif mako[0][1] == 2:
worksheet.write_string(row, 6, cache_item[language][tr_lang]['orig_text'], wholefixedtwo)
else:
parts = [row, 6]
for part in mako:
if part[1] == 0:
parts.extend([fixed, part[0]])
elif part[1] == 1:
parts.extend([fixedone, part[0]])
elif part[1] == 2:
parts.extend([fixedtwo, part[0]])
parts.append(fixedcell)
worksheet.write_rich_string(*parts)
mako = mako_parts(cache_item[language][tr_lang]['tr_text'])
if len(mako) == 1:
if mako[0][1] == 0:
worksheet.write_string(row, 7, cache_item[language][tr_lang]['tr_text'], wholefixedunlocked)
elif mako[0][1] == 1:
worksheet.write_string(row, 7, cache_item[language][tr_lang]['tr_text'], wholefixedunlockedone)
elif mako[0][1] == 2:
worksheet.write_string(row, 7, cache_item[language][tr_lang]['tr_text'], wholefixedunlockedtwo)
else:
parts = [row, 7]
for part in mako:
if part[1] == 0:
parts.extend([fixedunlocked, part[0]])
elif part[1] == 1:
parts.extend([fixedunlockedone, part[0]])
elif part[1] == 2:
parts.extend([fixedunlockedtwo, part[0]])
parts.append(fixedunlockedcell)
worksheet.write_rich_string(*parts)
num_lines = cache_item[language][tr_lang]['orig_text'].count('\n')
if num_lines > 0:
worksheet.set_row(row, 15*(num_lines + 1))
row += 1
workbook.close()
response = send_file(temp_file.name, mimetype='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', as_attachment=True, download_name=xlsx_filename)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
if form.filetype.data.startswith('XLIFF'):
seen = set()
translations = {}
xliff_files = []
if form.filetype.data == 'XLIFF 1.2':
for question in interview.all_questions:
if not hasattr(question, 'translations'):
continue
language = question.language
if language == '*':
language = interview_source.language
if language == '*':
language = DEFAULT_LANGUAGE
if language == tr_lang:
continue
question_id = question.name
lang_combo = (language, tr_lang)
if lang_combo not in translations:
translations[lang_combo] = []
for item in question.translations:
if item in seen:
continue
if item in tr_cache and language in tr_cache[item] and tr_lang in tr_cache[item][language]:
tr_text = str(tr_cache[item][language][tr_lang]['tr_text'])
else:
tr_text = ''
orig_mako = mako_parts(item)
tr_mako = mako_parts(tr_text)
translations[lang_combo].append([orig_mako, tr_mako])
seen.add(item)
for lang_combo, translation_list in translations.items():
temp_file = tempfile.NamedTemporaryFile(suffix='.xlf', delete=False)
if len(translations) > 1:
xlf_filename = docassemble.base.functions.space_to_underscore(os.path.splitext(os.path.basename(re.sub(r'.*:', '', yaml_filename)))[0]) + "_" + lang_combo[0] + "_" + lang_combo[1] + ".xlf"
else:
xlf_filename = docassemble.base.functions.space_to_underscore(os.path.splitext(os.path.basename(re.sub(r'.*:', '', yaml_filename)))[0]) + "_" + lang_combo[1] + ".xlf"
xliff = ET.Element('xliff')
xliff.set('xmlns', 'urn:oasis:names:tc:xliff:document:1.2')
xliff.set('version', '1.2')
indexno = 1
the_file = ET.SubElement(xliff, 'file')
the_file.set('id', 'f1')
the_file.set('original', yaml_filename)
the_file.set('xml:space', 'preserve')
the_file.set('source-language', lang_combo[0])
the_file.set('target-language', lang_combo[1])
body = ET.SubElement(the_file, 'body')
for item in translation_list:
transunit = ET.SubElement(body, 'trans-unit')
transunit.set('id', str(indexno))
transunit.set('xml:space', 'preserve')
source = ET.SubElement(transunit, 'source')
source.set('xml:space', 'preserve')
target = ET.SubElement(transunit, 'target')
target.set('xml:space', 'preserve')
last_elem = None
for (elem, i) in ((source, 0), (target, 1)):
if len(item[i]) == 0:
elem.text = ''
elif len(item[i]) == 1 and item[i][0][1] == 0:
elem.text = item[i][0][0]
else:
for part in item[i]:
if part[1] == 0:
if last_elem is None:
if elem.text is None:
elem.text = ''
elem.text += part[0]
else:
if last_elem.tail is None:
last_elem.tail = ''
last_elem.tail += part[0]
else:
mrk = ET.SubElement(elem, 'mrk')
mrk.set('xml:space', 'preserve')
mrk.set('mtype', 'protected')
mrk.text = part[0]
last_elem = mrk
indexno += 1
temp_file.write(ET.tostring(xliff))
temp_file.close()
xliff_files.append([temp_file, xlf_filename])
elif form.filetype.data == 'XLIFF 2.0':
for question in interview.all_questions:
if not hasattr(question, 'translations'):
continue
language = question.language
if language == '*':
language = interview_source.language
if language == '*':
language = DEFAULT_LANGUAGE
if language == tr_lang:
continue
question_id = question.name
lang_combo = (language, tr_lang)
if lang_combo not in translations:
translations[lang_combo] = {}
filename = question.from_source.get_name()
if filename not in translations[lang_combo]:
translations[lang_combo][filename] = {}
if question_id not in translations[lang_combo][filename]:
translations[lang_combo][filename][question_id] = []
for item in question.translations:
if item in seen:
continue
if item in tr_cache and language in tr_cache[item] and tr_lang in tr_cache[item][language]:
tr_text = str(tr_cache[item][language][tr_lang]['tr_text'])
else:
tr_text = ''
orig_mako = mako_parts(item)
tr_mako = mako_parts(tr_text)
translations[lang_combo][filename][question_id].append([orig_mako, tr_mako])
seen.add(item)
for lang_combo, translations_by_filename in translations.items():
temp_file = tempfile.NamedTemporaryFile(suffix='.xlf', delete=False)
if len(translations) > 1:
xlf_filename = docassemble.base.functions.space_to_underscore(os.path.splitext(os.path.basename(re.sub(r'.*:', '', yaml_filename)))[0]) + "_" + lang_combo[0] + "_" + lang_combo[1] + ".xlf"
else:
xlf_filename = docassemble.base.functions.space_to_underscore(os.path.splitext(os.path.basename(re.sub(r'.*:', '', yaml_filename)))[0]) + "_" + lang_combo[1] + ".xlf"
xliff = ET.Element('xliff')
xliff.set('xmlns', 'urn:oasis:names:tc:xliff:document:2.0')
xliff.set('version', '2.0')
xliff.set('srcLang', lang_combo[0])
xliff.set('trgLang', lang_combo[1])
file_index = 1
indexno = 1
for filename, translations_by_question in translations_by_filename.items():
the_file = ET.SubElement(xliff, 'file')
the_file.set('id', 'f' + str(file_index))
the_file.set('original', filename)
the_file.set('xml:space', 'preserve')
for question_id, translation_list in translations_by_question.items():
unit = ET.SubElement(the_file, 'unit')
unit.set('id', question_id)
for item in translation_list:
segment = ET.SubElement(unit, 'segment')
segment.set('id', str(indexno))
segment.set('xml:space', 'preserve')
source = ET.SubElement(segment, 'source')
source.set('xml:space', 'preserve')
target = ET.SubElement(segment, 'target')
target.set('xml:space', 'preserve')
last_elem = None
for (elem, i) in ((source, 0), (target, 1)):
if len(item[i]) == 0:
elem.text = ''
elif len(item[i]) == 1 and item[i][0][1] == 0:
elem.text = item[i][0][0]
else:
for part in item[i]:
if part[1] == 0:
if last_elem is None:
if elem.text is None:
elem.text = ''
elem.text += part[0]
else:
if last_elem.tail is None:
last_elem.tail = ''
last_elem.tail += part[0]
else:
mrk = ET.SubElement(elem, 'mrk')
mrk.set('xml:space', 'preserve')
mrk.set('translate', 'no')
mrk.text = part[0]
last_elem = mrk
indexno += 1
file_index += 1
temp_file.write(ET.tostring(xliff))
temp_file.close()
xliff_files.append([temp_file, xlf_filename])
else:
flash(word("Bad file format"), 'error')
return redirect(url_for('utilities'))
if len(xliff_files) == 1:
response = send_file(xliff_files[0][0].name, mimetype='application/xml', as_attachment=True, download_name=xliff_files[0][1])
else:
zip_file = tempfile.NamedTemporaryFile(suffix='.zip', delete=False)
zip_file_name = docassemble.base.functions.space_to_underscore(os.path.splitext(os.path.basename(re.sub(r'.*:', '', yaml_filename)))[0]) + "_" + tr_lang + ".zip"
with zipfile.ZipFile(zip_file, compression=zipfile.ZIP_DEFLATED, mode='w') as zf:
for item in xliff_files:
info = zipfile.ZipInfo(item[1])
with open(item[0].name, 'rb') as fp:
zf.writestr(info, fp.read())
zf.close()
response = send_file(zip_file.name, mimetype='application/xml', as_attachment=True, download_name=zip_file_name)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
flash(word("Bad file format"), 'error')
return redirect(url_for('utilities'))
@app.route('/api/user_list', methods=['GET'])
@cross_origin(origins='*', methods=['GET', 'HEAD'], automatic_options=True)
def api_user_list():
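    """API endpoint for listing registered users (GET /api/user_list), with pagination via the next_id parameter."""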
if not api_verify(roles=['admin', 'advocate'], permissions=['access_user_info']):
return jsonify_with_status("Access denied.", 403)
include_inactive = true_or_false(request.args.get('include_inactive', False))
next_id_code = request.args.get('next_id', None)
if next_id_code:
try:
start_id = int(from_safeid(next_id_code))
assert start_id >= 0
except:
start_id = None
else:
start_id = None
try:
(user_list, start_id) = get_user_list(include_inactive=include_inactive, start_id=start_id)
except Exception as err:
return jsonify_with_status(str(err), 400)
if start_id is None:
next_id = None
else:
next_id = safeid(str(start_id))
return jsonify({'next_id': next_id, 'items': user_list})
def get_user_info(user_id=None, email=None, case_sensitive=False, admin=False):
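    """Return a dictionary of profile information and privileges for a user, looked up by user ID or e-mail address; returns None if the user does not exist or is disabled."""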
if user_id is not None:
assert isinstance(user_id, int)
if user_id is None and email is None:
user_id = current_user.id
if email is not None:
assert isinstance(email, str)
email = email.strip()
user_info = {'privileges': []}
if user_id is not None:
user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).where(UserModel.id == user_id)).scalar()
else:
if case_sensitive:
user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).filter_by(email=email)).scalar()
else:
email = re.sub(r'\%', '', email)
user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).where(UserModel.email.ilike(email))).scalar()
if user is None or user.social_id.startswith('disabled$'):
return None
if not admin and not current_user.has_role_or_permission('admin', 'advocate', permissions=['access_user_info']) and not current_user.same_as(user_id):
raise DAException("You do not have sufficient privileges to access information about other users")
for role in user.roles:
user_info['privileges'].append(role.name)
for attrib in ('id', 'email', 'first_name', 'last_name', 'country', 'subdivisionfirst', 'subdivisionsecond', 'subdivisionthird', 'organization', 'timezone', 'language', 'active'):
user_info[attrib] = getattr(user, attrib)
user_info['account_type'] = re.sub(r'\$.*', '', user.social_id)
return user_info
def make_user_inactive(user_id=None, email=None):
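    """Mark a user account, looked up by user ID or e-mail address, as inactive."""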
if not current_user.has_role_or_permission('admin', permissions=['edit_user_active_status']):
raise DAException("You do not have sufficient privileges to make a user inactive")
if user_id is None and email is None:
raise DAException("You must supply a user ID or an e-mail address to make a user inactive")
if user_id is not None:
user = db.session.execute(select(UserModel).filter_by(id=user_id)).scalar()
else:
assert isinstance(email, str)
email = email.strip()
user = db.session.execute(select(UserModel).filter_by(email=email)).scalar()
if user is None:
raise DAException("User not found")
user.active = False
db.session.commit()
@app.route('/api/user', methods=['GET', 'POST', 'PATCH'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'POST', 'PATCH', 'HEAD'], automatic_options=True)
def api_user():
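    """API endpoint for retrieving (GET) or updating (POST/PATCH) the current user's profile information (/api/user)."""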
if not api_verify():
return jsonify_with_status("Access denied.", 403)
if current_user.limited_api and not current_user.can_do('access_user_info'):
return jsonify_with_status("You do not have sufficient privileges to access user information", 403)
try:
user_info = get_user_info(user_id=current_user.id)
except Exception as err:
return jsonify_with_status("Error obtaining user information: " + str(err), 400)
if user_info is None:
return jsonify_with_status('User not found', 404)
if request.method == 'GET':
return jsonify(user_info)
if request.method in ('POST', 'PATCH'):
if current_user.limited_api and not current_user.can_do('edit_user_info'):
return jsonify_with_status("You do not have sufficient privileges to edit a user's information", 403)
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
info = {}
for key in ('first_name', 'last_name', 'country', 'subdivisionfirst', 'subdivisionsecond', 'subdivisionthird', 'organization', 'timezone', 'language', 'password', 'old_password'):
if key in post_data:
info[key] = post_data[key]
if 'password' in info and not current_user.has_role_or_permission('admin', permissions='edit_user_password'):
return jsonify_with_status("You do not have sufficient privileges to change a user's password.", 403)
try:
set_user_info(user_id=current_user.id, **info)
except Exception as err:
return jsonify_with_status(str(err), 400)
return ('', 204)
return ('', 204)
@app.route('/api/user/privileges', methods=['GET'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'HEAD'], automatic_options=True)
def api_user_privileges():
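    """API endpoint returning the list of privileges of the current user (GET /api/user/privileges)."""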
if not api_verify():
return jsonify_with_status("Access denied.", 403)
try:
user_info = get_user_info(user_id=current_user.id)
except Exception as err:
return jsonify_with_status("Error obtaining user information: " + str(err), 400)
if user_info is None:
return jsonify_with_status('User not found', 404)
return jsonify(user_info['privileges'])
@app.route('/api/user/new', methods=['POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['POST', 'HEAD'], automatic_options=True)
def api_create_user():
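    """API endpoint for creating a new user account (POST /api/user/new); returns the new user's ID and password."""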
if not api_verify(roles=['admin'], permissions=['create_user']):
return jsonify_with_status("Access denied.", 403)
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
if 'email' in post_data and 'username' not in post_data: # temporary
post_data['username'] = post_data['email'].strip()
del post_data['email']
if 'username' not in post_data:
return jsonify_with_status("An e-mail address must be supplied.", 400)
info = {}
for key in ('first_name', 'last_name', 'country', 'subdivisionfirst', 'subdivisionsecond', 'subdivisionthird', 'organization', 'timezone', 'language'):
if key in post_data:
info[key] = post_data[key].strip()
if 'privileges' in post_data and isinstance(post_data['privileges'], list):
role_list = post_data['privileges']
else:
try:
role_list = json.loads(post_data.get('privileges', '[]'))
except:
role_list = [post_data['privileges']]
if not isinstance(role_list, list):
if not isinstance(role_list, str):
return jsonify_with_status("List of privileges must be a string or a list.", 400)
role_list = [role_list]
valid_role_names = set()
for rol in db.session.execute(select(Role).where(Role.name != 'cron').order_by(Role.id)).scalars():
valid_role_names.add(rol.name)
for role_name in role_list:
if role_name not in valid_role_names:
return jsonify_with_status("Invalid privilege name. " + str(role_name) + " is not an existing privilege.", 400)
password = post_data.get('password', random_alphanumeric(10)).strip()
if len(password) < 4 or len(password) > 254:
return jsonify_with_status("Password too short or too long", 400)
try:
password = str(password)
user_id = create_user(post_data['username'], password, role_list, info)
except Exception as err:
return jsonify_with_status(str(err), 400)
return jsonify_with_status({'user_id': user_id, 'password': password}, 200)
def invite_user(email_address, privilege=None, send=True):
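    """Create an invitation for a new user with the given privilege.

    If send is True, sends the invitation e-mail and returns None;
    otherwise returns the invitation link.
    """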
if not (current_user.is_authenticated and current_user.has_role_or_permission('admin', permissions=['create_user'])):
raise DAError("You do not have sufficient privileges to create a new user")
role_name = privilege or 'user'
the_role_id = None
for role in db.session.execute(select(Role).order_by('id')).scalars():
if role.name == role_name:
the_role_id = role.id
break
if the_role_id is None:
raise DAError("Invalid privilege name " + repr(privilege))
user, user_email = app.user_manager.find_user_by_email(email_address) # pylint: disable=unused-variable
if user:
        raise DAError("A user with that email address already exists")
user_invite = MyUserInvitation(email=email_address, role_id=the_role_id, invited_by_user_id=current_user.id)
db.session.add(user_invite)
db.session.commit()
token = app.user_manager.generate_token(user_invite.id)
accept_invite_link = url_for('user.register',
token=token,
_external=True)
user_invite.token = token
db.session.commit()
if send:
try:
logmessage("Trying to send invite e-mail to " + str(user_invite.email))
docassemble_flask_user.emails.send_invite_email(user_invite, accept_invite_link)
logmessage("Sent e-mail invite to " + str(user_invite.email))
except Exception as e:
try:
logmessage("Failed to send invite e-mail: " + e.__class__.__name__ + ': ' + str(e))
except:
logmessage("Failed to send invite e-mail")
db.session.delete(user_invite)
db.session.commit()
raise DAError("Invitation email failed to send")
return None
return accept_invite_link
@app.route('/api/user_invite', methods=['POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['POST', 'HEAD'], automatic_options=True)
def api_invite_user():
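    """API endpoint for inviting new users by e-mail address (POST /api/user_invite); returns information about each invitation created."""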
if not api_verify(roles=['admin'], permissions=['create_user']):
return jsonify_with_status("Access denied.", 403)
is_admin = current_user.has_role('admin')
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
send_emails = true_or_false(post_data.get('send_emails', True))
role_name = str(post_data.get('privilege', 'user')).strip() or 'user'
valid_role_names = set()
for rol in db.session.execute(select(Role).where(Role.name != 'cron').order_by(Role.id)).scalars():
if not is_admin and rol.name in ('admin', 'developer', 'advocate'):
continue
valid_role_names.add(rol.name)
if role_name not in valid_role_names:
return jsonify_with_status("Invalid privilege name.", 400)
raw_email_addresses = post_data.get('email_addresses', post_data.get('email_address', []))
if isinstance(raw_email_addresses, str):
if raw_email_addresses.startswith('[') or raw_email_addresses.startswith('"'):
try:
raw_email_addresses = json.loads(raw_email_addresses)
except:
return jsonify_with_status("The email_addresses field did not contain valid JSON.", 400)
if not isinstance(raw_email_addresses, list):
raw_email_addresses = [str(raw_email_addresses)]
email_addresses = []
for email_address in raw_email_addresses:
(part_one, part_two) = emailpackage.utils.parseaddr(str(email_address)) # pylint: disable=unused-variable
if not re.match(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b', part_two):
return jsonify_with_status("Invalid e-mail address.", 400)
email_addresses.append(part_two)
if len(email_addresses) == 0:
return jsonify_with_status("One or more 'email_addresses' must be supplied.", 400)
the_role_id = None
for role in db.session.execute(select(Role).order_by('id')).scalars():
if role.name == role_name:
the_role_id = role.id
break
if the_role_id is None:
return jsonify_with_status("Invalid privilege name.", 400)
for email_address in email_addresses:
user, user_email = app.user_manager.find_user_by_email(email_address) # pylint: disable=unused-variable
if user:
return jsonify_with_status("That e-mail address is already being used.", 400)
invite_info = []
for email_address in email_addresses:
user_invite = MyUserInvitation(email=email_address, role_id=the_role_id, invited_by_user_id=current_user.id)
db.session.add(user_invite)
db.session.commit()
token = app.user_manager.generate_token(user_invite.id)
accept_invite_link = url_for('user.register',
token=token,
_external=True)
user_invite.token = token
db.session.commit()
info = {'email': email_address}
if send_emails:
try:
logmessage("Trying to send invite e-mail to " + str(user_invite.email))
docassemble_flask_user.emails.send_invite_email(user_invite, accept_invite_link)
logmessage("Sent e-mail invite to " + str(user_invite.email))
info['invitation_sent'] = True
info['url'] = accept_invite_link
except Exception as e:
try:
logmessage("Failed to send invite e-mail: " + e.__class__.__name__ + ': ' + str(e))
except:
logmessage("Failed to send invite e-mail")
db.session.delete(user_invite)
db.session.commit()
info['invitation_sent'] = False
else:
info['url'] = accept_invite_link
invite_info.append(info)
return jsonify(invite_info)
@app.route('/api/user_info', methods=['GET'])
@cross_origin(origins='*', methods=['GET', 'HEAD'], automatic_options=True)
def api_user_info():
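    """API endpoint for looking up a user's information by e-mail address (GET /api/user_info)."""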
if not api_verify(roles=['admin', 'advocate'], permissions=['access_user_info']):
return jsonify_with_status("Access denied.", 403)
if 'username' not in request.args:
return jsonify_with_status("An e-mail address must be supplied.", 400)
case_sensitive = true_or_false(request.args.get('case_sensitive', False))
try:
user_info = get_user_info(email=request.args['username'], case_sensitive=case_sensitive)
except Exception as err:
return jsonify_with_status("Error obtaining user information: " + str(err), 400)
if user_info is None:
return jsonify_with_status("User not found.", 404)
return jsonify(user_info)
@app.route('/api/user/<int:user_id>', methods=['GET', 'DELETE', 'POST', 'PATCH'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'DELETE', 'POST', 'PATCH', 'HEAD'], automatic_options=True)
def api_user_by_id(user_id):
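    """API endpoint for retrieving, updating, deactivating, or deleting a user account by ID (/api/user/<user_id>)."""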
if not api_verify():
return jsonify_with_status("Access denied.", 403)
try:
user_id = int(user_id)
except:
return jsonify_with_status("User ID must be an integer.", 400)
if not (current_user.same_as(user_id) or current_user.has_role_or_permission('admin', 'advocate', permissions=['access_user_info'])):
return jsonify_with_status("You do not have sufficient privileges to access user information", 403)
try:
user_info = get_user_info(user_id=user_id)
except Exception as err:
return jsonify_with_status("Error obtaining user information: " + str(err), 400)
if user_info is None:
return jsonify_with_status("User not found.", 404)
if request.method == 'GET':
return jsonify(user_info)
if request.method == 'DELETE':
if user_id in (1, current_user.id):
return jsonify_with_status("This user account cannot be deleted or deactivated.", 403)
if request.args.get('remove', None) == 'account':
if not (current_user.id == user_id or current_user.has_role_or_permission('admin', permissions=['delete_user'])):
return jsonify_with_status("You do not have sufficient privileges to delete user accounts.", 403)
user_interviews(user_id=user_id, secret=None, exclude_invalid=False, action='delete_all', delete_shared=False)
delete_user_data(user_id, r, r_user)
elif request.args.get('remove', None) == 'account_and_shared':
if not current_user.has_role_or_permission('admin', permissions=['delete_user']):
return jsonify_with_status("You do not have sufficient privileges to delete user accounts.", 403)
user_interviews(user_id=user_id, secret=None, exclude_invalid=False, action='delete_all', delete_shared=True)
delete_user_data(user_id, r, r_user)
else:
if not current_user.has_role_or_permission('admin', permissions=['edit_user_active_status']):
return jsonify_with_status("You do not have sufficient privileges to inactivate user accounts.", 403)
make_user_inactive(user_id=user_id)
return ('', 204)
if request.method in ('POST', 'PATCH'):
if not (current_user.has_role_or_permission('admin', permissions=['edit_user_info']) or current_user.same_as(user_id)):
return jsonify_with_status("You do not have sufficient privileges to edit user information.", 403)
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
info = {}
for key in ('first_name', 'last_name', 'country', 'subdivisionfirst', 'subdivisionsecond', 'subdivisionthird', 'organization', 'timezone', 'language', 'password', 'old_password'):
if key in post_data:
info[key] = post_data[key]
if 'active' in post_data:
if user_id in (1, current_user.id):
return jsonify_with_status("The active status of this user account cannot be changed.", 403)
if not current_user.has_role_or_permission('admin', permissions=['edit_user_active_status']):
return jsonify_with_status("You do not have sufficient privileges to change the active status of user accounts.", 403)
active_status = true_or_false(post_data['active'])
if user_info['active'] and not active_status:
info['active'] = False
elif not user_info['active'] and active_status:
info['active'] = True
if 'password' in info and not current_user.has_role_or_permission('admin', permissions=['edit_user_password']):
return jsonify_with_status("You must have admin privileges to change a password.", 403)
try:
set_user_info(user_id=user_id, **info)
except Exception as err:
return jsonify_with_status(str(err), 400)
return ('', 204)
return ('', 204)
@app.route('/api/fields', methods=['POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['POST', 'HEAD'], automatic_options=True)
def api_fields():
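    """API endpoint that parses an uploaded DOCX, Markdown, or PDF template and returns the fields it contains, in JSON or YAML format (POST /api/fields)."""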
if not api_verify(roles=['admin', 'developer'], permissions=['template_parse']):
return jsonify_with_status("Access denied.", 403)
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
output_format = post_data.get('format', 'json')
if output_format not in ('json', 'yaml'):
return jsonify_with_status("Invalid output format.", 400)
if 'template' not in request.files:
return jsonify_with_status("File not included.", 400)
the_files = request.files.getlist('template')
if not the_files:
return jsonify_with_status("File not included.", 400)
for the_file in the_files:
filename = secure_filename(the_file.filename)
temp_file = tempfile.NamedTemporaryFile(prefix="datemp", delete=False)
the_file.save(temp_file.name)
try:
input_format = os.path.splitext(filename.lower())[1][1:]
except:
input_format = 'bin'
if input_format == 'md':
input_format = 'markdown'
if input_format not in ('docx', 'markdown', 'pdf'):
return jsonify_with_status("Invalid input format.", 400)
try:
output = read_fields(temp_file.name, filename, input_format, output_format)
except Exception as err:
logmessage("api_fields: got error " + err.__class__.__name__ + ": " + str(err))
if output_format == 'yaml':
return jsonify_with_status("No fields could be found.", 400)
return jsonify({'fields': []})
break
if output_format == 'yaml':
response = make_response(output.encode('utf-8'), '200 OK')
response.headers['Content-type'] = 'text/plain; charset=utf-8'
else:
response = make_response(output.encode('utf-8'), 200)
response.headers['Content-Type'] = 'application/json; charset=utf-8'
return response
@app.route('/api/privileges', methods=['GET', 'DELETE', 'POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'DELETE', 'POST', 'HEAD'], automatic_options=True)
def api_privileges():
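    """API endpoint for listing, adding, or deleting privileges (roles) on the system (/api/privileges)."""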
if not api_verify():
return jsonify_with_status("Access denied.", 403)
if request.method == 'GET':
try:
return jsonify(get_privileges_list())
except Exception as err:
return jsonify_with_status(str(err), 400)
if request.method == 'DELETE':
if not current_user.has_role_or_permission('admin', permissions=['edit_privileges']):
return jsonify_with_status("Access denied.", 403)
if 'privilege' not in request.args:
return jsonify_with_status("A privilege name must be provided.", 400)
try:
remove_privilege(request.args['privilege'])
except Exception as err:
return jsonify_with_status(str(err), 400)
return ('', 204)
if request.method == 'POST':
if not current_user.has_role_or_permission('admin', permissions=['edit_privileges']):
return jsonify_with_status("Access denied.", 403)
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
if 'privilege' not in post_data:
return jsonify_with_status("A privilege name must be provided.", 400)
try:
add_privilege(post_data['privilege'])
except Exception as err:
return jsonify_with_status(str(err), 400)
return ('', 204)
return ('', 204)
def get_privileges_list(admin=False):
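    """Return the list of privilege (role) names defined on the system, ordered by name; admin=True bypasses the privilege check."""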
if admin is False and not current_user.has_role_or_permission('admin', 'developer', permissions=['access_privileges']):
raise DAException('You do not have sufficient privileges to see the list of privileges.')
role_names = []
for role in db.session.execute(select(Role.name).order_by(Role.name)):
role_names.append(role.name)
return role_names
def get_permissions_of_privilege(privilege):
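    """Return the list of permissions associated with the given privilege."""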
if not current_user.has_role_or_permission('admin', 'developer', permissions=['access_privileges']):
raise DAException('You do not have sufficient privileges to inspect privileges.')
if privilege == 'admin':
return copy.copy(PERMISSIONS_LIST)
if privilege == 'developer':
return ['demo_interviews', 'template_parse', 'interview_data']
if privilege == 'advocate':
return ['access_user_info', 'access_sessions', 'edit_sessions']
if privilege == 'cron':
return []
if privilege in docassemble.base.config.allowed:
return list(docassemble.base.config.allowed[privilege])
return []
def add_privilege(privilege):
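    """Create a new privilege (role) with the given name."""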
if not current_user.has_role_or_permission('admin', permissions=['edit_privileges']):
raise DAException('You do not have sufficient privileges to add a privilege.')
role_names = get_privileges_list()
if privilege in role_names:
raise DAException("The given privilege already exists.")
db.session.add(Role(name=privilege))
db.session.commit()
def remove_privilege(privilege):
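    """Delete a privilege (role) from the system, unless it is one of the built-in privileges."""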
if not current_user.has_role_or_permission('admin', permissions=['edit_privileges']):
raise DAException('You do not have sufficient privileges to delete a privilege.')
if privilege in ['user', 'admin', 'developer', 'advocate', 'cron']:
raise DAException('The specified privilege is built-in and cannot be deleted.')
role = db.session.execute(select(Role).filter_by(name=privilege)).scalar()
if role is None:
raise DAException('The privilege ' + str(privilege) + ' did not exist.')
db.session.delete(role)
db.session.commit()
@app.route('/api/user/<int:user_id>/privileges', methods=['GET', 'DELETE', 'POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'DELETE', 'POST', 'HEAD'], automatic_options=True)
def api_user_by_id_privileges(user_id):
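    """API endpoint for listing, granting, or revoking a user's privileges (/api/user/<user_id>/privileges)."""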
if not api_verify():
return jsonify_with_status("Access denied.", 403)
try:
user_info = get_user_info(user_id=user_id)
except Exception as err:
return jsonify_with_status("Error obtaining user information: " + str(err), 400)
if user_info is None:
return jsonify_with_status('User not found', 404)
if request.method == 'GET':
return jsonify(user_info['privileges'])
if request.method in ('DELETE', 'POST'):
if not current_user.has_role_or_permission('admin', permissions=['edit_user_privileges']):
return jsonify_with_status("Access denied.", 403)
if request.method == 'DELETE':
role_name = request.args.get('privilege', None)
if role_name is None:
return jsonify_with_status("A privilege name must be provided", 400)
try:
remove_user_privilege(user_id, role_name)
except Exception as err:
return jsonify_with_status(str(err), 400)
elif request.method == 'POST':
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
role_name = post_data.get('privilege', None)
if role_name is None:
return jsonify_with_status("A privilege name must be provided", 400)
try:
add_user_privilege(user_id, role_name)
except Exception as err:
return jsonify_with_status(str(err), 400)
db.session.commit()
return ('', 204)
return ('', 204)
def add_user_privilege(user_id, privilege):
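    """Give an existing privilege to the user with the given ID."""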
if not current_user.has_role_or_permission('admin', permissions=['edit_user_privileges']):
raise DAException('You do not have sufficient privileges to give another user a privilege.')
if privilege in ('admin', 'developer', 'advocate', 'cron') and not current_user.has_role_or_permission('admin'):
raise DAException('You do not have sufficient privileges to give the user this privilege.')
if privilege not in get_privileges_list(admin=True):
raise DAException('The specified privilege does not exist.')
if privilege == 'cron':
raise DAException('You cannot give a user the cron privilege.')
user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).where(UserModel.id == user_id)).scalar()
if user is None or user.social_id.startswith('disabled$'):
raise DAException("The specified user did not exist")
for role in user.roles:
if role.name == privilege:
raise DAException("The user already had that privilege.")
role_to_add = None
for role in db.session.execute(select(Role).order_by(Role.id)).scalars():
if role.name == privilege:
role_to_add = role
if role_to_add is None:
raise DAException("The specified privilege did not exist.")
user.roles.append(role_to_add)
db.session.commit()
def remove_user_privilege(user_id, privilege):
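    # Revoke a privilege from a user, with safeguards against removing admin from the current user.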
if not current_user.has_role_or_permission('admin', permissions=['edit_user_privileges']):
raise DAException('You do not have sufficient privileges to take a privilege away from a user.')
if current_user.id == user_id and privilege == 'admin':
raise DAException('You cannot take away the admin privilege from the current user.')
if privilege in ('admin', 'developer', 'advocate', 'cron') and not current_user.has_role('admin'):
raise DAException('You do not have sufficient privileges to take away this privilege.')
if privilege not in get_privileges_list(admin=True):
raise DAException('The specified privilege does not exist.')
user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).where(UserModel.id == user_id)).scalar()
if user is None or user.social_id.startswith('disabled$'):
raise DAException("The specified user did not exist")
role_to_remove = None
for role in user.roles:
if role.name == privilege:
role_to_remove = role
if role_to_remove is None:
raise DAException("The user did not already have that privilege.")
user.roles.remove(role_to_remove)
db.session.commit()
def create_user(email, password, privileges=None, info=None):
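    # Create a new local user account with the given e-mail, password, privileges, and profile info; returns the new user's ID.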
if not current_user.has_role_or_permission('admin', permissions=['create_user']):
raise DAException("You do not have sufficient privileges to create a user")
email = email.strip()
password = str(password).strip()
if len(password) < 4 or len(password) > 254:
raise DAException("Password too short or too long")
if privileges is None:
privileges = []
    if isinstance(privileges, DAList):
        privileges = privileges.elements
if not isinstance(privileges, list):
if not isinstance(privileges, str):
raise DAException("The privileges parameter to create_user() must be a list or a string.")
privileges = [privileges]
if info is None:
info = {}
if isinstance(info, DADict):
info = info.elements
if not isinstance(info, dict):
raise DAException("The info parameter to create_user() must be a dictionary.")
user, user_email = app.user_manager.find_user_by_email(email) # pylint: disable=unused-variable
if user:
raise DAException("That e-mail address is already being used.")
user_auth = UserAuthModel(password=app.user_manager.hash_password(password))
while True:
new_social = 'local$' + random_alphanumeric(32)
existing_user = db.session.execute(select(UserModel).filter_by(social_id=new_social)).first()
if existing_user:
continue
break
the_user = UserModel(
active=True,
nickname=re.sub(r'@.*', '', email),
social_id=new_social,
email=email,
user_auth=user_auth,
first_name=info.get('first_name', ''),
last_name=info.get('last_name', ''),
country=info.get('country', ''),
subdivisionfirst=info.get('subdivisionfirst', ''),
subdivisionsecond=info.get('subdivisionsecond', ''),
subdivisionthird=info.get('subdivisionthird', ''),
organization=info.get('organization', ''),
timezone=info.get('timezone', ''),
language=info.get('language', ''),
confirmed_at=datetime.datetime.now()
)
num_roles = 0
is_admin = current_user.has_role('admin')
for role in db.session.execute(select(Role).where(Role.name != 'cron').order_by(Role.id)).scalars():
if role.name in privileges and (is_admin or role.name not in ('admin', 'developer', 'advocate')):
the_user.roles.append(role)
num_roles += 1
if num_roles == 0:
user_role = db.session.execute(select(Role).filter_by(name='user')).scalar_one()
the_user.roles.append(user_role)
db.session.add(user_auth)
db.session.add(the_user)
db.session.commit()
return the_user.id
def set_user_info(**kwargs):
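    # Update a user's profile fields, password, active status, and/or privileges, subject to permission checks.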
user_id = kwargs.get('user_id', None)
email = kwargs.get('email', None)
if user_id is None and email is None:
user_id = int(current_user.id)
if not current_user.has_role_or_permission('admin', permissions=['edit_user_info']):
if (user_id is not None and current_user.id != user_id) or (email is not None and current_user.email != email):
raise DAException("You do not have sufficient privileges to edit user information")
if user_id is not None:
user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).filter_by(id=user_id)).scalar()
else:
user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).filter_by(email=email)).scalar()
if user is None or user.social_id.startswith('disabled$'):
raise DAException("User not found")
editing_self = current_user.same_as(user.id)
if not current_user.has_role_or_permission('admin'):
if not editing_self:
if user.has_role('admin', 'developer', 'advocate', 'cron'):
raise DAException("You do not have sufficient privileges to edit this user's information.")
if 'password' in kwargs and not current_user.can_do('edit_user_password'):
raise DAException("You do not have sufficient privileges to change this user's password.")
if 'privileges' in kwargs:
if user.has_role('admin', 'developer', 'advocate', 'cron') or not current_user.can_do('edit_user_privileges'):
raise DAException("You do not have sufficient privileges to edit this user's privileges.")
if 'active' in kwargs:
if not isinstance(kwargs['active'], bool):
raise DAException("The active parameter must be True or False")
if editing_self:
raise DAException("Cannot change active status of the current user.")
if not current_user.has_role_or_permission('admin', permissions=['edit_user_active_status']):
raise DAException("You do not have sufficient privileges to edit this user's active status.")
for key, val in kwargs.items():
if key in ('first_name', 'last_name', 'country', 'subdivisionfirst', 'subdivisionsecond', 'subdivisionthird', 'organization', 'timezone', 'language'):
setattr(user, key, val)
if 'password' in kwargs:
if not editing_self and not current_user.has_role_or_permission('admin', permissions=['edit_user_password']):
raise DAException("You do not have sufficient privileges to change a user's password.")
        if 'old_password' in kwargs and kwargs['password'] != kwargs['old_password']:
            user_manager = current_app.user_manager
            if (not user_manager.get_password(user)) or (not user_manager.verify_password(kwargs['old_password'], user)):
                raise DAException("The old_password is incorrect")
            substitute_secret(pad_to_16(MD5Hash(data=kwargs['old_password']).hexdigest()), pad_to_16(MD5Hash(data=kwargs['password']).hexdigest()), user=user)
        user.user_auth.password = app.user_manager.hash_password(kwargs['password'])
if 'active' in kwargs:
user.active = kwargs['active']
db.session.commit()
if 'privileges' in kwargs and isinstance(kwargs['privileges'], (list, tuple, set)):
if len(kwargs['privileges']) == 0:
raise DAException("Cannot remove all of a user's privileges.")
roles_to_add = []
roles_to_delete = []
role_names = [role.name for role in user.roles]
for role in role_names:
if role not in kwargs['privileges']:
roles_to_delete.append(role)
for role in kwargs['privileges']:
if role not in role_names:
roles_to_add.append(role)
for role in roles_to_delete:
remove_user_privilege(user.id, role)
for role in roles_to_add:
add_user_privilege(user.id, role)
@app.route('/api/secret', methods=['GET'])
@cross_origin(origins='*', methods=['GET', 'HEAD'], automatic_options=True)
def api_get_secret():
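    # API endpoint: return the decryption secret for the supplied username and password.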
if not api_verify():
return jsonify_with_status("Access denied.", 403)
username = request.args.get('username', None)
password = request.args.get('password', None)
if username is None or password is None:
return jsonify_with_status("A username and password must be supplied", 400)
try:
secret = get_secret(str(username), str(password))
except Exception as err:
return jsonify_with_status(str(err), 403)
return jsonify(secret)
def get_secret(username, password, case_sensitive=False):
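    # Look up the user by e-mail, verify the password, and return the encryption secret derived from it.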
if case_sensitive:
user = db.session.execute(select(UserModel).filter_by(email=username)).scalar()
else:
username = re.sub(r'\%', '', username)
user = db.session.execute(select(UserModel).where(UserModel.email.ilike(username))).scalar()
if user is None:
raise DAException("Username not known")
if app.config['USE_MFA'] and user.otp_secret is not None:
raise DAException("Secret will not be supplied because two factor authentication is enabled")
user_manager = current_app.user_manager
if not user_manager.get_password(user):
raise DAException("Password not set")
if not user_manager.verify_password(password, user):
raise DAException("Incorrect password")
return pad_to_16(MD5Hash(data=password).hexdigest())
def parse_api_sessions_query(query):
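    # Parse the query parameter of the sessions API, rejecting illegal expressions before evaluating the rest.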
if query is None or query.strip() == '':
return None
if illegal_sessions_query(query):
raise DAException("Illegal query")
return eval(query, {'DA': docassemble.base.DA})
@app.route('/api/users/interviews', methods=['GET', 'DELETE'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'DELETE', 'HEAD'], automatic_options=True)
def api_users_interviews():
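    # API endpoint: list (GET) or delete (DELETE) interview sessions across users, with optional filters.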
if not api_verify(roles=['admin', 'advocate'], permissions=['access_sessions']):
return jsonify_with_status("Access denied.", 403)
user_id = request.args.get('user_id', None)
filename = request.args.get('i', None)
session_id = request.args.get('session', None)
query = request.args.get('query', None)
try:
query = parse_api_sessions_query(query)
except:
return jsonify_with_status("Invalid query parameter", 400)
secret = request.args.get('secret', None)
tag = request.args.get('tag', None)
next_id_code = request.args.get('next_id', None)
if next_id_code:
try:
start_id = int(from_safeid(next_id_code))
assert start_id >= 0
except:
start_id = None
else:
start_id = None
if secret is not None:
secret = str(secret)
if request.method == 'GET':
include_dict = true_or_false(request.args.get('include_dictionary', False))
try:
(the_list, start_id) = user_interviews(user_id=user_id, secret=secret, exclude_invalid=False, tag=tag, filename=filename, session=session_id, query=query, include_dict=include_dict, start_id=start_id)
except Exception as err:
return jsonify_with_status("Error getting interview list. " + str(err), 400)
if start_id is None:
next_id = None
else:
next_id = safeid(str(start_id))
return jsonify({'next_id': next_id, 'items': docassemble.base.functions.safe_json(the_list)})
if request.method == 'DELETE':
start_id = None
while True:
try:
(the_list, start_id) = user_interviews(user_id=user_id, exclude_invalid=False, tag=tag, filename=filename, session=session_id, query=query, include_dict=False, start_id=start_id)
except:
return jsonify_with_status("Error reading interview list.", 400)
for info in the_list:
user_interviews(user_id=info['user_id'], action='delete', filename=info['filename'], session=info['session'])
if start_id is None:
break
return ('', 204)
return ('', 204)
@app.route('/api/user/<int:user_id>/interviews', methods=['GET', 'DELETE'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'DELETE', 'HEAD'], automatic_options=True)
def api_user_user_id_interviews(user_id):
if not api_verify():
return jsonify_with_status("Access denied.", 403)
if not (current_user.id == user_id or current_user.has_role_or_permission('admin', 'advocate', permissions=['access_sessions'])):
return jsonify_with_status("Access denied.", 403)
filename = request.args.get('i', None)
session_id = request.args.get('session', None)
query = request.args.get('query', None)
try:
query = parse_api_sessions_query(query)
except:
return jsonify_with_status("Invalid query parameter", 400)
secret = request.args.get('secret', None)
tag = request.args.get('tag', None)
next_id_code = request.args.get('next_id', None)
if next_id_code:
try:
start_id = int(from_safeid(next_id_code))
assert start_id >= 0
except:
start_id = None
else:
start_id = None
if secret is not None:
secret = str(secret)
include_dict = true_or_false(request.args.get('include_dictionary', False))
if request.method == 'GET':
try:
(the_list, start_id) = user_interviews(user_id=user_id, secret=secret, exclude_invalid=False, tag=tag, filename=filename, session=session_id, query=query, include_dict=include_dict, start_id=start_id)
except:
return jsonify_with_status("Error reading interview list.", 400)
if start_id is None:
next_id = None
else:
next_id = safeid(str(start_id))
return jsonify({'next_id': next_id, 'items': docassemble.base.functions.safe_json(the_list)})
if request.method == 'DELETE':
start_id = None
while True:
try:
(the_list, start_id) = user_interviews(user_id=user_id, exclude_invalid=False, tag=tag, filename=filename, session=session_id, query=query, include_dict=False, start_id=start_id)
except:
return jsonify_with_status("Error reading interview list.", 400)
for info in the_list:
user_interviews(user_id=info['user_id'], action='delete', filename=info['filename'], session=info['session'])
if start_id is None:
break
return ('', 204)
return ('', 204)
@app.route('/api/session/back', methods=['POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['POST', 'HEAD'], automatic_options=True)
def api_session_back():
if not api_verify():
return jsonify_with_status("Access denied.", 403)
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
yaml_filename = post_data.get('i', None)
session_id = post_data.get('session', None)
secret = str(post_data.get('secret', None))
reply_with_question = true_or_false(post_data.get('question', True))
if yaml_filename is None or session_id is None:
return jsonify_with_status("Parameters i and session are required.", 400)
docassemble.base.functions.this_thread.current_info['yaml_filename'] = yaml_filename
try:
data = go_back_in_session(yaml_filename, session_id, secret=secret, return_question=reply_with_question)
except Exception as the_err:
return jsonify_with_status(str(the_err), 400)
if data is None:
return ('', 204)
if data.get('questionType', None) == 'response':
return data['response']
return jsonify(**data)
def transform_json_variables(obj):
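    # Recursively convert JSON data into Python/docassemble objects (dates, times, classes, and class instances).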
if isinstance(obj, str):
if re.search(r'^[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]', obj):
try:
return docassemble.base.util.as_datetime(dateutil.parser.parse(obj))
except:
pass
elif re.search(r'^[0-9][0-9]:[0-9][0-9]:[0-9][0-9]', obj):
try:
return datetime.time.fromisoformat(obj)
except:
pass
return obj
if isinstance(obj, (bool, int, float)):
return obj
if isinstance(obj, dict):
if '_class' in obj and obj['_class'] == 'type' and 'name' in obj and isinstance(obj['name'], str) and obj['name'].startswith('docassemble.') and not illegal_variable_name(obj['name']):
if '.' in obj['name']:
the_module = re.sub(r'\.[^\.]+$', '', obj['name'])
else:
the_module = None
try:
if the_module:
importlib.import_module(the_module)
new_obj = eval(obj['name'])
if not isinstance(new_obj, TypeType):
raise DAException("name is not a class")
return new_obj
except Exception as err:
logmessage("transform_json_variables: " + err.__class__.__name__ + ": " + str(err))
return None
if '_class' in obj and isinstance(obj['_class'], str) and 'instanceName' in obj and obj['_class'].startswith('docassemble.') and not illegal_variable_name(obj['_class']) and isinstance(obj['instanceName'], str):
the_module = re.sub(r'\.[^\.]+$', '', obj['_class'])
try:
importlib.import_module(the_module)
the_class = eval(obj['_class'])
if not isinstance(the_class, TypeType):
raise DAException("_class was not a class")
new_obj = the_class(obj['instanceName'])
for key, val in obj.items():
if key == '_class':
continue
setattr(new_obj, key, transform_json_variables(val))
return new_obj
except Exception as err:
logmessage("transform_json_variables: " + err.__class__.__name__ + ": " + str(err))
return None
new_dict = {}
for key, val in obj.items():
new_dict[transform_json_variables(key)] = transform_json_variables(val)
return new_dict
if isinstance(obj, list):
return [transform_json_variables(val) for val in obj]
if isinstance(obj, set):
return set(transform_json_variables(val) for val in obj)
return obj
@app.route('/api/session', methods=['GET', 'POST', 'DELETE'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'POST', 'DELETE', 'HEAD'], automatic_options=True)
def api_session():
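    # API endpoint: get (GET), set (POST), or delete (DELETE) the variables of an interview session.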
if not api_verify():
return jsonify_with_status("Access denied.", 403)
if request.method == 'GET':
yaml_filename = request.args.get('i', None)
session_id = request.args.get('session', None)
secret = request.args.get('secret', None)
if secret is not None:
secret = str(secret)
if yaml_filename is None or session_id is None:
return jsonify_with_status("Parameters i and session are required.", 400)
docassemble.base.functions.this_thread.current_info['yaml_filename'] = yaml_filename
try:
variables = get_session_variables(yaml_filename, session_id, secret=secret)
except Exception as the_err:
return jsonify_with_status(str(the_err), 400)
return jsonify(variables)
if request.method == 'POST':
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
yaml_filename = post_data.get('i', None)
session_id = post_data.get('session', None)
secret = str(post_data.get('secret', None))
question_name = post_data.get('question_name', None)
treat_as_raw = true_or_false(post_data.get('raw', False))
advance_progress_meter = true_or_false(post_data.get('advance_progress_meter', False))
post_setting = not true_or_false(post_data.get('overwrite', False))
reply_with_question = true_or_false(post_data.get('question', True))
if yaml_filename is None or session_id is None:
return jsonify_with_status("Parameters i and session are required.", 400)
docassemble.base.functions.this_thread.current_info['yaml_filename'] = yaml_filename
if 'variables' in post_data and isinstance(post_data['variables'], dict):
variables = post_data['variables']
else:
try:
variables = json.loads(post_data.get('variables', '{}'))
except:
return jsonify_with_status("Malformed variables.", 400)
if not treat_as_raw:
variables = transform_json_variables(variables)
if 'file_variables' in post_data and isinstance(post_data['file_variables'], dict):
file_variables = post_data['file_variables']
else:
try:
file_variables = json.loads(post_data.get('file_variables', '{}'))
except:
return jsonify_with_status("Malformed list of file variables.", 400)
if 'delete_variables' in post_data and isinstance(post_data['delete_variables'], list):
del_variables = post_data['delete_variables']
else:
try:
del_variables = json.loads(post_data.get('delete_variables', '[]'))
except:
return jsonify_with_status("Malformed list of delete variables.", 400)
if 'event_list' in post_data and isinstance(post_data['event_list'], list):
event_list = post_data['event_list']
else:
try:
event_list = json.loads(post_data.get('event_list', '[]'))
assert isinstance(event_list, list)
except:
return jsonify_with_status("Malformed event list.", 400)
if not isinstance(variables, dict):
return jsonify_with_status("Variables data is not a dict.", 400)
if not isinstance(file_variables, dict):
return jsonify_with_status("File variables data is not a dict.", 400)
if not isinstance(del_variables, list):
return jsonify_with_status("Delete variables data is not a list.", 400)
if not isinstance(event_list, list):
return jsonify_with_status("Event list data is not a list.", 400)
literal_variables = {}
for filekey in request.files:
if filekey not in file_variables:
file_variables[filekey] = filekey
the_files = request.files.getlist(filekey)
files_to_process = []
if the_files:
for the_file in the_files:
filename = secure_filename(the_file.filename)
file_number = get_new_file_number(session_id, filename, yaml_file_name=yaml_filename)
extension, mimetype = get_ext_and_mimetype(filename)
saved_file = SavedFile(file_number, extension=extension, fix=True, should_not_exist=True)
temp_file = tempfile.NamedTemporaryFile(prefix="datemp", suffix='.' + extension, delete=False)
the_file.save(temp_file.name)
process_file(saved_file, temp_file.name, mimetype, extension)
files_to_process.append((filename, file_number, mimetype, extension))
file_field = file_variables[filekey]
if illegal_variable_name(file_field):
return jsonify_with_status("Malformed file variable.", 400)
if len(files_to_process) > 0:
elements = []
indexno = 0
for (filename, file_number, mimetype, extension) in files_to_process:
elements.append("docassemble.base.util.DAFile(" + repr(file_field + '[' + str(indexno) + ']') + ", filename=" + repr(filename) + ", number=" + str(file_number) + ", make_pngs=True, mimetype=" + repr(mimetype) + ", extension=" + repr(extension) + ")")
indexno += 1
literal_variables[file_field] = "docassemble.base.util.DAFileList(" + repr(file_field) + ", elements=[" + ", ".join(elements) + "])"
else:
literal_variables[file_field] = "None"
try:
data = set_session_variables(yaml_filename, session_id, variables, secret=secret, return_question=reply_with_question, literal_variables=literal_variables, del_variables=del_variables, question_name=question_name, event_list=event_list, advance_progress_meter=advance_progress_meter, post_setting=post_setting)
except Exception as the_err:
return jsonify_with_status(str(the_err), 400)
if data is None:
return ('', 204)
if data.get('questionType', None) == 'response':
return data['response']
return jsonify(**data)
if request.method == 'DELETE':
yaml_filename = request.args.get('i', None)
session_id = request.args.get('session', None)
if yaml_filename is None or session_id is None:
return jsonify_with_status("Parameters i and session are required.", 400)
user_interviews(action='delete', filename=yaml_filename, session=session_id)
return ('', 204)
return ('', 204)
@app.route('/api/file/<int:file_number>', methods=['GET'])
@cross_origin(origins='*', methods=['GET', 'HEAD'], automatic_options=True)
def api_file(file_number):
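    # API endpoint: serve a stored file by number, optionally selecting an alternate extension or filename.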
if not api_verify():
return jsonify_with_status("Access denied.", 403)
# yaml_filename = request.args.get('i', None)
# session_id = request.args.get('session', None)
number = re.sub(r'[^0-9]', '', str(file_number))
privileged = bool(current_user.is_authenticated and current_user.has_role('admin', 'advocate'))
try:
file_info = get_info_from_file_number(number, privileged=privileged, uids=get_session_uids())
except:
return ('File not found', 404)
if 'path' not in file_info:
return ('File not found', 404)
if 'extension' in request.args:
extension = werkzeug.utils.secure_filename(request.args['extension'])
if os.path.isfile(file_info['path'] + '.' + extension):
the_path = file_info['path'] + '.' + extension
extension, mimetype = get_ext_and_mimetype(file_info['path'] + '.' + extension)
else:
return ('File not found', 404)
elif 'filename' in request.args:
the_filename = secure_filename_spaces_ok(request.args['filename'])
if os.path.isfile(os.path.join(os.path.dirname(file_info['path']), the_filename)):
the_path = os.path.join(os.path.dirname(file_info['path']), the_filename)
extension, mimetype = get_ext_and_mimetype(the_filename)
else:
return ('File not found', 404)
else:
the_path = file_info['path']
mimetype = file_info['mimetype']
if not os.path.isfile(the_path):
return ('File not found', 404)
response = send_file(the_path, mimetype=mimetype)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response
def get_session_variables(yaml_filename, session_id, secret=None, simplify=True, use_lock=False):
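    # Fetch the interview dictionary for a session and return it, optionally in a JSON-serializable form.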
if use_lock:
obtain_lock(session_id, yaml_filename)
# logmessage("get_session_variables: fetch_user_dict")
if secret is None:
secret = docassemble.base.functions.this_thread.current_info.get('secret', None)
tbackup = docassemble.base.functions.backup_thread_variables()
docassemble.base.functions.this_thread.current_info['yaml_filename'] = yaml_filename
try:
steps, user_dict, is_encrypted = fetch_user_dict(session_id, yaml_filename, secret=str(secret)) # pylint: disable=unused-variable
except Exception as the_err:
if use_lock:
release_lock(session_id, yaml_filename)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException("Unable to decrypt interview dictionary: " + str(the_err))
if use_lock:
release_lock(session_id, yaml_filename)
docassemble.base.functions.restore_thread_variables(tbackup)
if user_dict is None:
raise DAException("Unable to obtain interview dictionary.")
if simplify:
variables = docassemble.base.functions.serializable_dict(user_dict, include_internal=True)
# variables['_internal'] = docassemble.base.functions.serializable_dict(user_dict['_internal'])
return variables
return user_dict
def go_back_in_session(yaml_filename, session_id, secret=None, return_question=False, use_lock=False, encode=False):
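    # Roll the session back one step and optionally return the data for the question that is then presented.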
if use_lock:
obtain_lock(session_id, yaml_filename)
tbackup = docassemble.base.functions.backup_thread_variables()
docassemble.base.functions.this_thread.current_info['yaml_filename'] = yaml_filename
try:
steps, user_dict, is_encrypted = fetch_user_dict(session_id, yaml_filename, secret=secret)
except:
if use_lock:
release_lock(session_id, yaml_filename)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException("Unable to decrypt interview dictionary.")
if user_dict is None:
if use_lock:
release_lock(session_id, yaml_filename)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException("Unable to obtain interview dictionary.")
if steps == 1:
if use_lock:
release_lock(session_id, yaml_filename)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException("Cannot go back.")
old_user_dict = user_dict
steps, user_dict, is_encrypted = fetch_previous_user_dict(session_id, yaml_filename, secret)
if user_dict is None:
if use_lock:
release_lock(session_id, yaml_filename)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException("Unable to obtain interview dictionary.")
if return_question:
try:
data = get_question_data(yaml_filename, session_id, secret, use_lock=False, user_dict=user_dict, steps=steps, is_encrypted=is_encrypted, old_user_dict=old_user_dict, encode=encode)
except Exception as the_err:
if use_lock:
release_lock(session_id, yaml_filename)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException("Problem getting current question:" + str(the_err))
else:
data = None
if use_lock:
release_lock(session_id, yaml_filename)
docassemble.base.functions.restore_thread_variables(tbackup)
return data
def set_session_variables(yaml_filename, session_id, variables, secret=None, return_question=False, literal_variables=None, del_variables=None, question_name=None, event_list=None, advance_progress_meter=False, post_setting=True, use_lock=False, encode=False, process_objects=False):
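    # Write variables into a session's interview dictionary, handling file variables, deletions, events, and re-encryption.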
if use_lock:
obtain_lock(session_id, yaml_filename)
tbackup = docassemble.base.functions.backup_thread_variables()
sbackup = backup_session()
device_id = docassemble.base.functions.this_thread.current_info['user']['device_id']
session_uid = docassemble.base.functions.this_thread.current_info['user']['session_uid']
if secret is None:
secret = docassemble.base.functions.this_thread.current_info.get('secret', None)
docassemble.base.functions.this_thread.current_info['yaml_filename'] = yaml_filename
try:
steps, user_dict, is_encrypted = fetch_user_dict(session_id, yaml_filename, secret=secret)
except:
if use_lock:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException("Unable to decrypt interview dictionary.")
vars_set = set()
old_values = {}
if user_dict is None:
if use_lock:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException("Unable to obtain interview dictionary.")
if process_objects:
variables = transform_json_variables(variables)
pre_assembly_necessary = False
for key, val in variables.items():
if contains_volatile.search(key):
pre_assembly_necessary = True
break
if pre_assembly_necessary is False and literal_variables is not None:
for key, val in literal_variables.items():
if contains_volatile.search(key):
pre_assembly_necessary = True
break
if pre_assembly_necessary is False and del_variables is not None:
for key in del_variables:
if contains_volatile.search(key):
pre_assembly_necessary = True
break
if pre_assembly_necessary:
interview = docassemble.base.interview_cache.get_interview(yaml_filename)
if current_user.is_anonymous:
if not interview.allowed_to_access(is_anonymous=True):
if use_lock:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException('Insufficient permissions to run this interview.')
else:
if not interview.allowed_to_access(has_roles=[role.name for role in current_user.roles]):
if use_lock:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException('Insufficient permissions to run this interview.')
ci = current_info(yaml=yaml_filename, req=request, secret=secret, device_id=device_id, session_uid=session_uid)
ci['session'] = session_id
ci['encrypted'] = is_encrypted
ci['secret'] = secret
interview_status = docassemble.base.parse.InterviewStatus(current_info=ci)
try:
interview.assemble(user_dict, interview_status)
except Exception as err:
if use_lock:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException("Error processing session: " + err.__class__.__name__ + ": " + str(err))
try:
for key, val in variables.items():
if illegal_variable_name(key):
raise DAException("Illegal value as variable name.")
if isinstance(val, (str, bool, int, float, NoneType)):
exec(str(key) + ' = ' + repr(val), user_dict)
else:
if key == '_xxxtempvarxxx':
continue
user_dict['_xxxtempvarxxx'] = copy.deepcopy(val)
exec(str(key) + ' = _xxxtempvarxxx', user_dict)
del user_dict['_xxxtempvarxxx']
process_set_variable(str(key), user_dict, vars_set, old_values)
except Exception as the_err:
if '_xxxtempvarxxx' in user_dict:
del user_dict['_xxxtempvarxxx']
if use_lock:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException("Problem setting variables:" + str(the_err))
if literal_variables is not None:
exec('import docassemble.base.util', user_dict)
for key, val in literal_variables.items():
if illegal_variable_name(key):
if use_lock:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException("Illegal value as variable name.")
exec(str(key) + ' = ' + val, user_dict)
process_set_variable(str(key), user_dict, vars_set, old_values)
if question_name is not None:
interview = docassemble.base.interview_cache.get_interview(yaml_filename)
if current_user.is_anonymous:
if not interview.allowed_to_access(is_anonymous=True):
if use_lock:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException('Insufficient permissions to run this interview.')
else:
if not interview.allowed_to_access(has_roles=[role.name for role in current_user.roles]):
if use_lock:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException('Insufficient permissions to run this interview.')
if question_name in interview.questions_by_name:
interview.questions_by_name[question_name].mark_as_answered(user_dict)
else:
if use_lock:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException("Problem marking question as completed")
if del_variables is not None:
try:
for key in del_variables:
if illegal_variable_name(key):
raise DAException("Illegal value as variable name.")
exec('del ' + str(key), user_dict)
except Exception as the_err:
if use_lock:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException("Problem deleting variables: " + str(the_err))
session_uid = docassemble.base.functions.this_thread.current_info['user']['session_uid']
# if 'event_stack' in user_dict['_internal']:
# logmessage("Event stack starting as: " + repr(user_dict['_internal']['event_stack']))
# else:
# logmessage("No event stack.")
if event_list is not None and len(event_list) and 'event_stack' in user_dict['_internal'] and session_uid in user_dict['_internal']['event_stack'] and len(user_dict['_internal']['event_stack'][session_uid]):
for event_name in event_list:
if illegal_variable_name(event_name):
raise DAException("Illegal value as event name.")
if user_dict['_internal']['event_stack'][session_uid][0]['action'] == event_name:
user_dict['_internal']['event_stack'][session_uid].pop(0)
# logmessage("Popped " + str(event_name))
if len(user_dict['_internal']['event_stack'][session_uid]) == 0:
break
if len(vars_set) > 0 and 'event_stack' in user_dict['_internal'] and session_uid in user_dict['_internal']['event_stack'] and len(user_dict['_internal']['event_stack'][session_uid]):
for var_name in vars_set:
if user_dict['_internal']['event_stack'][session_uid][0]['action'] == var_name:
user_dict['_internal']['event_stack'][session_uid].pop(0)
# logmessage("Popped " + str(var_name))
if len(user_dict['_internal']['event_stack'][session_uid]) == 0:
break
if question_name is not None:
for var_name in vars_set:
if var_name in interview.invalidation_todo or var_name in interview.onchange_todo:
interview.invalidate_dependencies(var_name, user_dict, old_values)
try:
del user_dict['_internal']['dirty'][var_name]
except:
pass
# if 'event_stack' in user_dict['_internal']:
# logmessage("Event stack now: " + repr(user_dict['_internal']['event_stack']))
if post_setting:
steps += 1
if return_question:
try:
data = get_question_data(yaml_filename, session_id, secret, use_lock=False, user_dict=user_dict, steps=steps, is_encrypted=is_encrypted, post_setting=post_setting, advance_progress_meter=advance_progress_meter, encode=encode)
except Exception as the_err:
if use_lock:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException("Problem getting current question:" + str(the_err))
else:
data = None
if not return_question:
save_user_dict(session_id, user_dict, yaml_filename, secret=secret, encrypt=is_encrypted, changed=post_setting, steps=steps)
if 'multi_user' in vars_set:
if user_dict.get('multi_user', False) is True and is_encrypted is True:
decrypt_session(secret, user_code=session_id, filename=yaml_filename)
is_encrypted = False
if user_dict.get('multi_user', False) is False and is_encrypted is False:
encrypt_session(secret, user_code=session_id, filename=yaml_filename)
is_encrypted = True
if use_lock:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
return data
@app.route('/api/session/new', methods=['GET'])
@cross_origin(origins='*', methods=['GET', 'HEAD'], automatic_options=True)
def api_session_new():
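    # API endpoint: create a new interview session for the given interview file, generating a secret if none is supplied.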
if not api_verify():
return jsonify_with_status("Access denied.", 403)
yaml_filename = request.args.get('i', None)
if yaml_filename is None:
return jsonify_with_status("Parameter i is required.", 400)
secret = request.args.get('secret', None)
if secret is None:
new_secret = True
secret = random_string(16)
else:
new_secret = False
secret = str(secret)
url_args = {}
for argname in request.args:
if argname in ('i', 'secret', 'key'):
continue
if re.match('[A-Za-z_][A-Za-z0-9_]*', argname):
url_args[argname] = request.args[argname]
docassemble.base.functions.this_thread.current_info['yaml_filename'] = yaml_filename
try:
(encrypted, session_id) = create_new_interview(yaml_filename, secret, url_args=url_args, req=request)
except Exception as err:
return jsonify_with_status(err.__class__.__name__ + ': ' + str(err), 400)
if encrypted and new_secret:
return jsonify({'session': session_id, 'i': yaml_filename, 'secret': secret, 'encrypted': encrypted})
return jsonify({'session': session_id, 'i': yaml_filename, 'encrypted': encrypted})
def create_new_interview(yaml_filename, secret, url_args=None, referer=None, req=None):
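    # Start a new interview session: check permissions, initialize the dictionary, assemble once, save, and return (encrypted, session_id).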
interview = docassemble.base.interview_cache.get_interview(yaml_filename)
if current_user.is_anonymous:
if not interview.allowed_to_initiate(is_anonymous=True):
raise DAException('Insufficient permissions to run this interview.')
if not interview.allowed_to_access(is_anonymous=True):
raise DAException('Insufficient permissions to run this interview.')
else:
if (not current_user.has_role('admin')) and (not interview.allowed_to_initiate(has_roles=[role.name for role in current_user.roles])):
raise DAException('Insufficient permissions to run this interview.')
if not interview.allowed_to_access(has_roles=[role.name for role in current_user.roles]):
raise DAException('Insufficient permissions to run this interview.')
if req is None:
req = request
if secret is None:
secret = random_string(16)
tbackup = docassemble.base.functions.backup_thread_variables()
sbackup = backup_session()
session_id, user_dict = reset_session(yaml_filename, secret)
add_referer(user_dict, referer=referer)
if url_args and (isinstance(url_args, dict) or (hasattr(url_args, 'instanceName') and hasattr(url_args, 'elements') and isinstance(url_args.elements, dict))):
for key, val in url_args.items():
if isinstance(val, str):
val = val.encode('unicode_escape').decode()
user_dict['url_args'][key] = val
device_id = docassemble.base.functions.this_thread.current_info['user']['device_id']
session_uid = docassemble.base.functions.this_thread.current_info['user']['session_uid']
ci = current_info(yaml=yaml_filename, req=req, secret=secret, device_id=device_id, session_uid=session_uid)
ci['session'] = session_id
ci['encrypted'] = True
ci['secret'] = secret
interview_status = docassemble.base.parse.InterviewStatus(current_info=ci)
interview_status.checkin = True
try:
interview.assemble(user_dict, interview_status)
except DAErrorMissingVariable:
pass
except Exception as e:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
if hasattr(e, 'traceback'):
the_trace = e.traceback
else:
the_trace = traceback.format_exc()
raise DAException("create_new_interview: failure to assemble interview: " + e.__class__.__name__ + ": " + str(e) + "\n" + str(the_trace))
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
encrypted = not bool(user_dict.get('multi_user', False) is True)
save_user_dict(session_id, user_dict, yaml_filename, secret=secret, encrypt=encrypted, changed=False, steps=1)
save_user_dict_key(session_id, yaml_filename)
release_lock(session_id, yaml_filename)
return (encrypted, session_id)
@app.route('/api/session/question', methods=['GET'])
@cross_origin(origins='*', methods=['GET', 'HEAD'], automatic_options=True)
def api_session_question():
if not api_verify():
return jsonify_with_status("Access denied.", 403)
yaml_filename = request.args.get('i', None)
session_id = request.args.get('session', None)
secret = request.args.get('secret', None)
if secret is not None:
secret = str(secret)
if yaml_filename is None or session_id is None:
return jsonify_with_status("Parameters i and session are required.", 400)
docassemble.base.functions.this_thread.current_info['yaml_filename'] = yaml_filename
try:
data = get_question_data(yaml_filename, session_id, secret)
except Exception as err:
return jsonify_with_status(str(err), 400)
if data.get('questionType', None) == 'response':
return data['response']
return jsonify(**data)
def get_question_data(yaml_filename, session_id, secret, use_lock=True, user_dict=None, steps=None, is_encrypted=None, old_user_dict=None, save=True, post_setting=False, advance_progress_meter=False, action=None, encode=False):
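    # Assemble the interview and return the current question (or a response/file) as a data structure for the API.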
if use_lock:
obtain_lock(session_id, yaml_filename)
tbackup = docassemble.base.functions.backup_thread_variables()
sbackup = backup_session()
interview = docassemble.base.interview_cache.get_interview(yaml_filename)
if current_user.is_anonymous:
if not interview.allowed_to_access(is_anonymous=True):
raise DAException('Insufficient permissions to run this interview.')
else:
if not interview.allowed_to_access(has_roles=[role.name for role in current_user.roles]):
raise DAException('Insufficient permissions to run this interview.')
device_id = docassemble.base.functions.this_thread.current_info['user']['device_id']
session_uid = docassemble.base.functions.this_thread.current_info['user']['session_uid']
ci = current_info(yaml=yaml_filename, req=request, secret=secret, device_id=device_id, action=action, session_uid=session_uid)
ci['session'] = session_id
ci['secret'] = secret
docassemble.base.functions.this_thread.current_info = ci
if user_dict is None:
try:
steps, user_dict, is_encrypted = fetch_user_dict(session_id, yaml_filename, secret=secret)
except Exception as err:
if use_lock:
release_lock(session_id, yaml_filename)
raise DAException("Unable to obtain interview dictionary: " + str(err))
ci['encrypted'] = is_encrypted
interview_status = docassemble.base.parse.InterviewStatus(current_info=ci)
# interview_status.checkin = True
try:
interview.assemble(user_dict, interview_status=interview_status, old_user_dict=old_user_dict)
except DAErrorMissingVariable as err:
if use_lock:
# save_user_dict(session_id, user_dict, yaml_filename, secret=secret, encrypt=is_encrypted, changed=False, steps=steps)
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
return {'questionType': 'undefined_variable', 'variable': err.variable, 'message_log': docassemble.base.functions.get_message_log()}
except Exception as e:
if use_lock:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
raise DAException("get_question_data: failure to assemble interview: " + e.__class__.__name__ + ": " + str(e))
save_status = docassemble.base.functions.this_thread.misc.get('save_status', 'new')
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
try:
the_section = user_dict['nav'].get_section()
the_section_display = user_dict['nav'].get_section(display=True)
the_sections = user_dict['nav'].get_sections()
except:
the_section = None
the_section_display = None
the_sections = []
if advance_progress_meter:
if interview.use_progress_bar and interview_status.question.progress is None and save_status == 'new':
advance_progress(user_dict, interview)
if interview.use_progress_bar and interview_status.question.progress is not None and (user_dict['_internal']['progress'] is None or interview.options.get('strict progress', False) or interview_status.question.progress > user_dict['_internal']['progress']):
user_dict['_internal']['progress'] = interview_status.question.progress
if save:
save_user_dict(session_id, user_dict, yaml_filename, secret=secret, encrypt=is_encrypted, changed=post_setting, steps=steps)
if user_dict.get('multi_user', False) is True and is_encrypted is True:
decrypt_session(secret, user_code=session_id, filename=yaml_filename)
is_encrypted = False
if user_dict.get('multi_user', False) is False and is_encrypted is False:
encrypt_session(secret, user_code=session_id, filename=yaml_filename)
is_encrypted = True
if use_lock:
release_lock(session_id, yaml_filename)
if interview_status.question.question_type == "response":
if hasattr(interview_status.question, 'response_code'):
resp_code = interview_status.question.response_code
else:
resp_code = 200
if hasattr(interview_status.question, 'all_variables'):
if hasattr(interview_status.question, 'include_internal'):
include_internal = interview_status.question.include_internal
else:
include_internal = False
response_to_send = make_response(docassemble.base.functions.dict_as_json(user_dict, include_internal=include_internal).encode('utf-8'), resp_code)
elif hasattr(interview_status.question, 'binaryresponse'):
response_to_send = make_response(interview_status.question.binaryresponse, resp_code)
else:
response_to_send = make_response(interview_status.questionText.encode('utf-8'), resp_code)
response_to_send.headers['Content-Type'] = interview_status.extras['content_type']
return {'questionType': 'response', 'response': response_to_send}
if interview_status.question.question_type == "sendfile":
if interview_status.question.response_file is not None:
the_path = interview_status.question.response_file.path()
else:
return jsonify_with_status("Could not send file because the response was None", 404)
if not os.path.isfile(the_path):
return jsonify_with_status("Could not send file because " + str(the_path) + " not found", 404)
response_to_send = send_file(the_path, mimetype=interview_status.extras['content_type'])
response_to_send.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return {'questionType': 'response', 'response': response_to_send}
if interview_status.question.language != '*':
interview_language = interview_status.question.language
else:
interview_language = DEFAULT_LANGUAGE
title_info = interview.get_title(user_dict, status=interview_status, converter=lambda content, part: title_converter(content, part, interview_status))
interview_status.exit_url = title_info.get('exit url', None)
interview_status.exit_link = title_info.get('exit link', 'exit')
interview_status.exit_label = title_info.get('exit label', word('Exit'))
interview_status.title = title_info.get('full', default_title)
interview_status.display_title = title_info.get('logo', interview_status.title)
interview_status.tabtitle = title_info.get('tab', interview_status.title)
interview_status.short_title = title_info.get('short', title_info.get('full', default_short_title))
interview_status.display_short_title = title_info.get('short logo', title_info.get('logo', interview_status.short_title))
interview_status.title_url = title_info.get('title url', None)
interview_status.title_url_opens_in_other_window = title_info.get('title url opens in other window', True)
interview_status.nav_item = title_info.get('navigation bar html', '')
the_main_page_parts = main_page_parts.get(interview_language, main_page_parts.get('*'))
interview_status.pre = title_info.get('pre', the_main_page_parts['main page pre'])
interview_status.post = title_info.get('post', the_main_page_parts['main page post'])
interview_status.footer = title_info.get('footer', the_main_page_parts['main page footer'] or get_part('global footer'))
if interview_status.footer:
interview_status.footer = re.sub(r'</?p.*?>', '', str(interview_status.footer), flags=re.IGNORECASE).strip()
if interview_status.footer == 'off':
interview_status.footer = ''
interview_status.submit = title_info.get('submit', the_main_page_parts['main page submit'])
interview_status.back = title_info.get('back button label', the_main_page_parts['main page back button label'] or interview_status.question.back())
interview_status.cornerback = title_info.get('corner back button label', the_main_page_parts['main page corner back button label'] or interview_status.question.cornerback())
if steps is None:
steps = user_dict['_internal']['steps']
allow_going_back = bool(interview_status.question.can_go_back and (steps is None or (steps - user_dict['_internal']['steps_offset']) > 1))
data = {'browser_title': interview_status.tabtitle, 'exit_link': interview_status.exit_link, 'exit_url': interview_status.exit_url, 'exit_label': interview_status.exit_label, 'title': interview_status.title, 'display_title': interview_status.display_title, 'short_title': interview_status.short_title, 'lang': interview_language, 'steps': steps, 'allow_going_back': allow_going_back, 'message_log': docassemble.base.functions.get_message_log(), 'section': the_section, 'display_section': the_section_display, 'sections': the_sections}
if allow_going_back:
data['cornerBackButton'] = interview_status.cornerback
data.update(interview_status.as_data(user_dict, encode=encode))
if 'source' in data:
data['source']['varsLink'] = url_for('get_variables', i=yaml_filename)
data['source']['varsLabel'] = word('Show variables and values')
# if interview_status.question.question_type == "review" and len(interview_status.question.fields_used):
# next_action_review = {'action': list(interview_status.question.fields_used)[0], 'arguments': {}}
# else:
# next_action_review = None
if 'reload_after' in interview_status.extras:
reload_after = 1000 * int(interview_status.extras['reload_after'])
else:
reload_after = 0
# if next_action_review:
# data['next_action'] = next_action_review
data['interview_options'] = interview.options
if reload_after and reload_after > 0:
data['reload_after'] = reload_after
for key in list(data.keys()):
if key == "_question_name":
data['questionName'] = data[key]
del data[key]
elif key.startswith('_'):
del data[key]
data['menu'] = {'items': []}
menu_items = data['menu']['items']
if 'menu_items' in interview_status.extras:
if not isinstance(interview_status.extras['menu_items'], list):
menu_items.append({'anchor': word("Error: menu_items is not a Python list")})
elif len(interview_status.extras['menu_items']) > 0:
for menu_item in interview_status.extras['menu_items']:
if not (isinstance(menu_item, dict) and 'url' in menu_item and 'label' in menu_item):
menu_items.append({'anchor': word("Error: menu item is not a Python dict with keys of url and label")})
else:
match_action = re.search(r'^\?action=([^\&]+)', menu_item['url'])
if match_action:
menu_items.append({'href': menu_item['url'], 'action': match_action.group(1), 'anchor': menu_item['label']})
else:
menu_items.append({'href': menu_item['url'], 'anchor': menu_item['label']})
if ALLOW_REGISTRATION:
sign_in_text = word('Sign in or sign up to save answers')
else:
sign_in_text = word('Sign in to save answers')
if daconfig.get('resume interview after login', False):
login_url = url_for('user.login', next=url_for('index', i=yaml_filename))
else:
login_url = url_for('user.login')
if interview.consolidated_metadata.get('show login', SHOW_LOGIN):
if current_user.is_anonymous:
if len(menu_items) > 0:
data['menu']['top'] = {'anchor': word("Menu")}
menu_items.append({'href': login_url, 'anchor': sign_in_text})
else:
data['menu']['top'] = {'href': login_url, 'anchor': sign_in_text}
else:
if len(menu_items) == 0 and interview.options.get('hide standard menu', False):
data['menu']['top'] = {'anchor': (current_user.email if current_user.email else re.sub(r'.*\$', '', current_user.social_id))}
else:
data['menu']['top'] = {'anchor': current_user.email if current_user.email else re.sub(r'.*\$', '', current_user.social_id)}
if not interview.options.get('hide standard menu', False):
if current_user.has_role('admin', 'developer') and interview.debug:
menu_items.append({'href': '#source', 'title': word("How this question came to be asked"), 'anchor': word('Source')})
if current_user.has_role('admin', 'advocate') and app.config['ENABLE_MONITOR']:
menu_items.append({'href': url_for('monitor'), 'anchor': word('Monitor')})
if current_user.has_role('admin', 'developer', 'trainer'):
menu_items.append({'href': url_for('train'), 'anchor': word('Train')})
if current_user.has_role('admin', 'developer'):
if app.config['ALLOW_UPDATES']:
menu_items.append({'href': url_for('update_package'), 'anchor': word('Package Management')})
if app.config['ALLOW_LOG_VIEWING']:
menu_items.append({'href': url_for('logs'), 'anchor': word('Logs')})
if app.config['ENABLE_PLAYGROUND']:
menu_items.append({'href': url_for('playground_page'), 'anchor': word('Playground')})
menu_items.append({'href': url_for('utilities'), 'anchor': word('Utilities')})
if current_user.has_role('admin', 'advocate') or current_user.can_do('access_user_info'):
menu_items.append({'href': url_for('user_list'), 'anchor': word('User List')})
if current_user.has_role('admin') and app.config['ALLOW_CONFIGURATION_EDITING']:
menu_items.append({'href': url_for('config_page'), 'anchor': word('Configuration')})
if app.config['SHOW_DISPATCH']:
menu_items.append({'href': url_for('interview_start'), 'anchor': word('Available Interviews')})
for item in app.config['ADMIN_INTERVIEWS']:
if item.can_use() and item.is_not(docassemble.base.functions.this_thread.current_info.get('yaml_filename', '')):
menu_items.append({'href': item.get_url(), 'anchor': item.get_title(docassemble.base.functions.get_language())})
if app.config['SHOW_MY_INTERVIEWS'] or current_user.has_role('admin'):
menu_items.append({'href': url_for('interview_list'), 'anchor': word('My Interviews')})
if current_user.has_role('admin', 'developer'):
menu_items.append({'href': url_for('user_profile_page'), 'anchor': word('Profile')})
else:
if app.config['SHOW_PROFILE'] or current_user.has_role('admin'):
menu_items.append({'href': url_for('user_profile_page'), 'anchor': word('Profile')})
else:
menu_items.append({'href': url_for('user.change_password'), 'anchor': word('Change Password')})
menu_items.append({'href': url_for('user.logout'), 'anchor': word('Sign Out')})
else:
if len(menu_items) > 0:
data['menu']['top'] = {'anchor': word("Menu")}
if not interview.options.get('hide standard menu', False):
menu_items.append({'href': exit_href(data=True), 'anchor': interview_status.exit_label})
else:
data['menu']['top'] = {'href': exit_href(data=True), 'anchor': interview_status.exit_label}
# logmessage("Ok returning")
return data
@app.route('/api/session/action', methods=['POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['POST', 'HEAD'], automatic_options=True)
def api_session_action():
if not api_verify():
return jsonify_with_status("Access denied.", 403)
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
result = run_action_in_session(**post_data)
if not isinstance(result, dict):
return result
if result['status'] == 'success':
return ('', 204)
return jsonify_with_status(result['message'], 400)
def run_action_in_session(**kwargs):
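    # Run an action in an existing session, save the result, and return a status dict or a direct response object.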
yaml_filename = kwargs.get('i', None)
session_id = kwargs.get('session', None)
secret = kwargs.get('secret', None)
action = kwargs.get('action', None)
persistent = true_or_false(kwargs.get('persistent', False))
overwrite = true_or_false(kwargs.get('overwrite', False))
if yaml_filename is None or session_id is None or action is None:
return {"status": "error", "message": "Parameters i, session, and action are required."}
secret = str(secret)
if 'arguments' in kwargs and kwargs['arguments'] is not None:
if isinstance(kwargs['arguments'], dict):
arguments = kwargs['arguments']
else:
try:
arguments = json.loads(kwargs['arguments'])
except:
return {"status": "error", "message": "Malformed arguments."}
if not isinstance(arguments, dict):
return {"status": "error", "message": "Arguments data is not a dict."}
else:
arguments = {}
device_id = docassemble.base.functions.this_thread.current_info['user']['device_id']
session_uid = docassemble.base.functions.this_thread.current_info['user']['session_uid']
ci = current_info(yaml=yaml_filename, req=request, action={'action': action, 'arguments': arguments}, secret=secret, device_id=device_id, session_uid=session_uid)
ci['session'] = session_id
ci['secret'] = secret
interview = docassemble.base.interview_cache.get_interview(yaml_filename)
if current_user.is_anonymous:
if not interview.allowed_to_access(is_anonymous=True):
raise DAException('Insufficient permissions to run this interview.')
else:
if not interview.allowed_to_access(has_roles=[role.name for role in current_user.roles]):
raise DAException('Insufficient permissions to run this interview.')
tbackup = docassemble.base.functions.backup_thread_variables()
sbackup = backup_session()
docassemble.base.functions.this_thread.current_info = ci
obtain_lock(session_id, yaml_filename)
try:
steps, user_dict, is_encrypted = fetch_user_dict(session_id, yaml_filename, secret=secret)
except:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
return {"status": "error", "message": "Unable to obtain interview dictionary."}
ci['encrypted'] = is_encrypted
interview_status = docassemble.base.parse.InterviewStatus(current_info=ci)
if not persistent:
interview_status.checkin = True
changed = True
try:
interview.assemble(user_dict, interview_status)
except DAErrorMissingVariable:
if overwrite:
save_status = 'overwrite'
changed = False
else:
save_status = docassemble.base.functions.this_thread.misc.get('save_status', 'new')
if save_status == 'new':
steps += 1
user_dict['_internal']['steps'] = steps
if save_status != 'ignore':
save_user_dict(session_id, user_dict, yaml_filename, secret=secret, encrypt=is_encrypted, changed=changed, steps=steps)
if user_dict.get('multi_user', False) is True and is_encrypted is True:
is_encrypted = False
decrypt_session(secret, user_code=session_id, filename=yaml_filename)
if user_dict.get('multi_user', False) is False and is_encrypted is False:
encrypt_session(secret, user_code=session_id, filename=yaml_filename)
is_encrypted = True
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
return {"status": "success"}
except Exception as e:
release_lock(session_id, yaml_filename)
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
return {"status": "error", "message": "api_session_action: failure to assemble interview: " + e.__class__.__name__ + ": " + str(e)}
if overwrite:
save_status = 'overwrite'
changed = False
else:
save_status = docassemble.base.functions.this_thread.misc.get('save_status', 'new')
if save_status == 'new':
steps += 1
user_dict['_internal']['steps'] = steps
if save_status != 'ignore':
save_user_dict(session_id, user_dict, yaml_filename, secret=secret, encrypt=is_encrypted, changed=changed, steps=steps)
if user_dict.get('multi_user', False) is True and is_encrypted is True:
is_encrypted = False
decrypt_session(secret, user_code=session_id, filename=yaml_filename)
if user_dict.get('multi_user', False) is False and is_encrypted is False:
encrypt_session(secret, user_code=session_id, filename=yaml_filename)
is_encrypted = True
release_lock(session_id, yaml_filename)
if interview_status.question.question_type == "response":
if hasattr(interview_status.question, 'all_variables'):
if hasattr(interview_status.question, 'include_internal'):
include_internal = interview_status.question.include_internal
else:
include_internal = False
response_to_send = make_response(docassemble.base.functions.dict_as_json(user_dict, include_internal=include_internal).encode('utf-8'), '200 OK')
elif hasattr(interview_status.question, 'binaryresponse'):
response_to_send = make_response(interview_status.question.binaryresponse, '200 OK')
else:
response_to_send = make_response(interview_status.questionText.encode('utf-8'), '200 OK')
response_to_send.headers['Content-Type'] = interview_status.extras['content_type']
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
return response_to_send
if interview_status.question.question_type == "sendfile":
if interview_status.question.response_file is not None:
the_path = interview_status.question.response_file.path()
else:
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
return jsonify_with_status("Could not send file because the response was None", 404)
if not os.path.isfile(the_path):
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
return jsonify_with_status("Could not send file because " + str(the_path) + " not found", 404)
response_to_send = send_file(the_path, mimetype=interview_status.extras['content_type'])
response_to_send.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
return response_to_send
restore_session(sbackup)
docassemble.base.functions.restore_thread_variables(tbackup)
return {'status': 'success'}
@app.route('/api/login_url', methods=['POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['POST', 'HEAD'], automatic_options=True)
def api_login_url():
if not api_verify(roles=['admin'], permissions=['log_user_in']):
return jsonify_with_status("Access denied.", 403)
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
result = get_login_url(**post_data)
if result['status'] == 'error':
return jsonify_with_status(result['message'], 400)
if result['status'] == 'auth_error':
return jsonify_with_status(result['message'], 403)
if result['status'] == 'success':
return jsonify(result['url'])
return jsonify_with_status("Error", 400)
def get_login_url(**kwargs):
username = kwargs.get('username', None)
password = kwargs.get('password', None)
if username is None or password is None:
return {"status": "error", "message": "A username and password must be supplied"}
username = str(username)
password = str(password)
try:
secret = get_secret(username, password)
except Exception as err:
return {"status": "auth_error", "message": str(err)}
try:
expire = int(kwargs.get('expire', 15))
assert expire > 0
except:
return {"status": "error", "message": "Invalid number of seconds."}
if 'url_args' in kwargs:
if isinstance(kwargs['url_args'], dict):
url_args = kwargs['url_args']
else:
try:
url_args = json.loads(kwargs['url_args'])
assert isinstance(url_args, dict)
except:
return {"status": "error", "message": "Malformed URL arguments"}
else:
url_args = {}
username = re.sub(r'\%', '', username)
user = db.session.execute(select(UserModel).where(UserModel.email.ilike(username))).scalar()
if user is None:
return {"status": "auth_error", "message": "Username not known"}
info = {'user_id': user.id, 'secret': secret}
del user
if 'next' in kwargs:
try:
path = get_url_from_file_reference(kwargs['next'])
assert isinstance(path, str)
assert not path.startswith('javascript')
except:
return {"status": "error", "message": "Unknown path for next"}
for key in ['i', 'next', 'session']:
if key in kwargs:
info[key] = kwargs[key]
if len(url_args) > 0:
info['url_args'] = url_args
if 'i' in info:
old_yaml_filename = docassemble.base.functions.this_thread.current_info.get('yaml_filename', None)
docassemble.base.functions.this_thread.current_info['yaml_filename'] = info['i']
if 'session' in info:
try:
steps, user_dict, is_encrypted = fetch_user_dict(info['session'], info['i'], secret=secret) # pylint: disable=unused-variable
info['encrypted'] = is_encrypted
except:
if old_yaml_filename:
docassemble.base.functions.this_thread.current_info['yaml_filename'] = old_yaml_filename
return {"status": "error", "message": "Could not decrypt dictionary"}
elif true_or_false(kwargs.get('resume_existing', False)) or daconfig.get('auto login resume existing', False):
interviews = user_interviews(user_id=info['user_id'], secret=secret, exclude_invalid=True, filename=info['i'], include_dict=True)[0]
if len(interviews) > 0:
info['session'] = interviews[0]['session']
info['encrypted'] = interviews[0]['encrypted']
del interviews
if old_yaml_filename:
docassemble.base.functions.this_thread.current_info['yaml_filename'] = old_yaml_filename
encryption_key = random_string(16)
encrypted_text = encrypt_dictionary(info, encryption_key)
while True:
code = random_string(24)
the_key = 'da:auto_login:' + code
if r.get(the_key) is None:
break
pipe = r.pipeline()
pipe.set(the_key, encrypted_text)
pipe.expire(the_key, expire)
pipe.execute()
return {"status": "success", "url": url_for('auto_login', key=encryption_key + code, _external=True)}
@app.route('/api/list', methods=['GET'])
@cross_origin(origins='*', methods=['GET', 'HEAD'], automatic_options=True)
def api_list():
if not api_verify():
return jsonify_with_status("Access denied.", 403)
return jsonify(interview_menu(absolute_urls=true_or_false(request.args.get('absolute_urls', True)), tag=request.args.get('tag', None)))
@app.route('/api/user/interviews', methods=['GET', 'DELETE'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'DELETE', 'HEAD'], automatic_options=True)
def api_user_interviews():
if not api_verify():
return jsonify_with_status("Access denied.", 403)
filename = request.args.get('i', None)
session_id = request.args.get('session', None)
query = request.args.get('query', None)
try:
query = parse_api_sessions_query(query)
except:
return jsonify_with_status("Invalid query parameter", 400)
tag = request.args.get('tag', None)
secret = request.args.get('secret', None)
if secret is not None:
secret = str(secret)
include_dict = true_or_false(request.args.get('include_dictionary', False))
next_id_code = request.args.get('next_id', None)
if next_id_code:
try:
start_id = int(from_safeid(next_id_code))
assert start_id >= 0
except:
start_id = None
else:
start_id = None
if request.method == 'GET':
try:
(the_list, start_id) = user_interviews(user_id=current_user.id, secret=secret, filename=filename, session=session_id, query=query, exclude_invalid=False, tag=tag, include_dict=include_dict, start_id=start_id)
except:
return jsonify_with_status("Error reading interview list.", 400)
if start_id is None:
next_id = None
else:
next_id = safeid(str(start_id))
return jsonify({'next_id': next_id, 'items': docassemble.base.functions.safe_json(the_list)})
if request.method == 'DELETE':
start_id = None
while True:
try:
(the_list, start_id) = user_interviews(user_id=current_user.id, filename=filename, session=session_id, query=query, exclude_invalid=False, tag=tag, include_dict=False, start_id=start_id)
except:
return jsonify_with_status("Error reading interview list.", 400)
for info in the_list:
user_interviews(user_id=info['user_id'], action='delete', filename=info['filename'], session=info['session'])
if start_id is None:
break
return ('', 204)
return ('', 204)
@app.route('/api/interviews', methods=['GET', 'DELETE'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'DELETE', 'HEAD'], automatic_options=True)
def api_interviews():
if not api_verify(roles=['admin', 'advocate'], permissions=['access_sessions']):
return jsonify_with_status("Access denied.", 403)
filename = request.args.get('i', None)
session_id = request.args.get('session', None)
query = request.args.get('query', None)
try:
query = parse_api_sessions_query(query)
except:
return jsonify_with_status("Invalid query parameter", 400)
tag = request.args.get('tag', None)
secret = request.args.get('secret', None)
if secret is not None:
secret = str(secret)
include_dict = true_or_false(request.args.get('include_dictionary', False))
next_id_code = request.args.get('next_id', None)
if next_id_code:
try:
start_id = int(from_safeid(next_id_code))
assert start_id >= 0
except:
start_id = None
else:
start_id = None
if request.method == 'GET':
try:
(the_list, start_id) = user_interviews(secret=secret, filename=filename, session=session_id, query=query, exclude_invalid=False, tag=tag, include_dict=include_dict, start_id=start_id)
except Exception as err:
return jsonify_with_status("Error reading interview list: " + str(err), 400)
if start_id is None:
next_id = None
else:
next_id = safeid(str(start_id))
return jsonify({'next_id': next_id, 'items': docassemble.base.functions.safe_json(the_list)})
if request.method == 'DELETE':
if not current_user.has_role_or_permission('admin', 'advocate', permissions=['edit_sessions']):
return jsonify_with_status("Access denied.", 403)
start_id = None
while True:
try:
(the_list, start_id) = user_interviews(filename=filename, session=session_id, query=query, exclude_invalid=False, tag=tag, include_dict=False, start_id=start_id)
except:
return jsonify_with_status("Error reading interview list.", 400)
for info in the_list:
if info['user_id'] is not None:
user_interviews(user_id=info['user_id'], action='delete', filename=info['filename'], session=info['session'])
else:
user_interviews(temp_user_id=info['temp_user_id'], action='delete', filename=info['filename'], session=info['session'])
if start_id is None:
break
return ('', 204)
return ('', 204)
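# Wraps a Celery task result: stores the task id and the server start time in
# Redis under a random 'da:install_status:<code>' key (expiring after an hour)
# and returns a JSON response containing the code as 'task_id', which the
# client can poll via /api/package_update_status. jsonify_restart_task() below
# does the same for restart-only tasks, keyed under 'da:restart_status:'.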
def jsonify_task(result):
while True:
code = random_string(24)
the_key = 'da:install_status:' + code
if r.get(the_key) is None:
break
pipe = r.pipeline()
pipe.set(the_key, json.dumps({'id': result.id, 'server_start_time': START_TIME}))
pipe.expire(the_key, 3600)
pipe.execute()
return jsonify({'task_id': code})
def jsonify_restart_task():
while True:
code = random_string(24)
the_key = 'da:restart_status:' + code
if r.get(the_key) is None:
break
pipe = r.pipeline()
pipe.set(the_key, json.dumps({'server_start_time': START_TIME}))
pipe.expire(the_key, 3600)
pipe.execute()
return jsonify({'task_id': code})
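# Only the core docassemble packages listed here cause the reset task to be run
# with run_create=True after installation.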
def should_run_create(package_name):
if package_name in ('docassemble.base', 'docassemble.webapp', 'docassemble.demo', 'docassemble'):
return True
return False
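# /api/package: GET lists installed packages (name, type, version, git or ZIP
# details); DELETE uninstalls the named package; POST installs or updates one
# package at a time from PyPI ('pip'), GitHub ('github_url'), an uploaded ZIP
# ('zip'), or by reinstalling an existing package ('update'). POST and DELETE
# kick off the update worker and return a task_id that can be polled with
# /api/package_update_status; the server is restarted unless restart=0.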
@app.route('/api/package', methods=['GET', 'POST', 'DELETE'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'POST', 'DELETE', 'HEAD'], automatic_options=True)
def api_package():
if not api_verify(roles=['admin', 'developer'], permissions=['manage_packages']):
return jsonify_with_status("Access denied.", 403)
if request.method == 'GET':
package_list, package_auth = get_package_info() # pylint: disable=unused-variable
packages = []
for package in package_list:
if not package.package.active:
continue
item = {'name': package.package.name, 'type': package.package.type, 'can_update': package.can_update, 'can_uninstall': package.can_uninstall}
if package.package.packageversion:
item['version'] = package.package.packageversion
if package.package.giturl:
item['git_url'] = package.package.giturl
if package.package.gitbranch:
item['branch'] = package.package.gitbranch
if package.package.upload:
item['zip_file_number'] = package.package.upload
packages.append(item)
return jsonify(packages)
if request.method == 'DELETE':
if not app.config['ALLOW_UPDATES']:
return ('File not found', 404)
target = request.args.get('package', None)
do_restart = true_or_false(request.args.get('restart', True))
if target is None:
return jsonify_with_status("Missing package name.", 400)
package_list, package_auth = get_package_info()
the_package = None
for package in package_list:
if package.package.name == target:
the_package = package
break
if the_package is None:
return jsonify_with_status("Package not found.", 400)
if not the_package.can_uninstall:
return jsonify_with_status("You are not allowed to uninstall that package.", 400)
uninstall_package(target)
if do_restart:
logmessage("Starting process of updating packages followed by restarting server")
result = docassemble.webapp.worker.update_packages.apply_async(link=docassemble.webapp.worker.reset_server.s(run_create=should_run_create(target)))
else:
result = docassemble.webapp.worker.update_packages.delay(restart=False)
return jsonify_task(result)
if request.method == 'POST':
if not app.config['ALLOW_UPDATES']:
return ('File not found', 404)
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
do_restart = true_or_false(post_data.get('restart', True))
num_commands = 0
if 'update' in post_data:
num_commands += 1
if 'github_url' in post_data:
num_commands += 1
if 'pip' in post_data:
num_commands += 1
if 'zip' in request.files:
num_commands += 1
if num_commands == 0:
return jsonify_with_status("No instructions provided.", 400)
if num_commands > 1:
return jsonify_with_status("Only one package can be installed or updated at a time.", 400)
if 'update' in post_data:
target = post_data['update']
package_list, package_auth = get_package_info()
the_package = None
for package in package_list:
if package.package.name == target:
the_package = package
break
if the_package is None:
return jsonify_with_status("Package not found.", 400)
if not the_package.can_update:
return jsonify_with_status("You are not allowed to update that package.", 400)
existing_package = db.session.execute(select(Package).filter_by(name=target, active=True).order_by(Package.id.desc())).scalar()
if existing_package is not None:
if existing_package.type == 'git' and existing_package.giturl is not None:
if existing_package.gitbranch:
install_git_package(target, existing_package.giturl, existing_package.gitbranch)
else:
install_git_package(target, existing_package.giturl, get_master_branch(existing_package.giturl))
elif existing_package.type == 'pip':
if existing_package.name == 'docassemble.webapp' and existing_package.limitation:
existing_package.limitation = None
install_pip_package(existing_package.name, existing_package.limitation)
db.session.commit()
if do_restart:
logmessage("Starting process of updating packages followed by restarting server")
result = docassemble.webapp.worker.update_packages.apply_async(link=docassemble.webapp.worker.reset_server.s(run_create=should_run_create(target)))
else:
result = docassemble.webapp.worker.update_packages.delay(restart=False)
return jsonify_task(result)
if 'github_url' in post_data:
github_url = post_data['github_url'].rstrip('/')
branch = post_data.get('branch', None)
if branch is None:
branch = get_master_branch(github_url)
m = re.search(r'#egg=(.*)', github_url)
if m:
packagename = re.sub(r'&.*', '', m.group(1))
github_url = re.sub(r'#.*', '', github_url)
else:
packagename = re.sub(r'/*$', '', github_url)
packagename = re.sub(r'^git\+', '', packagename)
packagename = re.sub(r'#.*', '', packagename)
packagename = re.sub(r'\.git$', '', packagename)
packagename = re.sub(r'.*/', '', packagename)
packagename = re.sub(r'^docassemble-', 'docassemble.', packagename)
if user_can_edit_package(giturl=github_url) and user_can_edit_package(pkgname=packagename):
install_git_package(packagename, github_url, branch)
if do_restart:
logmessage("Starting process of updating packages followed by restarting server")
result = docassemble.webapp.worker.update_packages.apply_async(link=docassemble.webapp.worker.reset_server.s(run_create=should_run_create(packagename)))
else:
result = docassemble.webapp.worker.update_packages.delay(restart=False)
return jsonify_task(result)
jsonify_with_status("You do not have permission to install that package.", 403)
if 'pip' in post_data:
m = re.match(r'([^>=<]+)([>=<]+.+)', post_data['pip'])
if m:
packagename = m.group(1)
limitation = m.group(2)
else:
packagename = post_data['pip']
limitation = None
packagename = re.sub(r'[^A-Za-z0-9\_\-\.]', '', packagename)
if user_can_edit_package(pkgname=packagename):
install_pip_package(packagename, limitation)
if do_restart:
logmessage("Starting process of updating packages followed by restarting server")
result = docassemble.webapp.worker.update_packages.apply_async(link=docassemble.webapp.worker.reset_server.s(run_create=should_run_create(packagename)))
else:
result = docassemble.webapp.worker.update_packages.delay(restart=False)
return jsonify_task(result)
return jsonify_with_status("You do not have permission to install that package.", 403)
if 'zip' in request.files and request.files['zip'].filename:
try:
the_file = request.files['zip']
filename = secure_filename(the_file.filename)
file_number = get_new_file_number(docassemble.base.functions.get_uid(), filename)
saved_file = SavedFile(file_number, extension='zip', fix=True, should_not_exist=True)
file_set_attributes(file_number, private=False, persistent=True)
zippath = saved_file.path
the_file.save(zippath)
saved_file.save()
saved_file.finalize()
pkgname = get_package_name_from_zip(zippath)
if user_can_edit_package(pkgname=pkgname):
install_zip_package(pkgname, file_number)
if do_restart:
logmessage("Starting process of updating packages followed by restarting server")
result = docassemble.webapp.worker.update_packages.apply_async(link=docassemble.webapp.worker.reset_server.s(run_create=should_run_create(pkgname)))
else:
result = docassemble.webapp.worker.update_packages.delay(restart=False)
return jsonify_task(result)
return jsonify_with_status("You do not have permission to install that package.", 403)
except:
return jsonify_with_status("There was an error when installing that package.", 400)
return ('File not found', 404)
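# GET /api/package_update_status: polls a package installation task using the
# 'task_id' returned by /api/package; responds with status 'unknown',
# 'working', or 'completed', the latter with ok and a summary log or error
# message.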
@app.route('/api/package_update_status', methods=['GET'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'HEAD'], automatic_options=True)
def api_package_update_status():
if not app.config['ALLOW_UPDATES']:
return ('File not found', 404)
if not api_verify(roles=['admin', 'developer'], permissions=['manage_packages']):
return jsonify_with_status("Access denied.", 403)
code = request.args.get('task_id', None)
if code is None:
return jsonify_with_status("Missing task_id", 400)
the_key = 'da:install_status:' + str(code)
task_data = r.get(the_key)
if task_data is None:
return jsonify({'status': 'unknown'})
task_info = json.loads(task_data.decode())
result = docassemble.webapp.worker.workerapp.AsyncResult(id=task_info['id'])
if result.ready():
the_result = result.get()
if isinstance(the_result, ReturnValue):
if the_result.ok:
if the_result.restart and START_TIME <= task_info['server_start_time']:
return jsonify(status='working')
r.delete(the_key)
return jsonify(status='completed', ok=True, log=summarize_results(the_result.results, the_result.logmessages, html=False))
if hasattr(the_result, 'error_message'):
r.delete(the_key)
return jsonify(status='completed', ok=False, error_message=str(the_result.error_message))
if hasattr(the_result, 'results') and hasattr(the_result, 'logmessages'):
r.delete(the_key)
return jsonify(status='completed', ok=False, error_message=summarize_results(the_result.results, the_result.logmessages, html=False))
r.expire(the_key, 30)
return jsonify(status='completed', ok=False, error_message=str("No error message. Result is " + str(the_result)))
r.expire(the_key, 30)
return jsonify(status='completed', ok=False, error_message=str(the_result))
return jsonify(status='working')
@app.route('/api/temp_url', methods=['GET'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'HEAD'], automatic_options=True)
def api_temporary_redirect():
if not api_verify():
return jsonify_with_status("Access denied.", 403)
url = request.args.get('url', None)
if url is None:
return jsonify_with_status("No url supplied.", 400)
try:
one_time = true_or_false(request.args.get('one_time', 0))
except:
one_time = False
try:
expire = int(request.args.get('expire', 3600))
assert expire > 0
except:
return jsonify_with_status("Invalid number of seconds.", 400)
return jsonify(docassemble.base.functions.temp_redirect(url, expire, False, one_time))
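# POST /api/resume_url: creates a temporary launch URL (default lifetime 3600
# seconds) for the interview 'i', optionally tied to an existing session and
# carrying url_args; the information is stored in Redis under
# 'da:resume_interview:<code>' and the /launch URL containing the code is
# returned.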
@app.route('/api/resume_url', methods=['POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['POST', 'HEAD'], automatic_options=True)
def api_resume_url():
if not api_verify():
return jsonify_with_status("Access denied.", 403)
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
filename = post_data.get('i', None)
if filename is None:
return jsonify_with_status("No filename supplied.", 400)
session_id = post_data.get('session', post_data.get('session_id', None))
if 'url_args' in post_data:
if isinstance(post_data['url_args'], dict):
url_args = post_data['url_args']
else:
try:
url_args = json.loads(post_data['url_args'])
assert isinstance(url_args, dict)
except:
return jsonify_with_status("Malformed URL arguments", 400)
else:
url_args = {}
try:
one_time = bool(int(post_data.get('one_time', 0)))
except:
one_time = False
try:
expire = int(post_data.get('expire', 3600))
assert expire > 0
except:
return jsonify_with_status("Invalid number of seconds.", 400)
info = {'i': filename}
if session_id:
info['session'] = session_id
if one_time:
info['once'] = True
if len(url_args):
info['url_args'] = url_args
while True:
code = random_string(32)
the_key = 'da:resume_interview:' + code
if r.get(the_key) is None:
break
pipe = r.pipeline()
pipe.set(the_key, json.dumps(info))
pipe.expire(the_key, expire)
pipe.execute()
return jsonify(url_for('launch', c=code, _external=True))
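# POST /api/clear_cache: invalidates all cached interview sources by
# incrementing every 'da:interviewsource:*' counter in Redis.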
@app.route('/api/clear_cache', methods=['POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['POST', 'HEAD'], automatic_options=True)
def api_clear_cache():
if not api_verify(roles=['admin', 'developer'], permissions=['playground_control']):
return jsonify_with_status("Access denied.", 403)
for key in r.keys('da:interviewsource:*'):
r.incr(key.decode())
return ('', 204)
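# /api/config: GET returns the server Configuration as parsed YAML; POST
# replaces the entire Configuration with the supplied 'config' (dict or JSON);
# PATCH merges 'config_changes' into the existing Configuration. POST and PATCH
# write the YAML back (to cloud storage if configured, and to the config file),
# restart the server, and return a restart task id.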
@app.route('/api/config', methods=['GET', 'POST', 'PATCH'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'POST', 'PATCH', 'HEAD'], automatic_options=True)
def api_config():
if not app.config['ALLOW_CONFIGURATION_EDITING']:
return ('File not found', 404)
if not api_verify(roles=['admin'], permissions=['manage_config']):
return jsonify_with_status("Access denied.", 403)
if request.method == 'GET':
try:
with open(daconfig['config file'], 'r', encoding='utf-8') as fp:
content = fp.read()
data = standardyaml.load(content, Loader=standardyaml.FullLoader)
except:
return jsonify_with_status("Could not parse Configuration.", 400)
return jsonify(data)
if request.method == 'POST':
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
if 'config' not in post_data:
return jsonify_with_status("Configuration not supplied.", 400)
if isinstance(post_data['config'], dict):
data = post_data['config']
else:
try:
data = json.loads(post_data['config'])
except:
return jsonify_with_status("Configuration was not valid JSON.", 400)
yaml_data = altyamlstring.dump_to_string(data)
if cloud is not None:
key = cloud.get_key('config.yml')
key.set_contents_from_string(yaml_data)
with open(daconfig['config file'], 'w', encoding='utf-8') as fp:
fp.write(yaml_data)
return_val = jsonify_restart_task()
restart_all()
return return_val
if request.method == 'PATCH':
try:
with open(daconfig['config file'], 'r', encoding='utf-8') as fp:
content = fp.read()
data = standardyaml.load(content, Loader=standardyaml.FullLoader)
except:
return jsonify_with_status("Could not parse Configuration.", 400)
patch_data = request.get_json(silent=True)
if patch_data is None:
# using_json = False
patch_data = request.form.copy()
# else:
# using_json = True
if 'config_changes' not in patch_data:
return jsonify_with_status("Configuration changes not supplied.", 400)
if isinstance(patch_data['config_changes'], dict):
new_data = patch_data['config_changes']
else:
try:
new_data = json.loads(patch_data['config_changes'])
except:
return jsonify_with_status("Configuration changes were not valid JSON.", 400)
data.update(new_data)
yaml_data = altyamlstring.dump_to_string(data)
if cloud is not None:
key = cloud.get_key('config.yml')
key.set_contents_from_string(yaml_data)
with open(daconfig['config file'], 'w', encoding='utf-8') as fp:
fp.write(yaml_data)
return_val = jsonify_restart_task()
restart_all()
return return_val
return ('File not found', 404)
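# POST /api/playground_pull: pulls a package into a user's Playground project
# from GitHub ('github_url', optional 'branch') or PyPI ('pip') using
# do_playground_pull(), and restarts the server if the pull requires it and
# restart was not disabled.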
@app.route('/api/playground_pull', methods=['GET', 'POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['POST', 'HEAD'], automatic_options=True)
def api_playground_pull():
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
if not api_verify(roles=['admin', 'developer'], permissions=['playground_control']):
return jsonify_with_status("Access denied.", 403)
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
do_restart = true_or_false(post_data.get('restart', True))
current_project = post_data.get('project', 'default')
try:
if app.config['ENABLE_SHARING_PLAYGROUNDS'] or current_user.has_role_or_permission('admin', permissions=['playground_control']):
user_id = int(post_data.get('user_id', current_user.id))
else:
if 'user_id' in post_data:
assert int(post_data['user_id']) == current_user.id
user_id = current_user.id
except:
return jsonify_with_status("Invalid user_id.", 400)
if current_project != 'default' and current_project not in get_list_of_projects(user_id):
return jsonify_with_status("Invalid project.", 400)
docassemble.base.functions.this_thread.current_info['user'] = {'is_anonymous': False, 'theid': user_id}
if app.config['USE_GITHUB']:
github_auth = r.get('da:using_github:userid:' + str(current_user.id))
can_publish_to_github = bool(github_auth is not None)
else:
can_publish_to_github = None
github_url = None
branch = None
pypi_package = None
if 'github_url' in post_data:
github_url = post_data['github_url'].rstrip('/')
branch = post_data.get('branch', None)
if branch is None:
branch = get_master_branch(github_url)
m = re.search(r'#egg=(.*)', github_url)
if m:
packagename = re.sub(r'&.*', '', m.group(1))
github_url = re.sub(r'#.*', '', github_url)
else:
packagename = re.sub(r'/*$', '', github_url)
packagename = re.sub(r'^git\+', '', packagename)
packagename = re.sub(r'#.*', '', packagename)
packagename = re.sub(r'\.git$', '', packagename)
packagename = re.sub(r'.*/', '', packagename)
packagename = re.sub(r'^docassemble-', 'docassemble.', packagename)
elif 'pip' in post_data:
m = re.match(r'([^>=<]+)([>=<]+.+)', post_data['pip'])
if m:
pypi_package = m.group(1)
# limitation = m.group(2)
else:
pypi_package = post_data['pip']
# limitation = None
packagename = re.sub(r'[^A-Za-z0-9\_\-\.]', '', pypi_package)
else:
return jsonify_with_status("Either github_url or pip is required.", 400)
area = {}
# area_sec = {'templates': 'playgroundtemplate', 'static': 'playgroundstatic', 'sources': 'playgroundsources', 'questions': 'playground'}
for sec in ('playground', 'playgroundpackages', 'playgroundtemplate', 'playgroundstatic', 'playgroundsources', 'playgroundmodules'):
area[sec] = SavedFile(user_id, fix=True, section=sec)
result = do_playground_pull(area, current_project, github_url=github_url, branch=branch, pypi_package=pypi_package, can_publish_to_github=can_publish_to_github, github_email=current_user.email)
if result['action'] in ('error', 'fail'):
return jsonify_with_status("Pull process encountered an error: " + result['message'], 400)
if result['action'] == 'finished':
if result['need_to_restart'] and do_restart:
return_val = jsonify_restart_task()
restart_all()
return return_val
return ('', 204)
@app.route('/api/restart', methods=['POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['POST', 'HEAD'], automatic_options=True)
def api_restart():
if not app.config['ALLOW_RESTARTING']:
return ('File not found', 404)
if not api_verify(roles=['admin', 'developer'], permissions=['playground_control']):
return jsonify_with_status("Access denied.", 403)
return_val = jsonify_restart_task()
restart_all()
return return_val
@app.route('/api/restart_status', methods=['GET'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'HEAD'], automatic_options=True)
def api_restart_status():
if not app.config['ALLOW_RESTARTING']:
return ('File not found', 404)
if not api_verify(roles=['admin', 'developer'], permissions=['playground_control']):
return jsonify_with_status("Access denied.", 403)
code = request.args.get('task_id', None)
if code is None:
return jsonify_with_status("Missing task_id", 400)
the_key = 'da:restart_status:' + str(code)
task_data = r.get(the_key)
if task_data is None:
return jsonify(status='unknown')
task_info = json.loads(task_data.decode())
if START_TIME <= task_info['server_start_time']:
return jsonify(status='working')
r.expire(the_key, 30)
return jsonify(status='completed')
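# POST /api/playground_install: installs an uploaded ZIP package directly into
# a user's Playground project, copying questions, templates, static files,
# sources, and modules into the appropriate folders and rebuilding the
# Playground package record from setup.py and README.md; restarts the server
# if any Python modules were installed, unless restart=0.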
@app.route('/api/playground_install', methods=['POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['POST', 'HEAD'], automatic_options=True)
def api_playground_install():
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
if not api_verify(roles=['admin', 'developer'], permissions=['playground_control']):
return jsonify_with_status("Access denied.", 403)
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
do_restart = true_or_false(post_data.get('restart', True))
current_project = post_data.get('project', 'default')
try:
if app.config['ENABLE_SHARING_PLAYGROUNDS'] or current_user.has_role_or_permission('admin', permissions=['playground_control']):
user_id = int(post_data.get('user_id', current_user.id))
else:
if 'user_id' in post_data:
assert int(post_data['user_id']) == current_user.id
user_id = current_user.id
except:
return jsonify_with_status("Invalid user_id.", 400)
if current_project != 'default' and current_project not in get_list_of_projects(user_id):
return jsonify_with_status("Invalid project.", 400)
docassemble.base.functions.this_thread.current_info['user'] = {'is_anonymous': False, 'theid': user_id}
found = False
expected_name = 'unknown'
need_to_restart = False
area = {}
area_sec = {'templates': 'playgroundtemplate', 'static': 'playgroundstatic', 'sources': 'playgroundsources', 'questions': 'playground'}
for sec in ('playground', 'playgroundpackages', 'playgroundtemplate', 'playgroundstatic', 'playgroundsources', 'playgroundmodules'):
area[sec] = SavedFile(user_id, fix=True, section=sec)
try:
for filekey in request.files:
the_files = request.files.getlist(filekey)
if not the_files:
continue
for up_file in the_files:
found = True
zippath = tempfile.NamedTemporaryFile(mode="wb", prefix='datemp', suffix=".zip", delete=False)
up_file.save(zippath)
up_file.close()
zippath.close()
with zipfile.ZipFile(zippath.name, mode='r') as zf:
readme_text = ''
setup_py = ''
extracted = {}
data_files = {'templates': [], 'static': [], 'sources': [], 'interviews': [], 'modules': [], 'questions': []}
has_docassemble_dir = set()
has_setup_file = set()
for zinfo in zf.infolist():
if zinfo.is_dir():
if zinfo.filename.endswith('/docassemble/'):
has_docassemble_dir.add(re.sub(r'/docassemble/$', '', zinfo.filename))
if zinfo.filename == 'docassemble/':
has_docassemble_dir.add('')
elif zinfo.filename.endswith('/setup.py'):
(directory, filename) = os.path.split(zinfo.filename)
has_setup_file.add(directory)
elif zinfo.filename == 'setup.py':
has_setup_file.add('')
root_dir = None
for directory in has_docassemble_dir.union(has_setup_file):
if root_dir is None or len(directory) < len(root_dir):
root_dir = directory
if root_dir is None:
return jsonify_with_status("Not a docassemble package.", 400)
for zinfo in zf.infolist():
if zinfo.filename.endswith('/'):
continue
(directory, filename) = os.path.split(zinfo.filename)
if filename.startswith('#') or filename.endswith('~'):
continue
dirparts = splitall(directory)
if '.git' in dirparts:
continue
levels = re.findall(r'/', directory)
time_tuple = zinfo.date_time
the_time = time.mktime(datetime.datetime(*time_tuple).timetuple())
for sec in ('templates', 'static', 'sources', 'questions'):
if directory.endswith('data/' + sec) and filename != 'README.md':
data_files[sec].append(filename)
target_filename = os.path.join(directory_for(area[area_sec[sec]], current_project), filename)
with zf.open(zinfo) as source_fp, open(target_filename, 'wb') as target_fp:
shutil.copyfileobj(source_fp, target_fp)
os.utime(target_filename, (the_time, the_time))
if filename == 'README.md' and directory == root_dir:
with zf.open(zinfo) as f:
the_file_obj = TextIOWrapper(f, encoding='utf8')
readme_text = the_file_obj.read()
if filename == 'setup.py' and directory == root_dir:
with zf.open(zinfo) as f:
the_file_obj = TextIOWrapper(f, encoding='utf8')
setup_py = the_file_obj.read()
elif len(levels) >= 1 and directory != root_dir and filename.endswith('.py') and filename != '__init__.py' and 'tests' not in dirparts and 'data' not in dirparts:
need_to_restart = True
data_files['modules'].append(filename)
target_filename = os.path.join(directory_for(area['playgroundmodules'], current_project), filename)
with zf.open(zinfo) as source_fp, open(target_filename, 'wb') as target_fp:
shutil.copyfileobj(source_fp, target_fp)
os.utime(target_filename, (the_time, the_time))
setup_py = re.sub(r'.*setup\(', '', setup_py, flags=re.DOTALL)
for line in setup_py.splitlines():
m = re.search(r"^ *([a-z_]+) *= *\(?'(.*)'", line)
if m:
extracted[m.group(1)] = m.group(2)
m = re.search(r'^ *([a-z_]+) *= *\(?"(.*)"', line)
if m:
extracted[m.group(1)] = m.group(2)
m = re.search(r'^ *([a-z_]+) *= *\[(.*)\]', line)
if m:
the_list = []
for item in re.split(r', *', m.group(2)):
inner_item = re.sub(r"'$", '', item)
inner_item = re.sub(r"^'", '', inner_item)
inner_item = re.sub(r'"+$', '', inner_item)
inner_item = re.sub(r'^"+', '', inner_item)
the_list.append(inner_item)
extracted[m.group(1)] = the_list
info_dict = {'readme': readme_text, 'interview_files': data_files['questions'], 'sources_files': data_files['sources'], 'static_files': data_files['static'], 'module_files': data_files['modules'], 'template_files': data_files['templates'], 'dependencies': list(map(lambda y: re.sub(r'[\>\<\=].*', '', y), extracted.get('install_requires', []))), 'description': extracted.get('description', ''), 'author_name': extracted.get('author', ''), 'author_email': extracted.get('author_email', ''), 'license': extracted.get('license', ''), 'url': extracted.get('url', ''), 'version': extracted.get('version', '')}
info_dict['dependencies'] = [x for x in map(lambda y: re.sub(r'[\>\<\=].*', '', y), info_dict['dependencies']) if x not in ('docassemble', 'docassemble.base', 'docassemble.webapp')]
package_name = re.sub(r'^docassemble\.', '', extracted.get('name', expected_name))
with open(os.path.join(directory_for(area['playgroundpackages'], current_project), 'docassemble.' + package_name), 'w', encoding='utf-8') as fp:
the_yaml = standardyaml.safe_dump(info_dict, default_flow_style=False, default_style='|')
fp.write(str(the_yaml))
for key in r.keys('da:interviewsource:docassemble.playground' + str(current_user.id) + project_name(current_project) + ':*'): # pylint: disable=consider-using-dict-items
r.incr(key.decode())
for the_area in area.values():
the_area.finalize()
# the_file = package_name
zippath.close()
except Exception as err:
logmessage("api_playground_install: " + err.__class__.__name__ + ": " + str(err))
return jsonify_with_status("Error installing packages.", 400)
if not found:
return jsonify_with_status("No package found.", 400)
for key in r.keys('da:interviewsource:docassemble.playground' + str(user_id) + project_name(current_project) + ':*'):
r.incr(key.decode())
if do_restart and need_to_restart:
return_val = jsonify_restart_task()
restart_all()
return return_val
return ('', 204)
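# /api/playground/project: GET lists a user's Playground projects, POST creates
# a new project (alphanumeric name not starting with a digit), and DELETE
# removes an existing project other than 'default'.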
@app.route('/api/playground/project', methods=['GET', 'POST', 'DELETE'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'POST', 'DELETE', 'HEAD'], automatic_options=True)
def api_playground_projects():
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
if not api_verify(roles=['admin', 'developer'], permissions=['playground_control']):
return jsonify_with_status("Access denied.", 403)
if request.method in ('GET', 'DELETE'):
try:
if app.config['ENABLE_SHARING_PLAYGROUNDS'] or current_user.has_role_or_permission('admin', permissions=['playground_control']):
user_id = int(request.args.get('user_id', current_user.id))
else:
if 'user_id' in request.args:
assert int(request.args['user_id']) == current_user.id
user_id = current_user.id
except:
return jsonify_with_status("Invalid user_id.", 400)
if request.method == 'GET':
return jsonify(get_list_of_projects(user_id))
if request.method == 'DELETE':
if 'project' not in request.args:
return jsonify_with_status("Project not provided.", 400)
project = request.args['project']
if project not in get_list_of_projects(user_id) or project == 'default':
return jsonify_with_status("Invalid project.", 400)
delete_project(user_id, project)
return ('', 204)
if request.method == 'POST':
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
try:
if app.config['ENABLE_SHARING_PLAYGROUNDS'] or current_user.has_role_or_permission('admin', permissions=['playground_control']):
user_id = int(post_data.get('user_id', current_user.id))
else:
if 'user_id' in post_data:
assert int(post_data['user_id']) == current_user.id
user_id = current_user.id
except:
return jsonify_with_status("Invalid user_id.", 400)
if 'project' not in post_data:
return jsonify_with_status("Project not provided.", 400)
project = post_data['project']
if re.search('^[0-9]', project) or re.search('[^A-Za-z0-9]', project):
return jsonify_with_status("Invalid project name.", 400)
if project in get_list_of_projects(user_id) or project == 'default':
return jsonify_with_status("Invalid project.", 400)
create_project(user_id, project)
return ('', 204)
return ('File not found', 404)
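# /api/playground: GET lists or downloads files in a Playground folder
# ('questions', 'sources', 'static', 'templates', or 'modules'), POST uploads
# files into a folder, and DELETE removes a file; uploading or deleting modules
# triggers a server restart unless restart=0.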
@app.route('/api/playground', methods=['GET', 'POST', 'DELETE'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'POST', 'DELETE', 'HEAD'], automatic_options=True)
def api_playground():
if not app.config['ENABLE_PLAYGROUND']:
return ('File not found', 404)
if not api_verify(roles=['admin', 'developer'], permissions=['playground_control']):
return jsonify_with_status("Access denied.", 403)
if request.method in ('GET', 'DELETE'):
folder = request.args.get('folder', 'static')
project = request.args.get('project', 'default')
try:
if app.config['ENABLE_SHARING_PLAYGROUNDS'] or current_user.has_role_or_permission('admin', permissions=['playground_control']):
user_id = int(request.args.get('user_id', current_user.id))
else:
if 'user_id' in request.args:
assert int(request.args['user_id']) == current_user.id
user_id = current_user.id
except:
return jsonify_with_status("Invalid user_id.", 400)
elif request.method == 'POST':
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
folder = post_data.get('folder', 'static')
project = post_data.get('project', 'default')
do_restart = true_or_false(post_data.get('restart', True))
try:
if app.config['ENABLE_SHARING_PLAYGROUNDS'] or current_user.has_role_or_permission('admin', permissions=['playground_control']):
user_id = int(post_data.get('user_id', current_user.id))
else:
if 'user_id' in post_data:
assert int(post_data['user_id']) == current_user.id
user_id = current_user.id
except:
return jsonify_with_status("Invalid user_id.", 400)
if request.method == 'DELETE':
do_restart = true_or_false(request.args.get('restart', True))
if 'filename' not in request.args:
return jsonify_with_status("Missing filename.", 400)
if folder not in ('questions', 'sources', 'static', 'templates', 'modules'):
return jsonify_with_status("Invalid folder.", 400)
if project != 'default' and project not in get_list_of_projects(user_id):
return jsonify_with_status("Invalid project.", 400)
if folder == 'questions':
section = ''
elif folder == 'templates':
section = 'template'
else:
section = folder
docassemble.base.functions.this_thread.current_info['user'] = {'is_anonymous': False, 'theid': user_id}
pg_section = PlaygroundSection(section=section, project=project)
if request.method == 'GET':
if 'filename' not in request.args:
return jsonify(pg_section.file_list)
the_filename = secure_filename_spaces_ok(request.args['filename'])
if not pg_section.file_exists(the_filename):
return jsonify_with_status("File not found", 404)
response_to_send = send_file(pg_section.get_file(the_filename), mimetype=pg_section.get_mimetype(the_filename))
response_to_send.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return response_to_send
if request.method == 'DELETE':
pg_section.delete_file(secure_filename_spaces_ok(request.args['filename']))
if section == 'modules' and do_restart:
return_val = jsonify_restart_task()
restart_all()
return return_val
return ('', 204)
if request.method == 'POST':
found = False
try:
for filekey in request.files:
the_files = request.files.getlist(filekey)
if the_files:
for the_file in the_files:
filename = werkzeug.utils.secure_filename(the_file.filename)
temp_file = tempfile.NamedTemporaryFile(prefix="datemp", delete=False)
the_file.save(temp_file.name)
pg_section.copy_from(temp_file.name, filename=filename)
found = True
except:
return jsonify_with_status("Error saving file(s).", 400)
if not found:
return jsonify_with_status("No file found.", 400)
for key in r.keys('da:interviewsource:docassemble.playground' + str(user_id) + project_name(project) + ':*'):
r.incr(key.decode())
if section == 'modules' and do_restart:
return_val = jsonify_restart_task()
restart_all()
return return_val
return ('', 204)
return ('', 204)
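# POST /api/convert_file: converts an uploaded word-processing file to Markdown
# using word_to_markdown() and returns the result as plain text; only 'md' is
# accepted as the output format.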
@app.route('/api/convert_file', methods=['POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['POST', 'HEAD'], automatic_options=True)
def api_convert_file():
if not api_verify():
return jsonify_with_status("Access denied.", 403)
post_data = request.form.copy()
to_format = post_data.get('format', 'md')
if to_format not in ('md',):
return jsonify_with_status("Invalid output file format.", 400)
for filekey in request.files:
the_files = request.files.getlist(filekey)
if the_files:
for the_file in the_files:
filename = werkzeug.utils.secure_filename(the_file.filename)
extension, mimetype = get_ext_and_mimetype(filename)
if mimetype and mimetype in convertible_mimetypes:
the_format = convertible_mimetypes[mimetype]
elif extension and extension in convertible_extensions:
the_format = convertible_extensions[extension]
else:
return jsonify_with_status("Invalid input file format.", 400)
with tempfile.NamedTemporaryFile() as temp_file:
the_file.save(temp_file.name)
result = word_to_markdown(temp_file.name, the_format)
if result is None:
return jsonify_with_status("Unable to convert file.", 400)
with open(result.name, 'r', encoding='utf-8') as fp:
contents = fp.read()
response = make_response(contents, 200)
response.headers['Content-Type'] = 'text/plain'
return response
return ('File not found', 404)
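# Creates a new 32-character API key for the given user, storing its metadata
# (name, security method, constraints, last four characters) in Redis under
# 'da:apikey:userid:<id>:key:<encrypted key>:info'. Returns the plain-text key,
# or None if a unique key could not be generated.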
def add_api_key(user_id, name, method, allowed):
info = {'constraints': allowed, 'method': method, 'name': name}
success = False
for attempt in range(10): # pylint: disable=unused-variable
api_key = random_alphanumeric(32)
info['last_four'] = api_key[-4:]
new_api_key = encrypt_api_key(api_key, app.secret_key)
if len(r.keys('da:apikey:userid:*:key:' + new_api_key + ':info')) == 0:
r.set('da:apikey:userid:' + str(user_id) + ':key:' + new_api_key + ':info', json.dumps(info))
success = True
break
if not success:
return None
return api_key
def api_key_exists(user_id, api_key):
rkeys = r.keys('da:apikey:userid:' + str(user_id) + ':key:' + encrypt_api_key(str(api_key), app.secret_key) + ':info')
if len(rkeys) > 0:
return True
return False
def existing_api_names(user_id, except_for=None):
result = []
rkeys = r.keys('da:apikey:userid:' + str(user_id) + ':key:*:info')
for key in rkeys:
key = key.decode()
if except_for is not None:
api_key = re.sub(r'.*:key:([^:]+):.*', r'\1', key)
if api_key == encrypt_api_key(except_for, app.secret_key):
continue
try:
info = json.loads(r.get(key).decode())
result.append(info['name'])
except:
continue
return result
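# Looks up API key metadata for a user. When 'name' or 'api_key' is given,
# returns the matching info dict or None; otherwise returns a list of all of
# the user's keys with the key value masked except for the last four
# characters.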
def get_api_info(user_id, name=None, api_key=None):
result = []
rkeys = r.keys('da:apikey:userid:' + str(user_id) + ':key:*:info')
if api_key is not None:
api_key = encrypt_api_key(api_key, app.secret_key)
for key in rkeys:
key = key.decode()
try:
info = json.loads(r.get(key).decode())
except:
logmessage("API information could not be unpacked.")
continue
if name is not None:
if info['name'] == name:
return info
info['key'] = ('*' * 28) + info['last_four']
this_key = re.sub(r'.*:key:([^:]+):.*', r'\1', key)
if api_key is not None and this_key == api_key:
return info
if name is not None or api_key is not None:
continue
if 'permissions' not in info:
info['permissions'] = []
result.append(info)
if name is not None or api_key is not None:
return None
return result
def delete_api_key(user_id, api_key):
key = 'da:apikey:userid:' + str(user_id) + ':key:' + encrypt_api_key(api_key, app.secret_key) + ':info'
r.delete(key)
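# Updates the stored metadata for an API key: name, security method, allowed
# sites (replace, add, or remove), and permissions (replace, add, or remove).
# Returns False if the key's Redis entry could not be read, True otherwise.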
def update_api_key(user_id, api_key, name, method, allowed, add_to_allowed, remove_from_allowed, permissions, add_to_permissions, remove_from_permissions):
key = 'da:apikey:userid:' + str(user_id) + ':key:' + encrypt_api_key(api_key, app.secret_key) + ':info'
try:
info = json.loads(r.get(key).decode())
except:
return False
if name is not None:
info['name'] = name
if method is not None:
if info['method'] != method:
info['constraints'] = []
info['method'] = method
if allowed is not None:
info['constraints'] = allowed
if add_to_allowed is not None:
if isinstance(add_to_allowed, list):
info['constraints'].extend(add_to_allowed)
elif isinstance(add_to_allowed, str):
info['constraints'].append(add_to_allowed)
if remove_from_allowed is not None:
if isinstance(remove_from_allowed, list):
to_remove = remove_from_allowed
elif isinstance(remove_from_allowed, str):
to_remove = [remove_from_allowed]
else:
to_remove = []
for item in to_remove:
if item in info['constraints']:
info['constraints'].remove(item)
if permissions is not None:
info['permissions'] = permissions
if add_to_permissions is not None:
if isinstance(add_to_permissions, list):
info['permissions'].extend(add_to_permissions)
elif isinstance(add_to_permissions, str):
info['permissions'].append(add_to_permissions)
if remove_from_permissions is not None:
if isinstance(remove_from_permissions, list):
to_remove = remove_from_permissions
elif isinstance(remove_from_permissions, str):
to_remove = [remove_from_permissions]
else:
to_remove = []
for item in to_remove:
if item in info['permissions']:
info['permissions'].remove(item)
r.set(key, json.dumps(info))
return True
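# Shared handler for the /api/user/api and /api/user/<user_id>/api endpoints:
# GET lists or looks up a user's API keys, POST creates a key, DELETE removes
# one, and PATCH modifies its name, security method, allowed sites, and, when
# the admin-related checks below permit it, its permissions.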
def do_api_user_api(user_id):
if request.method == 'GET':
name = request.args.get('name', None)
api_key = request.args.get('api_key', None)
try:
result = get_api_info(user_id, name=name, api_key=api_key)
except:
return jsonify_with_status("Error accessing API information", 400)
if (name is not None or api_key is not None) and result is None:
return jsonify_with_status("No such API key could be found.", 404)
return jsonify(result)
if request.method == 'DELETE':
api_key = request.args.get('api_key', None)
if api_key is None:
return jsonify_with_status("An API key must supplied", 400)
try:
delete_api_key(user_id, api_key)
except:
return jsonify_with_status("Error deleting API key", 400)
return ('', 204)
if request.method == 'POST':
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
name = post_data.get('name', None)
method = post_data.get('method', 'none')
if method not in ('ip', 'referer', 'none'):
return jsonify_with_status("Invalid security method", 400)
allowed = post_data.get('allowed', [])
if isinstance(allowed, str):
try:
allowed = json.loads(allowed)
except:
return jsonify_with_status("Allowed sites list not a valid list", 400)
if not isinstance(allowed, list):
return jsonify_with_status("Allowed sites list not a valid list", 400)
try:
for item in allowed:
assert isinstance(item, str)
except:
return jsonify_with_status("Allowed sites list not a valid list", 400)
if name is None:
return jsonify_with_status("A name must be supplied", 400)
if name in existing_api_names(user_id):
return jsonify_with_status("The given name already exists", 400)
if len(name) > 255:
return jsonify_with_status("The name is invalid", 400)
new_api_key = add_api_key(user_id, name, method, allowed)
if new_api_key is None:
return jsonify_with_status("Error creating API key", 400)
return jsonify(new_api_key)
if request.method == 'PATCH':
user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).filter_by(id=user_id)).scalar()
patch_data = request.get_json(silent=True)
if patch_data is None:
patch_data = request.form.copy()
if current_user.id == user_id:
api_key = patch_data.get('api_key', get_api_key())
else:
api_key = patch_data.get('api_key', None)
if api_key is None:
return jsonify_with_status("No API key given", 400)
if not api_key_exists(user_id, api_key):
return jsonify_with_status("The given API key cannot be modified", 400)
name = patch_data.get('name', None)
if name is not None:
if name in existing_api_names(user_id, except_for=api_key):
return jsonify_with_status("The given name already exists", 400)
if len(name) > 255:
return jsonify_with_status("The name is invalid", 400)
method = patch_data.get('method', None)
if method is not None:
if method not in ('ip', 'referer', 'none'):
return jsonify_with_status("Invalid security method", 400)
allowed = patch_data.get('allowed', None)
add_to_allowed = patch_data.get('add_to_allowed', None)
if add_to_allowed is not None:
if add_to_allowed.startswith('['):
try:
add_to_allowed = json.loads(add_to_allowed)
for item in add_to_allowed:
assert isinstance(item, str)
except:
return jsonify_with_status("add_to_allowed is not a valid list", 400)
remove_from_allowed = patch_data.get('remove_from_allowed', None)
if remove_from_allowed is not None:
if remove_from_allowed.startswith('['):
try:
remove_from_allowed = json.loads(remove_from_allowed)
for item in remove_from_allowed:
assert isinstance(item, str)
except:
return jsonify_with_status("remove_from_allowed is not a valid list", 400)
if allowed is not None:
if isinstance(allowed, str):
try:
allowed = json.loads(allowed)
except:
return jsonify_with_status("Allowed sites list not a valid list", 400)
if not isinstance(allowed, list):
return jsonify_with_status("Allowed sites list not a valid list", 400)
try:
for item in allowed:
assert isinstance(item, str)
except:
return jsonify_with_status("Allowed sites list not a valid list", 400)
if not (user.has_role('admin') and current_user.has_role_or_permission('admin')):
permissions = None
add_to_permissions = None
remove_from_permissions = None
else:
permissions = patch_data.get('permissions', None)
add_to_permissions = patch_data.get('add_to_permissions', None)
if add_to_permissions is not None:
if add_to_permissions.startswith('['):
try:
add_to_permissions = json.loads(add_to_permissions)
for item in add_to_permissions:
assert isinstance(item, str)
except:
return jsonify_with_status("add_to_permissions is not a valid list", 400)
try:
for item in add_to_permissions:
assert item in PERMISSIONS_LIST
except:
return jsonify_with_status("add_to_permissions contained a permission that was not recognized", 400)
elif add_to_permissions not in PERMISSIONS_LIST:
return jsonify_with_status("add_to_permissions contained a permission that was not recognized", 400)
remove_from_permissions = patch_data.get('remove_from_permissions', None)
if remove_from_permissions is not None:
if remove_from_permissions.startswith('['):
try:
remove_from_permissions = json.loads(remove_from_permissions)
for item in remove_from_permissions:
assert isinstance(item, str)
except:
return jsonify_with_status("remove_from_permissions is not a valid list", 400)
try:
for item in remove_from_permissions:
assert item in PERMISSIONS_LIST
except:
return jsonify_with_status("remove_from_permissions contained a permission that was not recognized", 400)
elif remove_from_permissions not in PERMISSIONS_LIST:
return jsonify_with_status("remove_from_permissions contained a permission that was not recognized", 400)
if permissions is not None:
if isinstance(permissions, str):
try:
permissions = json.loads(permissions)
except:
return jsonify_with_status("Permissions list not a valid list", 400)
if not isinstance(permissions, list):
return jsonify_with_status("Permissions list not a valid list", 400)
try:
for item in permissions:
assert isinstance(item, str)
except:
return jsonify_with_status("Permissions list not a valid list", 400)
try:
for item in permissions:
assert item in PERMISSIONS_LIST
except:
return jsonify_with_status("Permissions list contained a permission that was not recognized", 400)
result = update_api_key(user_id, api_key, name, method, allowed, add_to_allowed, remove_from_allowed, permissions, add_to_permissions, remove_from_permissions)
if not result:
return jsonify_with_status("Error updating API key", 400)
return ('', 204)
return ('', 204)
@app.route('/api/user/api', methods=['GET', 'POST', 'DELETE', 'PATCH'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'POST', 'DELETE', 'PATCH', 'HEAD'], automatic_options=True)
def api_user_api():
if not api_verify():
return jsonify_with_status("Access denied.", 403)
if current_user.limited_api:
if request.method == 'GET' and not current_user.can_do('access_user_api_info'):
return jsonify_with_status("You do not have sufficient privileges to access user API information", 403)
if request.method in ('PATCH', 'POST', 'DELETE') and not current_user.can_do('edit_user_api_info'):
return jsonify_with_status("You do not have sufficient privileges to edit user API information", 403)
return do_api_user_api(current_user.id)
@app.route('/api/user/<int:user_id>/api', methods=['GET', 'POST', 'DELETE', 'PATCH'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'POST', 'DELETE', 'PATCH', 'HEAD'], automatic_options=True)
def api_user_userid_api(user_id):
if not api_verify():
return jsonify_with_status("Access denied.", 403)
try:
user_id = int(user_id)
except:
return jsonify_with_status("User ID must be an integer.", 400)
if not current_user.same_as(user_id):
if request.method == 'GET' and not current_user.has_role_or_permission('admin', permissions=['access_user_api_info']):
return jsonify_with_status("You do not have sufficient privileges to access user API information", 403)
if request.method in ('POST', 'DELETE', 'PATCH') and not current_user.has_role_or_permission('admin', permissions=['edit_user_api_info']):
return jsonify_with_status("You do not have sufficient privileges to edit user API information", 403)
try:
user_info = get_user_info(user_id=user_id, admin=True)
except Exception as err:
return jsonify_with_status("Error obtaining user information: " + str(err), 400)
if user_info is None:
return jsonify_with_status("User not found.", 404)
return do_api_user_api(user_id)
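# GET /api/interview_data: parses the interview given by 'i' (without running a
# session) and returns the variable metadata ('names') and vocabulary produced
# by get_vars_in_use(), using Playground context when the filename refers to
# the current user's own Playground.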
@app.route('/api/interview_data', methods=['GET'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'HEAD'], automatic_options=True)
def api_interview_data():
if not api_verify(roles=['admin', 'developer'], permissions=['interview_data']):
return jsonify_with_status("Access denied.", 403)
filename = request.args.get('i', None)
if filename is None:
return jsonify_with_status("No filename supplied.", 400)
try:
interview_source = docassemble.base.parse.interview_source_from_string(filename, testing=True)
except Exception as err:
return jsonify_with_status("Error finding interview: " + str(err), 400)
try:
interview = interview_source.get_interview()
except Exception as err:
return jsonify_with_status("Error finding interview: " + str(err), 400)
device_id = docassemble.base.functions.this_thread.current_info['user']['device_id']
interview_status = docassemble.base.parse.InterviewStatus(current_info=current_info(yaml=filename, req=request, action=None, device_id=device_id))
m = re.search('docassemble.playground([0-9]+)([^:]*):', filename)
if m:
use_playground = bool(current_user.id == int(m.group(1)))
if m.group(2) != '':
current_project = m.group(2)
else:
current_project = 'default'
else:
use_playground = False
current_project = 'default'
variables_json, vocab_list, vocab_dict = get_vars_in_use(interview, interview_status, debug_mode=False, return_json=True, use_playground=use_playground, current_project=current_project) # pylint: disable=unused-variable
return jsonify({'names': variables_json, 'vocabulary': list(vocab_list)})
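# Stores a piece of data in encrypted form in Redis and returns a stash key and
# decryption secret. The optional "expire" parameter is a lifetime in seconds
# (defaulting to 90 days) and "raw" skips transform_json_variables() on the data.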
@app.route('/api/stash_data', methods=['POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['POST', 'HEAD'], automatic_options=True)
def api_stash_data():
if not api_verify():
return jsonify_with_status("Access denied.", 403)
post_data = request.get_json(silent=True)
if post_data is None:
post_data = request.form.copy()
if 'data' not in post_data:
return jsonify_with_status("Data must be provided.", 400)
try:
data = json.loads(post_data['data'])
except:
return jsonify_with_status("Malformed data.", 400)
else:
data = post_data['data']
if not true_or_false(post_data.get('raw', False)):
data = transform_json_variables(data)
expire = post_data.get('expire', None)
if expire is None:
expire = 60*60*24*90
try:
expire = int(expire)
assert expire > 0
except:
expire = 60*60*24*90
(key, secret) = stash_data(data, expire)
return jsonify({'stash_key': key, 'secret': secret})
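# Counterpart to /api/stash_data: looks up the encrypted payload using the
# stash_key and secret query parameters, optionally deleting the entry ("delete")
# or extending its lifetime by "refresh" seconds, and returns the data as JSON.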
@app.route('/api/retrieve_stashed_data', methods=['GET'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'HEAD'], automatic_options=True)
def api_retrieve_stashed_data():
if not api_verify():
return jsonify_with_status("Access denied.", 403)
do_delete = true_or_false(request.args.get('delete', False))
refresh = request.args.get('refresh', None)
if refresh:
try:
refresh = int(refresh)
assert refresh > 0
except:
refresh = False
stash_key = request.args.get('stash_key', None)
secret = request.args.get('secret', None)
if stash_key is None or secret is None:
return jsonify_with_status("The stash key and secret parameters are required.", 400)
try:
data = retrieve_stashed_data(stash_key, secret, delete=do_delete, refresh=refresh)
assert data is not None
except Exception as err:
return jsonify_with_status("The stashed data could not be retrieved: " + err.__class__.__name__ + " " + str(err) + ".", 400)
return jsonify(docassemble.base.functions.safe_json(data))
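# Browser-facing page (not part of the JSON API) where users holding the roles in
# the "api privileges" configuration can create, edit, and delete their own API
# keys. The inline JavaScript manages the list of IP/referer constraints, and the
# plaintext key value is shown only once, immediately after creation.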
@app.route('/manage_api', methods=['GET', 'POST'])
@login_required
def manage_api():
setup_translation()
if not current_user.has_role(*daconfig.get('api privileges', ['admin', 'developer'])):
return ('File not found', 404)
form = APIKey(request.form)
action = request.args.get('action', None)
api_key = request.args.get('key', None)
is_admin = current_user.has_role('admin')
argu = {'is_admin': is_admin}
argu['mode'] = 'list'
if action is None:
action = 'list'
argu['form'] = form
argu['extra_js'] = Markup("""
<script>
function remove_constraint(elem){
$(elem).parents('.daconstraintlist div').remove();
fix_constraints();
}
function fix_constraints(){
var empty;
var filled_exist = 0;
var empty_exist = 0;
if ($("#method").val() == 'none'){
$(".daconstraintlist").hide();
return;
}
else{
$(".daconstraintlist").show();
}
$(".daconstraintlist input").each(function(){
if ($(this).val() == ''){
empty_exist = 1;
}
else{
filled_exist = 1;
}
if (!($(this).next().length)){
var new_button = $('<button>');
var new_i = $('<i>');
$(new_button).addClass('btn btn-outline-secondary');
$(new_i).addClass('fas fa-times');
$(new_button).append(new_i);
$(new_button).on('click', function(){remove_constraint(this);});
$(this).parent().append(new_button);
}
});
if (empty_exist == 0){
var new_div = $('<div>');
var new_input = $('<input>');
$(new_div).append(new_input);
$(new_div).addClass('input-group');
$(new_input).addClass('form-control');
$(new_input).attr('type', 'text');
if ($("#method").val() == 'ip'){
$(new_input).attr('placeholder', """ + json.dumps(word('e.g., 56.33.114.49')) + """);
}
else{
$(new_input).attr('placeholder', """ + json.dumps(word('e.g., *example.com')) + """);
}
$(new_input).on('change', fix_constraints);
$(new_input).on('keyup', fix_constraints);
$(".daconstraintlist").append(new_div);
var new_button = $('<button>');
var new_i = $('<i>');
$(new_button).addClass('btn btn-outline-secondary');
$(new_i).addClass('fas fa-times');
$(new_button).append(new_i);
$(new_button).on('click', function(){remove_constraint(this);});
$(new_div).append(new_button);
}
}
$( document ).ready(function(){
$(".daconstraintlist input").on('change', fix_constraints);
$("#method").on('change', function(){
$(".daconstraintlist div.input-group").remove();
fix_constraints();
});
$("#submit").on('click', function(){
var the_constraints = [];
$(".daconstraintlist input").each(function(){
if ($(this).val() != ''){
the_constraints.push($(this).val());
}
});
$("#security").val(JSON.stringify(the_constraints));
});
fix_constraints();
});
</script>
""")
form.method.choices = [('ip', 'IP Address'), ('referer', 'Referring URL'), ('none', 'No authentication')]
if is_admin:
form.permissions.choices = [(permission, permission) for permission in PERMISSIONS_LIST]
else:
form.permissions.choices = []
ip_address = get_requester_ip(request)
if request.method == 'POST' and form.validate():
action = form.action.data
try:
constraints = json.loads(form.security.data)
if not isinstance(constraints, list):
constraints = []
except:
constraints = []
if action == 'new':
argu['title'] = word("New API Key")
argu['tab_title'] = argu['title']
argu['page_title'] = argu['title']
permissions_data = form.permissions.data if is_admin else []
info = {'name': form.name.data, 'method': form.method.data, 'constraints': constraints, 'permissions': permissions_data}
success = False
for attempt in range(10): # pylint: disable=unused-variable
api_key = random_alphanumeric(32)
info['last_four'] = api_key[-4:]
new_api_key = encrypt_api_key(api_key, app.secret_key)
if len(r.keys('da:apikey:userid:*:key:' + new_api_key + ':info')) == 0:
r.set('da:apikey:userid:' + str(current_user.id) + ':key:' + new_api_key + ':info', json.dumps(info))
success = True
break
if not success:
flash(word("Could not create new key"), 'error')
return render_template('pages/manage_api.html', **argu)
argu['description'] = Markup(
"""<div class="card text-bg-light mb-3">
<div class="card-body">
<p class="card-text">
""" + (word("Your new API key, known internally as <strong>%s</strong>, is:<br />%s<br />") % (form.name.data, '<br /><span class="text-success"><i class="fa-solid fa-check"></i></span> <code id="daApiKey">' + api_key + '</code><wbr /><button aria-label=' + json.dumps(word("Copy API key")) + ' onclick="daCopyToClipboard()" class="btn btn-link ps-1 pt-1" type="button"><i class="fa-regular fa-copy"></i></button>')) + """
</p>
<p class="card-text">
""" + word("<strong>This is the only time you will be able to see your API key</strong>, so make sure to make a note of it and keep it in a secure place.") + """
</p>
</div>
</div>""")
elif action == 'edit':
argu['title'] = word("Edit API Key")
argu['tab_title'] = argu['title']
argu['page_title'] = argu['title']
api_key = form.key.data
argu['api_key'] = api_key
rkey = 'da:apikey:userid:' + str(current_user.id) + ':key:' + str(form.key.data) + ':info'
existing_key = r.get(rkey)
if existing_key is None:
flash(word("The key no longer exists"), 'error')
return render_template('pages/manage_api.html', **argu)
existing_key = existing_key.decode()
if form.delete.data:
r.delete(rkey)
flash(word("The key was deleted"), 'info')
else:
try:
info = json.loads(existing_key)
except:
flash(word("The key no longer exists"), 'error')
return render_template('pages/manage_api.html', **argu)
info['name'] = form.name.data
if form.method.data != info['method'] and form.method.data in ('ip', 'referer'):
info['method'] = form.method.data
info['constraints'] = constraints
if is_admin:
info['permissions'] = form.permissions.data
else:
info['permissions'] = []
r.set(rkey, json.dumps(info))
action = 'list'
if action == 'new':
argu['title'] = word("New API Key")
argu['tab_title'] = argu['title']
argu['page_title'] = argu['title']
argu['mode'] = 'new'
if api_key is not None and action == 'edit':
argu['title'] = word("Edit API Key")
argu['tab_title'] = argu['title']
argu['page_title'] = argu['title']
argu['api_key'] = api_key
argu['mode'] = 'edit'
rkey = 'da:apikey:userid:' + str(current_user.id) + ':key:' + api_key + ':info'
info = r.get(rkey)
if info is not None:
info = json.loads(info.decode())
if isinstance(info, dict) and info.get('name', None) and info.get('method', None):
argu['method'] = info.get('method')
form.method.data = info.get('method')
form.action.data = 'edit'
form.key.data = api_key
form.name.data = info.get('name')
if is_admin:
if 'permissions' in info:
form.permissions.data = info['permissions']
else:
form.permissions.data = []
argu['constraints'] = info.get('constraints')
argu['display_key'] = ('*' * 28) + info.get('last_four')
if ip_address != '127.0.0.1':
argu['description'] = Markup(word("Your IP address is") + " <code>" + str(ip_address) + "</code>.")
if action == 'list':
argu['title'] = word("API Keys")
argu['tab_title'] = argu['title']
argu['page_title'] = argu['title']
argu['mode'] = 'list'
avail_keys = []
for rkey in r.keys('da:apikey:userid:' + str(current_user.id) + ':key:*:info'):
rkey = rkey.decode()
try:
info = json.loads(r.get(rkey).decode())
if not isinstance(info, dict):
logmessage("manage_api: response from redis was not a dict")
continue
except:
logmessage("manage_api: response from redis had invalid json")
continue
m = re.match(r'da:apikey:userid:[0-9]+:key:([^:]+):info', rkey)
if not m:
logmessage("manage_api: error with redis key")
continue
api_key = m.group(1)
info['encoded_api_key'] = urllibquoteplus(api_key)
avail_keys.append(info)
argu['avail_keys'] = avail_keys
argu['has_any_keys'] = bool(len(avail_keys) > 0)
return render_template('pages/manage_api.html', **argu)
@app.route(html_index_path, methods=['GET'])
def html_index():
resp = app.send_static_file('index.html')
resp.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
return resp
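# NOTE: the /api/interview endpoint below is currently disabled; the abort(404)
# at the top of api_interview() short-circuits the handler, so the session-handling
# code that follows it is unreachable unless that line is removed.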
@app.route('/api/interview', methods=['GET', 'POST'])
@csrf.exempt
@cross_origin(origins='*', methods=['GET', 'POST', 'HEAD'], automatic_options=True)
def api_interview():
abort(404)
if request.method == 'POST':
post_data = request.get_json(silent=True)
if post_data is None:
return jsonify_with_status('The request must be JSON', 400)
yaml_filename = post_data.get('i', None)
secret = post_data.get('secret', None)
session_id = post_data.get('session', None)
url_args = post_data.get('url_args', None)
user_code = post_data.get('user_code', None)
command = post_data.get('command', None)
referer = post_data.get('referer', None)
else:
yaml_filename = request.args.get('i', None)
secret = request.args.get('secret', None)
session_id = request.args.get('session', None)
url_args = {}
user_code = request.args.get('user_code', None)
command = request.args.get('command', None)
referer = request.args.get('referer', None)
for key, val in request.args.items():
if key not in ('session', 'secret', 'i', 'user_code', 'command', 'referer', 'action'):
url_args[key] = val
if len(url_args) == 0:
url_args = None
output = {}
action = None
reset_fully = False
is_new = False
changed = False
if user_code:
key = 'da:apiinterview:usercode:' + user_code
user_info = r.get(key)
if user_info is None:
user_code = None
else:
r.expire(key, 60*60*24*30)
try:
user_info = json.loads(user_info)
except:
user_code = None
if user_code:
if user_info['user_id']:
user = db.session.execute(select(UserModel).filter_by(id=user_info['user_id'])).scalar()
if user is None or user.social_id.startswith('disabled$') or not user.active:
user_code = None
else:
login_user(user, remember=False)
update_last_login(user)
else:
session['tempuser'] = user_info['temp_user_id']
if not user_code:
user_code = app.session_interface.manual_save_session(app, session).decode()
if current_user.is_anonymous:
new_temp_user = TempUser()
db.session.add(new_temp_user)
db.session.commit()
session['tempuser'] = new_temp_user.id
user_info = {"user_id": None, "temp_user_id": new_temp_user.id, "sessions": {}}
else:
user_info = {"user_id": current_user.id, "temp_user_id": None, "sessions": {}}
output['user_code'] = user_code
changed = True
need_to_reset = False
new_session = False
send_initial = False
if yaml_filename.startswith('/'):
parts = urlparse(yaml_filename)
params = urllib.parse.parse_qs(parts.query)
if params.get('action', '') != '':
try:
action = tidy_action(json64unquote(params['action']))
assert len(action) > 0
except:
return jsonify_with_status(word("Invalid action."), 400)
url_args = {}
for key, val in dict(params).items():
params[key] = val[0]
if key not in reserved_argnames:
url_args[key] = val[0]
if parts.path == '/launch':
code = params.get('c', None)
if code is None:
abort(403)
the_key = 'da:resume_interview:' + str(code)
data = r.get(the_key)
if data is None:
return jsonify_with_status(word("The link has expired."), 403)
data = json.loads(data.decode())
if data.get('once', False):
r.delete(the_key)
args = {}
for key, val in params.items():
if key != 'session':
args[key] = val
yaml_filename = data['i']
if 'session' in data:
session_id = data['session']
user_info['sessions'][yaml_filename] = session_id
else:
new_session = True
if parts.path in ('/i', '/interview', '/'):
ok = False
if 'i' in params:
yaml_filename = params['i']
ok = True
elif 'state' in params:
try:
yaml_filename = re.sub(r'\^.*', '', from_safeid(params['state']))
ok = True
except:
ok = False
if not ok:
if current_user.is_anonymous and not daconfig.get('allow anonymous access', True):
output['redirect'] = url_for('user.login')
return jsonify(output)
if len(daconfig['dispatch']) > 0:
output['redirect'] = url_for('interview_start')
return jsonify(output)
yaml_filename = final_default_yaml_filename
refer = None
if parts.path.startswith('/start/') or parts.path.startswith('/run/'):
m = re.search(r'/(start|run)/([^/]+)/$', parts.path)
if m:
refer = [m.group(1) + '_dispatch', m.group(2)]
# dispatch = m.group(2)
else:
m = re.search(r'/(start|run)/([^/]+)/([^/]+)/(.*)/$', parts.path)
if m:
refer = [m.group(1) + '_directory', m.group(2), m.group(3), m.group(4)]
yaml_filename = 'docassemble.' + m.group(2) + ':data/questions/' + m.group(3) + '/' + m.group(4) + '.yml'
else:
m = re.search(r'/(start|run)/([^/]+)/(.*)/$', parts.path)
if m:
refer = [m.group(1), m.group(2), m.group(3)]
if re.search(r'playground[0-9]', m.group(2)):
yaml_filename = 'docassemble.' + m.group(2) + ':' + m.group(3) + '.yml'
else:
yaml_filename = 'docassemble.' + m.group(2) + ':data/questions/' + m.group(3) + '.yml'
else:
yaml_filename = None
if yaml_filename is None:
return jsonify_with_status("File not found", 404)
if m.group(1) == 'start':
new_session = True
if true_or_false(params.get('reset', False)):
need_to_reset = True
if str(params['reset']) == '2':
reset_fully = True
if true_or_false(params.get('new_session', False)):
new_session = True
index_params = {'i': yaml_filename}
output['i'] = yaml_filename
output['page_sep'] = "#page"
if refer is None:
output['location_bar'] = url_for('index', **index_params)
elif refer[0] in ('start', 'run'):
output['location_bar'] = url_for('run_interview_in_package', package=refer[1], filename=refer[2])
output['page_sep'] = "#/"
elif refer[0] in ('start_dispatch', 'run_dispatch'):
output['location_bar'] = url_for('run_interview', dispatch=refer[1])
output['page_sep'] = "#/"
elif refer[0] in ('start_directory', 'run_directory'):
output['location_bar'] = url_for('run_interview_in_package_directory', package=refer[1], directory=refer[2], filename=refer[3])
output['page_sep'] = "#/"
else:
output['location_bar'] = None
for k, v in daconfig['dispatch'].items():
if v == yaml_filename:
output['location_bar'] = url_for('run_interview', dispatch=k)
output['page_sep'] = "#/"
break
if output['location_bar'] is None:
output['location_bar'] = url_for('index', **index_params)
send_initial = True
if not yaml_filename:
return jsonify_with_status("Parameter i is required.", 400)
if not secret:
secret = random_string(16)
output['secret'] = secret
secret = str(secret)
docassemble.base.functions.this_thread.current_info = current_info(req=request, interface='api', secret=secret)
if yaml_filename not in user_info['sessions'] or need_to_reset or new_session:
# was_new = True
if PREVENT_DEMO and (yaml_filename.startswith('docassemble.base:') or yaml_filename.startswith('docassemble.demo:')) and (current_user.is_anonymous or not current_user.has_role_or_permission('admin', 'developer', permissions=['demo_interviews'])):
return jsonify_with_status(word("Not authorized"), 403)
if current_user.is_anonymous and not daconfig.get('allow anonymous access', True):
output['redirect'] = url_for('user.login', next=url_for('index', i=yaml_filename, **url_args))
return jsonify(output)
if yaml_filename.startswith('docassemble.playground'):
if not app.config['ENABLE_PLAYGROUND']:
return jsonify_with_status(word("Not authorized"), 403)
else:
yaml_filename = re.sub(r':([^\/]+)$', r':data/questions/\1', yaml_filename)
interview = docassemble.base.interview_cache.get_interview(yaml_filename)
if session_id is None:
if need_to_reset and yaml_filename in user_info['sessions']:
reset_user_dict(user_info['sessions'][yaml_filename], yaml_filename)
del user_info['sessions'][yaml_filename]
unique_sessions = interview.consolidated_metadata.get('sessions are unique', False)
if unique_sessions is not False and not current_user.is_authenticated:
if yaml_filename in user_info['sessions']:
del user_info['sessions'][yaml_filename]
output['redirect'] = url_for('user.login', next=url_for('index', i=yaml_filename, **url_args))
return jsonify(output)
if interview.consolidated_metadata.get('temporary session', False):
if yaml_filename in user_info['sessions']:
reset_user_dict(user_info['sessions'][yaml_filename], yaml_filename)
del user_info['sessions'][yaml_filename]
if current_user.is_authenticated:
while True:
the_session_id, encrypted = get_existing_session(yaml_filename, secret) # pylint: disable=unused-variable
if the_session_id:
reset_user_dict(the_session_id, yaml_filename)
else:
break
need_to_reset = True
if current_user.is_anonymous:
if (not interview.allowed_to_initiate(is_anonymous=True)) or (not interview.allowed_to_access(is_anonymous=True)):
output['redirect'] = url_for('user.login', next=url_for('index', i=yaml_filename, **url_args))
return jsonify(output)
elif not interview.allowed_to_initiate(has_roles=[role.name for role in current_user.roles]):
return jsonify_with_status(word("You are not allowed to access this interview."), 403)
elif not interview.allowed_to_access(has_roles=[role.name for role in current_user.roles]):
return jsonify_with_status(word("You are not allowed to access this interview."), 403)
session_id = None
if reset_fully:
user_info['sessions'] = {}
if (not need_to_reset) and (unique_sessions is True or (isinstance(unique_sessions, list) and len(unique_sessions) and current_user.has_role(*unique_sessions))):
session_id, encrypted = get_existing_session(yaml_filename, secret)
else:
unique_sessions = interview.consolidated_metadata.get('sessions are unique', False)
if unique_sessions is not False and not current_user.is_authenticated:
if yaml_filename in user_info['sessions']:
del user_info['sessions'][yaml_filename]
output['redirect'] = url_for('user.login', next=url_for('index', i=yaml_filename, session=session_id, **url_args))
return jsonify(output)
if current_user.is_anonymous:
if (not interview.allowed_to_initiate(is_anonymous=True)) or (not interview.allowed_to_access(is_anonymous=True)):
output['redirect'] = url_for('user.login', next=url_for('index', i=yaml_filename, session=session_id, **url_args))
return jsonify(output)
elif not interview.allowed_to_initiate(has_roles=[role.name for role in current_user.roles]):
if yaml_filename in user_info['sessions']:
del user_info['sessions'][yaml_filename]
return jsonify_with_status(word("You are not allowed to access this interview."), 403)
elif not interview.allowed_to_access(has_roles=[role.name for role in current_user.roles]):
if yaml_filename in user_info['sessions']:
del user_info['sessions'][yaml_filename]
return jsonify_with_status(word("You are not allowed to access this interview."), 403)
if need_to_reset:
reset_user_dict(session_id, yaml_filename)
session_id = None
if new_session:
session_id = None
if yaml_filename in user_info['sessions']:
del user_info['sessions'][yaml_filename]
if not session_id:
if yaml_filename in user_info['sessions']:
session_id = user_info['sessions'][yaml_filename]
else:
try:
(encrypted, session_id) = create_new_interview(yaml_filename, secret, url_args=url_args, referer=referer, req=request)
except Exception as err:
return jsonify_with_status(err.__class__.__name__ + ': ' + str(err), 400)
user_info['sessions'][yaml_filename] = session_id
changed = True
is_new = True
# output['session'] = session_id
if changed:
key = 'da:apiinterview:usercode:' + user_code
pipe = r.pipeline()
pipe.set(key, json.dumps(user_info))
pipe.expire(key, 60*60*24*30)
pipe.execute()
if not is_new:
if url_args is not None and isinstance(url_args, dict) and len(url_args) > 0:
logmessage("url_args is " + repr(url_args))
variables = {}
for key, val in url_args.items():
variables["url_args[%s]" % (repr(key),)] = val
try:
set_session_variables(yaml_filename, session_id, variables, secret=secret, use_lock=True)
except Exception as the_err:
return jsonify_with_status(str(the_err), 400)
obtain_lock(session_id, yaml_filename)
if request.method == 'POST' and command == 'action':
action = post_data.get('action', None)
if action is not None:
if not isinstance(action, dict) or 'action' not in action or 'arguments' not in action:
release_lock(session_id, yaml_filename)
return jsonify_with_status("Invalid action", 400)
try:
data = get_question_data(yaml_filename, session_id, secret, save=True, use_lock=False, action=action, post_setting=True, advance_progress_meter=True, encode=True)
except Exception as err:
release_lock(session_id, yaml_filename)
return jsonify_with_status(str(err), 400)
else:
try:
data = get_question_data(yaml_filename, session_id, secret, save=False, use_lock=False, encode=True)
except Exception as err:
release_lock(session_id, yaml_filename)
return jsonify_with_status(str(err), 400)
if request.method == 'POST':
if command == 'back':
if data['allow_going_back']:
try:
data = go_back_in_session(yaml_filename, session_id, secret=secret, return_question=True, encode=True)
except Exception as the_err:
release_lock(session_id, yaml_filename)
return jsonify_with_status(str(the_err), 400)
elif command is None:
variables = post_data.get('variables', None)
if not isinstance(variables, dict):
release_lock(session_id, yaml_filename)
return jsonify_with_status("variables must be a dictionary", 400)
if variables is not None:
variables = transform_json_variables(variables)
valid_variables = {}
if 'fields' in data:
for field in data['fields']:
if 'variable_name' in field and field.get('active', False):
valid_variables[field['variable_name']] = field
if field.get('required', False) and 'variable_name' in field:
if field['variable_name'] not in variables:
release_lock(session_id, yaml_filename)
return jsonify_with_status("variable %s is missing" % (field['variable_name'],), 400)
for key, val in variables.items():
if key not in valid_variables:
release_lock(session_id, yaml_filename)
return jsonify_with_status("invalid variable name " + repr(key), 400)
try:
data = set_session_variables(yaml_filename, session_id, variables, secret=secret, return_question=True, event_list=data.get('event_list', None), question_name=data.get('questionName', None), encode=True)
except Exception as the_err:
release_lock(session_id, yaml_filename)
return jsonify_with_status(str(the_err), 400)
elif command != 'action':
release_lock(session_id, yaml_filename)
return jsonify_with_status("Invalid command", 400)
if data.get('questionType', None) in ('response', 'sendfile'):
output['question'] = {
'questionType': data['questionType']
}
else:
output['question'] = data
release_lock(session_id, yaml_filename)
if send_initial:
output['setup'] = {}
if 'google maps api key' in google_config:
api_key = google_config.get('google maps api key')
elif 'api key' in google_config:
api_key = google_config.get('api key')
else:
api_key = None
if api_key:
output['setup']['googleApiKey'] = api_key
if ga_configured and data['interview_options'].get('analytics on', True):
interview_package = re.sub(r'^docassemble\.', '', re.sub(r':.*', '', yaml_filename))
interview_filename = re.sub(r'\.ya?ml$', '', re.sub(r'.*[:\/]', '', yaml_filename), flags=re.IGNORECASE)
output['setup']['googleAnalytics'] = {'enable': True, 'ga_id': google_config.get('analytics id'), 'prefix': interview_package + '/' + interview_filename}
else:
output['setup']['googleAnalytics'] = {'enable': False}
return jsonify(output)
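# Simple introspection endpoint reporting whether the requester is logged in and,
# if so, basic profile fields and role names.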
@app.route('/me', methods=['GET'])
def whoami():
if current_user.is_authenticated:
return jsonify(logged_in=True, user_id=current_user.id, email=current_user.email, roles=[role.name for role in current_user.roles], firstname=current_user.first_name, lastname=current_user.last_name, country=current_user.country, subdivisionfirst=current_user.subdivisionfirst, subdivisionsecond=current_user.subdivisionsecond, subdivisionthird=current_user.subdivisionthird, organization=current_user.organization, timezone=current_user.timezone)
return jsonify(logged_in=False)
def retrieve_email(email_id):
if not isinstance(email_id, int):
raise DAError("email_id not provided")
email = db.session.execute(select(Email).filter_by(id=email_id)).scalar()
if email is None:
raise DAError("E-mail did not exist")
short_record = db.session.execute(select(Shortener).filter_by(short=email.short)).scalar()
if short_record is not None and short_record.user_id is not None:
user = db.session.execute(select(UserModel).options(db.joinedload(UserModel.roles)).filter_by(id=short_record.user_id, active=True)).scalar()
else:
user = None
if short_record is None:
raise DAError("Short code did not exist")
return get_email_obj(email, short_record, user)
class AddressEmail:
def __str__(self):
return str(self.address)
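# Returns a list of AddressEmail objects, one per Shortener record matching the
# interview session (and optional key/index), each carrying the e-mails received
# at that short address along with the address owner, if any.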
def retrieve_emails(**pargs):
key = pargs.get('key', None)
the_index = pargs.get('index', None)
if key is None and the_index is not None:
raise DAError("retrieve_emails: if you provide an index you must provide a key")
yaml_filename = pargs.get('i', docassemble.base.functions.this_thread.current_info.get('yaml_filename', None))
uid = pargs.get('uid', docassemble.base.functions.get_uid())
if 'user_id' in pargs:
user_id = pargs['user_id']
temp_user_id = None
elif 'temp_user_id' in pargs:
user_id = None
temp_user_id = pargs['temp_user_id']
elif current_user.is_anonymous:
user_id = None
temp_user_id = session.get('tempuser', None)
else:
user_id = current_user.id
temp_user_id = None
user_cache = {}
results = []
if key is None:
the_query = db.session.execute(select(Shortener).filter_by(filename=yaml_filename, uid=uid, user_id=user_id, temp_user_id=temp_user_id).order_by(Shortener.modtime)).scalars()
else:
if the_index is None:
the_query = db.session.execute(select(Shortener).filter_by(filename=yaml_filename, uid=uid, user_id=user_id, temp_user_id=temp_user_id, key=key).order_by(Shortener.modtime)).scalars()
else:
the_query = db.session.execute(select(Shortener).filter_by(filename=yaml_filename, uid=uid, user_id=user_id, temp_user_id=temp_user_id, key=key, index=the_index).order_by(Shortener.modtime)).scalars()
for record in the_query:
result_for_short = AddressEmail()
result_for_short.address = record.short
result_for_short.key = record.key
result_for_short.index = record.index
result_for_short.emails = []
if record.user_id is not None:
if record.user_id in user_cache:
user = user_cache[record.user_id]
else:
user = get_user_object(record.user_id)
user_cache[record.user_id] = user
result_for_short.owner = user.email
else:
user = None
result_for_short.owner = None
for email in db.session.execute(select(Email).filter_by(short=record.short).order_by(Email.datetime_received)).scalars():
result_for_short.emails.append(get_email_obj(email, record, user))
results.append(result_for_short)
return results
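# Builds a DAEmail object from an Email row: addresses and subject come from the
# stored JSON columns, while the headers.json, attachment.txt, and attachment.html
# uploads are mapped to the headers, body_text, and body_html attributes and all
# other uploads become DAFile attachments.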
def get_email_obj(email, short_record, user):
email_obj = DAEmail(short=email.short)
email_obj.key = short_record.key
email_obj.index = short_record.index
email_obj.initializeAttribute('to_address', DAEmailRecipientList, json.loads(email.to_addr), gathered=True)
email_obj.initializeAttribute('cc_address', DAEmailRecipientList, json.loads(email.cc_addr), gathered=True)
email_obj.initializeAttribute('from_address', DAEmailRecipient, **json.loads(email.from_addr))
email_obj.initializeAttribute('reply_to', DAEmailRecipient, **json.loads(email.reply_to_addr))
email_obj.initializeAttribute('return_path', DAEmailRecipient, **json.loads(email.return_path_addr))
email_obj.subject = email.subject
email_obj.datetime_message = email.datetime_message
email_obj.datetime_received = email.datetime_received
email_obj.initializeAttribute('attachment', DAFileList, gathered=True)
if user is None:
email_obj.address_owner = None
else:
email_obj.address_owner = user.email
for attachment_record in db.session.execute(select(EmailAttachment).filter_by(email_id=email.id).order_by(EmailAttachment.index)).scalars():
# logmessage("Attachment record is " + str(attachment_record.id))
upload = db.session.execute(select(Uploads).filter_by(indexno=attachment_record.upload)).scalar()
if upload is None:
continue
# logmessage("Filename is " + upload.filename)
saved_file_att = SavedFile(attachment_record.upload, extension=attachment_record.extension, fix=True)
process_file(saved_file_att, saved_file_att.path, attachment_record.content_type, attachment_record.extension, initial=False)
extension, mimetype = get_ext_and_mimetype(upload.filename)
if upload.filename == 'headers.json':
# logmessage("Processing headers")
email_obj.initializeAttribute('headers', DAFile, mimetype=mimetype, extension=extension, number=attachment_record.upload)
elif upload.filename == 'attachment.txt' and attachment_record.index < 3:
# logmessage("Processing body text")
email_obj.initializeAttribute('body_text', DAFile, mimetype=mimetype, extension=extension, number=attachment_record.upload)
elif upload.filename == 'attachment.html' and attachment_record.index < 3:
email_obj.initializeAttribute('body_html', DAFile, mimetype=mimetype, extension=extension, number=attachment_record.upload)
else:
email_obj.attachment.appendObject(DAFile, mimetype=mimetype, extension=extension, number=attachment_record.upload)
if not hasattr(email_obj, 'headers'):
email_obj.headers = None
if not hasattr(email_obj, 'body_text'):
email_obj.body_text = None
if not hasattr(email_obj, 'body_html'):
email_obj.body_html = None
return email_obj
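# Sends a fax via whichever provider is configured: ClickSend and Telnyx results
# are cached under da:faxcallback:sid:<id> for 24 hours so the callback handlers
# can report status later, and Twilio is used when its fax feature is enabled.
# Returns the provider's message/fax identifier, or None when faxing is not configured.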
def da_send_fax(fax_number, the_file, config, country=None):
if clicksend_config is not None and fax_provider == 'clicksend':
if config not in clicksend_config['name']:
raise DAException("There is no ClickSend configuration called " + str(config))
info = docassemble.webapp.clicksend.send_fax(fax_number, the_file, clicksend_config['name'][config], country)
the_key = 'da:faxcallback:sid:' + info['message_id']
pipe = r.pipeline()
pipe.set(the_key, json.dumps(info))
pipe.expire(the_key, 86400)
pipe.execute()
return info['message_id']
if telnyx_config is not None and fax_provider == 'telnyx':
if config not in telnyx_config['name']:
raise DAException("There is no Telnyx configuration called " + str(config))
info = docassemble.webapp.telnyx.send_fax(fax_number, the_file, telnyx_config['name'][config], country)
the_key = 'da:faxcallback:sid:' + info['id']
pipe = r.pipeline()
pipe.set(the_key, json.dumps(info))
pipe.expire(the_key, 86400)
pipe.execute()
return info['id']
if twilio_config is None:
logmessage("da_send_fax: ignoring call to da_send_fax because Twilio not enabled")
return None
if config not in twilio_config['name'] or 'fax' not in twilio_config['name'][config] or twilio_config['name'][config]['fax'] in (False, None):
logmessage("da_send_fax: ignoring call to da_send_fax because fax feature not enabled")
return None
account_sid = twilio_config['name'][config].get('account sid', None)
auth_token = twilio_config['name'][config].get('auth token', None)
from_number = twilio_config['name'][config].get('number', None)
if account_sid is None or auth_token is None or from_number is None:
logmessage("da_send_fax: ignoring call to da_send_fax because account sid, auth token, and/or number missing")
return None
client = TwilioRestClient(account_sid, auth_token)
fax = client.fax.v1.faxes.create(
from_=from_number,
to=fax_number,
media_url=the_file.url_for(temporary=True, seconds=600),
status_callback=url_for('fax_callback', _external=True)
)
return fax.sid
def write_pypirc():
pypirc_file = daconfig.get('pypirc path', '/var/www/.pypirc')
# pypi_username = daconfig.get('pypi username', None)
# pypi_password = daconfig.get('pypi password', None)
pypi_url = daconfig.get('pypi url', 'https://upload.pypi.org/legacy/')
# if pypi_username is None or pypi_password is None:
# return
if os.path.isfile(pypirc_file):
with open(pypirc_file, 'r', encoding='utf-8') as fp:
existing_content = fp.read()
else:
existing_content = None
content = """\
[distutils]
index-servers =
pypi
[pypi]
repository: """ + pypi_url + "\n"
# """
# username: """ + pypi_username + """
# password: """ + pypi_password + "\n"
if existing_content != content:
with open(pypirc_file, 'w', encoding='utf-8') as fp:
fp.write(content)
os.chmod(pypirc_file, stat.S_IRUSR | stat.S_IWUSR)
def url_sanitize(url):
return re.sub(r'\s', ' ', url)
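# Queries the configured PyPI JSON endpoint for a package. On success,
# result['error'] is False and result['exists'] indicates whether the package was
# found (with result['info'] holding the JSON metadata when it was); otherwise
# result['error'] is a status code, 'timeout', 'json', or 'unknown'.
# Illustrative call: pypi_status('docassemble.demo') -> {'error': False, 'exists': True, 'info': {...}}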
def pypi_status(packagename):
result = {}
pypi_url = daconfig.get('pypi url', 'https://pypi.python.org/pypi')
try:
response = requests.get(url_sanitize(pypi_url + '/' + str(packagename) + '/json'), timeout=30)
assert response.status_code == 200
except AssertionError:
if response.status_code == 404:
result['error'] = False
result['exists'] = False
else:
result['error'] = response.status_code
except requests.exceptions.Timeout:
result['error'] = 'timeout'
except:
result['error'] = 'unknown'
else:
try:
result['info'] = response.json()
except:
result['error'] = 'json'
else:
result['error'] = False
result['exists'] = True
return result
def page_after_login():
if current_user.is_authenticated:
for role, page in daconfig['page after login']:
if role == '*' or current_user.has_role(role):
return page
return 'interview_list'
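# Resolves a DAFile/DAFileList/DAFileCollection/DAStaticFile or other file
# reference to a filesystem path, symlinking (or copying) stored files into a
# temporary directory so that the original filename is preserved.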
def path_from_reference(file_reference):
if isinstance(file_reference, DAFileCollection):
file_reference = file_reference._first_file()
if isinstance(file_reference, DAFileList):
file_reference = file_reference[0]
if isinstance(file_reference, DAFile):
file_info = get_info_from_file_number_with_uids(file_reference.number)
if 'fullpath' not in file_info:
raise DAException("File not found")
friendly_path = os.path.join(tempfile.mkdtemp(prefix='SavedFile'), file_reference.filename)
try:
os.symlink(file_info['fullpath'], friendly_path)
except:
shutil.copyfile(file_info['fullpath'], friendly_path)
return friendly_path
if isinstance(file_reference, DAStaticFile):
return file_reference.path()
if file_reference is None:
return None
file_info = get_info_from_file_reference(file_reference)
if 'fullpath' not in file_info:
raise DAException("File not found")
return file_info['fullpath']
def secure_filename_spaces_ok(filename):
filename = unicodedata.normalize("NFKD", filename)
filename = filename.encode("ascii", "ignore").decode("ascii")
for sep in os.path.sep, os.path.altsep:
if sep:
filename = filename.replace(sep, "_")
filename = str(re.sub(r'[^A-Za-z0-9\_\.\- ]', '', " ".join(filename.split(' ')))).strip("._ ")
return filename
def secure_filename(filename):
filename = werkzeug.utils.secure_filename(filename)
extension, mimetype = get_ext_and_mimetype(filename) # pylint: disable=unused-variable
filename = re.sub(r'\.[^\.]+$', '', filename) + '.' + extension
return filename
def sanitize_arguments(*pargs):
for item in pargs:
if isinstance(item, str):
if item.startswith('/') or item.startswith('.') or re.search(r'\s', item):
raise DAException("Invalid parameter " + item)
def get_short_code(**pargs):
key = pargs.get('key', None)
the_index = pargs.get('index', None)
if key is None and the_index is not None:
raise DAError("get_short_code: if you provide an index you must provide a key")
yaml_filename = pargs.get('i', docassemble.base.functions.this_thread.current_info.get('yaml_filename', None))
uid = pargs.get('uid', docassemble.base.functions.get_uid())
if 'user_id' in pargs:
user_id = pargs['user_id']
temp_user_id = None
elif 'temp_user_id' in pargs:
user_id = None
temp_user_id = pargs['temp_user_id']
elif current_user.is_anonymous:
user_id = None
temp_user_id = session.get('tempuser', None)
else:
user_id = current_user.id
temp_user_id = None
short_code = None
for record in db.session.execute(select(Shortener.short).filter_by(filename=yaml_filename, uid=uid, user_id=user_id, temp_user_id=temp_user_id, key=key, index=the_index)):
short_code = record.short
if short_code is not None:
return short_code
counter = 0
new_record = None
while counter < 20:
existing_id = None
new_short = random_lower_string(6)
for record in db.session.execute(select(Shortener.id).filter_by(short=new_short)):
existing_id = record.id
if existing_id is None:
new_record = Shortener(filename=yaml_filename, uid=uid, user_id=user_id, temp_user_id=temp_user_id, short=new_short, key=key, index=the_index)
db.session.add(new_record)
db.session.commit()
break
counter += 1
if new_record is None:
raise SystemError("Failed to generate unique short code")
return new_short
def illegal_variable_name(var):
if re.search(r'[\n\r]', var):
return True
try:
t = ast.parse(var)
except:
return True
detector = docassemble.base.astparser.detectIllegal()
detector.visit(t)
return detector.illegal
def illegal_sessions_query(expr):
if re.search(r'[\n\r]', expr):
return True
try:
t = ast.parse(expr)
except:
return True
detector = docassemble.base.astparser.detectIllegalQuery()
detector.visit(t)
return detector.illegal
emoji_match = re.compile(r':([A-Za-z][A-Za-z0-9\_\-]+):')
html_match = re.compile(r'(</?[A-Za-z\!][^>]*>|https*://[A-Za-z0-9\-\_:\%\/\@\.\#\&\=\~\?]+|mailto*://[A-Za-z0-9\-\_:\%\/\@\.\#\&\=\~]+\?)')
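# Splits a Mako/Markdown string into [text, mode] segments, where mode 0 is plain
# text, mode 1 is Mako control syntax (% lines and <% ... %> blocks), and mode 2
# is content that should be passed through untouched (${...} substitutions,
# HTML tags and URLs, emoji codes, and most [BRACKET] markup).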
def mako_parts(expression):
in_percent = False
in_var = False
in_square = False
var_depth = 0
in_colon = 0
in_html = 0
in_pre_bracket = False
in_post_bracket = False
output = []
current = ''
i = 0
expression = emoji_match.sub(r'^^\1^^', expression)
expression = html_match.sub(r'!@\1!@', expression)
n = len(expression)
while i < n:
if in_html:
if i + 1 < n and expression[i:i+2] == '!@':
in_html = False
if current != '':
output.append([current, 2])
current = ''
i += 2
else:
current += expression[i]
i += 1
continue
if in_percent:
if expression[i] in ["\n", "\r"]:
in_percent = False
current += expression[i]
output.append([current, 1])
current = ''
i += 1
continue
elif in_var:
if expression[i] == '{' and expression[i-1] != "\\":
var_depth += 1
elif expression[i] == '}' and expression[i-1] != "\\":
var_depth -= 1
if var_depth == 0:
current += expression[i]
if current != '':
output.append([current, 2])
current = ''
in_var = False
i += 1
continue
elif in_pre_bracket:
if i + 2 < n:
if expression[i:i+3] == '</%':
in_pre_bracket = False
in_post_bracket = True
current += expression[i:i+3]
i += 3
continue
if i + 1 < n and expression[i:i+2] == '%>':
in_pre_bracket = False
current += expression[i:i+2]
if current != '':
output.append([current, 1])
current = ''
i += 2
continue
elif in_post_bracket:
if expression[i] == '>' and expression[i-1] != "\\":
current += expression[i]
if current != '':
output.append([current, 1])
current = ''
in_post_bracket = False
i += 1
continue
elif in_square:
if expression[i] == ']' and (i == 0 or expression[i-1] != "\\"):
mode = 0
current += expression[i]
for pattern in ['[FILE', '[TARGET ', '[EMOJI ', '[QR ', '[YOUTUBE', '[VIMEO]', '[PAGENUM]', '[BEGIN_TWOCOL]', '[BREAK]', '[END_TWOCOL', '[BEGIN_CAPTION]', '[VERTICAL_LINE]', '[END_CAPTION]', '[TIGHTSPACING]', '[SINGLESPACING]', '[DOUBLESPACING]', '[ONEANDAHALFSPACING]', '[TRIPLESPACING]', '[START_INDENTATION]', '[STOP_INDENTATION]', '[NBSP]', '[REDACTION', '[ENDASH]', '[EMDASH]', '[HYPHEN]', '[CHECKBOX]', '[BLANK]', '[BLANKFILL]', '[PAGEBREAK]', '[PAGENUM]', '[SECTIONNUM]', '[SKIPLINE]', '[NEWLINE]', '[NEWPAR]', '[BR]', '[TAB]', '[END]', '[BORDER]', '[NOINDENT]', '[FLUSHLEFT]', '[FLUSHRIGHT]', '[CENTER]', '[BOLDCENTER]', '[INDENTBY', '[${']:
if current.startswith(pattern):
mode = 2
break
if current != '':
output.append([current, mode])
current = ''
in_square = False
i += 1
continue
if i + 1 < n and expression[i:i+2] == '^^':
if in_colon:
in_colon = False
current += ':'
output.append([current, 2])
current = ''
else:
in_colon = True
if current.startswith('[${'):
output.append([current, 2])
else:
output.append([current, 0])
current = ':'
i += 2
continue
if i + 1 < n and expression[i:i+2] == '!@':
in_html = True
if current != '':
if current.startswith('[${'):
output.append([current, 2])
else:
output.append([current, 0])
current = ''
i += 2
continue
elif in_colon:
if i + 1 < n and expression[i:i+2] == '^^':
current += ':'
if current != '':
output.append([current, 2])
current = ''
in_colon = False
i += 2
continue
elif i + 1 < n:
if expression[i:i+2] == '${':
in_var = True
var_depth += 1
if current != '':
output.append([current, 0])
current = expression[i:i+2]
i += 2
continue
if expression[i:i+2] == '^^':
in_colon = True
if current != '':
output.append([current, 0])
current = ':'
i += 2
continue
if expression[i:i+2] == '!@':
in_html = True
if current != '':
output.append([current, 0])
current = ''
i += 2
continue
if expression[i:i+2] == '<%':
in_pre_bracket = True
if current != '':
output.append([current, 0])
current = expression[i:i+2]
i += 2
continue
if expression[i:i+2] == '% ' and start_of_line(expression, i):
in_percent = True
if current != '':
output.append([current, 0])
current = expression[i:i+2]
i += 2
continue
if expression[i] == '[' and (i == 0 or expression[i-1] != "\\"):
in_square = True
if current != '':
output.append([current, 0])
current = expression[i]
i += 1
continue
current += expression[i]
i += 1
if current != '':
if in_pre_bracket or in_post_bracket or in_percent:
output.append([current, 1])
elif in_var:
output.append([current, 2])
else:
output.append([current, 0])
return output
def start_of_line(expression, i):
if i == 0:
return True
i -= 1
while i >= 0:
if expression[i] in ("\n", "\r"):
return True
if expression[i] in (" ", "\t"):
i -= 1
continue
return False
return True
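# Cooperative Redis lock keyed by application name and hostname. The 'obtain'
# action polls for up to maxtime seconds for an existing lock before treating it
# as stale and overwriting it (the new lock auto-expires after maxtime seconds);
# the 'release' action deletes the lock key.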
def applock(action, application, maxtime=4):
key = 'da:applock:' + application + ':' + hostname
if action == 'obtain':
found = False
count = maxtime
while count > 0:
record = r.get(key)
if record:
logmessage("obtain_applock: waiting for " + key)
time.sleep(1.0)
else:
found = False
break
found = True
count -= 1
if found:
logmessage("Request for applock " + key + " deadlocked")
r.delete(key)
pipe = r.pipeline()
pipe.set(key, 1)
pipe.expire(key, maxtime)
pipe.execute()
elif action == 'release':
r.delete(key)
@app.errorhandler(CSRFError)
def handle_csrf_error(the_error):
if request.method == 'POST' and '/checkout' not in request.url:
setup_translation()
if 'ajax' in request.form and int(request.form['ajax']):
flash(word("Input not processed because the page expired."), "success")
return jsonify({'action': 'reload', 'reason': 'csrf_error'})
try:
referer = str(request.referrer)
except:
referer = None
if referer:
flash(word("Input not processed because the page expired."), "success")
return redirect(referer)
return server_error(the_error)
def error_notification(err, message=None, history=None, trace=None, referer=None, the_request=None, the_vars=None):
recipient_email = daconfig.get('error notification email', None)
if not recipient_email:
return
if err.__class__.__name__ in ['CSRFError', 'ClientDisconnected', 'MethodNotAllowed', 'DANotFoundError', 'DAInvalidFilename'] + ERROR_TYPES_NO_EMAIL:
return
email_recipients = []
if isinstance(recipient_email, list):
email_recipients.extend(recipient_email)
else:
email_recipients.append(recipient_email)
if message is None:
errmess = str(err)
else:
errmess = message
try:
email_address = current_user.email
except:
email_address = None
if the_request:
try:
referer = str(the_request.referrer)
except:
referer = None
ipaddress = get_requester_ip(the_request)
else:
referer = None
ipaddress = None
if daconfig.get('error notification variables', DEBUG):
if the_vars is None:
try:
the_vars = docassemble.base.functions.all_variables(include_internal=True)
except:
pass
else:
the_vars = None
json_filename = None
if the_vars is not None and len(the_vars):
try:
with tempfile.NamedTemporaryFile(mode='w', prefix="datemp", suffix='.json', delete=False, encoding='utf-8') as fp:
fp.write(json.dumps(the_vars, sort_keys=True, indent=2))
json_filename = fp.name
except:
pass
interview_path = docassemble.base.functions.interview_path()
try:
the_key = 'da:errornotification:' + str(ipaddress)
existing = r.get(the_key)
pipe = r.pipeline()
pipe.set(the_key, 1)
pipe.expire(the_key, 60)
pipe.execute()
if existing:
return
except:
pass
try:
try:
html = "<html>\n <body>\n <p>There was an error in the " + app.config['APP_NAME'] + " application.</p>\n <p>The error message was:</p>\n<pre>" + err.__class__.__name__ + ": " + str(errmess) + "</pre>\n"
body = "There was an error in the " + app.config['APP_NAME'] + " application.\n\nThe error message was:\n\n" + err.__class__.__name__ + ": " + str(errmess)
if trace is not None:
body += "\n\n" + str(trace)
html += "<pre>" + str(trace) + "</pre>"
if history is not None:
body += "\n\n" + BeautifulSoup(history, "html.parser").get_text('\n')
html += history
if referer is not None and referer != 'None':
body += "\n\nThe referer URL was " + str(referer)
html += "<p>The referer URL was " + str(referer) + "</p>"
elif interview_path is not None:
body += "\n\nThe interview was " + str(interview_path)
html += "<p>The interview was " + str(interview_path) + "</p>"
if email_address is not None:
body += "\n\nThe user was " + str(email_address)
html += "<p>The user was " + str(email_address) + "</p>"
if trace is not None:
body += "\n\n" + str(trace)
html += "<pre>" + str(trace) + "</pre>"
if 'external hostname' in daconfig and daconfig['external hostname'] is not None:
body += "\n\nThe external hostname was " + str(daconfig['external hostname'])
html += "<p>The external hostname was " + str(daconfig['external hostname']) + "</p>"
html += "\n </body>\n</html>"
msg = Message(app.config['APP_NAME'] + " error: " + err.__class__.__name__, recipients=email_recipients, body=body, html=html)
if json_filename:
with open(json_filename, 'r', encoding='utf-8') as fp:
msg.attach('variables.json', 'application/json', fp.read())
da_send_mail(msg)
except Exception as zerr:
logmessage(str(zerr))
body = "There was an error in the " + app.config['APP_NAME'] + " application."
html = "<html>\n <body>\n <p>There was an error in the " + app.config['APP_NAME'] + " application.</p>\n </body>\n</html>"
msg = Message(app.config['APP_NAME'] + " error: " + err.__class__.__name__, recipients=email_recipients, body=body, html=html)
if json_filename:
with open(json_filename, 'r', encoding='utf-8') as fp:
msg.attach('variables.json', 'application/json', fp.read())
da_send_mail(msg)
except:
pass
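# stash_data() encrypts a data structure with a random secret and stores it in
# Redis under da:stash:<key> with the given expiration; retrieve_stashed_data()
# reverses the process and can delete the entry or refresh its expiration. These
# back the /api/stash_data and /api/retrieve_stashed_data endpoints above.
# Illustrative round trip: key, secret = stash_data({'foo': 'bar'}, 3600);
# retrieve_stashed_data(key, secret, delete=True) returns {'foo': 'bar'}.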
def stash_data(data, expire):
while True:
key = random_alphanumeric(16)
if r.get('da:stash:' + key) is None:
break
secret = random_string(16)
packed_data = encrypt_dictionary(data, secret)
pipe = r.pipeline()
pipe.set('da:stash:' + key, packed_data)
pipe.expire('da:stash:' + key, expire)
pipe.execute()
return (key, secret)
def retrieve_stashed_data(key, secret, delete=False, refresh=False):
packed_data = r.get('da:stash:' + key)
if packed_data is None:
return None
try:
data = decrypt_dictionary(packed_data.decode(), secret)
except:
return None
if delete:
r.delete('da:stash:' + key)
elif refresh and isinstance(refresh, int) and refresh > 0:
r.expire('da:stash:' + key, refresh)
return data
def make_necessary_dirs():
paths = []
if app.config['ALLOW_UPDATES'] or app.config['ENABLE_PLAYGROUND']:
paths.append(FULL_PACKAGE_DIRECTORY)
if cloud is None:
paths.append(UPLOAD_DIRECTORY)
paths.append(LOG_DIRECTORY)
for path in paths:
if not os.path.isdir(path):
try:
os.makedirs(path, exist_ok=True)
except:
sys.exit("Could not create path: " + path)
if not os.access(path, os.W_OK):
sys.exit("Unable to create files in directory: " + path)
if app.config['ALLOW_RESTARTING'] and not os.access(WEBAPP_PATH, os.W_OK):
sys.exit("Unable to modify the timestamp of the WSGI file: " + WEBAPP_PATH)
if DEBUG_BOOT:
boot_log("server: making directories that do not already exist")
make_necessary_dirs()
if DEBUG_BOOT:
boot_log("server: finished making directories that do not already exist")
docassemble.base.functions.update_server(url_finder=get_url_from_file_reference,
navigation_bar=navigation_bar,
chat_partners_available=chat_partners_available,
get_chat_log=get_current_chat_log,
sms_body=sms_body,
send_fax=da_send_fax,
get_sms_session=get_sms_session,
initiate_sms_session=initiate_sms_session,
terminate_sms_session=terminate_sms_session,
applock=applock,
twilio_config=twilio_config,
server_redis=r,
server_redis_user=r_user,
user_id_dict=user_id_dict,
get_user_object=get_user_object,
retrieve_emails=retrieve_emails,
get_short_code=get_short_code,
make_png_for_pdf=make_png_for_pdf,
ocr_google_in_background=ocr_google_in_background,
task_ready=task_ready,
wait_for_task=wait_for_task,
user_interviews=user_interviews,
interview_menu=interview_menu,
get_user_list=get_user_list,
get_user_info=get_user_info,
set_user_info=set_user_info,
make_user_inactive=make_user_inactive,
get_secret=get_secret,
get_session_variables=get_session_variables,
go_back_in_session=go_back_in_session,
create_session=create_new_interview,
set_session_variables=set_session_variables,
get_privileges_list=get_privileges_list,
add_privilege=add_privilege,
remove_privilege=remove_privilege,
add_user_privilege=add_user_privilege,
remove_user_privilege=remove_user_privilege,
get_permissions_of_privilege=get_permissions_of_privilege,
create_user=create_user,
file_set_attributes=file_set_attributes,
file_user_access=file_user_access,
file_privilege_access=file_privilege_access,
fg_make_png_for_pdf=fg_make_png_for_pdf,
fg_make_png_for_pdf_path=fg_make_png_for_pdf_path,
fg_make_pdf_for_word_path=fg_make_pdf_for_word_path,
get_question_data=get_question_data,
fix_pickle_obj=fix_pickle_obj,
main_page_parts=main_page_parts,
SavedFile=SavedFile,
path_from_reference=path_from_reference,
button_class_prefix=app.config['BUTTON_STYLE'],
write_answer_json=write_answer_json,
read_answer_json=read_answer_json,
delete_answer_json=delete_answer_json,
variables_snapshot_connection=variables_snapshot_connection,
get_referer=get_referer,
stash_data=stash_data,
retrieve_stashed_data=retrieve_stashed_data,
secure_filename_spaces_ok=secure_filename_spaces_ok,
secure_filename=secure_filename,
transform_json_variables=transform_json_variables,
get_login_url=get_login_url,
run_action_in_session=run_action_in_session,
invite_user=invite_user)
# docassemble.base.util.set_user_id_function(user_id_dict)
# docassemble.base.functions.set_generate_csrf(generate_csrf)
# docassemble.base.parse.set_url_finder(get_url_from_file_reference)
# docassemble.base.parse.set_url_for(url_for)
# APPLICATION_NAME = 'docassemble'
if DEBUG_BOOT:
boot_log("server: building documentation")
base_words = get_base_words()
title_documentation = get_title_documentation()
DOCUMENTATION_BASE = daconfig.get('documentation base url', 'https://docassemble.org/docs/')
documentation_dict = get_documentation_dict()
base_name_info = get_name_info()
if DEBUG_BOOT:
boot_log("server: finished building documentation")
# docassemble.base.functions.set_chat_partners_available(chat_partners_available)
password_secret_key = daconfig.get('password secretkey', app.secret_key)
def get_base_url():
return re.sub(r'^(https?://[^/]+).*', r'\1', url_for('rootindex', _external=True))
def null_func(*pargs, **kwargs): # pylint: disable=unused-argument
logmessage("Null function called")
if in_celery:
def illegal_worker_convert(*pargs, **kwargs):
raise DAException("You cannot access the status of a background task from inside of a background task.")
docassemble.base.functions.update_server(bg_action=null_func,
# async_ocr=null_func,
chord=null_func,
ocr_page=null_func,
ocr_finalize=null_func,
worker_convert=illegal_worker_convert)
else:
docassemble.base.functions.update_server(bg_action=docassemble.webapp.worker.background_action,
# async_ocr=docassemble.webapp.worker.async_ocr,
chord=docassemble.webapp.worker.chord,
ocr_page=docassemble.webapp.worker.ocr_page,
ocr_dummy=docassemble.webapp.worker.ocr_dummy,
ocr_finalize=docassemble.webapp.worker.ocr_finalize,
worker_convert=docassemble.webapp.worker.convert)
pg_ex = {}
def define_examples():
if 'encoded_example_html' in pg_ex:
return
example_html = []
example_html.append(' <div class="col-md-2">\n <h5 class="mb-1">Example blocks</h5>')
pg_ex['pg_first_id'] = []
data_dict = {}
make_example_html(get_examples(), pg_ex['pg_first_id'], example_html, data_dict)
if len(data_dict) == 0:
pg_ex['encoded_data_dict'] = None
pg_ex['encoded_example_html'] = ""
return
example_html.append(' </div>')
example_html.append(' <div class="col-md-4 da-example-source-col"><h5 class="mb-1">' + word('Source') + '<a href="#" tabindex="0" class="dabadge btn btn-success da-example-copy">' + word("Insert") + '</a></h5><div id="da-example-source-before" class="dainvisible"></div><div id="da-example-source"></div><div id="da-example-source-after" class="dainvisible"></div><div><a tabindex="0" class="da-example-hider" id="da-show-full-example">' + word("Show context of example") + '</a><a tabindex="0" class="da-example-hider dainvisible" id="da-hide-full-example">' + word("Hide context of example") + '</a></div></div>')
example_html.append(' <div class="col-md-6"><h5 class="mb-1">' + word("Preview") + '<a href="#" target="_blank" class="dabadge btn btn-primary da-example-documentation da-example-hidden" id="da-example-documentation-link">' + word("View documentation") + '</a></h5><a href="#" target="_blank" id="da-example-image-link"><img title=' + json.dumps(word("Click to try this interview")) + ' class="da-example-screenshot" id="da-example-image"></a></div>')
pg_ex['encoded_data_dict'] = safeid(json.dumps(data_dict))
pg_ex['encoded_example_html'] = Markup("\n".join(example_html))
if packaging.version.parse(min_system_version) > packaging.version.parse(daconfig['system version']):
version_warning = word("A new docassemble system version is available. If you are using Docker, install a new Docker image.")
else:
version_warning = None
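# AdminInterview and MenuItem represent entries from the "administrative
# interviews" configuration: both expose can_use() (role and login gating) and
# get_title()/get_url(), with AdminInterview pointing at an interview file and
# MenuItem pointing at an arbitrary URL with per-language labels.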
class AdminInterview:
def is_not(self, interview):
return self.interview != interview
def can_use(self):
if self.require_login and current_user.is_anonymous:
return False
if self.roles is None:
return True
if current_user.is_anonymous:
if 'anonymous' in self.roles:
return True
return False
if current_user.has_roles(self.roles):
return True
return False
def get_title(self, language):
if isinstance(self.title, str):
return word(self.title, language=language)
if language in self.title:
return self.title[language]
if '*' in self.title:
return self.title['*']
if DEFAULT_LANGUAGE in self.title:
return self.title[DEFAULT_LANGUAGE]
for lang, title in self.title.items(): # pylint: disable=unused-variable
return word(title, language=language)
def get_url(self):
return url_for_interview(i=self.interview, new_session='1')
class MenuItem:
def is_not(self, interview): # pylint: disable=unused-argument
return True
def can_use(self):
if self.require_login and current_user.is_anonymous:
return False
if self.roles is None:
return True
if current_user.is_anonymous:
if 'anonymous' in self.roles:
return True
return False
if current_user.has_roles(self.roles):
return True
return False
def get_title(self, language):
if language in self.label:
return self.label[language]
if '*' in self.label:
return self.label['*']
if DEFAULT_LANGUAGE in self.label:
return self.label[DEFAULT_LANGUAGE]
for lang, label in self.label.items(): # pylint: disable=unused-variable
return word(label, language=language)
def get_url(self):
return self.url
def set_admin_interviews():
admin_interviews = []
if 'administrative interviews' in daconfig:
if isinstance(daconfig['administrative interviews'], list):
for item in daconfig['administrative interviews']:
if isinstance(item, dict):
if 'url' in item and 'label' in item and isinstance(item['url'], str) and isinstance(item['label'], dict):
menu_item = MenuItem()
menu_item.url = item['url']
menu_item.label = item['label']
menu_item.roles = item['roles']
menu_item.require_login = item['require_login']
admin_interviews.append(menu_item)
elif 'interview' in item and isinstance(item['interview'], str):
try:
interview = docassemble.base.interview_cache.get_interview(item['interview'])
except:
logmessage("interview " + item['interview'] + " in administrative interviews did not exist")
continue
if 'title' in item:
the_title = item['title']
else:
the_title = interview.consolidated_metadata.get('short title', interview.consolidated_metadata.get('title', None))
if the_title is None:
logmessage("interview in administrative interviews needs to be given a title")
continue
admin_interview = AdminInterview()
admin_interview.interview = item['interview']
if isinstance(the_title, (str, dict)):
if isinstance(the_title, dict):
fault = False
for key, val in the_title.items():
if not (isinstance(key, str) and isinstance(val, str)):
fault = True
break
if fault:
logmessage("title of administrative interviews item must be a string or a dictionary with keys and values that are strings")
continue
admin_interview.title = the_title
else:
logmessage("title of administrative interviews item must be a string or a dictionary")
continue
if 'required privileges' not in item:
roles = set()
for metadata in interview.metadata:
if 'required privileges for listing' in metadata:
roles = set()
privs = metadata['required privileges for listing']
if isinstance(privs, list):
for priv in privs:
if isinstance(priv, str):
roles.add(priv)
elif isinstance(privs, str):
roles.add(privs)
elif 'required privileges' in metadata:
roles = set()
privs = metadata['required privileges']
if isinstance(privs, list):
for priv in privs:
if isinstance(priv, str):
roles.add(priv)
elif isinstance(privs, str):
roles.add(privs)
if len(roles) > 0:
item['required privileges'] = list(roles)
if 'required privileges' in item:
fault = False
if isinstance(item['required privileges'], list):
for rolename in item['required privileges']:
if not isinstance(rolename, str):
fault = True
break
else:
fault = True
if fault:
logmessage("required privileges in administrative interviews item must be a list of strings")
admin_interview.roles = None
else:
admin_interview.roles = item['required privileges']
else:
admin_interview.roles = None
admin_interview.require_login = False
if 'require login' in item and item['require login'] is not None:
admin_interview.require_login = bool(item['require login'])
else:
for metadata in interview.metadata:
if 'require login' in metadata:
admin_interview.require_login = bool(metadata['require login'])
admin_interviews.append(admin_interview)
else:
logmessage("item in administrative interviews must contain a valid interview name")
else:
logmessage("item in administrative interviews is not a dict")
else:
logmessage("administrative interviews is not a list")
return admin_interviews
def fix_api_key(match):
return 'da:apikey:userid:' + match.group(1) + ':key:' + encrypt_api_key(match.group(2), app.secret_key) + ':info'
def fix_api_keys():
to_delete = []
for rkey in r.keys('da:api:userid:*:key:*:info'):
try:
rkey = rkey.decode()
except:
continue
try:
info = json.loads(r.get(rkey).decode())
assert isinstance(info, dict)
except:
to_delete.append(rkey)
continue
info['last_four'] = re.sub(r'da:api:userid:.*:key:.*(....):info', r'\1', rkey)
new_rkey = re.sub(r'da:api:userid:(.*):key:(.*):info', fix_api_key, rkey)
r.set(new_rkey, json.dumps(info))
to_delete.append(rkey)
for rkey in to_delete:
r.delete(rkey)
class TestContext:
def __init__(self, package):
self.package = package
def __enter__(self):
url_root = daconfig.get('url root', 'http://localhost') + daconfig.get('root', '/')
url = url_root + 'interview'
self.app_context = app.app_context()
self.app_context.push()
self.test_context = app.test_request_context(base_url=url_root, path=url)
self.test_context.push()
login_as_admin(url, url_root)
docassemble.base.functions.this_thread.current_package = self.package
docassemble.base.functions.this_thread.current_info.update({'yaml_filename': self.package + ':data/questions/test.yml'})
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
current_app.login_manager._update_request_context_with_user()
self.test_context.pop()
self.app_context.pop()
def initialize():
global global_css
global global_js
if DEBUG_BOOT:
boot_log("server: entering app context")
with app.app_context():
url_root = daconfig.get('url root', 'http://localhost') + daconfig.get('root', '/')
url = url_root + 'interview'
if DEBUG_BOOT:
boot_log("server: entering request context")
with app.test_request_context(base_url=url_root, path=url):
docassemble.webapp.backend.fix_words()
app.config['USE_FAVICON'] = test_favicon_file('favicon.ico')
app.config['USE_APPLE_TOUCH_ICON'] = test_favicon_file('apple-touch-icon.png')
app.config['USE_FAVICON_MD'] = test_favicon_file('favicon-32x32.png')
app.config['USE_FAVICON_SM'] = test_favicon_file('favicon-16x16.png')
app.config['USE_SITE_WEBMANIFEST'] = test_favicon_file('site.webmanifest', alt='manifest.json')
app.config['USE_SAFARI_PINNED_TAB'] = test_favicon_file('safari-pinned-tab.svg')
if 'bootstrap theme' in daconfig and daconfig['bootstrap theme']:
try:
app.config['BOOTSTRAP_THEME'] = get_url_from_file_reference(daconfig['bootstrap theme'])
assert isinstance(app.config['BOOTSTRAP_THEME'], str)
except:
app.config['BOOTSTRAP_THEME'] = None
logmessage("error loading bootstrap theme")
else:
app.config['BOOTSTRAP_THEME'] = None
if 'global css' in daconfig:
for fileref in daconfig['global css']:
try:
global_css_url = get_url_from_file_reference(fileref)
assert isinstance(global_css_url, str)
global_css += "\n" + ' <link href="' + global_css_url + '" rel="stylesheet">'
except:
logmessage("error loading global css: " + repr(fileref))
if 'global javascript' in daconfig:
for fileref in daconfig['global javascript']:
try:
global_js_url = get_url_from_file_reference(fileref)
assert isinstance(global_js_url, str)
global_js += "\n" + ' <script src="' + global_js_url + '"></script>'
except:
logmessage("error loading global js: " + repr(fileref))
if 'raw global css' in daconfig and daconfig['raw global css']:
global_css += "\n" + str(daconfig['raw global css'])
if 'raw global javascript' in daconfig and daconfig['raw global javascript']:
global_js += "\n" + str(daconfig['raw global javascript'])
app.config['GLOBAL_CSS'] = global_css
app.config['GLOBAL_JS'] = global_js
app.config['PARTS'] = page_parts
app.config['ADMIN_INTERVIEWS'] = set_admin_interviews()
try:
if 'image' in daconfig['social'] and isinstance(daconfig['social']['image'], str):
daconfig['social']['image'] = get_url_from_file_reference(daconfig['social']['image'], _external=True)
if daconfig['social']['image'] is None:
del daconfig['social']['image']
for key, subkey in (('og', 'image'), ('twitter', 'image')):
if key in daconfig['social'] and isinstance(daconfig['social'][key], dict) and subkey in daconfig['social'][key] and isinstance(daconfig['social'][key][subkey], str):
daconfig['social'][key][subkey] = get_url_from_file_reference(daconfig['social'][key][subkey], _external=True)
if daconfig['social'][key][subkey] is None:
del daconfig['social'][key][subkey]
except:
logmessage("Error converting social image references")
interviews_to_load = daconfig.get('preloaded interviews', None)
if DEBUG_BOOT:
boot_log("server: loading preloaded interviews")
if isinstance(interviews_to_load, list):
for yaml_filename in daconfig['preloaded interviews']:
try:
docassemble.base.interview_cache.get_interview(yaml_filename)
except:
pass
if DEBUG_BOOT:
boot_log("server: finished loading preloaded interviews")
if app.config['ENABLE_PLAYGROUND']:
if DEBUG_BOOT:
boot_log("server: copying playground modules")
obtain_lock('init' + hostname, 'init')
try:
copy_playground_modules()
except Exception as err:
logmessage("There was an error copying the playground modules: " + err.__class__.__name__)
write_pypirc()
release_lock('init' + hostname, 'init')
if DEBUG_BOOT:
boot_log("server: finished copying playground modules")
try:
if DEBUG_BOOT:
boot_log("server: deleting LibreOffice macro file if necessary")
macro_path = daconfig.get('libreoffice macro file', '/var/www/.config/libreoffice/4/user/basic/Standard/Module1.xba')
if os.path.isfile(macro_path) and os.path.getsize(macro_path) != 7167:
# logmessage("Removing " + macro_path + " because it is out of date")
os.remove(macro_path)
# else:
# logmessage("File " + macro_path + " is missing or has the correct size")
except Exception as err:
logmessage("Error was " + err.__class__.__name__ + ' ' + str(err))
if DEBUG_BOOT:
boot_log("server: fixing API keys")
fix_api_keys()
if DEBUG_BOOT:
boot_log("server: starting importing add-on modules")
import_necessary(url, url_root)
if DEBUG_BOOT:
boot_log("server: finished importing add-on modules")
boot_log("server: running app")
initialize()
if __name__ == "__main__":
app.run()
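# Standalone sketch (not part of the original module) of the key-rewriting
# pattern used by fix_api_keys() above: re.sub() is given a callable, so each
# matched Redis key is rebuilt from its captured user id and API key.
# encrypt_api_key() is replaced by a stand-in because its implementation is not
# included in this record; nothing below is called by the server itself.
def _demo_fix_api_key_rewrite():
    def _encrypt_stub(api_key, secret):
        return 'enc-' + api_key
    def _rewrite(match):
        return ('da:apikey:userid:' + match.group(1) + ':key:'
                + _encrypt_stub(match.group(2), 'secret') + ':info')
    old_key = 'da:api:userid:42:key:abcd1234wxyz:info'
    # returns 'da:apikey:userid:42:key:enc-abcd1234wxyz:info'
    return re.sub(r'da:api:userid:(.*):key:(.*):info', _rewrite, old_key)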
|
GHSA-pcfx-g2j2-f6f6
|
docassemble_webapp/docassemble/webapp/users/forms.py
|
@@ -5,16 +5,17 @@
from wtforms import DateField, StringField, SubmitField, ValidationError, BooleanField, SelectField, SelectMultipleField, HiddenField, validators, TextAreaField
from wtforms.validators import DataRequired, Email, Optional
from wtforms.widgets import PasswordInput
+from flask import flash, current_app, request, abort
+from flask_login import current_user
+from sqlalchemy import select
from docassemble.base.functions import LazyWord as word, LazyArray
from docassemble.base.config import daconfig
from docassemble.base.generate_key import random_alphanumeric
from docassemble.base.logger import logmessage
from docassemble.webapp.daredis import r
from docassemble.webapp.db_object import db
from docassemble.webapp.users.models import UserModel, Role
-from flask import flash, current_app, request, abort
-from flask_login import current_user
-from sqlalchemy import select
+from docassemble.webapp.validators import html_validator
try:
import ldap
except ImportError:
@@ -207,21 +208,22 @@ def da_registration_restrict_validator(form, field): # pylint: disable=unused-a
class MyRegisterForm(RegisterForm):
- first_name = StringField(word('First name'), [validators.Length(min=0, max=255)])
- last_name = StringField(word('Last name'), [validators.Length(min=0, max=255)])
- country = StringField(word('Country code'), [validators.Length(min=0, max=2)])
- subdivisionfirst = StringField(word('First subdivision'), [validators.Length(min=0, max=64)])
- subdivisionsecond = StringField(word('Second subdivision'), [validators.Length(min=0, max=64)])
- subdivisionthird = StringField(word('Third subdivision'), [validators.Length(min=0, max=64)])
- organization = StringField(word('Organization'), [validators.Length(min=0, max=64)])
- language = StringField(word('Language'), [validators.Length(min=0, max=64)])
- timezone = SelectField(word('Time Zone'), [validators.Length(min=0, max=64)])
- nickname = StringField(word('Nickname'), [fix_nickname])
+ first_name = StringField(word('First name'), [validators.Length(min=0, max=255), html_validator])
+ last_name = StringField(word('Last name'), [validators.Length(min=0, max=255), html_validator])
+ country = StringField(word('Country code'), [validators.Length(min=0, max=2), html_validator])
+ subdivisionfirst = StringField(word('First subdivision'), [validators.Length(min=0, max=64), html_validator])
+ subdivisionsecond = StringField(word('Second subdivision'), [validators.Length(min=0, max=64), html_validator])
+ subdivisionthird = StringField(word('Third subdivision'), [validators.Length(min=0, max=64), html_validator])
+ organization = StringField(word('Organization'), [validators.Length(min=0, max=64), html_validator])
+ language = StringField(word('Language'), [validators.Length(min=0, max=64), html_validator])
+ timezone = SelectField(word('Time Zone'), [validators.Length(min=0, max=64), html_validator])
+ nickname = StringField(word('Nickname'), [fix_nickname, html_validator])
email = StringField(word('Email'), validators=[
validators.DataRequired(word('Email is required')),
validators.Email(word('Invalid Email')),
da_unique_email_validator,
- da_registration_restrict_validator])
+ da_registration_restrict_validator,
+ html_validator])
def length_two(form, field): # pylint: disable=unused-argument
@@ -236,24 +238,24 @@ class NewPrivilegeForm(FlaskForm):
class UserProfileForm(FlaskForm):
- first_name = StringField(word('First name'), [validators.Length(min=0, max=255)])
- last_name = StringField(word('Last name'), [validators.Length(min=0, max=255)])
- country = StringField(word('Country code'), [validators.Length(min=0, max=2)])
- subdivisionfirst = StringField(word('First subdivision'), [validators.Length(min=0, max=64)])
- subdivisionsecond = StringField(word('Second subdivision'), [validators.Length(min=0, max=64)])
- subdivisionthird = StringField(word('Third subdivision'), [validators.Length(min=0, max=64)])
- organization = StringField(word('Organization'), [validators.Length(min=0, max=64)])
- language = StringField(word('Language'), [validators.Length(min=0, max=64)])
- timezone = SelectField(word('Time Zone'), [validators.Length(min=0, max=64)])
- pypi_username = StringField(word('PyPI Username'), [validators.Length(min=0, max=255)])
+ first_name = StringField(word('First name'), [validators.Length(min=0, max=255), html_validator])
+ last_name = StringField(word('Last name'), [validators.Length(min=0, max=255), html_validator])
+ country = StringField(word('Country code'), [validators.Length(min=0, max=2), html_validator])
+ subdivisionfirst = StringField(word('First subdivision'), [validators.Length(min=0, max=64), html_validator])
+ subdivisionsecond = StringField(word('Second subdivision'), [validators.Length(min=0, max=64), html_validator])
+ subdivisionthird = StringField(word('Third subdivision'), [validators.Length(min=0, max=64), html_validator])
+ organization = StringField(word('Organization'), [validators.Length(min=0, max=64), html_validator])
+ language = StringField(word('Language'), [validators.Length(min=0, max=64), html_validator])
+ timezone = SelectField(word('Time Zone'), [validators.Length(min=0, max=64), html_validator])
+ pypi_username = StringField(word('PyPI Username'), [validators.Length(min=0, max=255), html_validator])
pypi_password = StringField(word('PyPI Password'), [validators.Length(min=0, max=255)])
confirmed_at = DateField(word('Confirmation Date'))
submit = SubmitField(word('Save'))
cancel = SubmitField(word('Cancel'))
class EditUserProfileForm(UserProfileForm):
- email = StringField(word('E-mail'))
+ email = StringField(word('E-mail'), validators=[Email(word('Must be a valid e-mail address')), html_validator])
role_id = SelectMultipleField(word('Privileges'), coerce=int)
active = BooleanField(word('Active'))
uses_mfa = BooleanField(word('Uses two-factor authentication'))
@@ -299,11 +301,11 @@ def validate(self): # pylint: disable=arguments-differ
flash(word("Please choose a different e-mail address."), 'error')
return False
return super().validate()
- email = StringField(word('E-mail'), validators=[Optional(), Email(word('Must be a valid e-mail address'))])
+ email = StringField(word('E-mail'), validators=[Optional(), Email(word('Must be a valid e-mail address')), html_validator])
class RequestDeveloperForm(FlaskForm):
- reason = StringField(word('Reason for needing developer account (optional)'))
+ reason = StringField(word('Reason for needing developer account (optional)'), validators=[html_validator])
submit = SubmitField(word('Submit'))
@@ -334,21 +336,21 @@ class UserAddForm(FlaskForm):
email = StringField(word('E-mail'), validators=[
validators.InputRequired(word('E-mail is required')),
validators.Email(word('Invalid E-mail'))])
- first_name = StringField(word('First name'), [validators.Length(min=0, max=255)])
- last_name = StringField(word('Last name'), [validators.Length(min=0, max=255)])
+ first_name = StringField(word('First name'), [validators.Length(min=0, max=255), html_validator])
+ last_name = StringField(word('Last name'), [validators.Length(min=0, max=255), html_validator])
role_id = SelectMultipleField(word('Privileges'), coerce=int)
password = StringField(word('Password'), widget=PasswordInput(hide_value=False), validators=[password_validator])
submit = SubmitField(word('Add'))
class PhoneLoginForm(FlaskForm):
- phone_number = StringField(word('Phone number'), [validators.Length(min=5, max=255)])
+ phone_number = StringField(word('Phone number'), [validators.Length(min=5, max=255), html_validator])
submit = SubmitField(word('Go'))
class PhoneLoginVerifyForm(FlaskForm):
- phone_number = StringField(word('Phone number'), [validators.Length(min=5, max=255)])
- verification_code = StringField(word('Verification code'), [validators.Length(min=daconfig['verification code digits'], max=daconfig['verification code digits'])])
+ phone_number = StringField(word('Phone number'), [validators.Length(min=5, max=255), html_validator])
+ verification_code = StringField(word('Verification code'), [validators.Length(min=daconfig['verification code digits'], max=daconfig['verification code digits']), html_validator])
submit = SubmitField(word('Verify'))
def validate(self): # pylint: disable=arguments-differ
@@ -404,7 +406,7 @@ class MFAChooseForm(FlaskForm):
class MFASMSSetupForm(FlaskForm):
- phone_number = StringField(word('Phone number'), [validators.Length(min=5, max=255)])
+ phone_number = StringField(word('Phone number'), [validators.Length(min=5, max=255), html_validator])
submit = SubmitField(word('Verify'))
@@ -416,8 +418,8 @@ class MFAVerifySMSSetupForm(FlaskForm):
class MyResendConfirmEmailForm(FlaskForm):
email = StringField(word('Your e-mail address'), validators=[
validators.DataRequired(word('E-mail address is required')),
- validators.Email(word('Invalid e-mail address')),
- ])
+ validators.Email(word('Invalid e-mail address'))
+ ])
submit = SubmitField(word('Send confirmation email'))
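The fix above routes nearly every free-text field through an html_validator imported from docassemble.webapp.validators, whose implementation is not included in this record. The following is only a minimal sketch, assuming a WTForms-style field validator that rejects any value containing HTML markup; the real validator may strip, escape, or check differently.

import re

from wtforms import ValidationError

_looks_like_html = re.compile(r'<[^>]+>')

def html_validator(form, field):
    # Reject any submitted value that appears to contain an HTML tag.
    if field.data and _looks_like_html.search(str(field.data)):
        raise ValidationError('HTML is not allowed in this field.')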
|
import re
import email.utils
from docassemble_flask_user.forms import RegisterForm, LoginForm, password_validator, unique_email_validator
from flask_wtf import FlaskForm
from wtforms import DateField, StringField, SubmitField, ValidationError, BooleanField, SelectField, SelectMultipleField, HiddenField, validators, TextAreaField
from wtforms.validators import DataRequired, Email, Optional
from wtforms.widgets import PasswordInput
from docassemble.base.functions import LazyWord as word, LazyArray
from docassemble.base.config import daconfig
from docassemble.base.generate_key import random_alphanumeric
from docassemble.base.logger import logmessage
from docassemble.webapp.daredis import r
from docassemble.webapp.db_object import db
from docassemble.webapp.users.models import UserModel, Role
from flask import flash, current_app, request, abort
from flask_login import current_user
from sqlalchemy import select
try:
import ldap
except ImportError:
if 'ldap login' not in daconfig:
daconfig['ldap login'] = {}
daconfig['ldap login']['enable'] = False
HTTP_TO_HTTPS = daconfig.get('behind https load balancer', False)
BAN_IP_ADDRESSES = daconfig.get('ip address ban enabled', True)
def get_requester_ip(req):
if not req:
return '127.0.0.1'
if HTTP_TO_HTTPS:
if 'X-Real-Ip' in req.headers:
return req.headers['X-Real-Ip']
if 'X-Forwarded-For' in req.headers:
return req.headers['X-Forwarded-For']
return req.remote_addr
def fix_nickname(form, field):
field.data = str(form.first_name.data) + ' ' + str(form.last_name.data)
class MySignInForm(LoginForm):
def ldap_bind(self, connect):
base_dn = daconfig['ldap login']['base dn'].strip()
if daconfig['ldap login'].get('anonymous bind', False):
bind_dn = ""
bind_password = ""
else:
bind_dn = daconfig['ldap login']['bind dn'].strip()
bind_password = daconfig['ldap login']['bind password'].strip()
username = ""
password = ""
try:
search_filter = daconfig['ldap login'].get('search pattern',
"mail=%s") % (self.email.data)
connect.simple_bind_s(bind_dn, bind_password)
search_results = connect.search_s(base_dn,
ldap.SCOPE_SUBTREE, search_filter)
if len(search_results) == 0:
logmessage(("Email %s was not found in LDAP "
"using search filter %s, base dn %s") %
(self.email.data, search_filter, base_dn))
else:
if len(search_results) > 1:
logmessage(("Email %s was found multiple times in LDAP "
"using search filter %s, base dn %s "
"- the first result will be used as dn: %s") %
(self.email.data, search_filter, base_dn, search_results[0][0]))
username = search_results[0][0]
password = self.password.data
except (ldap.LDAPError, ldap.INVALID_CREDENTIALS):
# no unbind to make use of the rest of the LDAP workflow
logmessage(("Could not login into LDAP with email %s "
"and given password") %
(self.email.data))
return username, password
def validate(self):
if BAN_IP_ADDRESSES:
key = 'da:failedlogin:ip:' + str(get_requester_ip(request))
failed_attempts = r.get(key)
if failed_attempts is not None and int(failed_attempts) > daconfig['attempt limit']:
abort(404)
ldap_server = daconfig['ldap login'].get('server', 'localhost').strip()
if daconfig['ldap login'].get('enable', False):
if daconfig['ldap login'].get('ldap over TLS', False):
ldap_protocol = "ldaps"
else:
ldap_protocol = "ldap"
connect = ldap.initialize(ldap_protocol + '://' + ldap_server)
if daconfig['ldap login'].get('login with bind_dn', False):
username, password = self.ldap_bind(connect)
else:
username = self.email.data
password = self.password.data
connect.set_option(ldap.OPT_REFERRALS, 0)
try:
connect.simple_bind_s(username, password)
if connect.whoami_s() is not None:
connect.unbind_s()
user_manager = current_app.user_manager
user, user_email = user_manager.find_user_by_email(self.email.data) # pylint: disable=unused-variable
if not user:
while True:
new_social = 'ldap$' + random_alphanumeric(32)
existing_user = db.session.execute(select(UserModel).filter_by(social_id=new_social)).scalar()
if existing_user:
continue
break
user = UserModel(social_id=new_social, email=self.email.data, nickname='', active=True)
user_role = db.session.execute(select(Role).filter_by(name='user')).scalar_one()
user.roles.append(user_role)
db.session.add(user)
db.session.commit()
result = True
else:
connect.unbind_s()
try:
result = super().validate()
except:
result = False
except (ldap.LDAPError, ldap.INVALID_CREDENTIALS):
connect.unbind_s()
try:
result = super().validate()
except:
result = False
else:
user_manager = current_app.user_manager
user, user_email = user_manager.find_user_by_email(self.email.data)
if user is None:
if daconfig.get('confirm registration', False) or not daconfig.get('allow registration', False):
self.email.errors = []
self.email.errors.append(word("Incorrect Email and/or Password"))
self.password.errors = []
self.password.errors.append(word("Incorrect Email and/or Password"))
else:
self.email.errors = list(self.email.errors)
self.email.errors.append(word("Account did not exist."))
return False
if user and (user.password is None or (user.social_id is not None and not user.social_id.startswith('local$'))):
self.email.errors = list(self.email.errors)
if user.social_id.startswith('google$'):
self.email.errors.append(word("You need to log in with Google."))
elif user.social_id.startswith('azure$'):
self.email.errors.append(word("You need to log in with Azure."))
elif user.social_id.startswith('auth0$'):
self.email.errors.append(word("You need to log in with Auth0."))
elif user.social_id.startswith('twitter$'):
self.email.errors.append(word("You need to log in with Twitter."))
elif user.social_id.startswith('facebook$'):
self.email.errors.append(word("You need to log in with Facebook."))
else:
self.email.errors.append(word("You cannot log in this way."))
return False
if self.password.data == 'password':
pipe = r.pipeline()
pipe.set('da:insecure_password_present', '1')
pipe.expire('da:insecure_password_present', 60)
pipe.execute()
# logmessage("Trying super validate")
result = super().validate()
# logmessage("Super validate response was " + repr(result))
if result is False:
r.incr(key)
r.expire(key, daconfig['ban period'])
elif failed_attempts is not None:
r.delete(key)
return result
def da_unique_email_validator(form, field):
if daconfig['ldap login'].get('enable', False) and daconfig['ldap login'].get('base dn', None) is not None and daconfig['ldap login'].get('bind email', None) is not None and daconfig['ldap login'].get('bind password', None) is not None:
ldap_server = daconfig['ldap login'].get('server', 'localhost').strip()
base_dn = daconfig['ldap login']['base dn'].strip()
search_filter = daconfig['ldap login'].get('search pattern', "mail=%s") % (form.email.data,)
connect = ldap.initialize('ldap://' + ldap_server)
try:
connect.simple_bind_s(daconfig['ldap login']['bind email'], daconfig['ldap login']['bind password'])
if len(connect.search_s(base_dn, ldap.SCOPE_SUBTREE, search_filter)) > 0:
raise ValidationError(word("This Email is already in use. Please try another one."))
except ldap.LDAPError:
pass
if daconfig.get('confirm registration', False):
return True
return unique_email_validator(form, field)
def da_registration_restrict_validator(form, field): # pylint: disable=unused-argument
if len(daconfig['authorized registration domains']) == 0:
return True
user_email = str(form.email.data).lower().strip()
for domain in daconfig['authorized registration domains']:
if user_email.endswith(domain):
return True
errors = list(form.email.errors)
errors.append(word('E-mail addresses with this domain are not authorized to register for accounts on this system.'))
form.email.errors = tuple(errors)
return False
class MyRegisterForm(RegisterForm):
first_name = StringField(word('First name'), [validators.Length(min=0, max=255)])
last_name = StringField(word('Last name'), [validators.Length(min=0, max=255)])
country = StringField(word('Country code'), [validators.Length(min=0, max=2)])
subdivisionfirst = StringField(word('First subdivision'), [validators.Length(min=0, max=64)])
subdivisionsecond = StringField(word('Second subdivision'), [validators.Length(min=0, max=64)])
subdivisionthird = StringField(word('Third subdivision'), [validators.Length(min=0, max=64)])
organization = StringField(word('Organization'), [validators.Length(min=0, max=64)])
language = StringField(word('Language'), [validators.Length(min=0, max=64)])
timezone = SelectField(word('Time Zone'), [validators.Length(min=0, max=64)])
nickname = StringField(word('Nickname'), [fix_nickname])
email = StringField(word('Email'), validators=[
validators.DataRequired(word('Email is required')),
validators.Email(word('Invalid Email')),
da_unique_email_validator,
da_registration_restrict_validator])
def length_two(form, field): # pylint: disable=unused-argument
if len(field.data) != 2:
raise ValidationError(word('Must be a two-letter code'))
class NewPrivilegeForm(FlaskForm):
name = StringField(word('Name of new privilege'), validators=[
DataRequired(word('Name of new privilege is required'))])
submit = SubmitField(word('Add'))
class UserProfileForm(FlaskForm):
first_name = StringField(word('First name'), [validators.Length(min=0, max=255)])
last_name = StringField(word('Last name'), [validators.Length(min=0, max=255)])
country = StringField(word('Country code'), [validators.Length(min=0, max=2)])
subdivisionfirst = StringField(word('First subdivision'), [validators.Length(min=0, max=64)])
subdivisionsecond = StringField(word('Second subdivision'), [validators.Length(min=0, max=64)])
subdivisionthird = StringField(word('Third subdivision'), [validators.Length(min=0, max=64)])
organization = StringField(word('Organization'), [validators.Length(min=0, max=64)])
language = StringField(word('Language'), [validators.Length(min=0, max=64)])
timezone = SelectField(word('Time Zone'), [validators.Length(min=0, max=64)])
pypi_username = StringField(word('PyPI Username'), [validators.Length(min=0, max=255)])
pypi_password = StringField(word('PyPI Password'), [validators.Length(min=0, max=255)])
confirmed_at = DateField(word('Confirmation Date'))
submit = SubmitField(word('Save'))
cancel = SubmitField(word('Cancel'))
class EditUserProfileForm(UserProfileForm):
email = StringField(word('E-mail'))
role_id = SelectMultipleField(word('Privileges'), coerce=int)
active = BooleanField(word('Active'))
uses_mfa = BooleanField(word('Uses two-factor authentication'))
def validate(self, user_id, admin_id): # pylint: disable=arguments-differ
existing_user = db.session.execute(select(UserModel).filter_by(id=user_id)).scalar()
phone_user = existing_user.social_id.startswith('phone$')
user_manager = current_app.user_manager
rv = UserProfileForm.validate(self)
if not rv:
return False
if phone_user and self.email.data == '':
self.email.data = None
if not phone_user:
if not (self.email.data and self.email.data.strip()):
self.email.errors.append(word('E-mail is required'))
return False
if self.email.data:
try:
Email("error")(self, self.email)
except ValidationError:
self.email.errors.append(word('Must be a valid e-mail address'))
return False
user, user_email = user_manager.find_user_by_email(self.email.data) # pylint: disable=unused-variable
if user is not None and user.id != user_id:
self.email.errors.append(word('That e-mail address is already taken.'))
return False
if current_user.id == user_id and current_user.has_roles('admin'):
if admin_id not in self.role_id.data:
self.role_id.errors.append(word('You cannot take away your own admin privilege.'))
return False
self.active.data = True
return True
class PhoneUserProfileForm(UserProfileForm):
def validate(self): # pylint: disable=arguments-differ
if self.email.data:
if current_user.social_id.startswith('phone$'):
existing_user = db.session.execute(select(UserModel).filter_by(email=self.email.data, active=True)).scalar()
if existing_user is not None and existing_user.id != current_user.id:
flash(word("Please choose a different e-mail address."), 'error')
return False
return super().validate()
email = StringField(word('E-mail'), validators=[Optional(), Email(word('Must be a valid e-mail address'))])
class RequestDeveloperForm(FlaskForm):
reason = StringField(word('Reason for needing developer account (optional)'))
submit = SubmitField(word('Submit'))
class MyInviteForm(FlaskForm):
def validate(self): # pylint: disable=arguments-differ
has_error = False
if self.email.data:
for email_address in re.split(r'[\n\r]+', self.email.data.strip()):
(part_one, part_two) = email.utils.parseaddr(email_address) # pylint: disable=unused-variable
if part_two == '':
the_errors = list(self.email.errors)
the_errors.append(word("Invalid e-mail address: " + email_address))
self.email.errors = tuple(the_errors)
has_error = True
if has_error:
return False
return super().validate()
email = TextAreaField(word('One or more e-mail addresses (separated by newlines)'), validators=[
validators.InputRequired(word('At least one e-mail address must be listed'))
])
role_id = SelectField(word('Role'))
next = HiddenField()
submit = SubmitField(word('Invite'))
class UserAddForm(FlaskForm):
email = StringField(word('E-mail'), validators=[
validators.InputRequired(word('E-mail is required')),
validators.Email(word('Invalid E-mail'))])
first_name = StringField(word('First name'), [validators.Length(min=0, max=255)])
last_name = StringField(word('Last name'), [validators.Length(min=0, max=255)])
role_id = SelectMultipleField(word('Privileges'), coerce=int)
password = StringField(word('Password'), widget=PasswordInput(hide_value=False), validators=[password_validator])
submit = SubmitField(word('Add'))
class PhoneLoginForm(FlaskForm):
phone_number = StringField(word('Phone number'), [validators.Length(min=5, max=255)])
submit = SubmitField(word('Go'))
class PhoneLoginVerifyForm(FlaskForm):
phone_number = StringField(word('Phone number'), [validators.Length(min=5, max=255)])
verification_code = StringField(word('Verification code'), [validators.Length(min=daconfig['verification code digits'], max=daconfig['verification code digits'])])
submit = SubmitField(word('Verify'))
def validate(self): # pylint: disable=arguments-differ
result = True
if BAN_IP_ADDRESSES:
key = 'da:failedlogin:ip:' + str(get_requester_ip(request))
failed_attempts = r.get(key)
if failed_attempts is not None and int(failed_attempts) > daconfig['attempt limit']:
abort(404)
verification_key = 'da:phonelogin:' + str(self.phone_number.data) + ':code'
verification_code = r.get(verification_key)
# r.delete(verification_key)
supplied_verification_code = re.sub(r'[^0-9]', '', self.verification_code.data)
logmessage("Supplied code is " + str(supplied_verification_code))
if verification_code is None:
logmessage("Verification code with " + str(verification_key) + " is None")
result = False
elif verification_code.decode() != supplied_verification_code:
logmessage("Verification code with " + str(verification_key) + " which is " + str(verification_code.decode()) + " does not match supplied code, which is " + str(self.verification_code.data))
result = False
else:
logmessage("Code matched")
if result is False:
logmessage("Problem with form")
r.incr(key)
r.expire(key, 86400)
elif failed_attempts is not None:
r.delete(key)
return result
class MFASetupForm(FlaskForm):
verification_code = StringField(word('Verification code'))
submit = SubmitField(word('Verify'))
class MFALoginForm(FlaskForm):
verification_code = StringField(word('Verification code'))
next = HiddenField()
submit = SubmitField(word('Verify'))
class MFAReconfigureForm(FlaskForm):
reconfigure = SubmitField(word('Reconfigure'))
disable = SubmitField(word('Disable'))
cancel = SubmitField(word('Cancel'))
class MFAChooseForm(FlaskForm):
auth = SubmitField(word('App'))
sms = SubmitField(word('SMS'))
cancel = SubmitField(word('Cancel'))
class MFASMSSetupForm(FlaskForm):
phone_number = StringField(word('Phone number'), [validators.Length(min=5, max=255)])
submit = SubmitField(word('Verify'))
class MFAVerifySMSSetupForm(FlaskForm):
verification_code = StringField(word('Verification code'))
submit = SubmitField(word('Verify'))
class MyResendConfirmEmailForm(FlaskForm):
email = StringField(word('Your e-mail address'), validators=[
validators.DataRequired(word('E-mail address is required')),
validators.Email(word('Invalid e-mail address')),
])
submit = SubmitField(word('Send confirmation email'))
class ManageAccountForm(FlaskForm):
confirm = StringField(word('Type \"delete my account\" here to confirm that you want to delete your account.'), [validators.AnyOf(LazyArray([word("delete my account")]), message=word('Since you did not type \"delete my account\" I did not delete your account.'))])
delete = SubmitField(word('Delete Account'))
class InterviewsListForm(FlaskForm):
i = StringField()
session = StringField()
tags = StringField()
delete = SubmitField()
delete_all = SubmitField()
|
GHSA-pcfx-g2j2-f6f6
|
src/lxml/html/clean.py
|
@@ -70,9 +70,10 @@
# All kinds of schemes besides just javascript: that can cause
# execution:
-_javascript_scheme_re = re.compile(
- r'\s*(?:javascript|jscript|livescript|vbscript|data|about|mocha):', re.I)
-_substitute_whitespace = re.compile(r'\s+').sub
+_is_javascript_scheme = re.compile(
+ r'(?:javascript|jscript|livescript|vbscript|data|about|mocha):',
+ re.I).search
+_substitute_whitespace = re.compile(r'[\s\x00-\x08\x0B\x0C\x0E-\x19]+').sub
# FIXME: should data: be blocked?
# FIXME: check against: http://msdn2.microsoft.com/en-us/library/ms537512.aspx
@@ -466,7 +467,7 @@ def _kill_elements(self, doc, condition, iterate=None):
def _remove_javascript_link(self, link):
# links like "j a v a s c r i p t:" might be interpreted in IE
new = _substitute_whitespace('', link)
- if _javascript_scheme_re.search(new):
+ if _is_javascript_scheme(new):
# FIXME: should this be None to delete?
return ''
return link
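The key change above is the character class used to strip a link before the scheme check: Python's \s does not match most C0 control characters, so a value such as 'java\x01script:...' passed through the old substitution unchanged and never matched the scheme regex, while the widened class removes those bytes first. A small standalone sketch (not part of lxml) of the difference:

import re

old_strip = re.compile(r'\s+').sub
new_strip = re.compile(r'[\s\x00-\x08\x0B\x0C\x0E-\x19]+').sub
is_js_scheme = re.compile(
    r'(?:javascript|jscript|livescript|vbscript|data|about|mocha):', re.I).search

link = 'java\x01script:alert(1)'                 # control byte inside the scheme
print(bool(is_js_scheme(old_strip('', link))))   # False: \s+ does not remove \x01
print(bool(is_js_scheme(new_strip('', link))))   # True: the widened class does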
|
"""A cleanup tool for HTML.
Removes unwanted tags and content. See the `Cleaner` class for
details.
"""
import re
import copy
try:
from urlparse import urlsplit
except ImportError:
# Python 3
from urllib.parse import urlsplit
from lxml import etree
from lxml.html import defs
from lxml.html import fromstring, tostring, XHTML_NAMESPACE
from lxml.html import xhtml_to_html, _transform_result
try:
unichr
except NameError:
# Python 3
unichr = chr
try:
unicode
except NameError:
# Python 3
unicode = str
try:
bytes
except NameError:
# Python < 2.6
bytes = str
try:
basestring
except NameError:
basestring = (str, bytes)
__all__ = ['clean_html', 'clean', 'Cleaner', 'autolink', 'autolink_html',
'word_break', 'word_break_html']
# Look at http://code.sixapart.com/trac/livejournal/browser/trunk/cgi-bin/cleanhtml.pl
# Particularly the CSS cleaning; most of the tag cleaning is integrated now
# I have multiple kinds of schemes searched; but should schemes be
# whitelisted instead?
# max height?
# remove images? Also in CSS? background attribute?
# Some way to whitelist object, iframe, etc (e.g., if you want to
# allow *just* embedded YouTube movies)
# Log what was deleted and why?
# style="behavior: ..." might be bad in IE?
# Should we have something for just <meta http-equiv>? That's the worst of the
# metas.
# UTF-7 detections? Example:
# <HEAD><META HTTP-EQUIV="CONTENT-TYPE" CONTENT="text/html; charset=UTF-7"> </HEAD>+ADw-SCRIPT+AD4-alert('XSS');+ADw-/SCRIPT+AD4-
# you don't always have to have the charset set, if the page has no charset
# and there's UTF7-like code in it.
# Look at these tests: http://htmlpurifier.org/live/smoketests/xssAttacks.php
# This is an IE-specific construct you can have in a stylesheet to
# run some Javascript:
_css_javascript_re = re.compile(
r'expression\s*\(.*?\)', re.S|re.I)
# Do I have to worry about @\nimport?
_css_import_re = re.compile(
r'@\s*import', re.I)
# All kinds of schemes besides just javascript: that can cause
# execution:
_javascript_scheme_re = re.compile(
r'\s*(?:javascript|jscript|livescript|vbscript|data|about|mocha):', re.I)
_substitute_whitespace = re.compile(r'\s+').sub
# FIXME: should data: be blocked?
# FIXME: check against: http://msdn2.microsoft.com/en-us/library/ms537512.aspx
_conditional_comment_re = re.compile(
r'\[if[\s\n\r]+.*?][\s\n\r]*>', re.I|re.S)
_find_styled_elements = etree.XPath(
"descendant-or-self::*[@style]")
_find_external_links = etree.XPath(
("descendant-or-self::a [normalize-space(@href) and substring(normalize-space(@href),1,1) != '#'] |"
"descendant-or-self::x:a[normalize-space(@href) and substring(normalize-space(@href),1,1) != '#']"),
namespaces={'x':XHTML_NAMESPACE})
class Cleaner(object):
"""
    Instances clean the document of each of the possible offending
elements. The cleaning is controlled by attributes; you can
override attributes in a subclass, or set them in the constructor.
``scripts``:
Removes any ``<script>`` tags.
``javascript``:
Removes any Javascript, like an ``onclick`` attribute. Also removes stylesheets
as they could contain Javascript.
``comments``:
Removes any comments.
``style``:
Removes any style tags or attributes.
``links``:
Removes any ``<link>`` tags
``meta``:
Removes any ``<meta>`` tags
``page_structure``:
Structural parts of a page: ``<head>``, ``<html>``, ``<title>``.
``processing_instructions``:
Removes any processing instructions.
``embedded``:
Removes any embedded objects (flash, iframes)
``frames``:
Removes any frame-related tags
``forms``:
Removes any form tags
``annoying_tags``:
Tags that aren't *wrong*, but are annoying. ``<blink>`` and ``<marquee>``
``remove_tags``:
A list of tags to remove. Only the tags will be removed,
their content will get pulled up into the parent tag.
``kill_tags``:
A list of tags to kill. Killing also removes the tag's content,
i.e. the whole subtree, not just the tag itself.
``allow_tags``:
A list of tags to include (default include all).
``remove_unknown_tags``:
Remove any tags that aren't standard parts of HTML.
``safe_attrs_only``:
If true, only include 'safe' attributes (specifically the list
from the feedparser HTML sanitisation web site).
``safe_attrs``:
A set of attribute names to override the default list of attributes
considered 'safe' (when safe_attrs_only=True).
``add_nofollow``:
If true, then any <a> tags will have ``rel="nofollow"`` added to them.
``host_whitelist``:
A list or set of hosts that you can use for embedded content
(for content like ``<object>``, ``<link rel="stylesheet">``, etc).
You can also implement/override the method
``allow_embedded_url(el, url)`` or ``allow_element(el)`` to
implement more complex rules for what can be embedded.
Anything that passes this test will be shown, regardless of
the value of (for instance) ``embedded``.
Note that this parameter might not work as intended if you do not
make the links absolute before doing the cleaning.
Note that you may also need to set ``whitelist_tags``.
``whitelist_tags``:
A set of tags that can be included with ``host_whitelist``.
The default is ``iframe`` and ``embed``; you may wish to
include other tags like ``script``, or you may want to
implement ``allow_embedded_url`` for more control. Set to None to
include all tags.
This modifies the document *in place*.
"""
scripts = True
javascript = True
comments = True
style = False
links = True
meta = True
page_structure = True
processing_instructions = True
embedded = True
frames = True
forms = True
annoying_tags = True
remove_tags = None
allow_tags = None
kill_tags = None
remove_unknown_tags = True
safe_attrs_only = True
safe_attrs = defs.safe_attrs
add_nofollow = False
host_whitelist = ()
whitelist_tags = set(['iframe', 'embed'])
def __init__(self, **kw):
for name, value in kw.items():
if not hasattr(self, name):
raise TypeError(
"Unknown parameter: %s=%r" % (name, value))
setattr(self, name, value)
# Used to lookup the primary URL for a given tag that is up for
# removal:
_tag_link_attrs = dict(
script='src',
link='href',
# From: http://java.sun.com/j2se/1.4.2/docs/guide/misc/applet.html
# From what I can tell, both attributes can contain a link:
applet=['code', 'object'],
iframe='src',
embed='src',
layer='src',
# FIXME: there doesn't really seem like a general way to figure out what
# links an <object> tag uses; links often go in <param> tags with values
# that we don't really know. You'd have to have knowledge about specific
# kinds of plugins (probably keyed off classid), and match against those.
##object=?,
# FIXME: not looking at the action currently, because it is more complex
        # than that -- if you keep the form, you should keep the form controls.
##form='action',
a='href',
)
def __call__(self, doc):
"""
Cleans the document.
"""
if hasattr(doc, 'getroot'):
# ElementTree instance, instead of an element
doc = doc.getroot()
# convert XHTML to HTML
xhtml_to_html(doc)
# Normalize a case that IE treats <image> like <img>, and that
# can confuse either this step or later steps.
for el in doc.iter('image'):
el.tag = 'img'
if not self.comments:
# Of course, if we were going to kill comments anyway, we don't
# need to worry about this
self.kill_conditional_comments(doc)
kill_tags = set(self.kill_tags or ())
remove_tags = set(self.remove_tags or ())
allow_tags = set(self.allow_tags or ())
if self.scripts:
kill_tags.add('script')
if self.safe_attrs_only:
safe_attrs = set(self.safe_attrs)
for el in doc.iter():
attrib = el.attrib
for aname in attrib.keys():
if aname not in safe_attrs:
del attrib[aname]
if self.javascript:
if not (self.safe_attrs_only and
self.safe_attrs == defs.safe_attrs):
# safe_attrs handles events attributes itself
for el in doc.iter():
attrib = el.attrib
for aname in attrib.keys():
if aname.startswith('on'):
del attrib[aname]
doc.rewrite_links(self._remove_javascript_link,
resolve_base_href=False)
if not self.style:
# If we're deleting style then we don't have to remove JS links
# from styles, otherwise...
for el in _find_styled_elements(doc):
old = el.get('style')
new = _css_javascript_re.sub('', old)
new = _css_import_re.sub('', new)
if self._has_sneaky_javascript(new):
# Something tricky is going on...
del el.attrib['style']
elif new != old:
el.set('style', new)
for el in list(doc.iter('style')):
if el.get('type', '').lower().strip() == 'text/javascript':
el.drop_tree()
continue
old = el.text or ''
new = _css_javascript_re.sub('', old)
# The imported CSS can do anything; we just can't allow:
                    new = _css_import_re.sub('', new)
if self._has_sneaky_javascript(new):
# Something tricky is going on...
el.text = '/* deleted */'
elif new != old:
el.text = new
if self.comments or self.processing_instructions:
# FIXME: why either? I feel like there's some obscure reason
# because you can put PIs in comments...? But I've already
# forgotten it
kill_tags.add(etree.Comment)
if self.processing_instructions:
kill_tags.add(etree.ProcessingInstruction)
if self.style:
kill_tags.add('style')
etree.strip_attributes(doc, 'style')
if self.links:
kill_tags.add('link')
elif self.style or self.javascript:
# We must get rid of included stylesheets if Javascript is not
# allowed, as you can put Javascript in them
for el in list(doc.iter('link')):
if 'stylesheet' in el.get('rel', '').lower():
# Note this kills alternate stylesheets as well
if not self.allow_element(el):
el.drop_tree()
if self.meta:
kill_tags.add('meta')
if self.page_structure:
remove_tags.update(('head', 'html', 'title'))
if self.embedded:
# FIXME: is <layer> really embedded?
# We should get rid of any <param> tags not inside <applet>;
# These are not really valid anyway.
for el in list(doc.iter('param')):
found_parent = False
parent = el.getparent()
while parent is not None and parent.tag not in ('applet', 'object'):
parent = parent.getparent()
if parent is None:
el.drop_tree()
kill_tags.update(('applet',))
# The alternate contents that are in an iframe are a good fallback:
remove_tags.update(('iframe', 'embed', 'layer', 'object', 'param'))
if self.frames:
# FIXME: ideally we should look at the frame links, but
# generally frames don't mix properly with an HTML
# fragment anyway.
kill_tags.update(defs.frame_tags)
if self.forms:
remove_tags.add('form')
kill_tags.update(('button', 'input', 'select', 'textarea'))
if self.annoying_tags:
remove_tags.update(('blink', 'marquee'))
_remove = []
_kill = []
for el in doc.iter():
if el.tag in kill_tags:
if self.allow_element(el):
continue
_kill.append(el)
elif el.tag in remove_tags:
if self.allow_element(el):
continue
_remove.append(el)
if _remove and _remove[0] == doc:
# We have to drop the parent-most tag, which we can't
# do. Instead we'll rewrite it:
el = _remove.pop(0)
el.tag = 'div'
el.attrib.clear()
elif _kill and _kill[0] == doc:
# We have to drop the parent-most element, which we can't
# do. Instead we'll clear it:
el = _kill.pop(0)
if el.tag != 'html':
el.tag = 'div'
el.clear()
_kill.reverse() # start with innermost tags
for el in _kill:
el.drop_tree()
for el in _remove:
el.drop_tag()
if self.remove_unknown_tags:
if allow_tags:
raise ValueError(
"It does not make sense to pass in both allow_tags and remove_unknown_tags")
allow_tags = set(defs.tags)
if allow_tags:
bad = []
for el in doc.iter():
if el.tag not in allow_tags:
bad.append(el)
if bad:
if bad[0] is doc:
el = bad.pop(0)
el.tag = 'div'
el.attrib.clear()
for el in bad:
el.drop_tag()
if self.add_nofollow:
for el in _find_external_links(doc):
if not self.allow_follow(el):
rel = el.get('rel')
if rel:
if ('nofollow' in rel
and ' nofollow ' in (' %s ' % rel)):
continue
rel = '%s nofollow' % rel
else:
rel = 'nofollow'
el.set('rel', rel)
def allow_follow(self, anchor):
"""
Override to suppress rel="nofollow" on some anchors.
"""
return False
def allow_element(self, el):
if el.tag not in self._tag_link_attrs:
return False
attr = self._tag_link_attrs[el.tag]
if isinstance(attr, (list, tuple)):
for one_attr in attr:
url = el.get(one_attr)
if not url:
return False
if not self.allow_embedded_url(el, url):
return False
return True
else:
url = el.get(attr)
if not url:
return False
return self.allow_embedded_url(el, url)
def allow_embedded_url(self, el, url):
if (self.whitelist_tags is not None
and el.tag not in self.whitelist_tags):
return False
scheme, netloc, path, query, fragment = urlsplit(url)
netloc = netloc.lower().split(':', 1)[0]
if scheme not in ('http', 'https'):
return False
if netloc in self.host_whitelist:
return True
return False
def kill_conditional_comments(self, doc):
"""
IE conditional comments basically embed HTML that the parser
doesn't normally see. We can't allow anything like that, so
we'll kill any comments that could be conditional.
"""
bad = []
self._kill_elements(
doc, lambda el: _conditional_comment_re.search(el.text),
etree.Comment)
def _kill_elements(self, doc, condition, iterate=None):
bad = []
for el in doc.iter(iterate):
if condition(el):
bad.append(el)
for el in bad:
el.drop_tree()
def _remove_javascript_link(self, link):
# links like "j a v a s c r i p t:" might be interpreted in IE
new = _substitute_whitespace('', link)
if _javascript_scheme_re.search(new):
# FIXME: should this be None to delete?
return ''
return link
_substitute_comments = re.compile(r'/\*.*?\*/', re.S).sub
def _has_sneaky_javascript(self, style):
"""
Depending on the browser, stuff like ``e x p r e s s i o n(...)``
can get interpreted, or ``expre/* stuff */ssion(...)``. This
        checks for attempts to do stuff like this.
Typically the response will be to kill the entire style; if you
have just a bit of Javascript in the style another rule will catch
that and remove only the Javascript from the style; this catches
more sneaky attempts.
"""
style = self._substitute_comments('', style)
style = style.replace('\\', '')
style = _substitute_whitespace('', style)
style = style.lower()
if 'javascript:' in style:
return True
if 'expression(' in style:
return True
return False
def clean_html(self, html):
result_type = type(html)
if isinstance(html, basestring):
doc = fromstring(html)
else:
doc = copy.deepcopy(html)
self(doc)
return _transform_result(result_type, doc)
clean = Cleaner()
clean_html = clean.clean_html
############################################################
## Autolinking
############################################################
_link_regexes = [
re.compile(r'(?P<body>https?://(?P<host>[a-z0-9._-]+)(?:/[/\-_.,a-z0-9%&?;=~]*)?(?:\([/\-_.,a-z0-9%&?;=~]*\))?)', re.I),
# This is conservative, but autolinking can be a bit conservative:
re.compile(r'mailto:(?P<body>[a-z0-9._-]+@(?P<host>[a-z0-9_._]+[a-z]))', re.I),
]
_avoid_elements = ['textarea', 'pre', 'code', 'head', 'select', 'a']
_avoid_hosts = [
re.compile(r'^localhost', re.I),
re.compile(r'\bexample\.(?:com|org|net)$', re.I),
re.compile(r'^127\.0\.0\.1$'),
]
_avoid_classes = ['nolink']
def autolink(el, link_regexes=_link_regexes,
avoid_elements=_avoid_elements,
avoid_hosts=_avoid_hosts,
avoid_classes=_avoid_classes):
"""
Turn any URLs into links.
It will search for links identified by the given regular
expressions (by default mailto and http(s) links).
It won't link text in an element in avoid_elements, or an element
with a class in avoid_classes. It won't link to anything with a
host that matches one of the regular expressions in avoid_hosts
(default localhost and 127.0.0.1).
If you pass in an element, the element's tail will not be
substituted, only the contents of the element.
"""
if el.tag in avoid_elements:
return
class_name = el.get('class')
if class_name:
class_name = class_name.split()
for match_class in avoid_classes:
if match_class in class_name:
return
for child in list(el):
autolink(child, link_regexes=link_regexes,
avoid_elements=avoid_elements,
avoid_hosts=avoid_hosts,
avoid_classes=avoid_classes)
if child.tail:
text, tail_children = _link_text(
child.tail, link_regexes, avoid_hosts, factory=el.makeelement)
if tail_children:
child.tail = text
index = el.index(child)
el[index+1:index+1] = tail_children
if el.text:
text, pre_children = _link_text(
el.text, link_regexes, avoid_hosts, factory=el.makeelement)
if pre_children:
el.text = text
el[:0] = pre_children
def _link_text(text, link_regexes, avoid_hosts, factory):
leading_text = ''
links = []
last_pos = 0
while 1:
best_match, best_pos = None, None
for regex in link_regexes:
regex_pos = last_pos
while 1:
match = regex.search(text, pos=regex_pos)
if match is None:
break
host = match.group('host')
for host_regex in avoid_hosts:
if host_regex.search(host):
regex_pos = match.end()
break
else:
break
if match is None:
continue
if best_pos is None or match.start() < best_pos:
best_match = match
best_pos = match.start()
if best_match is None:
# No more matches
if links:
assert not links[-1].tail
links[-1].tail = text
else:
assert not leading_text
leading_text = text
break
link = best_match.group(0)
end = best_match.end()
if link.endswith('.') or link.endswith(','):
# These punctuation marks shouldn't end a link
end -= 1
link = link[:-1]
prev_text = text[:best_match.start()]
if links:
assert not links[-1].tail
links[-1].tail = prev_text
else:
assert not leading_text
leading_text = prev_text
anchor = factory('a')
anchor.set('href', link)
body = best_match.group('body')
if not body:
body = link
if body.endswith('.') or body.endswith(','):
body = body[:-1]
anchor.text = body
links.append(anchor)
text = text[end:]
return leading_text, links
def autolink_html(html, *args, **kw):
result_type = type(html)
if isinstance(html, basestring):
doc = fromstring(html)
else:
doc = copy.deepcopy(html)
autolink(doc, *args, **kw)
return _transform_result(result_type, doc)
autolink_html.__doc__ = autolink.__doc__
############################################################
## Word wrapping
############################################################
_avoid_word_break_elements = ['pre', 'textarea', 'code']
_avoid_word_break_classes = ['nobreak']
def word_break(el, max_width=40,
avoid_elements=_avoid_word_break_elements,
avoid_classes=_avoid_word_break_classes,
break_character=unichr(0x200b)):
"""
Breaks any long words found in the body of the text (not attributes).
    Doesn't affect any of the tags in avoid_elements, by default
``<textarea>`` and ``<pre>``
    Breaks words by inserting &#8203;, which is a unicode character
for Zero Width Space character. This generally takes up no space
in rendering, but does copy as a space, and in monospace contexts
usually takes up space.
See http://www.cs.tut.fi/~jkorpela/html/nobr.html for a discussion
"""
    # Character suggestion of &#8203; comes from:
# http://www.cs.tut.fi/~jkorpela/html/nobr.html
if el.tag in _avoid_word_break_elements:
return
class_name = el.get('class')
if class_name:
dont_break = False
class_name = class_name.split()
for avoid in avoid_classes:
if avoid in class_name:
dont_break = True
break
if dont_break:
return
if el.text:
el.text = _break_text(el.text, max_width, break_character)
for child in el:
word_break(child, max_width=max_width,
avoid_elements=avoid_elements,
avoid_classes=avoid_classes,
break_character=break_character)
if child.tail:
child.tail = _break_text(child.tail, max_width, break_character)
def word_break_html(html, *args, **kw):
result_type = type(html)
doc = fromstring(html)
word_break(doc, *args, **kw)
return _transform_result(result_type, doc)
def _break_text(text, max_width, break_character):
words = text.split()
for word in words:
if len(word) > max_width:
replacement = _insert_break(word, max_width, break_character)
text = text.replace(word, replacement)
return text
_break_prefer_re = re.compile(r'[^a-z]', re.I)
def _insert_break(word, width, break_character):
orig_word = word
result = ''
while len(word) > width:
start = word[:width]
breaks = list(_break_prefer_re.finditer(start))
if breaks:
last_break = breaks[-1]
# Only walk back up to 10 characters to find a nice break:
if last_break.end() > width-10:
# FIXME: should the break character be at the end of the
# chunk, or the beginning of the next chunk?
start = word[:last_break.end()]
result += start + break_character
word = word[len(start):]
result += word
return result
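# Illustrative usage of the Cleaner class defined above (not part of the
# original module): host_whitelist lets embedded content from listed hosts
# through while other embedded content and scripts are stripped.
def _demo_cleaner_host_whitelist():
    cleaner = Cleaner(host_whitelist=['www.youtube.com'])
    markup = ('<div><iframe src="https://www.youtube.com/embed/xyz"></iframe>'
              '<iframe src="https://evil.example.com/"></iframe>'
              '<script>alert(1)</script></div>')
    # The whitelisted iframe is kept; the other iframe tag and the script are not.
    return cleaner.clean_html(markup)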
|
GHSA-57qw-cc2g-pv5p
|
gradio/route_utils.py
|
@@ -1,6 +1,7 @@
from __future__ import annotations
import hashlib
+import hmac
import json
import shutil
from collections import deque
@@ -569,8 +570,12 @@ def update_root_in_config(config: dict, root: str) -> dict:
root url has changed, all of the urls in the config that correspond to component
file urls are updated to use the new root url.
"""
- previous_root = config.get("root", None)
+ previous_root = config.get("root")
if previous_root is None or previous_root != root:
config["root"] = root
config = processing_utils.add_root_url(config, root, previous_root)
return config
+
+
+def compare_passwords_securely(input_password: str, correct_password: str) -> bool:
+ return hmac.compare_digest(input_password.encode(), correct_password.encode())
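hmac.compare_digest compares the two byte strings without short-circuiting on the first differing byte, so the comparison leaks far less timing information than a plain ==. A small usage sketch of the helper added above; the call site in the routes code is not shown in this record.

import hmac

def compare_passwords_securely(input_password: str, correct_password: str) -> bool:
    return hmac.compare_digest(input_password.encode(), correct_password.encode())

stored_password = 's3cretpw'
print(compare_passwords_securely('s3cretpw', stored_password))  # True
print(compare_passwords_securely('wrongpw!', stored_password))  # False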
|
from __future__ import annotations
import hashlib
import json
import shutil
from collections import deque
from dataclasses import dataclass as python_dataclass
from tempfile import NamedTemporaryFile, _TemporaryFileWrapper
from typing import TYPE_CHECKING, AsyncGenerator, BinaryIO, List, Optional, Tuple, Union
import fastapi
import httpx
import multipart
from gradio_client.documentation import document
from multipart.multipart import parse_options_header
from starlette.datastructures import FormData, Headers, UploadFile
from starlette.formparsers import MultiPartException, MultipartPart
from gradio import processing_utils, utils
from gradio.data_classes import PredictBody
from gradio.exceptions import Error
from gradio.helpers import EventData
from gradio.state_holder import SessionState
if TYPE_CHECKING:
from gradio.blocks import Blocks
from gradio.routes import App
class Obj:
"""
Using a class to convert dictionaries into objects. Used by the `Request` class.
Credit: https://www.geeksforgeeks.org/convert-nested-python-dictionary-to-object/
"""
def __init__(self, dict_):
self.__dict__.update(dict_)
for key, value in dict_.items():
if isinstance(value, (dict, list)):
value = Obj(value)
setattr(self, key, value)
def __getitem__(self, item):
return self.__dict__[item]
def __setitem__(self, item, value):
self.__dict__[item] = value
def __iter__(self):
for key, value in self.__dict__.items():
if isinstance(value, Obj):
yield (key, dict(value))
else:
yield (key, value)
def __contains__(self, item) -> bool:
if item in self.__dict__:
return True
for value in self.__dict__.values():
if isinstance(value, Obj) and item in value:
return True
return False
def keys(self):
return self.__dict__.keys()
def values(self):
return self.__dict__.values()
def items(self):
return self.__dict__.items()
def __str__(self) -> str:
return str(self.__dict__)
def __repr__(self) -> str:
return str(self.__dict__)
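# A small, hypothetical sketch of how Obj turns nested dictionaries into
# attribute-style objects; the payload below is made up for illustration.
def _obj_example():
    payload = {"headers": {"user-agent": "demo"}, "query_params": {"page": 1}}
    obj = Obj(payload)
    # Nested dicts become nested Obj instances, so values can be read with
    # attribute access or dict-style indexing interchangeably.
    assert obj.query_params.page == 1
    assert obj["headers"]["user-agent"] == "demo"
    return obj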
@document()
class Request:
"""
A Gradio request object that can be used to access the request headers, cookies,
query parameters and other information about the request from within the prediction
function. The class is a thin wrapper around the fastapi.Request class. Attributes
of this class include: `headers`, `client`, `query_params`, and `path_params`. If
auth is enabled, the `username` attribute can be used to get the logged in user.
Example:
import gradio as gr
def echo(text, request: gr.Request):
if request:
print("Request headers dictionary:", request.headers)
print("IP address:", request.client.host)
print("Query parameters:", dict(request.query_params))
return text
io = gr.Interface(echo, "textbox", "textbox").launch()
Demos: request_ip_headers
"""
def __init__(
self,
request: fastapi.Request | None = None,
username: str | None = None,
**kwargs,
):
"""
Can be instantiated with either a fastapi.Request or by manually passing in
attributes (needed for queueing).
Parameters:
request: A fastapi.Request
"""
self.request = request
self.username = username
self.kwargs: dict = kwargs
def dict_to_obj(self, d):
if isinstance(d, dict):
return json.loads(json.dumps(d), object_hook=Obj)
else:
return d
def __getattr__(self, name):
if self.request:
return self.dict_to_obj(getattr(self.request, name))
else:
try:
obj = self.kwargs[name]
except KeyError as ke:
raise AttributeError(
f"'Request' object has no attribute '{name}'"
) from ke
return self.dict_to_obj(obj)
class FnIndexInferError(Exception):
pass
def infer_fn_index(app: App, api_name: str, body: PredictBody) -> int:
if body.fn_index is None:
for i, fn in enumerate(app.get_blocks().dependencies):
if fn["api_name"] == api_name:
return i
raise FnIndexInferError(f"Could not infer fn_index for api_name {api_name}.")
else:
return body.fn_index
def compile_gr_request(
app: App,
body: PredictBody,
fn_index_inferred: int,
username: Optional[str],
request: Optional[fastapi.Request],
):
# If this fn_index cancels jobs, then the only input we need is the
# current session hash
if app.get_blocks().dependencies[fn_index_inferred]["cancels"]:
body.data = [body.session_hash]
if body.request:
if body.batched:
gr_request = [Request(username=username, request=request)]
else:
gr_request = Request(username=username, request=body.request)
else:
if request is None:
raise ValueError("request must be provided if body.request is None")
gr_request = Request(username=username, request=request)
return gr_request
def restore_session_state(app: App, body: PredictBody):
event_id = body.event_id
session_hash = getattr(body, "session_hash", None)
if session_hash is not None:
session_state = app.state_holder[session_hash]
# The should_reset set keeps track of the fn_indices
# that have been cancelled. When a job is cancelled,
# the /reset route will mark the jobs as having been reset.
# That way if the cancel job finishes BEFORE the job being cancelled
# the job being cancelled will not overwrite the state of the iterator.
if event_id is None:
iterator = None
elif event_id in app.iterators_to_reset:
iterator = None
app.iterators_to_reset.remove(event_id)
else:
iterator = app.iterators.get(event_id)
else:
session_state = SessionState(app.get_blocks())
iterator = None
return session_state, iterator
def prepare_event_data(
blocks: Blocks,
body: PredictBody,
) -> EventData:
target = body.trigger_id
event_data = EventData(
blocks.blocks.get(target) if target else None,
body.event_data,
)
return event_data
async def call_process_api(
app: App,
body: PredictBody,
gr_request: Union[Request, list[Request]],
fn_index_inferred: int,
):
session_state, iterator = restore_session_state(app=app, body=body)
dependency = app.get_blocks().dependencies[fn_index_inferred]
event_data = prepare_event_data(app.get_blocks(), body)
event_id = body.event_id
session_hash = getattr(body, "session_hash", None)
inputs = body.data
batch_in_single_out = not body.batched and dependency["batch"]
if batch_in_single_out:
inputs = [inputs]
try:
with utils.MatplotlibBackendMananger():
output = await app.get_blocks().process_api(
fn_index=fn_index_inferred,
inputs=inputs,
request=gr_request,
state=session_state,
iterator=iterator,
session_hash=session_hash,
event_id=event_id,
event_data=event_data,
in_event_listener=True,
)
iterator = output.pop("iterator", None)
if event_id is not None:
app.iterators[event_id] = iterator # type: ignore
if isinstance(output, Error):
raise output
except BaseException:
iterator = app.iterators.get(event_id) if event_id is not None else None
if iterator is not None: # close off any streams that are still open
run_id = id(iterator)
pending_streams: dict[int, list] = (
app.get_blocks().pending_streams[session_hash].get(run_id, {})
)
for stream in pending_streams.values():
stream.append(None)
raise
if batch_in_single_out:
output["data"] = output["data"][0]
return output
def get_root_url(
request: fastapi.Request, route_path: str, root_path: str | None
) -> str:
"""
Gets the root url of the request, stripping off any query parameters, the route_path, and trailing slashes.
Also ensures that the root url is https if the request is https. If root_path is provided, it is appended to the root url.
The final root url will not have a trailing slash.
"""
root_url = str(request.url)
root_url = httpx.URL(root_url)
root_url = root_url.copy_with(query=None)
root_url = str(root_url).rstrip("/")
if request.headers.get("x-forwarded-proto") == "https":
root_url = root_url.replace("http://", "https://")
route_path = route_path.rstrip("/")
if len(route_path) > 0:
root_url = root_url[: -len(route_path)]
return (root_url.rstrip("/") + (root_path or "")).rstrip("/")
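# A hypothetical worked example of get_root_url; the URLs below are made up
# and are not taken from the Gradio test suite.
#
#   str(request.url) == "http://example.com/demo/config?foo=bar"
#   get_root_url(request, route_path="/config", root_path=None)
#       -> "http://example.com/demo"
#   get_root_url(request, route_path="/config", root_path="/mounted")
#       -> "http://example.com/demo/mounted"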
def _user_safe_decode(src: bytes, codec: str) -> str:
try:
return src.decode(codec)
except (UnicodeDecodeError, LookupError):
return src.decode("latin-1")
class GradioUploadFile(UploadFile):
"""UploadFile with a sha attribute."""
def __init__(
self,
file: BinaryIO,
*,
size: int | None = None,
filename: str | None = None,
headers: Headers | None = None,
) -> None:
super().__init__(file, size=size, filename=filename, headers=headers)
self.sha = hashlib.sha1()
@python_dataclass(frozen=True)
class FileUploadProgressUnit:
filename: str
chunk_size: int
@python_dataclass
class FileUploadProgressTracker:
deque: deque[FileUploadProgressUnit]
is_done: bool
class FileUploadProgressNotTrackedError(Exception):
pass
class FileUploadProgressNotQueuedError(Exception):
pass
class FileUploadProgress:
def __init__(self) -> None:
self._statuses: dict[str, FileUploadProgressTracker] = {}
def track(self, upload_id: str):
if upload_id not in self._statuses:
self._statuses[upload_id] = FileUploadProgressTracker(deque(), False)
def append(self, upload_id: str, filename: str, message_bytes: bytes):
if upload_id not in self._statuses:
self.track(upload_id)
queue = self._statuses[upload_id].deque
if len(queue) == 0:
queue.append(FileUploadProgressUnit(filename, len(message_bytes)))
else:
last_unit = queue.popleft()
if last_unit.filename != filename:
queue.append(FileUploadProgressUnit(filename, len(message_bytes)))
else:
queue.append(
FileUploadProgressUnit(
filename,
last_unit.chunk_size + len(message_bytes),
)
)
def set_done(self, upload_id: str):
if upload_id not in self._statuses:
self.track(upload_id)
self._statuses[upload_id].is_done = True
def is_done(self, upload_id: str):
if upload_id not in self._statuses:
raise FileUploadProgressNotTrackedError()
return self._statuses[upload_id].is_done
def stop_tracking(self, upload_id: str):
if upload_id in self._statuses:
del self._statuses[upload_id]
def pop(self, upload_id: str) -> FileUploadProgressUnit:
if upload_id not in self._statuses:
raise FileUploadProgressNotTrackedError()
try:
return self._statuses[upload_id].deque.pop()
except IndexError as e:
raise FileUploadProgressNotQueuedError() from e
class GradioMultiPartParser:
"""Vendored from starlette.MultipartParser.
Thanks starlette!
Made the following modifications
- Use GradioUploadFile instead of UploadFile
- Use NamedTemporaryFile instead of SpooledTemporaryFile
- Compute hash of data as the request is streamed
"""
max_file_size = 1024 * 1024
def __init__(
self,
headers: Headers,
stream: AsyncGenerator[bytes, None],
*,
max_files: Union[int, float] = 1000,
max_fields: Union[int, float] = 1000,
upload_id: str | None = None,
upload_progress: FileUploadProgress | None = None,
) -> None:
assert (
multipart is not None
), "The `python-multipart` library must be installed to use form parsing."
self.headers = headers
self.stream = stream
self.max_files = max_files
self.max_fields = max_fields
self.items: List[Tuple[str, Union[str, UploadFile]]] = []
self.upload_id = upload_id
self.upload_progress = upload_progress
self._current_files = 0
self._current_fields = 0
self._current_partial_header_name: bytes = b""
self._current_partial_header_value: bytes = b""
self._current_part = MultipartPart()
self._charset = ""
self._file_parts_to_write: List[Tuple[MultipartPart, bytes]] = []
self._file_parts_to_finish: List[MultipartPart] = []
self._files_to_close_on_error: List[_TemporaryFileWrapper] = []
def on_part_begin(self) -> None:
self._current_part = MultipartPart()
def on_part_data(self, data: bytes, start: int, end: int) -> None:
message_bytes = data[start:end]
if self.upload_progress is not None:
self.upload_progress.append(
self.upload_id, # type: ignore
self._current_part.file.filename, # type: ignore
message_bytes,
)
if self._current_part.file is None:
self._current_part.data += message_bytes
else:
self._file_parts_to_write.append((self._current_part, message_bytes))
def on_part_end(self) -> None:
if self._current_part.file is None:
self.items.append(
(
self._current_part.field_name,
_user_safe_decode(self._current_part.data, self._charset),
)
)
else:
self._file_parts_to_finish.append(self._current_part)
# The file can be added to the items right now even though it's not
# finished yet, because it will be finished in the `parse()` method, before
# self.items is used in the return value.
self.items.append((self._current_part.field_name, self._current_part.file))
def on_header_field(self, data: bytes, start: int, end: int) -> None:
self._current_partial_header_name += data[start:end]
def on_header_value(self, data: bytes, start: int, end: int) -> None:
self._current_partial_header_value += data[start:end]
def on_header_end(self) -> None:
field = self._current_partial_header_name.lower()
if field == b"content-disposition":
self._current_part.content_disposition = self._current_partial_header_value
self._current_part.item_headers.append(
(field, self._current_partial_header_value)
)
self._current_partial_header_name = b""
self._current_partial_header_value = b""
def on_headers_finished(self) -> None:
disposition, options = parse_options_header(
self._current_part.content_disposition
)
try:
self._current_part.field_name = _user_safe_decode(
options[b"name"], self._charset
)
except KeyError as e:
raise MultiPartException(
'The Content-Disposition header field "name" must be ' "provided."
) from e
if b"filename" in options:
self._current_files += 1
if self._current_files > self.max_files:
raise MultiPartException(
f"Too many files. Maximum number of files is {self.max_files}."
)
filename = _user_safe_decode(options[b"filename"], self._charset)
tempfile = NamedTemporaryFile(delete=False)
self._files_to_close_on_error.append(tempfile)
self._current_part.file = GradioUploadFile(
file=tempfile, # type: ignore[arg-type]
size=0,
filename=filename,
headers=Headers(raw=self._current_part.item_headers),
)
else:
self._current_fields += 1
if self._current_fields > self.max_fields:
raise MultiPartException(
f"Too many fields. Maximum number of fields is {self.max_fields}."
)
self._current_part.file = None
def on_end(self) -> None:
pass
async def parse(self) -> FormData:
# Parse the Content-Type header to get the multipart boundary.
_, params = parse_options_header(self.headers["Content-Type"])
charset = params.get(b"charset", "utf-8")
if isinstance(charset, bytes):
charset = charset.decode("latin-1")
self._charset = charset
try:
boundary = params[b"boundary"]
except KeyError as e:
raise MultiPartException("Missing boundary in multipart.") from e
# Callbacks dictionary.
callbacks = {
"on_part_begin": self.on_part_begin,
"on_part_data": self.on_part_data,
"on_part_end": self.on_part_end,
"on_header_field": self.on_header_field,
"on_header_value": self.on_header_value,
"on_header_end": self.on_header_end,
"on_headers_finished": self.on_headers_finished,
"on_end": self.on_end,
}
# Create the parser.
parser = multipart.MultipartParser(boundary, callbacks)
try:
# Feed the parser with data from the request.
async for chunk in self.stream:
parser.write(chunk)
# Write file data, it needs to use await with the UploadFile methods
# that call the corresponding file methods *in a threadpool*,
# otherwise, if they were called directly in the callback methods above
# (regular, non-async functions), that would block the event loop in
# the main thread.
for part, data in self._file_parts_to_write:
assert part.file # for type checkers
await part.file.write(data)
part.file.sha.update(data) # type: ignore
for part in self._file_parts_to_finish:
assert part.file # for type checkers
await part.file.seek(0)
self._file_parts_to_write.clear()
self._file_parts_to_finish.clear()
except MultiPartException as exc:
# Close all the files if there was an error.
for file in self._files_to_close_on_error:
file.close()
raise exc
parser.finalize()
if self.upload_progress is not None:
self.upload_progress.set_done(self.upload_id) # type: ignore
return FormData(self.items)
def move_uploaded_files_to_cache(files: list[str], destinations: list[str]) -> None:
for file, dest in zip(files, destinations):
shutil.move(file, dest)
def update_root_in_config(config: dict, root: str) -> dict:
"""
Updates the "root" key in the config dictionary to the new root url. If the
root url has changed, all of the urls in the config that correspond to component
file urls are updated to use the new root url.
"""
previous_root = config.get("root", None)
if previous_root is None or previous_root != root:
config["root"] = root
config = processing_utils.add_root_url(config, root, previous_root)
return config
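# A minimal, hypothetical illustration of update_root_in_config; a real Gradio
# config carries many more keys than shown here.
def _update_root_example():
    config = {"root": "http://localhost:7860", "components": []}
    update_root_in_config(config, "http://example.com/gradio")
    # The "root" key now points at the new url, and add_root_url has rewritten
    # any component file urls that still referenced the previous root.
    assert config["root"] == "http://example.com/gradio"
    return config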
|
GHSA-hmx6-r76c-85g9
|
gradio/routes.py
|
@@ -63,6 +63,7 @@
GradioUploadFile,
MultiPartException,
Request,
+ compare_passwords_securely,
move_uploaded_files_to_cache,
)
from gradio.state_holder import StateHolder
@@ -271,7 +272,7 @@ def login(form_data: OAuth2PasswordRequestForm = Depends()):
if (
not callable(app.auth)
and username in app.auth
- and app.auth[username] == password
+ and compare_passwords_securely(password, app.auth[username]) # type: ignore
) or (callable(app.auth) and app.auth.__call__(username, password)):
token = secrets.token_urlsafe(16)
app.tokens[token] = username
|
"""Implements a FastAPI server to run the gradio interface. Note that some types in this
module use the Optional/Union notation so that they work correctly with pydantic."""
from __future__ import annotations
import asyncio
import contextlib
import sys
if sys.version_info >= (3, 9):
from importlib.resources import files
else:
from importlib_resources import files
import inspect
import json
import mimetypes
import os
import posixpath
import secrets
import tempfile
import threading
import time
import traceback
from pathlib import Path
from queue import Empty as EmptyQueue
from typing import TYPE_CHECKING, Any, AsyncIterator, Dict, List, Optional, Type
import fastapi
import httpx
import markupsafe
import orjson
from fastapi import BackgroundTasks, Depends, FastAPI, HTTPException, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import (
FileResponse,
HTMLResponse,
JSONResponse,
PlainTextResponse,
)
from fastapi.security import OAuth2PasswordRequestForm
from fastapi.templating import Jinja2Templates
from gradio_client import utils as client_utils
from gradio_client.documentation import document
from gradio_client.utils import ServerMessage
from jinja2.exceptions import TemplateNotFound
from multipart.multipart import parse_options_header
from starlette.background import BackgroundTask
from starlette.responses import RedirectResponse, StreamingResponse
import gradio
from gradio import ranged_response, route_utils, utils, wasm_utils
from gradio.context import Context
from gradio.data_classes import ComponentServerBody, PredictBody, ResetBody
from gradio.exceptions import Error
from gradio.oauth import attach_oauth
from gradio.processing_utils import add_root_url
from gradio.queueing import Estimation
from gradio.route_utils import ( # noqa: F401
FileUploadProgress,
FileUploadProgressNotQueuedError,
FileUploadProgressNotTrackedError,
GradioMultiPartParser,
GradioUploadFile,
MultiPartException,
Request,
move_uploaded_files_to_cache,
)
from gradio.state_holder import StateHolder
from gradio.utils import (
get_package_version,
)
if TYPE_CHECKING:
from gradio.blocks import Block
mimetypes.init()
STATIC_TEMPLATE_LIB = files("gradio").joinpath("templates").as_posix() # type: ignore
STATIC_PATH_LIB = files("gradio").joinpath("templates", "frontend", "static").as_posix() # type: ignore
BUILD_PATH_LIB = files("gradio").joinpath("templates", "frontend", "assets").as_posix() # type: ignore
VERSION = get_package_version()
class ORJSONResponse(JSONResponse):
media_type = "application/json"
@staticmethod
def _render(content: Any) -> bytes:
return orjson.dumps(
content,
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_PASSTHROUGH_DATETIME,
default=str,
)
def render(self, content: Any) -> bytes:
return ORJSONResponse._render(content)
@staticmethod
def _render_str(content: Any) -> str:
return ORJSONResponse._render(content).decode("utf-8")
def toorjson(value):
return markupsafe.Markup(
ORJSONResponse._render_str(value)
.replace("<", "\\u003c")
.replace(">", "\\u003e")
.replace("&", "\\u0026")
.replace("'", "\\u0027")
)
templates = Jinja2Templates(directory=STATIC_TEMPLATE_LIB)
templates.env.filters["toorjson"] = toorjson
client = httpx.AsyncClient()
file_upload_statuses = FileUploadProgress()
class App(FastAPI):
"""
FastAPI App Wrapper
"""
def __init__(self, **kwargs):
self.tokens = {}
self.auth = None
self.blocks: gradio.Blocks | None = None
self.state_holder = StateHolder()
self.iterators: dict[str, AsyncIterator] = {}
self.iterators_to_reset: set[str] = set()
self.lock = utils.safe_get_lock()
self.cookie_id = secrets.token_urlsafe(32)
self.queue_token = secrets.token_urlsafe(32)
self.startup_events_triggered = False
self.uploaded_file_dir = os.environ.get("GRADIO_TEMP_DIR") or str(
(Path(tempfile.gettempdir()) / "gradio").resolve()
)
self.change_event: None | threading.Event = None
self._asyncio_tasks: list[asyncio.Task] = []
# Allow user to manually set `docs_url` and `redoc_url`
# when instantiating an App; when they're not set, disable docs and redoc.
kwargs.setdefault("docs_url", None)
kwargs.setdefault("redoc_url", None)
super().__init__(**kwargs)
def configure_app(self, blocks: gradio.Blocks) -> None:
auth = blocks.auth
if auth is not None:
if not callable(auth):
self.auth = {account[0]: account[1] for account in auth}
else:
self.auth = auth
else:
self.auth = None
self.blocks = blocks
self.cwd = os.getcwd()
self.favicon_path = blocks.favicon_path
self.tokens = {}
self.root_path = blocks.root_path
self.state_holder.set_blocks(blocks)
def get_blocks(self) -> gradio.Blocks:
if self.blocks is None:
raise ValueError("No Blocks has been configured for this app.")
return self.blocks
def build_proxy_request(self, url_path):
url = httpx.URL(url_path)
assert self.blocks
# Don't proxy a URL unless it's a URL specifically loaded by the user using
# gr.load() to prevent SSRF or harvesting of HF tokens by malicious Spaces.
is_safe_url = any(
url.host == httpx.URL(root).host for root in self.blocks.proxy_urls
)
if not is_safe_url:
raise PermissionError("This URL cannot be proxied.")
is_hf_url = url.host.endswith(".hf.space")
headers = {}
if Context.hf_token is not None and is_hf_url:
headers["Authorization"] = f"Bearer {Context.hf_token}"
rp_req = client.build_request("GET", url, headers=headers)
return rp_req
def _cancel_asyncio_tasks(self):
for task in self._asyncio_tasks:
task.cancel()
self._asyncio_tasks = []
@staticmethod
def create_app(
blocks: gradio.Blocks, app_kwargs: Dict[str, Any] | None = None
) -> App:
app_kwargs = app_kwargs or {}
app_kwargs.setdefault("default_response_class", ORJSONResponse)
app = App(**app_kwargs)
app.configure_app(blocks)
if not wasm_utils.IS_WASM:
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_methods=["*"],
allow_headers=["*"],
)
@app.get("/user")
@app.get("/user/")
def get_current_user(request: fastapi.Request) -> Optional[str]:
token = request.cookies.get(
f"access-token-{app.cookie_id}"
) or request.cookies.get(f"access-token-unsecure-{app.cookie_id}")
return app.tokens.get(token)
@app.get("/login_check")
@app.get("/login_check/")
def login_check(user: str = Depends(get_current_user)):
if app.auth is None or user is not None:
return
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail="Not authenticated"
)
@app.get("/token")
@app.get("/token/")
def get_token(request: fastapi.Request) -> dict:
token = request.cookies.get(f"access-token-{app.cookie_id}")
return {"token": token, "user": app.tokens.get(token)}
@app.get("/app_id")
@app.get("/app_id/")
def app_id(request: fastapi.Request) -> dict: # noqa: ARG001
return {"app_id": app.get_blocks().app_id}
@app.get("/dev/reload", dependencies=[Depends(login_check)])
async def notify_changes(
request: fastapi.Request,
):
async def reload_checker(request: fastapi.Request):
heartbeat_rate = 15
check_rate = 0.05
last_heartbeat = time.perf_counter()
while True:
if await request.is_disconnected():
return
if app.change_event and app.change_event.is_set():
app.change_event.clear()
yield """data: CHANGE\n\n"""
await asyncio.sleep(check_rate)
if time.perf_counter() - last_heartbeat > heartbeat_rate:
yield """data: HEARTBEAT\n\n"""
last_heartbeat = time.perf_counter()
return StreamingResponse(
reload_checker(request),
media_type="text/event-stream",
)
@app.post("/login")
@app.post("/login/")
def login(form_data: OAuth2PasswordRequestForm = Depends()):
username, password = form_data.username.strip(), form_data.password
if app.auth is None:
return RedirectResponse(url="/", status_code=status.HTTP_302_FOUND)
if (
not callable(app.auth)
and username in app.auth
and app.auth[username] == password
) or (callable(app.auth) and app.auth.__call__(username, password)):
token = secrets.token_urlsafe(16)
app.tokens[token] = username
response = JSONResponse(content={"success": True})
response.set_cookie(
key=f"access-token-{app.cookie_id}",
value=token,
httponly=True,
samesite="none",
secure=True,
)
response.set_cookie(
key=f"access-token-unsecure-{app.cookie_id}",
value=token,
httponly=True,
)
return response
else:
raise HTTPException(status_code=400, detail="Incorrect credentials.")
###############
# OAuth Routes
###############
# Define OAuth routes if the app expects it (i.e. a LoginButton is defined).
# It allows users to "Sign in with HuggingFace".
if app.blocks is not None and app.blocks.expects_oauth:
attach_oauth(app)
###############
# Main Routes
###############
@app.head("/", response_class=HTMLResponse)
@app.get("/", response_class=HTMLResponse)
def main(request: fastapi.Request, user: str = Depends(get_current_user)):
mimetypes.add_type("application/javascript", ".js")
blocks = app.get_blocks()
root = route_utils.get_root_url(
request=request, route_path="/", root_path=app.root_path
)
if app.auth is None or user is not None:
config = app.get_blocks().config
config = route_utils.update_root_in_config(config, root)
else:
config = {
"auth_required": True,
"auth_message": blocks.auth_message,
"space_id": app.get_blocks().space_id,
"root": root,
}
try:
template = (
"frontend/share.html" if blocks.share else "frontend/index.html"
)
return templates.TemplateResponse(
template,
{"request": request, "config": config},
)
except TemplateNotFound as err:
if blocks.share:
raise ValueError(
"Did you install Gradio from source files? Share mode only "
"works when Gradio is installed through the pip package."
) from err
else:
raise ValueError(
"Did you install Gradio from source files? You need to build "
"the frontend by running /scripts/build_frontend.sh"
) from err
@app.get("/info/", dependencies=[Depends(login_check)])
@app.get("/info", dependencies=[Depends(login_check)])
def api_info():
return app.get_blocks().get_api_info() # type: ignore
@app.get("/config/", dependencies=[Depends(login_check)])
@app.get("/config", dependencies=[Depends(login_check)])
def get_config(request: fastapi.Request):
config = app.get_blocks().config
root = route_utils.get_root_url(
request=request, route_path="/config", root_path=app.root_path
)
config = route_utils.update_root_in_config(config, root)
return ORJSONResponse(content=config)
@app.get("/static/{path:path}")
def static_resource(path: str):
static_file = safe_join(STATIC_PATH_LIB, path)
return FileResponse(static_file)
@app.get("/custom_component/{id}/{type}/{file_name}")
def custom_component_path(id: str, type: str, file_name: str):
config = app.get_blocks().config
components = config["components"]
location = next(
(item for item in components if item["component_class_id"] == id), None
)
if location is None:
raise HTTPException(status_code=404, detail="Component not found.")
component_instance = app.get_blocks().get_component(location["id"])
module_name = component_instance.__class__.__module__
module_path = sys.modules[module_name].__file__
if module_path is None or component_instance is None:
raise HTTPException(status_code=404, detail="Component not found.")
return FileResponse(
safe_join(
str(Path(module_path).parent),
f"{component_instance.__class__.TEMPLATE_DIR}/{type}/{file_name}",
)
)
@app.get("/assets/{path:path}")
def build_resource(path: str):
build_file = safe_join(BUILD_PATH_LIB, path)
return FileResponse(build_file)
@app.get("/favicon.ico")
async def favicon():
blocks = app.get_blocks()
if blocks.favicon_path is None:
return static_resource("img/logo.svg")
else:
return FileResponse(blocks.favicon_path)
@app.head("/proxy={url_path:path}", dependencies=[Depends(login_check)])
@app.get("/proxy={url_path:path}", dependencies=[Depends(login_check)])
async def reverse_proxy(url_path: str):
# Adapted from: https://github.com/tiangolo/fastapi/issues/1788
try:
rp_req = app.build_proxy_request(url_path)
except PermissionError as err:
raise HTTPException(status_code=400, detail=str(err)) from err
rp_resp = await client.send(rp_req, stream=True)
return StreamingResponse(
rp_resp.aiter_raw(),
status_code=rp_resp.status_code,
headers=rp_resp.headers, # type: ignore
background=BackgroundTask(rp_resp.aclose),
)
@app.head("/file={path_or_url:path}", dependencies=[Depends(login_check)])
@app.get("/file={path_or_url:path}", dependencies=[Depends(login_check)])
async def file(path_or_url: str, request: fastapi.Request):
blocks = app.get_blocks()
if client_utils.is_http_url_like(path_or_url):
return RedirectResponse(
url=path_or_url, status_code=status.HTTP_302_FOUND
)
abs_path = utils.abspath(path_or_url)
in_blocklist = any(
utils.is_in_or_equal(abs_path, blocked_path)
for blocked_path in blocks.blocked_paths
)
is_dir = abs_path.is_dir()
if in_blocklist or is_dir:
raise HTTPException(403, f"File not allowed: {path_or_url}.")
created_by_app = str(abs_path) in set().union(*blocks.temp_file_sets)
in_allowlist = any(
utils.is_in_or_equal(abs_path, allowed_path)
for allowed_path in blocks.allowed_paths
)
was_uploaded = utils.is_in_or_equal(abs_path, app.uploaded_file_dir)
is_cached_example = utils.is_in_or_equal(
abs_path, utils.abspath(utils.get_cache_folder())
)
if not (
created_by_app or in_allowlist or was_uploaded or is_cached_example
):
raise HTTPException(403, f"File not allowed: {path_or_url}.")
if not abs_path.exists():
raise HTTPException(404, f"File not found: {path_or_url}.")
range_val = request.headers.get("Range", "").strip()
if range_val.startswith("bytes=") and "-" in range_val:
range_val = range_val[6:]
start, end = range_val.split("-")
if start.isnumeric() and end.isnumeric():
start = int(start)
end = int(end)
response = ranged_response.RangedFileResponse(
abs_path,
ranged_response.OpenRange(start, end),
dict(request.headers),
stat_result=os.stat(abs_path),
)
return response
return FileResponse(abs_path, headers={"Accept-Ranges": "bytes"})
@app.get(
"/stream/{session_hash}/{run}/{component_id}",
dependencies=[Depends(login_check)],
)
async def stream(
session_hash: str,
run: int,
component_id: int,
request: fastapi.Request, # noqa: ARG001
):
stream: list = (
app.get_blocks()
.pending_streams[session_hash]
.get(run, {})
.get(component_id, None)
)
if stream is None:
raise HTTPException(404, "Stream not found.")
def stream_wrapper():
check_stream_rate = 0.01
max_wait_time = 120 # maximum wait between yields - assume generator thread has crashed otherwise.
wait_time = 0
while True:
if len(stream) == 0:
if wait_time > max_wait_time:
return
wait_time += check_stream_rate
time.sleep(check_stream_rate)
continue
wait_time = 0
next_stream = stream.pop(0)
if next_stream is None:
return
yield next_stream
return StreamingResponse(stream_wrapper())
@app.get("/file/{path:path}", dependencies=[Depends(login_check)])
async def file_deprecated(path: str, request: fastapi.Request):
return await file(path, request)
@app.post("/reset/")
@app.post("/reset")
async def reset_iterator(body: ResetBody):
if body.event_id not in app.iterators:
return {"success": False}
async with app.lock:
del app.iterators[body.event_id]
app.iterators_to_reset.add(body.event_id)
await app.get_blocks()._queue.clean_events(event_id=body.event_id)
return {"success": True}
# had to use '/run' endpoint for Colab compatibility, '/api' supported for backwards compatibility
@app.post("/run/{api_name}", dependencies=[Depends(login_check)])
@app.post("/run/{api_name}/", dependencies=[Depends(login_check)])
@app.post("/api/{api_name}", dependencies=[Depends(login_check)])
@app.post("/api/{api_name}/", dependencies=[Depends(login_check)])
async def predict(
api_name: str,
body: PredictBody,
request: fastapi.Request,
username: str = Depends(get_current_user),
):
fn_index_inferred = route_utils.infer_fn_index(
app=app, api_name=api_name, body=body
)
if not app.get_blocks().api_open and app.get_blocks().queue_enabled_for_fn(
fn_index_inferred
):
raise HTTPException(
detail="This API endpoint does not accept direct HTTP POST requests. Please join the queue to use this API.",
status_code=status.HTTP_404_NOT_FOUND,
)
gr_request = route_utils.compile_gr_request(
app,
body,
fn_index_inferred=fn_index_inferred,
username=username,
request=request,
)
try:
output = await route_utils.call_process_api(
app=app,
body=body,
gr_request=gr_request,
fn_index_inferred=fn_index_inferred,
)
except BaseException as error:
show_error = app.get_blocks().show_error or isinstance(error, Error)
traceback.print_exc()
return JSONResponse(
content={"error": str(error) if show_error else None},
status_code=500,
)
root_path = route_utils.get_root_url(
request=request, route_path=f"/api/{api_name}", root_path=app.root_path
)
output = add_root_url(output, root_path, None)
return output
@app.get("/queue/data", dependencies=[Depends(login_check)])
async def queue_data(
request: fastapi.Request,
session_hash: str,
):
blocks = app.get_blocks()
root_path = route_utils.get_root_url(
request=request, route_path="/queue/data", root_path=app.root_path
)
async def sse_stream(request: fastapi.Request):
try:
last_heartbeat = time.perf_counter()
while True:
if await request.is_disconnected():
await blocks._queue.clean_events(session_hash=session_hash)
return
if (
session_hash
not in blocks._queue.pending_messages_per_session
):
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Session not found.",
)
heartbeat_rate = 15
check_rate = 0.05
message = None
try:
messages = blocks._queue.pending_messages_per_session[
session_hash
]
message = messages.get_nowait()
except EmptyQueue:
await asyncio.sleep(check_rate)
if time.perf_counter() - last_heartbeat > heartbeat_rate:
# Fix this
message = {
"msg": ServerMessage.heartbeat,
}
# Need to reset last_heartbeat with perf_counter
# otherwise only a single heartbeat msg will be sent
# and then the stream will retry leading to infinite queue 😬
last_heartbeat = time.perf_counter()
if blocks._queue.stopped:
message = {
"msg": "unexpected_error",
"message": "Server stopped unexpectedly.",
"success": False,
}
if message:
add_root_url(message, root_path, None)
yield f"data: {json.dumps(message)}\n\n"
if message["msg"] == ServerMessage.process_completed:
blocks._queue.pending_event_ids_session[
session_hash
].remove(message["event_id"])
if message["msg"] == ServerMessage.server_stopped or (
message["msg"] == ServerMessage.process_completed
and (
len(
blocks._queue.pending_event_ids_session[
session_hash
]
)
== 0
)
):
return
except BaseException as e:
message = {
"msg": "unexpected_error",
"success": False,
"message": str(e),
}
yield f"data: {json.dumps(message)}\n\n"
if isinstance(e, asyncio.CancelledError):
del blocks._queue.pending_messages_per_session[session_hash]
await blocks._queue.clean_events(session_hash=session_hash)
raise e
return StreamingResponse(
sse_stream(request),
media_type="text/event-stream",
)
@app.post("/queue/join", dependencies=[Depends(login_check)])
async def queue_join(
body: PredictBody,
request: fastapi.Request,
username: str = Depends(get_current_user),
):
blocks = app.get_blocks()
if blocks._queue.server_app is None:
blocks._queue.set_server_app(app)
if blocks._queue.stopped:
raise HTTPException(
status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
detail="Queue is stopped.",
)
success, event_id = await blocks._queue.push(body, request, username)
if not success:
status_code = (
status.HTTP_503_SERVICE_UNAVAILABLE
if "Queue is full." in event_id
else status.HTTP_400_BAD_REQUEST
)
raise HTTPException(status_code=status_code, detail=event_id)
return {"event_id": event_id}
@app.post("/component_server", dependencies=[Depends(login_check)])
@app.post("/component_server/", dependencies=[Depends(login_check)])
def component_server(body: ComponentServerBody):
state = app.state_holder[body.session_hash]
component_id = body.component_id
block: Block
if component_id in state:
block = state[component_id]
else:
block = app.get_blocks().blocks[component_id]
fn = getattr(block, body.fn_name, None)
if fn is None or not getattr(fn, "_is_server_fn", False):
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Function not found.",
)
return fn(body.data)
@app.get(
"/queue/status",
dependencies=[Depends(login_check)],
response_model=Estimation,
)
async def get_queue_status():
return app.get_blocks()._queue.get_status()
@app.get("/upload_progress")
def get_upload_progress(upload_id: str, request: fastapi.Request):
async def sse_stream(request: fastapi.Request):
last_heartbeat = time.perf_counter()
is_done = False
while True:
if await request.is_disconnected():
file_upload_statuses.stop_tracking(upload_id)
return
if is_done:
file_upload_statuses.stop_tracking(upload_id)
return
heartbeat_rate = 15
check_rate = 0.05
try:
if file_upload_statuses.is_done(upload_id):
message = {"msg": "done"}
is_done = True
else:
update = file_upload_statuses.pop(upload_id)
message = {
"msg": "update",
"orig_name": update.filename,
"chunk_size": update.chunk_size,
}
yield f"data: {json.dumps(message)}\n\n"
except FileUploadProgressNotTrackedError:
return
except FileUploadProgressNotQueuedError:
await asyncio.sleep(check_rate)
if time.perf_counter() - last_heartbeat > heartbeat_rate:
message = {"msg": "heartbeat"}
yield f"data: {json.dumps(message)}\n\n"
last_heartbeat = time.perf_counter()
return StreamingResponse(
sse_stream(request),
media_type="text/event-stream",
)
@app.post("/upload", dependencies=[Depends(login_check)])
async def upload_file(
request: fastapi.Request,
bg_tasks: BackgroundTasks,
upload_id: Optional[str] = None,
):
content_type_header = request.headers.get("Content-Type")
content_type: bytes
content_type, _ = parse_options_header(content_type_header)
if content_type != b"multipart/form-data":
raise HTTPException(status_code=400, detail="Invalid content type.")
try:
if upload_id:
file_upload_statuses.track(upload_id)
multipart_parser = GradioMultiPartParser(
request.headers,
request.stream(),
max_files=1000,
max_fields=1000,
upload_id=upload_id if upload_id else None,
upload_progress=file_upload_statuses if upload_id else None,
)
form = await multipart_parser.parse()
except MultiPartException as exc:
raise HTTPException(status_code=400, detail=exc.message) from exc
output_files = []
files_to_copy = []
locations: list[str] = []
for temp_file in form.getlist("files"):
assert isinstance(temp_file, GradioUploadFile)
if temp_file.filename:
file_name = Path(temp_file.filename).name
name = client_utils.strip_invalid_filename_characters(file_name)
else:
name = f"tmp{secrets.token_hex(5)}"
directory = Path(app.uploaded_file_dir) / temp_file.sha.hexdigest()
directory.mkdir(exist_ok=True, parents=True)
dest = (directory / name).resolve()
temp_file.file.close()
# we need to move the temp file to the cache directory
# but that's possibly blocking and we're in an async function
# so we try to rename (this is what shutil.move tries first)
# which should be super fast.
# if that fails, we move in the background.
try:
os.rename(temp_file.file.name, dest)
except OSError:
files_to_copy.append(temp_file.file.name)
locations.append(str(dest))
output_files.append(dest)
if files_to_copy:
bg_tasks.add_task(
move_uploaded_files_to_cache, files_to_copy, locations
)
return output_files
@app.on_event("startup")
@app.get("/startup-events")
async def startup_events():
if not app.startup_events_triggered:
app.get_blocks().startup_events()
app.startup_events_triggered = True
return True
return False
@app.get("/theme.css", response_class=PlainTextResponse)
def theme_css():
return PlainTextResponse(app.get_blocks().theme_css, media_type="text/css")
@app.get("/robots.txt", response_class=PlainTextResponse)
def robots_txt():
if app.get_blocks().share:
return "User-agent: *\nDisallow: /"
else:
return "User-agent: *\nDisallow: "
return app
########
# Helper functions
########
def safe_join(directory: str, path: str) -> str:
"""Safely path to a base directory to avoid escaping the base directory.
Borrowed from: werkzeug.security.safe_join"""
_os_alt_seps: List[str] = [
sep for sep in [os.path.sep, os.path.altsep] if sep is not None and sep != "/"
]
if path == "":
raise HTTPException(400)
filename = posixpath.normpath(path)
fullpath = os.path.join(directory, filename)
if (
any(sep in filename for sep in _os_alt_seps)
or os.path.isabs(filename)
or filename == ".."
or filename.startswith("../")
or os.path.isdir(fullpath)
):
raise HTTPException(403)
if not os.path.exists(fullpath):
raise HTTPException(404, "File not found")
return fullpath
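# A brief, hypothetical illustration of safe_join's guarantees; the paths are
# examples only and assume the target file actually exists on disk.
#
#   safe_join(STATIC_PATH_LIB, "img/logo.svg")   # path inside STATIC_PATH_LIB
#   safe_join(STATIC_PATH_LIB, "../index.html")  # raises HTTPException(403)
#   safe_join(STATIC_PATH_LIB, "")               # raises HTTPException(400)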
def get_types(cls_set: List[Type]):
docset = []
types = []
for cls in cls_set:
doc = inspect.getdoc(cls) or ""
doc_lines = doc.split("\n")
for line in doc_lines:
if "value (" in line:
types.append(line.split("value (")[1].split(")")[0])
docset.append(doc_lines[1].split(":")[-1])
return docset, types
@document()
def mount_gradio_app(
app: fastapi.FastAPI,
blocks: gradio.Blocks,
path: str,
app_kwargs: dict[str, Any] | None = None,
) -> fastapi.FastAPI:
"""Mount a gradio.Blocks to an existing FastAPI application.
Parameters:
app: The parent FastAPI application.
blocks: The blocks object we want to mount to the parent app.
path: The path at which the gradio application will be mounted.
app_kwargs: Additional keyword arguments to pass to the underlying FastAPI app as a dictionary of parameter keys and argument values. For example, `{"docs_url": "/docs"}`
Example:
from fastapi import FastAPI
import gradio as gr
app = FastAPI()
@app.get("/")
def read_main():
return {"message": "This is your main app"}
io = gr.Interface(lambda x: "Hello, " + x + "!", "textbox", "textbox")
app = gr.mount_gradio_app(app, io, path="/gradio")
# Then run `uvicorn run:app` from the terminal and navigate to http://localhost:8000/gradio.
"""
blocks.dev_mode = False
blocks.config = blocks.get_config_file()
blocks.validate_queue_settings()
gradio_app = App.create_app(blocks, app_kwargs=app_kwargs)
old_lifespan = app.router.lifespan_context
@contextlib.asynccontextmanager
async def new_lifespan(app: FastAPI):
async with old_lifespan(
app
): # Insert the startup events inside the FastAPI context manager
gradio_app.get_blocks().startup_events()
yield
app.router.lifespan_context = new_lifespan
app.mount(path, gradio_app)
return app
|
GHSA-hmx6-r76c-85g9
|
test/test_routes.py
|
@@ -25,7 +25,11 @@
routes,
wasm_utils,
)
-from gradio.route_utils import FnIndexInferError, get_root_url
+from gradio.route_utils import (
+ FnIndexInferError,
+ compare_passwords_securely,
+ get_root_url,
+)
@pytest.fixture()
@@ -921,3 +925,11 @@ def test_component_server_endpoints(connect):
def test_get_root_url(request_url, route_path, root_path, expected_root_url):
request = Request({"path": request_url, "type": "http", "headers": {}})
assert get_root_url(request, route_path, root_path) == expected_root_url
+
+
+def test_compare_passwords_securely():
+ password1 = "password"
+ password2 = "pässword"
+ assert compare_passwords_securely(password1, password1)
+ assert not compare_passwords_securely(password1, password2)
+ assert compare_passwords_securely(password2, password2)
|
"""Contains tests for networking.py and app.py"""
import functools
import os
import tempfile
from contextlib import asynccontextmanager, closing
from unittest.mock import patch
import gradio_client as grc
import numpy as np
import pandas as pd
import pytest
import starlette.routing
from fastapi import FastAPI, Request
from fastapi.testclient import TestClient
from gradio_client import media_data
import gradio as gr
from gradio import (
Blocks,
Button,
Interface,
Number,
Textbox,
close_all,
routes,
wasm_utils,
)
from gradio.route_utils import FnIndexInferError, get_root_url
@pytest.fixture()
def test_client():
io = Interface(lambda x: x + x, "text", "text")
app, _, _ = io.launch(prevent_thread_lock=True)
test_client = TestClient(app)
yield test_client
io.close()
close_all()
class TestRoutes:
def test_get_main_route(self, test_client):
response = test_client.get("/")
assert response.status_code == 200
def test_static_files_served_safely(self, test_client):
# Make sure things outside the static folder are not accessible
response = test_client.get(r"/static/..%2findex.html")
assert response.status_code == 403
response = test_client.get(r"/static/..%2f..%2fapi_docs.html")
assert response.status_code == 403
def test_get_config_route(self, test_client):
response = test_client.get("/config/")
assert response.status_code == 200
def test_favicon_route(self, test_client):
response = test_client.get("/favicon.ico")
assert response.status_code == 200
def test_upload_path(self, test_client):
with open("test/test_files/alphabet.txt", "rb") as f:
response = test_client.post("/upload", files={"files": f})
assert response.status_code == 200
file = response.json()[0]
assert "alphabet" in file
assert file.endswith(".txt")
with open(file, "rb") as saved_file:
assert saved_file.read() == b"abcdefghijklmnopqrstuvwxyz"
def test_custom_upload_path(self, gradio_temp_dir):
io = Interface(lambda x: x + x, "text", "text")
app, _, _ = io.launch(prevent_thread_lock=True)
test_client = TestClient(app)
with open("test/test_files/alphabet.txt", "rb") as f:
response = test_client.post("/upload", files={"files": f})
assert response.status_code == 200
file = response.json()[0]
assert "alphabet" in file
assert file.startswith(str(gradio_temp_dir))
assert file.endswith(".txt")
with open(file, "rb") as saved_file:
assert saved_file.read() == b"abcdefghijklmnopqrstuvwxyz"
def test_predict_route(self, test_client):
response = test_client.post(
"/api/predict/", json={"data": ["test"], "fn_index": 0}
)
assert response.status_code == 200
output = dict(response.json())
assert output["data"] == ["testtest"]
def test_named_predict_route(self):
with Blocks() as demo:
i = Textbox()
o = Textbox()
i.change(lambda x: f"{x}1", i, o, api_name="p")
i.change(lambda x: f"{x}2", i, o, api_name="q")
app, _, _ = demo.launch(prevent_thread_lock=True)
client = TestClient(app)
response = client.post("/api/p/", json={"data": ["test"]})
assert response.status_code == 200
output = dict(response.json())
assert output["data"] == ["test1"]
response = client.post("/api/q/", json={"data": ["test"]})
assert response.status_code == 200
output = dict(response.json())
assert output["data"] == ["test2"]
def test_same_named_predict_route(self):
with Blocks() as demo:
i = Textbox()
o = Textbox()
i.change(lambda x: f"{x}0", i, o, api_name="p")
i.change(lambda x: f"{x}1", i, o, api_name="p")
app, _, _ = demo.launch(prevent_thread_lock=True)
client = TestClient(app)
response = client.post("/api/p/", json={"data": ["test"]})
assert response.status_code == 200
output = dict(response.json())
assert output["data"] == ["test0"]
response = client.post("/api/p_1/", json={"data": ["test"]})
assert response.status_code == 200
output = dict(response.json())
assert output["data"] == ["test1"]
def test_multiple_renamed(self):
with Blocks() as demo:
i = Textbox()
o = Textbox()
i.change(lambda x: f"{x}0", i, o, api_name="p")
i.change(lambda x: f"{x}1", i, o, api_name="p")
i.change(lambda x: f"{x}2", i, o, api_name="p_1")
app, _, _ = demo.launch(prevent_thread_lock=True)
client = TestClient(app)
response = client.post("/api/p/", json={"data": ["test"]})
assert response.status_code == 200
output = dict(response.json())
assert output["data"] == ["test0"]
response = client.post("/api/p_1/", json={"data": ["test"]})
assert response.status_code == 200
output = dict(response.json())
assert output["data"] == ["test1"]
response = client.post("/api/p_1_1/", json={"data": ["test"]})
assert response.status_code == 200
output = dict(response.json())
assert output["data"] == ["test2"]
def test_predict_route_without_fn_index(self, test_client):
response = test_client.post("/api/predict/", json={"data": ["test"]})
assert response.status_code == 200
output = dict(response.json())
assert output["data"] == ["testtest"]
def test_predict_route_batching(self):
def batch_fn(x):
results = []
for word in x:
results.append(f"Hello {word}")
return (results,)
with gr.Blocks() as demo:
text = gr.Textbox()
btn = gr.Button()
btn.click(batch_fn, inputs=text, outputs=text, batch=True, api_name="pred")
demo.queue(api_open=True)
app, _, _ = demo.launch(prevent_thread_lock=True)
client = TestClient(app)
response = client.post("/api/pred/", json={"data": ["test"]})
output = dict(response.json())
assert output["data"] == ["Hello test"]
app, _, _ = demo.launch(prevent_thread_lock=True)
client = TestClient(app)
response = client.post(
"/api/pred/", json={"data": [["test", "test2"]], "batched": True}
)
output = dict(response.json())
assert output["data"] == [["Hello test", "Hello test2"]]
def test_state(self):
def predict(input, history):
if history is None:
history = ""
history += input
return history, history
io = Interface(predict, ["textbox", "state"], ["textbox", "state"])
app, _, _ = io.launch(prevent_thread_lock=True)
client = TestClient(app)
response = client.post(
"/api/predict/",
json={"data": ["test", None], "fn_index": 0, "session_hash": "_"},
)
output = dict(response.json())
assert output["data"] == ["test", None]
response = client.post(
"/api/predict/",
json={"data": ["test", None], "fn_index": 0, "session_hash": "_"},
)
output = dict(response.json())
assert output["data"] == ["testtest", None]
def test_get_allowed_paths(self):
allowed_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
allowed_file.write(media_data.BASE64_IMAGE)
allowed_file.flush()
io = gr.Interface(lambda s: s.name, gr.File(), gr.File())
app, _, _ = io.launch(prevent_thread_lock=True)
client = TestClient(app)
file_response = client.get(f"/file={allowed_file.name}")
assert file_response.status_code == 403
io.close()
io = gr.Interface(lambda s: s.name, gr.File(), gr.File())
app, _, _ = io.launch(
prevent_thread_lock=True,
allowed_paths=[os.path.dirname(allowed_file.name)],
)
client = TestClient(app)
file_response = client.get(f"/file={allowed_file.name}")
assert file_response.status_code == 200
assert len(file_response.text) == len(media_data.BASE64_IMAGE)
io.close()
io = gr.Interface(lambda s: s.name, gr.File(), gr.File())
app, _, _ = io.launch(
prevent_thread_lock=True,
allowed_paths=[os.path.abspath(allowed_file.name)],
)
client = TestClient(app)
file_response = client.get(f"/file={allowed_file.name}")
assert file_response.status_code == 200
assert len(file_response.text) == len(media_data.BASE64_IMAGE)
io.close()
def test_allowed_and_blocked_paths(self):
with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmp_file:
io = gr.Interface(lambda s: s.name, gr.File(), gr.File())
app, _, _ = io.launch(
prevent_thread_lock=True,
allowed_paths=[os.path.dirname(tmp_file.name)],
)
client = TestClient(app)
file_response = client.get(f"/file={tmp_file.name}")
assert file_response.status_code == 200
io.close()
os.remove(tmp_file.name)
with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmp_file:
io = gr.Interface(lambda s: s.name, gr.File(), gr.File())
app, _, _ = io.launch(
prevent_thread_lock=True,
allowed_paths=[os.path.dirname(tmp_file.name)],
blocked_paths=[os.path.dirname(tmp_file.name)],
)
client = TestClient(app)
file_response = client.get(f"/file={tmp_file.name}")
assert file_response.status_code == 403
io.close()
os.remove(tmp_file.name)
def test_get_file_created_by_app(self, test_client):
app, _, _ = gr.Interface(lambda s: s.name, gr.File(), gr.File()).launch(
prevent_thread_lock=True
)
client = TestClient(app)
with open("test/test_files/alphabet.txt", "rb") as f:
file_response = test_client.post("/upload", files={"files": f})
response = client.post(
"/api/predict/",
json={
"data": [
{
"path": file_response.json()[0],
"size": os.path.getsize("test/test_files/alphabet.txt"),
}
],
"fn_index": 0,
"session_hash": "_",
},
).json()
created_file = response["data"][0]["path"]
file_response = client.get(f"/file={created_file}")
assert file_response.is_success
backwards_compatible_file_response = client.get(f"/file/{created_file}")
assert backwards_compatible_file_response.is_success
file_response_with_full_range = client.get(
f"/file={created_file}", headers={"Range": "bytes=0-"}
)
assert file_response_with_full_range.is_success
assert file_response.text == file_response_with_full_range.text
file_response_with_partial_range = client.get(
f"/file={created_file}", headers={"Range": "bytes=0-10"}
)
assert file_response_with_partial_range.is_success
assert len(file_response_with_partial_range.text) == 11
def test_mount_gradio_app(self):
app = FastAPI()
demo = gr.Interface(
lambda s: f"Hello from ps, {s}!", "textbox", "textbox"
).queue()
demo1 = gr.Interface(
lambda s: f"Hello from py, {s}!", "textbox", "textbox"
).queue()
app = gr.mount_gradio_app(app, demo, path="/ps")
app = gr.mount_gradio_app(app, demo1, path="/py")
# Use context manager to trigger start up events
with TestClient(app) as client:
assert client.get("/ps").is_success
assert client.get("/py").is_success
def test_mount_gradio_app_with_app_kwargs(self):
app = FastAPI()
demo = gr.Interface(lambda s: f"You said {s}!", "textbox", "textbox").queue()
app = gr.mount_gradio_app(
app, demo, path="/echo", app_kwargs={"docs_url": "/docs-custom"}
)
# Use context manager to trigger start up events
with TestClient(app) as client:
assert client.get("/echo/docs-custom").is_success
def test_mount_gradio_app_with_lifespan(self):
@asynccontextmanager
async def empty_lifespan(app: FastAPI):
yield
app = FastAPI(lifespan=empty_lifespan)
demo = gr.Interface(
lambda s: f"Hello from ps, {s}!", "textbox", "textbox"
).queue()
demo1 = gr.Interface(
lambda s: f"Hello from py, {s}!", "textbox", "textbox"
).queue()
app = gr.mount_gradio_app(app, demo, path="/ps")
app = gr.mount_gradio_app(app, demo1, path="/py")
# Use context manager to trigger start up events
with TestClient(app) as client:
assert client.get("/ps").is_success
assert client.get("/py").is_success
def test_mount_gradio_app_with_startup(self):
app = FastAPI()
@app.on_event("startup")
async def empty_startup():
return
demo = gr.Interface(
lambda s: f"Hello from ps, {s}!", "textbox", "textbox"
).queue()
demo1 = gr.Interface(
lambda s: f"Hello from py, {s}!", "textbox", "textbox"
).queue()
app = gr.mount_gradio_app(app, demo, path="/ps")
app = gr.mount_gradio_app(app, demo1, path="/py")
# Use context manager to trigger start up events
with TestClient(app) as client:
assert client.get("/ps").is_success
assert client.get("/py").is_success
def test_static_file_missing(self, test_client):
response = test_client.get(r"/static/not-here.js")
assert response.status_code == 404
def test_asset_file_missing(self, test_client):
response = test_client.get(r"/assets/not-here.js")
assert response.status_code == 404
def test_cannot_access_files_in_working_directory(self, test_client):
response = test_client.get(r"/file=not-here.js")
assert response.status_code == 403
def test_cannot_access_directories_in_working_directory(self, test_client):
response = test_client.get(r"/file=gradio")
assert response.status_code == 403
def test_do_not_expose_existence_of_files_outside_working_directory(
self, test_client
):
response = test_client.get(r"/file=../fake-file-that-does-not-exist.js")
assert response.status_code == 403 # not a 404
def test_proxy_route_is_restricted_to_load_urls(self):
gr.context.Context.hf_token = "abcdef"
app = routes.App()
interface = gr.Interface(lambda x: x, "text", "text")
app.configure_app(interface)
with pytest.raises(PermissionError):
app.build_proxy_request(
"https://gradio-tests-test-loading-examples-private.hf.space/file=Bunny.obj"
)
with pytest.raises(PermissionError):
app.build_proxy_request("https://google.com")
interface.proxy_urls = {
"https://gradio-tests-test-loading-examples-private.hf.space"
}
app.build_proxy_request(
"https://gradio-tests-test-loading-examples-private.hf.space/file=Bunny.obj"
)
def test_proxy_does_not_leak_hf_token_externally(self):
gr.context.Context.hf_token = "abcdef"
app = routes.App()
interface = gr.Interface(lambda x: x, "text", "text")
interface.proxy_urls = {
"https://gradio-tests-test-loading-examples-private.hf.space",
"https://google.com",
}
app.configure_app(interface)
r = app.build_proxy_request(
"https://gradio-tests-test-loading-examples-private.hf.space/file=Bunny.obj"
)
assert "authorization" in dict(r.headers)
r = app.build_proxy_request("https://google.com")
assert "authorization" not in dict(r.headers)
def test_can_get_config_that_includes_non_pickle_able_objects(self):
my_dict = {"a": 1, "b": 2, "c": 3}
with Blocks() as demo:
gr.JSON(my_dict.keys())
app, _, _ = demo.launch(prevent_thread_lock=True)
client = TestClient(app)
response = client.get("/")
assert response.is_success
response = client.get("/config/")
assert response.is_success
class TestApp:
def test_create_app(self):
app = routes.App.create_app(Interface(lambda x: x, "text", "text"))
assert isinstance(app, FastAPI)
class TestAuthenticatedRoutes:
def test_post_login(self):
io = Interface(lambda x: x, "text", "text")
app, _, _ = io.launch(
auth=("test", "correct_password"),
prevent_thread_lock=True,
)
client = TestClient(app)
response = client.post(
"/login",
data={"username": "test", "password": "correct_password"},
)
assert response.status_code == 200
response = client.post(
"/login",
data={"username": "test", "password": "incorrect_password"},
)
assert response.status_code == 400
response = client.post(
"/login",
data={"username": " test ", "password": "correct_password"},
)
assert response.status_code == 200
class TestQueueRoutes:
@pytest.mark.asyncio
async def test_queue_join_routes_sets_app_if_none_set(self):
io = Interface(lambda x: x, "text", "text").queue()
io.launch(prevent_thread_lock=True)
io._queue.server_path = None
client = grc.Client(io.local_url)
client.predict("test")
assert io._queue.server_app == io.server_app
class TestDevMode:
def test_mount_gradio_app_set_dev_mode_false(self):
app = FastAPI()
@app.get("/")
def read_main():
return {"message": "Hello!"}
with gr.Blocks() as blocks:
gr.Textbox("Hello from gradio!")
app = routes.mount_gradio_app(app, blocks, path="/gradio")
gradio_fast_api = next(
route for route in app.routes if isinstance(route, starlette.routing.Mount)
)
assert not gradio_fast_api.app.blocks.dev_mode
class TestPassingRequest:
def test_request_included_with_interface(self):
def identity(name, request: gr.Request):
assert isinstance(request.client.host, str)
return name
app, _, _ = gr.Interface(identity, "textbox", "textbox").launch(
prevent_thread_lock=True,
)
client = TestClient(app)
response = client.post("/api/predict/", json={"data": ["test"]})
assert response.status_code == 200
output = dict(response.json())
assert output["data"] == ["test"]
def test_request_included_with_chat_interface(self):
def identity(x, y, request: gr.Request):
assert isinstance(request.client.host, str)
return x
app, _, _ = gr.ChatInterface(identity).launch(
prevent_thread_lock=True,
)
client = TestClient(app)
response = client.post("/api/chat/", json={"data": ["test", None]})
assert response.status_code == 200
output = dict(response.json())
assert output["data"] == ["test", None]
def test_request_included_with_chat_interface_when_streaming(self):
def identity(x, y, request: gr.Request):
assert isinstance(request.client.host, str)
for i in range(len(x)):
yield x[: i + 1]
app, _, _ = (
gr.ChatInterface(identity)
.queue(api_open=True)
.launch(
prevent_thread_lock=True,
)
)
client = TestClient(app)
response = client.post("/api/chat/", json={"data": ["test", None]})
assert response.status_code == 200
output = dict(response.json())
assert output["data"] == ["t", None]
def test_request_get_headers(self):
def identity(name, request: gr.Request):
assert isinstance(request.headers["user-agent"], str)
assert isinstance(request.headers.items(), list)
assert isinstance(request.headers.keys(), list)
assert isinstance(request.headers.values(), list)
assert isinstance(dict(request.headers), dict)
user_agent = request.headers["user-agent"]
assert "testclient" in user_agent
return name
app, _, _ = gr.Interface(identity, "textbox", "textbox").launch(
prevent_thread_lock=True,
)
client = TestClient(app)
response = client.post("/api/predict/", json={"data": ["test"]})
assert response.status_code == 200
output = dict(response.json())
assert output["data"] == ["test"]
def test_request_includes_username_as_none_if_no_auth(self):
def identity(name, request: gr.Request):
assert request.username is None
return name
app, _, _ = gr.Interface(identity, "textbox", "textbox").launch(
prevent_thread_lock=True,
)
client = TestClient(app)
response = client.post("/api/predict/", json={"data": ["test"]})
assert response.status_code == 200
output = dict(response.json())
assert output["data"] == ["test"]
def test_request_includes_username_with_auth(self):
def identity(name, request: gr.Request):
assert request.username == "admin"
return name
app, _, _ = gr.Interface(identity, "textbox", "textbox").launch(
prevent_thread_lock=True, auth=("admin", "password")
)
client = TestClient(app)
client.post(
"/login",
data={"username": "admin", "password": "password"},
)
response = client.post("/api/predict/", json={"data": ["test"]})
assert response.status_code == 200
output = dict(response.json())
assert output["data"] == ["test"]
def test_predict_route_is_blocked_if_api_open_false():
io = Interface(lambda x: x, "text", "text", examples=[["freddy"]]).queue(
api_open=False
)
app, _, _ = io.launch(prevent_thread_lock=True)
assert io.show_api
client = TestClient(app)
result = client.post(
"/api/predict", json={"fn_index": 0, "data": [5], "session_hash": "foo"}
)
assert result.status_code == 404
def test_predict_route_not_blocked_if_queue_disabled():
with Blocks() as demo:
input = Textbox()
output = Textbox()
number = Number()
button = Button()
button.click(
lambda x: f"Hello, {x}!", input, output, queue=False, api_name="not_blocked"
)
button.click(lambda: 42, None, number, queue=True, api_name="blocked")
app, _, _ = demo.queue(api_open=False).launch(
prevent_thread_lock=True, show_api=True
)
assert demo.show_api
client = TestClient(app)
result = client.post("/api/blocked", json={"data": [], "session_hash": "foo"})
assert result.status_code == 404
result = client.post(
"/api/not_blocked", json={"data": ["freddy"], "session_hash": "foo"}
)
assert result.status_code == 200
assert result.json()["data"] == ["Hello, freddy!"]
def test_predict_route_not_blocked_if_routes_open():
with Blocks() as demo:
input = Textbox()
output = Textbox()
button = Button()
button.click(
lambda x: f"Hello, {x}!", input, output, queue=True, api_name="not_blocked"
)
app, _, _ = demo.queue(api_open=True).launch(
prevent_thread_lock=True, show_api=False
)
assert not demo.show_api
client = TestClient(app)
result = client.post(
"/api/not_blocked", json={"data": ["freddy"], "session_hash": "foo"}
)
assert result.status_code == 200
assert result.json()["data"] == ["Hello, freddy!"]
demo.close()
demo.queue(api_open=False).launch(prevent_thread_lock=True, show_api=False)
assert not demo.show_api
def test_show_api_queue_not_enabled():
io = Interface(lambda x: x, "text", "text", examples=[["freddy"]])
app, _, _ = io.launch(prevent_thread_lock=True)
assert io.show_api
io.close()
io.launch(prevent_thread_lock=True, show_api=False)
assert not io.show_api
def test_orjson_serialization():
df = pd.DataFrame(
{
"date_1": pd.date_range("2021-01-01", periods=2),
"date_2": pd.date_range("2022-02-15", periods=2).strftime("%B %d, %Y, %r"),
"number": np.array([0.2233, 0.57281]),
"number_2": np.array([84, 23]).astype(np.int64),
"bool": [True, False],
"markdown": ["# Hello", "# Goodbye"],
}
)
with gr.Blocks() as demo:
gr.DataFrame(df)
app, _, _ = demo.launch(prevent_thread_lock=True)
test_client = TestClient(app)
response = test_client.get("/")
assert response.status_code == 200
demo.close()
def test_file_route_does_not_allow_dot_paths(tmp_path):
dot_file = tmp_path / ".env"
dot_file.write_text("secret=1234")
subdir = tmp_path / "subdir"
subdir.mkdir()
sub_dot_file = subdir / ".env"
sub_dot_file.write_text("secret=1234")
secret_sub_dir = tmp_path / ".versioncontrol"
secret_sub_dir.mkdir()
secret_sub_dir_regular_file = secret_sub_dir / "settings"
secret_sub_dir_regular_file.write_text("token = 8")
with closing(gr.Interface(lambda s: s.name, gr.File(), gr.File())) as io:
app, _, _ = io.launch(prevent_thread_lock=True)
client = TestClient(app)
assert client.get("/file=.env").status_code == 403
assert client.get("/file=subdir/.env").status_code == 403
assert client.get("/file=.versioncontrol/settings").status_code == 403
def test_api_name_set_for_all_events(connect):
with gr.Blocks() as demo:
i = Textbox()
o = Textbox()
btn = Button()
btn1 = Button()
btn2 = Button()
btn3 = Button()
btn4 = Button()
btn5 = Button()
btn6 = Button()
btn7 = Button()
btn8 = Button()
def greet(i):
return "Hello " + i
def goodbye(i):
return "Goodbye " + i
def greet_me(i):
return "Hello"
def say_goodbye(i):
return "Goodbye"
say_goodbye.__name__ = "Say_$$_goodbye"
# Otherwise changed by ruff
foo = lambda s: s # noqa
def foo2(s):
return s + " foo"
foo2.__name__ = "foo-2"
class Callable:
def __call__(self, a) -> str:
return "From __call__"
def from_partial(a, b):
return b + a
part = functools.partial(from_partial, b="From partial: ")
btn.click(greet, i, o)
btn1.click(goodbye, i, o)
btn2.click(greet_me, i, o)
btn3.click(say_goodbye, i, o)
btn4.click(None, i, o)
btn5.click(foo, i, o)
btn6.click(foo2, i, o)
btn7.click(Callable(), i, o)
btn8.click(part, i, o)
with closing(demo) as io:
app, _, _ = io.launch(prevent_thread_lock=True)
client = TestClient(app)
assert client.post(
"/api/greet", json={"data": ["freddy"], "session_hash": "foo"}
).json()["data"] == ["Hello freddy"]
assert client.post(
"/api/goodbye", json={"data": ["freddy"], "session_hash": "foo"}
).json()["data"] == ["Goodbye freddy"]
assert client.post(
"/api/greet_me", json={"data": ["freddy"], "session_hash": "foo"}
).json()["data"] == ["Hello"]
assert client.post(
"/api/Say__goodbye", json={"data": ["freddy"], "session_hash": "foo"}
).json()["data"] == ["Goodbye"]
assert client.post(
"/api/lambda", json={"data": ["freddy"], "session_hash": "foo"}
).json()["data"] == ["freddy"]
assert client.post(
"/api/foo-2", json={"data": ["freddy"], "session_hash": "foo"}
).json()["data"] == ["freddy foo"]
assert client.post(
"/api/Callable", json={"data": ["freddy"], "session_hash": "foo"}
).json()["data"] == ["From __call__"]
assert client.post(
"/api/partial", json={"data": ["freddy"], "session_hash": "foo"}
).json()["data"] == ["From partial: freddy"]
with pytest.raises(FnIndexInferError):
client.post(
"/api/Say_goodbye", json={"data": ["freddy"], "session_hash": "foo"}
)
with connect(demo) as client:
assert client.predict("freddy", api_name="/greet") == "Hello freddy"
assert client.predict("freddy", api_name="/goodbye") == "Goodbye freddy"
assert client.predict("freddy", api_name="/greet_me") == "Hello"
assert client.predict("freddy", api_name="/Say__goodbye") == "Goodbye"
class TestShowAPI:
@patch.object(wasm_utils, "IS_WASM", True)
def test_show_api_false_when_is_wasm_true(self):
interface = Interface(lambda x: x, "text", "text", examples=[["hannah"]])
assert (
interface.show_api is False
), "show_api should be False when IS_WASM is True"
@patch.object(wasm_utils, "IS_WASM", False)
def test_show_api_true_when_is_wasm_false(self):
interface = Interface(lambda x: x, "text", "text", examples=[["hannah"]])
assert (
interface.show_api is True
), "show_api should be True when IS_WASM is False"
def test_component_server_endpoints(connect):
here = os.path.dirname(os.path.abspath(__file__))
with gr.Blocks() as demo:
file_explorer = gr.FileExplorer(root=here)
with closing(demo) as io:
app, _, _ = io.launch(prevent_thread_lock=True)
client = TestClient(app)
success_req = client.post(
"/component_server/",
json={
"session_hash": "123",
"component_id": file_explorer._id,
"fn_name": "ls",
"data": None,
},
)
assert success_req.status_code == 200
assert len(success_req.json()) > 0
fail_req = client.post(
"/component_server/",
json={
"session_hash": "123",
"component_id": file_explorer._id,
"fn_name": "preprocess",
"data": None,
},
)
assert fail_req.status_code == 404
@pytest.mark.parametrize(
"request_url, route_path, root_path, expected_root_url",
[
("http://localhost:7860/", "/", None, "http://localhost:7860"),
(
"http://localhost:7860/demo/test",
"/demo/test",
None,
"http://localhost:7860",
),
(
"http://localhost:7860/demo/test/",
"/demo/test",
None,
"http://localhost:7860",
),
(
"http://localhost:7860/demo/test?query=1",
"/demo/test",
None,
"http://localhost:7860",
),
(
"http://localhost:7860/demo/test?query=1",
"/demo/test/",
"/gradio/",
"http://localhost:7860/gradio",
),
(
"http://localhost:7860/demo/test?query=1",
"/demo/test",
"/gradio/",
"http://localhost:7860/gradio",
),
(
"https://localhost:7860/demo/test?query=1",
"/demo/test",
"/gradio/",
"https://localhost:7860/gradio",
),
],
)
def test_get_root_url(request_url, route_path, root_path, expected_root_url):
request = Request({"path": request_url, "type": "http", "headers": {}})
assert get_root_url(request, route_path, root_path) == expected_root_url
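The parametrized cases above pin down how the root URL is rebuilt from the incoming request, the mounted route path, and an optional root_path. A compact sketch of the rule those cases imply (a standalone illustration, not gradio's actual implementation):

from urllib.parse import urlsplit

def naive_root_url(request_url, route_path, root_path=None):
    # Drop the query string, strip the mounted route path from the end of the
    # request path, then append root_path if one was configured.
    parts = urlsplit(request_url)
    base = f"{parts.scheme}://{parts.netloc}{parts.path}".rstrip("/")
    suffix = route_path.rstrip("/")
    if suffix and base.endswith(suffix):
        base = base[: -len(suffix)]
    if root_path:
        base = base.rstrip("/") + "/" + root_path.strip("/")
    return base.rstrip("/")

For every (request_url, route_path, root_path) triple in the table above, this sketch reproduces the expected_root_url column.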
|
GHSA-hmx6-r76c-85g9
|
glance/store/swift.py
|
@@ -136,7 +136,7 @@ def parse_uri(self, uri):
", you need to change it to use the "
"swift+http:// scheme, like so: "
"swift+http://user:[email protected]/v1/container/obj")
- LOG.debug(_("Invalid store uri %(uri)s: %(reason)s") % locals())
+ LOG.debug(_("Invalid store URI: %(reason)s") % locals())
raise exception.BadStoreUri(message=reason)
pieces = urlparse.urlparse(uri)
@@ -162,8 +162,7 @@ def parse_uri(self, uri):
if creds:
cred_parts = creds.split(':')
if len(cred_parts) != 2:
- reason = (_("Badly formed credentials '%(creds)s' in Swift "
- "URI") % locals())
+ reason = (_("Badly formed credentials in Swift URI."))
LOG.debug(reason)
raise exception.BadStoreUri()
user, key = cred_parts
@@ -181,7 +180,7 @@ def parse_uri(self, uri):
path_parts.insert(0, netloc)
self.auth_or_store_url = '/'.join(path_parts)
except IndexError:
- reason = _("Badly formed Swift URI: %s") % uri
+ reason = _("Badly formed Swift URI.")
LOG.debug(reason)
raise exception.BadStoreUri()
@@ -241,8 +240,8 @@ def get(self, location, connection=None):
except swiftclient.ClientException, e:
if e.http_status == httplib.NOT_FOUND:
uri = location.get_uri()
- raise exception.NotFound(_("Swift could not find image at "
- "uri %(uri)s") % locals())
+ msg = _("Swift could not find image at URI.")
+ raise exception.NotFound(msg)
else:
raise
@@ -375,8 +374,7 @@ def add(self, image_id, image_file, image_size, connection=None):
except swiftclient.ClientException, e:
if e.http_status == httplib.CONFLICT:
raise exception.Duplicate(_("Swift already has an image at "
- "location %s") %
- location.get_uri())
+ "this location"))
msg = (_("Failed to add object to Swift.\n"
"Got error from Swift: %(e)s") % locals())
LOG.error(msg)
@@ -419,8 +417,8 @@ def delete(self, location, connection=None):
except swiftclient.ClientException, e:
if e.http_status == httplib.NOT_FOUND:
uri = location.get_uri()
- raise exception.NotFound(_("Swift could not find image at "
- "uri %(uri)s") % locals())
+ msg = _("Swift could not find image at URI.")
+ raise exception.NotFound(msg)
else:
raise
@@ -578,8 +576,8 @@ def set_acls(self, location, public=False, read_tenants=None,
except swiftclient.ClientException, e:
if e.http_status == httplib.NOT_FOUND:
uri = location.get_uri()
- raise exception.NotFound(_("Swift could not find image at "
- "uri %(uri)s") % locals())
+ msg = _("Swift could not find image at URI.")
+ raise exception.NotFound(msg)
else:
raise
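Every hunk in this patch applies the same rule: debug and error messages must not echo the user-supplied Swift URI or the credentials it can embed, since swift:// locations may carry user:key@ pairs that would otherwise land in Glance's logs. A minimal standalone sketch of that logging pattern (the helper and its scrubbing rule are illustrative, not Glance code):

import logging

LOG = logging.getLogger(__name__)

def reject_bad_location(uri, reason):
    # Log only the static reason; the raw URI may contain "user:key@" credentials.
    LOG.debug("Invalid store URI: %s", reason)
    # If a location must be logged for debugging, strip everything up to '@' first.
    LOG.debug("Rejected location (credentials removed): %s", uri.rsplit("@", 1)[-1])
    raise ValueError(reason)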
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for SWIFT"""
from __future__ import absolute_import
import hashlib
import httplib
import math
import urllib
import urlparse
from glance.common import auth
from glance.common import exception
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
import glance.store
import glance.store.base
import glance.store.location
try:
import swiftclient
except ImportError:
pass
LOG = logging.getLogger(__name__)
DEFAULT_CONTAINER = 'glance'
DEFAULT_LARGE_OBJECT_SIZE = 5 * 1024 # 5GB
DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 200 # 200M
ONE_MB = 1000 * 1024
swift_opts = [
cfg.BoolOpt('swift_enable_snet', default=False),
cfg.StrOpt('swift_store_auth_address'),
cfg.StrOpt('swift_store_user', secret=True),
cfg.StrOpt('swift_store_key', secret=True),
cfg.StrOpt('swift_store_auth_version', default='2'),
cfg.StrOpt('swift_store_region'),
cfg.StrOpt('swift_store_endpoint_type', default='publicURL'),
cfg.StrOpt('swift_store_service_type', default='object-store'),
cfg.StrOpt('swift_store_container',
default=DEFAULT_CONTAINER),
cfg.IntOpt('swift_store_large_object_size',
default=DEFAULT_LARGE_OBJECT_SIZE),
cfg.IntOpt('swift_store_large_object_chunk_size',
default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE),
cfg.BoolOpt('swift_store_create_container_on_put', default=False),
cfg.BoolOpt('swift_store_multi_tenant', default=False),
cfg.ListOpt('swift_store_admin_tenants', default=[]),
]
CONF = cfg.CONF
CONF.register_opts(swift_opts)
class StoreLocation(glance.store.location.StoreLocation):
"""
Class describing a Swift URI. A Swift URI can look like any of
the following:
swift://user:[email protected]/container/obj-id
swift://account:user:[email protected]/container/obj-id
swift+http://user:[email protected]/container/obj-id
swift+https://user:[email protected]/container/obj-id
When using multi-tenant storage, a URI might look like this (a storage URL):
swift+https://example.com/container/obj-id
The swift+http:// URIs indicate there is an HTTP authentication URL.
The default for Swift is an HTTPS authentication URL, so swift:// and
swift+https:// are the same...
"""
def process_specs(self):
self.scheme = self.specs.get('scheme', 'swift+https')
self.user = self.specs.get('user')
self.key = self.specs.get('key')
self.auth_or_store_url = self.specs.get('auth_or_store_url')
self.container = self.specs.get('container')
self.obj = self.specs.get('obj')
def _get_credstring(self):
if self.user and self.key:
return '%s:%s@' % (urllib.quote(self.user), urllib.quote(self.key))
return ''
def get_uri(self):
auth_or_store_url = self.auth_or_store_url
if auth_or_store_url.startswith('http://'):
auth_or_store_url = auth_or_store_url[len('http://'):]
elif auth_or_store_url.startswith('https://'):
auth_or_store_url = auth_or_store_url[len('https://'):]
credstring = self._get_credstring()
auth_or_store_url = auth_or_store_url.strip('/')
container = self.container.strip('/')
obj = self.obj.strip('/')
return '%s://%s%s/%s/%s' % (self.scheme, credstring, auth_or_store_url,
container, obj)
def parse_uri(self, uri):
"""
Parse URLs. This method fixes an issue where credentials specified
in the URL are interpreted differently in Python 2.6.1+ than prior
versions of Python. It also deals with the peculiarity that new-style
Swift URIs have where a username can contain a ':', like so:
swift://account:user:[email protected]/container/obj
"""
# Make sure that URIs that contain multiple schemes, such as:
# swift://user:pass@http://authurl.com/v1/container/obj
# are immediately rejected.
if uri.count('://') != 1:
reason = _("URI cannot contain more than one occurrence "
"of a scheme. If you have specified a URI like "
"swift://user:pass@http://authurl.com/v1/container/obj"
", you need to change it to use the "
"swift+http:// scheme, like so: "
"swift+http://user:[email protected]/v1/container/obj")
LOG.debug(_("Invalid store uri %(uri)s: %(reason)s") % locals())
raise exception.BadStoreUri(message=reason)
pieces = urlparse.urlparse(uri)
assert pieces.scheme in ('swift', 'swift+http', 'swift+https')
self.scheme = pieces.scheme
netloc = pieces.netloc
path = pieces.path.lstrip('/')
if netloc != '':
# > Python 2.6.1
if '@' in netloc:
creds, netloc = netloc.split('@')
else:
creds = None
else:
# Python 2.6.1 compat
# see lp659445 and Python issue7904
if '@' in path:
creds, path = path.split('@')
else:
creds = None
netloc = path[0:path.find('/')].strip('/')
path = path[path.find('/'):].strip('/')
if creds:
cred_parts = creds.split(':')
if len(cred_parts) != 2:
reason = (_("Badly formed credentials '%(creds)s' in Swift "
"URI") % locals())
LOG.debug(reason)
raise exception.BadStoreUri()
user, key = cred_parts
self.user = urllib.unquote(user)
self.key = urllib.unquote(key)
else:
self.user = None
self.key = None
path_parts = path.split('/')
try:
self.obj = path_parts.pop()
self.container = path_parts.pop()
if not netloc.startswith('http'):
# push hostname back into the remaining to build full authurl
path_parts.insert(0, netloc)
self.auth_or_store_url = '/'.join(path_parts)
except IndexError:
reason = _("Badly formed Swift URI: %s") % uri
LOG.debug(reason)
raise exception.BadStoreUri()
@property
def swift_url(self):
"""
Creates a fully-qualified auth url that the Swift client library can
use. The scheme for the auth_url is determined using the scheme
included in the `location` field.
HTTPS is assumed, unless 'swift+http' is specified.
"""
if self.auth_or_store_url.startswith('http'):
return self.auth_or_store_url
else:
if self.scheme in ('swift+https', 'swift'):
auth_scheme = 'https://'
else:
auth_scheme = 'http://'
return ''.join([auth_scheme, self.auth_or_store_url])
def Store(context=None, loc=None):
if (CONF.swift_store_multi_tenant and
(loc is None or loc.store_location.user is None)):
return MultiTenantStore(context, loc)
return SingleTenantStore(context, loc)
class BaseStore(glance.store.base.Store):
CHUNKSIZE = 65536
def get_schemes(self):
return ('swift+https', 'swift', 'swift+http')
def configure(self):
_obj_size = self._option_get('swift_store_large_object_size')
self.large_object_size = _obj_size * ONE_MB
_chunk_size = self._option_get('swift_store_large_object_chunk_size')
self.large_object_chunk_size = _chunk_size * ONE_MB
self.admin_tenants = CONF.swift_store_admin_tenants
self.region = CONF.swift_store_region
self.service_type = CONF.swift_store_service_type
self.endpoint_type = CONF.swift_store_endpoint_type
self.snet = CONF.swift_enable_snet
def get(self, location, connection=None):
location = location.store_location
if not connection:
connection = self.get_connection(location)
try:
resp_headers, resp_body = connection.get_object(
container=location.container, obj=location.obj,
resp_chunk_size=self.CHUNKSIZE)
except swiftclient.ClientException, e:
if e.http_status == httplib.NOT_FOUND:
uri = location.get_uri()
raise exception.NotFound(_("Swift could not find image at "
"uri %(uri)s") % locals())
else:
raise
class ResponseIndexable(glance.store.Indexable):
def another(self):
try:
return self.wrapped.next()
except StopIteration:
return ''
length = int(resp_headers.get('content-length', 0))
return (ResponseIndexable(resp_body, length), length)
def get_size(self, location, connection=None):
location = location.store_location
if not connection:
connection = self.get_connection(location)
try:
resp_headers = connection.head_object(
container=location.container, obj=location.obj)
return int(resp_headers.get('content-length', 0))
except Exception:
return 0
def _option_get(self, param):
result = getattr(CONF, param)
if not result:
reason = (_("Could not find %(param)s in configuration "
"options.") % locals())
LOG.error(reason)
raise exception.BadStoreConfiguration(store_name="swift",
reason=reason)
return result
def add(self, image_id, image_file, image_size, connection=None):
location = self.create_location(image_id)
if not connection:
connection = self.get_connection(location)
self._create_container_if_missing(location.container, connection)
LOG.debug(_("Adding image object '%(obj_name)s' "
"to Swift") % dict(obj_name=location.obj))
try:
if image_size > 0 and image_size < self.large_object_size:
# Image size is known, and is less than large_object_size.
# Send to Swift with regular PUT.
obj_etag = connection.put_object(location.container,
location.obj, image_file,
content_length=image_size)
else:
# Write the image into Swift in chunks.
chunk_id = 1
if image_size > 0:
total_chunks = str(int(
math.ceil(float(image_size) /
float(self.large_object_chunk_size))))
else:
# image_size == 0 is when we don't know the size
# of the image. This can occur with older clients
# that don't inspect the payload size.
LOG.debug(_("Cannot determine image size. Adding as a "
"segmented object to Swift."))
total_chunks = '?'
checksum = hashlib.md5()
combined_chunks_size = 0
while True:
chunk_size = self.large_object_chunk_size
if image_size == 0:
content_length = None
else:
left = image_size - combined_chunks_size
if left == 0:
break
if chunk_size > left:
chunk_size = left
content_length = chunk_size
chunk_name = "%s-%05d" % (location.obj, chunk_id)
reader = ChunkReader(image_file, checksum, chunk_size)
chunk_etag = connection.put_object(
location.container, chunk_name, reader,
content_length=content_length)
bytes_read = reader.bytes_read
msg = _("Wrote chunk %(chunk_name)s (%(chunk_id)d/"
"%(total_chunks)s) of length %(bytes_read)d "
"to Swift returning MD5 of content: "
"%(chunk_etag)s")
LOG.debug(msg % locals())
if bytes_read == 0:
# Delete the last chunk, because it's of zero size.
# This will happen if size == 0.
LOG.debug(_("Deleting final zero-length chunk"))
connection.delete_object(location.container,
chunk_name)
break
chunk_id += 1
combined_chunks_size += bytes_read
# In the case we have been given an unknown image size,
# set the size to the total size of the combined chunks.
if image_size == 0:
image_size = combined_chunks_size
# Now we write the object manifest and return the
# manifest's etag...
manifest = "%s/%s" % (location.container, location.obj)
headers = {'ETag': hashlib.md5("").hexdigest(),
'X-Object-Manifest': manifest}
# The ETag returned for the manifest is actually the
# MD5 hash of the concatenated checksums of the strings
# of each chunk...so we ignore this result in favour of
# the MD5 of the entire image file contents, so that
# users can verify the image file contents accordingly
connection.put_object(location.container, location.obj,
None, headers=headers)
obj_etag = checksum.hexdigest()
# NOTE: We return the user and key here! Have to because
# location is used by the API server to return the actual
# image data. We *really* should consider NOT returning
# the location attribute from GET /images/<ID> and
# GET /images/details
return (location.get_uri(), image_size, obj_etag)
except swiftclient.ClientException, e:
if e.http_status == httplib.CONFLICT:
raise exception.Duplicate(_("Swift already has an image at "
"location %s") %
location.get_uri())
msg = (_("Failed to add object to Swift.\n"
"Got error from Swift: %(e)s") % locals())
LOG.error(msg)
raise glance.store.BackendException(msg)
def delete(self, location, connection=None):
location = location.store_location
if not connection:
connection = self.get_connection(location)
try:
# We request the manifest for the object. If one exists,
# that means the object was uploaded in chunks/segments,
# and we need to delete all the chunks as well as the
# manifest.
manifest = None
try:
headers = connection.head_object(
location.container, location.obj)
manifest = headers.get('x-object-manifest')
except swiftclient.ClientException, e:
if e.http_status != httplib.NOT_FOUND:
raise
if manifest:
# Delete all the chunks before the object manifest itself
obj_container, obj_prefix = manifest.split('/', 1)
segments = connection.get_container(
obj_container, prefix=obj_prefix)[1]
for segment in segments:
# TODO(jaypipes): This would be an easy area to parallelize
# since we're simply sending off parallelizable requests
# to Swift to delete stuff. It's not like we're going to
# be hogging up network or file I/O here...
connection.delete_object(
obj_container, segment['name'])
else:
connection.delete_object(location.container, location.obj)
except swiftclient.ClientException, e:
if e.http_status == httplib.NOT_FOUND:
uri = location.get_uri()
raise exception.NotFound(_("Swift could not find image at "
"uri %(uri)s") % locals())
else:
raise
def _create_container_if_missing(self, container, connection):
"""
Creates a missing container in Swift if the
``swift_store_create_container_on_put`` option is set.
:param container: Name of container to create
:param connection: Connection to swift service
"""
try:
connection.head_container(container)
except swiftclient.ClientException, e:
if e.http_status == httplib.NOT_FOUND:
if CONF.swift_store_create_container_on_put:
try:
connection.put_container(container)
except swiftclient.ClientException, e:
msg = _("Failed to add container to Swift.\n"
"Got error from Swift: %(e)s") % locals()
raise glance.store.BackendException(msg)
else:
msg = (_("The container %(container)s does not exist in "
"Swift. Please set the "
"swift_store_create_container_on_put option"
"to add container to Swift automatically.") %
locals())
raise glance.store.BackendException(msg)
else:
raise
def get_connection(self):
raise NotImplementedError()
def create_location(self):
raise NotImplementedError()
class SingleTenantStore(BaseStore):
EXAMPLE_URL = "swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>"
def configure(self):
super(SingleTenantStore, self).configure()
self.auth_version = self._option_get('swift_store_auth_version')
def configure_add(self):
self.auth_address = self._option_get('swift_store_auth_address')
if self.auth_address.startswith('http://'):
self.scheme = 'swift+http'
else:
self.scheme = 'swift+https'
self.container = CONF.swift_store_container
self.user = self._option_get('swift_store_user')
self.key = self._option_get('swift_store_key')
def create_location(self, image_id):
specs = {'scheme': self.scheme,
'container': self.container,
'obj': str(image_id),
'auth_or_store_url': self.auth_address,
'user': self.user,
'key': self.key}
return StoreLocation(specs)
def get_connection(self, location):
if not location.user:
reason = (_("Location is missing user:password information."))
LOG.debug(reason)
raise exception.BadStoreUri(message=reason)
auth_url = location.swift_url
if not auth_url.endswith('/'):
auth_url += '/'
if self.auth_version == '2':
try:
tenant_name, user = location.user.split(':')
except ValueError:
reason = (_("Badly formed tenant:user '%(user)s' in "
"Swift URI") % {'user': location.user})
LOG.debug(reason)
raise exception.BadStoreUri()
else:
tenant_name = None
user = location.user
os_options = {}
if self.region:
os_options['region_name'] = self.region
os_options['endpoint_type'] = self.endpoint_type
os_options['service_type'] = self.service_type
return swiftclient.Connection(
auth_url, user, location.key,
tenant_name=tenant_name, snet=self.snet,
auth_version=self.auth_version, os_options=os_options)
class MultiTenantStore(BaseStore):
EXAMPLE_URL = "swift://<SWIFT_URL>/<CONTAINER>/<FILE>"
def configure_add(self):
self.container = CONF.swift_store_container
if self.context is None:
reason = _("Multi-tenant Swift storage requires a context.")
raise exception.BadStoreConfiguration(store_name="swift",
reason=reason)
if self.context.service_catalog is None:
reason = _("Multi-tenant Swift storage requires "
"a service catalog.")
raise exception.BadStoreConfiguration(store_name="swift",
reason=reason)
self.storage_url = auth.get_endpoint(
self.context.service_catalog, service_type=self.service_type,
endpoint_region=self.region, endpoint_type=self.endpoint_type)
if self.storage_url.startswith('http://'):
self.scheme = 'swift+http'
else:
self.scheme = 'swift+https'
def delete(self, location, connection=None):
if not connection:
connection = self.get_connection(location.store_location)
super(MultiTenantStore, self).delete(location, connection)
connection.delete_container(location.store_location.container)
def set_acls(self, location, public=False, read_tenants=None,
write_tenants=None, connection=None):
location = location.store_location
if not connection:
connection = self.get_connection(location)
if read_tenants is None:
read_tenants = []
if write_tenants is None:
write_tenants = []
headers = {}
if public:
headers['X-Container-Read'] = ".r:*"
elif read_tenants:
headers['X-Container-Read'] = ','.join(read_tenants)
else:
headers['X-Container-Read'] = ''
write_tenants.extend(self.admin_tenants)
if write_tenants:
headers['X-Container-Write'] = ','.join(write_tenants)
else:
headers['X-Container-Write'] = ''
try:
connection.post_container(location.container, headers=headers)
except swiftclient.ClientException, e:
if e.http_status == httplib.NOT_FOUND:
uri = location.get_uri()
raise exception.NotFound(_("Swift could not find image at "
"uri %(uri)s") % locals())
else:
raise
def create_location(self, image_id):
specs = {'scheme': self.scheme,
'container': self.container + '_' + str(image_id),
'obj': str(image_id),
'auth_or_store_url': self.storage_url}
return StoreLocation(specs)
def get_connection(self, location):
return swiftclient.Connection(
None, self.context.user, None,
preauthurl=location.swift_url,
preauthtoken=self.context.auth_tok,
tenant_name=self.context.tenant,
auth_version='2', snet=self.snet)
class ChunkReader(object):
def __init__(self, fd, checksum, total):
self.fd = fd
self.checksum = checksum
self.total = total
self.bytes_read = 0
def read(self, i):
left = self.total - self.bytes_read
if i > left:
i = left
result = self.fd.read(i)
self.bytes_read += len(result)
self.checksum.update(result)
return result
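BaseStore.add() above uploads big images using Swift's dynamic-large-object pattern: the payload is written as numbered segment objects, then a zero-byte manifest object whose X-Object-Manifest header points at the segment prefix, so a GET on the manifest streams the concatenated segments. A condensed, hypothetical sketch of that pattern (conn is assumed to be a swiftclient.Connection; names and chunk size are illustrative):

import hashlib

def put_large_object(conn, container, name, stream, chunk_size=200 * 1024 * 1024):
    # Upload `stream` as numbered segments, then write the manifest object.
    checksum = hashlib.md5()
    chunk_id = 1
    while True:
        data = stream.read(chunk_size)
        if not data:
            break
        checksum.update(data)
        conn.put_object(container, "%s-%05d" % (name, chunk_id), data)
        chunk_id += 1
    # Zero-byte manifest; downloads of `name` concatenate the segments in order.
    conn.put_object(container, name, None,
                    headers={"X-Object-Manifest": "%s/%s" % (container, name)})
    return checksum.hexdigest()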
|
GHSA-xv7j-2v4w-cjvh
|
PIL/EpsImagePlugin.py
|
@@ -67,7 +67,8 @@ def Ghostscript(tile, size, fp, scale=1):
import tempfile, os, subprocess
- file = tempfile.mktemp()
+ out_fd, file = tempfile.mkstemp()
+ os.close(out_fd)
# Build ghostscript command
command = ["gs",
|
#
# The Python Imaging Library.
# $Id$
#
# EPS file handling
#
# History:
# 1995-09-01 fl Created (0.1)
# 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2)
# 1996-08-22 fl Don't choke on floating point BoundingBox values
# 1996-08-23 fl Handle files from Macintosh (0.3)
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
# 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5)
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.5"
import re
import io
from PIL import Image, ImageFile, _binary
#
# --------------------------------------------------------------------
i32 = _binary.i32le
o32 = _binary.o32le
split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$")
field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$")
gs_windows_binary = None
import sys
if sys.platform.startswith('win'):
import shutil
if hasattr(shutil, 'which'):
which = shutil.which
else:
# Python < 3.3
import distutils.spawn
which = distutils.spawn.find_executable
for binary in ('gswin32c', 'gswin64c', 'gs'):
if which(binary) is not None:
gs_windows_binary = binary
break
else:
gs_windows_binary = False
def Ghostscript(tile, size, fp, scale=1):
"""Render an image using Ghostscript"""
# Unpack decoder tile
decoder, tile, offset, data = tile[0]
length, bbox = data
#Hack to support hi-res rendering
scale = int(scale) or 1
orig_size = size
orig_bbox = bbox
size = (size[0] * scale, size[1] * scale)
bbox = [bbox[0], bbox[1], bbox[2] * scale, bbox[3] * scale]
#print("Ghostscript", scale, size, orig_size, bbox, orig_bbox)
import tempfile, os, subprocess
file = tempfile.mktemp()
# Build ghostscript command
command = ["gs",
"-q", # quite mode
"-g%dx%d" % size, # set output geometry (pixels)
"-r%d" % (72*scale), # set input DPI (dots per inch)
"-dNOPAUSE -dSAFER", # don't pause between pages, safe mode
"-sDEVICE=ppmraw", # ppm driver
"-sOutputFile=%s" % file,# output file
]
if gs_windows_binary is not None:
if gs_windows_binary is False:
raise WindowsError('Unable to locate Ghostscript on paths')
command[0] = gs_windows_binary
# push data through ghostscript
try:
gs = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# adjust for image origin
if bbox[0] != 0 or bbox[1] != 0:
gs.stdin.write(("%d %d translate\n" % (-bbox[0], -bbox[1])).encode('ascii'))
fp.seek(offset)
while length > 0:
s = fp.read(8192)
if not s:
break
length = length - len(s)
gs.stdin.write(s)
gs.stdin.close()
status = gs.wait()
if status:
raise IOError("gs failed (status %d)" % status)
im = Image.core.open_ppm(file)
finally:
try: os.unlink(file)
except: pass
return im
class PSFile:
"""Wrapper that treats either CR or LF as end of line."""
def __init__(self, fp):
self.fp = fp
self.char = None
def __getattr__(self, id):
v = getattr(self.fp, id)
setattr(self, id, v)
return v
def seek(self, offset, whence=0):
self.char = None
self.fp.seek(offset, whence)
def read(self, count):
return self.fp.read(count).decode('latin-1')
def tell(self):
pos = self.fp.tell()
if self.char:
pos = pos - 1
return pos
def readline(self):
s = b""
if self.char:
c = self.char
self.char = None
else:
c = self.fp.read(1)
while c not in b"\r\n":
s = s + c
c = self.fp.read(1)
if c == b"\r":
self.char = self.fp.read(1)
if self.char == b"\n":
self.char = None
return s.decode('latin-1') + "\n"
def _accept(prefix):
return prefix[:4] == b"%!PS" or i32(prefix) == 0xC6D3D0C5
##
# Image plugin for Encapsulated Postscript. This plugin supports only
# a few variants of this format.
class EpsImageFile(ImageFile.ImageFile):
"""EPS File Parser for the Python Imaging Library"""
format = "EPS"
format_description = "Encapsulated Postscript"
def _open(self):
# FIXME: should check the first 512 bytes to see if this
# really is necessary (platform-dependent, though...)
fp = PSFile(self.fp)
# HEAD
s = fp.read(512)
if s[:4] == "%!PS":
offset = 0
fp.seek(0, 2)
length = fp.tell()
elif i32(s) == 0xC6D3D0C5:
offset = i32(s[4:])
length = i32(s[8:])
fp.seek(offset)
else:
raise SyntaxError("not an EPS file")
fp.seek(offset)
box = None
self.mode = "RGB"
self.size = 1, 1 # FIXME: huh?
#
# Load EPS header
s = fp.readline()
while s:
if len(s) > 255:
raise SyntaxError("not an EPS file")
if s[-2:] == '\r\n':
s = s[:-2]
elif s[-1:] == '\n':
s = s[:-1]
try:
m = split.match(s)
except re.error as v:
raise SyntaxError("not an EPS file")
if m:
k, v = m.group(1, 2)
self.info[k] = v
if k == "BoundingBox":
try:
# Note: The DSC spec says that BoundingBox
# fields should be integers, but some drivers
# put floating point values there anyway.
box = [int(float(s)) for s in v.split()]
self.size = box[2] - box[0], box[3] - box[1]
self.tile = [("eps", (0,0) + self.size, offset,
(length, box))]
except:
pass
else:
m = field.match(s)
if m:
k = m.group(1)
if k == "EndComments":
break
if k[:8] == "PS-Adobe":
self.info[k[:8]] = k[9:]
else:
self.info[k] = ""
elif s[0:1] == '%':
# handle non-DSC Postscript comments that some
# tools mistakenly put in the Comments section
pass
else:
raise IOError("bad EPS header")
s = fp.readline()
if s[:1] != "%":
break
#
# Scan for an "ImageData" descriptor
while s[0] == "%":
if len(s) > 255:
raise SyntaxError("not an EPS file")
if s[-2:] == '\r\n':
s = s[:-2]
elif s[-1:] == '\n':
s = s[:-1]
if s[:11] == "%ImageData:":
[x, y, bi, mo, z3, z4, en, id] =\
s[11:].split(None, 7)
x = int(x); y = int(y)
bi = int(bi)
mo = int(mo)
en = int(en)
if en == 1:
decoder = "eps_binary"
elif en == 2:
decoder = "eps_hex"
else:
break
if bi != 8:
break
if mo == 1:
self.mode = "L"
elif mo == 2:
self.mode = "LAB"
elif mo == 3:
self.mode = "RGB"
else:
break
if id[:1] == id[-1:] == '"':
id = id[1:-1]
# Scan forward to the actual image data
while True:
s = fp.readline()
if not s:
break
if s[:len(id)] == id:
self.size = x, y
self.tile2 = [(decoder,
(0, 0, x, y),
fp.tell(),
0)]
return
s = fp.readline()
if not s:
break
if not box:
raise IOError("cannot determine EPS bounding box")
def load(self, scale=1):
# Load EPS via Ghostscript
if not self.tile:
return
self.im = Ghostscript(self.tile, self.size, self.fp, scale)
self.mode = self.im.mode
self.size = self.im.size
self.tile = []
#
# --------------------------------------------------------------------
def _save(im, fp, filename, eps=1):
"""EPS Writer for the Python Imaging Library."""
#
# make sure image data is available
im.load()
#
# determine postscript image mode
if im.mode == "L":
operator = (8, 1, "image")
elif im.mode == "RGB":
operator = (8, 3, "false 3 colorimage")
elif im.mode == "CMYK":
operator = (8, 4, "false 4 colorimage")
else:
raise ValueError("image mode is not supported")
class NoCloseStream:
def __init__(self, fp):
self.fp = fp
def __getattr__(self, name):
return getattr(self.fp, name)
def close(self):
pass
base_fp = fp
fp = io.TextIOWrapper(NoCloseStream(fp), encoding='latin-1')
if eps:
#
# write EPS header
fp.write("%!PS-Adobe-3.0 EPSF-3.0\n")
fp.write("%%Creator: PIL 0.1 EpsEncode\n")
#fp.write("%%CreationDate: %s"...)
fp.write("%%%%BoundingBox: 0 0 %d %d\n" % im.size)
fp.write("%%Pages: 1\n")
fp.write("%%EndComments\n")
fp.write("%%Page: 1 1\n")
fp.write("%%ImageData: %d %d " % im.size)
fp.write("%d %d 0 1 1 \"%s\"\n" % operator)
#
# image header
fp.write("gsave\n")
fp.write("10 dict begin\n")
fp.write("/buf %d string def\n" % (im.size[0] * operator[1]))
fp.write("%d %d scale\n" % im.size)
fp.write("%d %d 8\n" % im.size) # <= bits
fp.write("[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
fp.write("{ currentfile buf readhexstring pop } bind\n")
fp.write(operator[2] + "\n")
fp.flush()
ImageFile._save(im, base_fp, [("eps", (0,0)+im.size, 0, None)])
fp.write("\n%%%%EndBinary\n")
fp.write("grestore end\n")
fp.flush()
#
# --------------------------------------------------------------------
Image.register_open(EpsImageFile.format, EpsImageFile, _accept)
Image.register_save(EpsImageFile.format, _save)
Image.register_extension(EpsImageFile.format, ".ps")
Image.register_extension(EpsImageFile.format, ".eps")
Image.register_mime(EpsImageFile.format, "application/postscript")
|
PYSEC-2014-22
|
PIL/Image.py
|
@@ -495,14 +495,17 @@ def _copy(self):
self.readonly = 0
def _dump(self, file=None, format=None):
- import tempfile
+ import tempfile, os
if not file:
- file = tempfile.mktemp()
+ f, file = tempfile.mkstemp(format or '')
+ os.close(f)
+
self.load()
if not format or format == "PPM":
self.im.save_ppm(file)
else:
- file = file + "." + format
+ if file.endswith(format):
+ file = file + "." + format
self.save(file, format)
return file
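Same mkstemp() hardening as the EPS fix above; here the image format is passed as mkstemp()'s suffix argument so the generated temporary name already ends with the format string. A tiny illustration (hypothetical format value):

import os
import tempfile

fd, path = tempfile.mkstemp("PPM")  # e.g. '/tmp/tmpa1b2c3PPM'; the suffix is appended verbatim
os.close(fd)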
|
#
# The Python Imaging Library.
# $Id$
#
# the Image class wrapper
#
# partial release history:
# 1995-09-09 fl Created
# 1996-03-11 fl PIL release 0.0 (proof of concept)
# 1996-04-30 fl PIL release 0.1b1
# 1999-07-28 fl PIL release 1.0 final
# 2000-06-07 fl PIL release 1.1
# 2000-10-20 fl PIL release 1.1.1
# 2001-05-07 fl PIL release 1.1.2
# 2002-03-15 fl PIL release 1.1.3
# 2003-05-10 fl PIL release 1.1.4
# 2005-03-28 fl PIL release 1.1.5
# 2006-12-02 fl PIL release 1.1.6
# 2009-11-15 fl PIL release 1.1.7
#
# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved.
# Copyright (c) 1995-2009 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
from PIL import VERSION, PILLOW_VERSION, _plugins
import warnings
class _imaging_not_installed:
# module placeholder
def __getattr__(self, id):
raise ImportError("The _imaging C module is not installed")
try:
# give Tk a chance to set up the environment, in case we're
# using an _imaging module linked against libtcl/libtk (use
# __import__ to hide this from naive packagers; we don't really
# depend on Tk unless ImageTk is used, and that module already
# imports Tkinter)
__import__("FixTk")
except ImportError:
pass
try:
# If the _imaging C module is not present, you can still use
# the "open" function to identify files, but you cannot load
# them. Note that other modules should not refer to _imaging
# directly; import Image and use the Image.core variable instead.
from PIL import _imaging as core
if PILLOW_VERSION != getattr(core, 'PILLOW_VERSION', None):
raise ImportError("The _imaging extension was built for another "
" version of Pillow or PIL")
except ImportError as v:
core = _imaging_not_installed()
# Explanations for ways that we know we might have an import error
if str(v).startswith("Module use of python"):
# The _imaging C module is present, but not compiled for
# the right version (windows only). Print a warning, if
# possible.
warnings.warn(
"The _imaging extension was built for another version "
"of Python.",
RuntimeWarning
)
elif str(v).startswith("The _imaging extension"):
warnings.warn(str(v), RuntimeWarning)
elif "Symbol not found: _PyUnicodeUCS2_FromString" in str(v):
warnings.warn(
"The _imaging extension was built for Python with UCS2 support; "
"recompile PIL or build Python --without-wide-unicode. ",
RuntimeWarning
)
elif "Symbol not found: _PyUnicodeUCS4_FromString" in str(v):
warnings.warn(
"The _imaging extension was built for Python with UCS4 support; "
"recompile PIL or build Python --with-wide-unicode. ",
RuntimeWarning
)
# Fail here anyway. Don't let people run with a mostly broken Pillow.
raise
try:
import builtins
except ImportError:
import __builtin__
builtins = __builtin__
from PIL import ImageMode
from PIL._binary import i8, o8
from PIL._util import isPath, isStringType
import os, sys
# type stuff
import collections
import numbers
def isImageType(t):
"""
Checks if an object is an image object.
.. warning::
This function is for internal use only.
:param t: object to check if it's an image
:returns: True if the object is an image
"""
return hasattr(t, "im")
#
# Debug level
DEBUG = 0
#
# Constants (also defined in _imagingmodule.c!)
NONE = 0
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
ROTATE_90 = 2
ROTATE_180 = 3
ROTATE_270 = 4
# transforms
AFFINE = 0
EXTENT = 1
PERSPECTIVE = 2
QUAD = 3
MESH = 4
# resampling filters
NONE = 0
NEAREST = 0
ANTIALIAS = 1 # 3-lobed lanczos
LINEAR = BILINEAR = 2
CUBIC = BICUBIC = 3
# dithers
NONE = 0
NEAREST = 0
ORDERED = 1 # Not yet implemented
RASTERIZE = 2 # Not yet implemented
FLOYDSTEINBERG = 3 # default
# palettes/quantizers
WEB = 0
ADAPTIVE = 1
MEDIANCUT = 0
MAXCOVERAGE = 1
FASTOCTREE = 2
# categories
NORMAL = 0
SEQUENCE = 1
CONTAINER = 2
if hasattr(core, 'DEFAULT_STRATEGY'):
DEFAULT_STRATEGY = core.DEFAULT_STRATEGY
FILTERED = core.FILTERED
HUFFMAN_ONLY = core.HUFFMAN_ONLY
RLE = core.RLE
FIXED = core.FIXED
# --------------------------------------------------------------------
# Registries
ID = []
OPEN = {}
MIME = {}
SAVE = {}
EXTENSION = {}
# --------------------------------------------------------------------
# Modes supported by this version
_MODEINFO = {
# NOTE: this table will be removed in future versions. use
# getmode* functions or ImageMode descriptors instead.
# official modes
"1": ("L", "L", ("1",)),
"L": ("L", "L", ("L",)),
"I": ("L", "I", ("I",)),
"F": ("L", "F", ("F",)),
"P": ("RGB", "L", ("P",)),
"RGB": ("RGB", "L", ("R", "G", "B")),
"RGBX": ("RGB", "L", ("R", "G", "B", "X")),
"RGBA": ("RGB", "L", ("R", "G", "B", "A")),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K")),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")),
"LAB": ("RGB", "L", ("L", "A", "B")),
# Experimental modes include I;16, I;16L, I;16B, RGBa, BGR;15, and
# BGR;24. Use these modes only if you know exactly what you're
# doing...
}
if sys.byteorder == 'little':
_ENDIAN = '<'
else:
_ENDIAN = '>'
_MODE_CONV = {
# official modes
"1": ('|b1', None), # broken
"L": ('|u1', None),
"I": (_ENDIAN + 'i4', None),
"F": (_ENDIAN + 'f4', None),
"P": ('|u1', None),
"RGB": ('|u1', 3),
"RGBX": ('|u1', 4),
"RGBA": ('|u1', 4),
"CMYK": ('|u1', 4),
"YCbCr": ('|u1', 3),
"LAB": ('|u1', 3), # UNDONE - unsigned |u1i1i1
# I;16 == I;16L, and I;32 == I;32L
"I;16": ('<u2', None),
"I;16B": ('>u2', None),
"I;16L": ('<u2', None),
"I;16S": ('<i2', None),
"I;16BS": ('>i2', None),
"I;16LS": ('<i2', None),
"I;32": ('<u4', None),
"I;32B": ('>u4', None),
"I;32L": ('<u4', None),
"I;32S": ('<i4', None),
"I;32BS": ('>i4', None),
"I;32LS": ('<i4', None),
}
def _conv_type_shape(im):
shape = im.size[1], im.size[0]
typ, extra = _MODE_CONV[im.mode]
if extra is None:
return shape, typ
else:
return shape+(extra,), typ
MODES = sorted(_MODEINFO.keys())
# raw modes that may be memory mapped. NOTE: if you change this, you
# may have to modify the stride calculation in map.c too!
_MAPMODES = ("L", "P", "RGBX", "RGBA", "CMYK", "I;16", "I;16L", "I;16B")
def getmodebase(mode):
"""
Gets the "base" mode for given mode. This function returns "L" for
images that contain grayscale data, and "RGB" for images that
contain color data.
:param mode: Input mode.
:returns: "L" or "RGB".
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode.getmode(mode).basemode
def getmodetype(mode):
"""
Gets the storage type mode. Given a mode, this function returns a
single-layer mode suitable for storing individual bands.
:param mode: Input mode.
:returns: "L", "I", or "F".
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode.getmode(mode).basetype
def getmodebandnames(mode):
"""
Gets a list of individual band names. Given a mode, this function returns
a tuple containing the names of individual bands (use
:py:method:`~PIL.Image.getmodetype` to get the mode used to store each
individual band).
:param mode: Input mode.
:returns: A tuple containing band names. The length of the tuple
gives the number of bands in an image of the given mode.
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode.getmode(mode).bands
def getmodebands(mode):
"""
Gets the number of individual bands for this mode.
:param mode: Input mode.
:returns: The number of bands in this mode.
:exception KeyError: If the input mode was not a standard mode.
"""
return len(ImageMode.getmode(mode).bands)
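# A quick illustration of the helpers above, using entries from the _MODEINFO
# table (the values shown are exactly what that table implies):
#   getmodebase("RGBA")      -> "RGB"
#   getmodetype("RGB")       -> "L"
#   getmodebandnames("RGB")  -> ("R", "G", "B")
#   getmodebands("CMYK")     -> 4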
# --------------------------------------------------------------------
# Helpers
_initialized = 0
def preinit():
"Explicitly load standard file format drivers."
global _initialized
if _initialized >= 1:
return
try:
from PIL import BmpImagePlugin
except ImportError:
pass
try:
from PIL import GifImagePlugin
except ImportError:
pass
try:
from PIL import JpegImagePlugin
except ImportError:
pass
try:
from PIL import PpmImagePlugin
except ImportError:
pass
try:
from PIL import PngImagePlugin
except ImportError:
pass
# try:
# import TiffImagePlugin
# except ImportError:
# pass
_initialized = 1
def init():
"""
Explicitly initializes the Python Imaging Library. This function
loads all available file format drivers.
"""
global _initialized
if _initialized >= 2:
return 0
for plugin in _plugins:
try:
if DEBUG:
print ("Importing %s"%plugin)
__import__("PIL.%s"%plugin, globals(), locals(), [])
except ImportError:
if DEBUG:
print("Image: failed to import", end=' ')
print(plugin, ":", sys.exc_info()[1])
if OPEN or SAVE:
_initialized = 2
return 1
# --------------------------------------------------------------------
# Codec factories (used by tobytes/frombytes and ImageFile.load)
def _getdecoder(mode, decoder_name, args, extra=()):
# tweak arguments
if args is None:
args = ()
elif not isinstance(args, tuple):
args = (args,)
try:
# get decoder
decoder = getattr(core, decoder_name + "_decoder")
# print(decoder, mode, args + extra)
return decoder(mode, *args + extra)
except AttributeError:
raise IOError("decoder %s not available" % decoder_name)
def _getencoder(mode, encoder_name, args, extra=()):
# tweak arguments
if args is None:
args = ()
elif not isinstance(args, tuple):
args = (args,)
try:
# get encoder
encoder = getattr(core, encoder_name + "_encoder")
# print(encoder, mode, args + extra)
return encoder(mode, *args + extra)
except AttributeError:
raise IOError("encoder %s not available" % encoder_name)
# --------------------------------------------------------------------
# Simple expression analyzer
def coerce_e(value):
return value if isinstance(value, _E) else _E(value)
class _E:
def __init__(self, data):
self.data = data
def __add__(self, other):
return _E((self.data, "__add__", coerce_e(other).data))
def __mul__(self, other):
return _E((self.data, "__mul__", coerce_e(other).data))
def _getscaleoffset(expr):
stub = ["stub"]
data = expr(_E(stub)).data
try:
(a, b, c) = data # simplified syntax
if (a is stub and b == "__mul__" and isinstance(c, numbers.Number)):
return c, 0.0
if (a is stub and b == "__add__" and isinstance(c, numbers.Number)):
return 1.0, c
except TypeError: pass
try:
((a, b, c), d, e) = data # full syntax
if (a is stub and b == "__mul__" and isinstance(c, numbers.Number) and
d == "__add__" and isinstance(e, numbers.Number)):
return c, e
except TypeError: pass
raise ValueError("illegal expression")
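# For example, _getscaleoffset(lambda x: x * 2 + 10) walks the recorded
# expression tree and returns (2, 10); expressions that do not reduce to
# "x * scale + offset" fall through to the ValueError above.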
# --------------------------------------------------------------------
# Implementation wrapper
class Image:
"""
This class represents an image object. To create
:py:class:`~PIL.Image.Image` objects, use the appropriate factory
functions. There's hardly ever any reason to call the Image constructor
directly.
* :py:func:`~PIL.Image.open`
* :py:func:`~PIL.Image.new`
* :py:func:`~PIL.Image.frombytes`
"""
format = None
format_description = None
def __init__(self):
# FIXME: take "new" parameters / other image?
# FIXME: turn mode and size into delegating properties?
self.im = None
self.mode = ""
self.size = (0, 0)
self.palette = None
self.info = {}
self.category = NORMAL
self.readonly = 0
def _new(self, im):
new = Image()
new.im = im
new.mode = im.mode
new.size = im.size
new.palette = self.palette
if im.mode == "P" and not new.palette:
from PIL import ImagePalette
new.palette = ImagePalette.ImagePalette()
try:
new.info = self.info.copy()
except AttributeError:
# fallback (pre-1.5.2)
new.info = {}
for k, v in self.info:
new.info[k] = v
return new
_makeself = _new # compatibility
def _copy(self):
self.load()
self.im = self.im.copy()
self.readonly = 0
def _dump(self, file=None, format=None):
import tempfile
if not file:
file = tempfile.mktemp()
self.load()
if not format or format == "PPM":
self.im.save_ppm(file)
else:
file = file + "." + format
self.save(file, format)
return file
def __repr__(self):
return "<%s.%s image mode=%s size=%dx%d at 0x%X>" % (
self.__class__.__module__, self.__class__.__name__,
self.mode, self.size[0], self.size[1],
id(self)
)
def __getattr__(self, name):
if name == "__array_interface__":
# numpy array interface support
new = {}
shape, typestr = _conv_type_shape(self)
new['shape'] = shape
new['typestr'] = typestr
new['data'] = self.tobytes()
return new
raise AttributeError(name)
def tobytes(self, encoder_name="raw", *args):
"""
Return image as a bytes object
:param encoder_name: What encoder to use. The default is to
use the standard "raw" encoder.
:param args: Extra arguments to the encoder.
:rtype: A bytes object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if encoder_name == "raw" and args == ():
args = self.mode
self.load()
# unpack data
e = _getencoder(self.mode, encoder_name, args)
e.setimage(self.im)
bufsize = max(65536, self.size[0] * 4) # see RawEncode.c
data = []
while True:
l, s, d = e.encode(bufsize)
data.append(d)
if s:
break
if s < 0:
raise RuntimeError("encoder error %d in tobytes" % s)
return b"".join(data)
# Declare tostring as alias to tobytes
def tostring(self, *args, **kw):
warnings.warn(
'tostring() is deprecated. Please call tobytes() instead.',
DeprecationWarning,
stacklevel=2,
)
return self.tobytes(*args, **kw)
def tobitmap(self, name="image"):
"""
Returns the image converted to an X11 bitmap.
.. note:: This method only works for mode "1" images.
:param name: The name prefix to use for the bitmap variables.
:returns: A string containing an X11 bitmap.
:raises ValueError: If the mode is not "1"
"""
self.load()
if self.mode != "1":
raise ValueError("not a bitmap")
data = self.tobytes("xbm")
return b"".join([("#define %s_width %d\n" % (name, self.size[0])).encode('ascii'),
("#define %s_height %d\n"% (name, self.size[1])).encode('ascii'),
("static char %s_bits[] = {\n" % name).encode('ascii'), data, b"};"])
def frombytes(self, data, decoder_name="raw", *args):
"""
Loads this image with pixel data from a bytes object.
This method is similar to the :py:func:`~PIL.Image.frombytes` function,
but loads data into this image instead of creating a new image object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
# default format
if decoder_name == "raw" and args == ():
args = self.mode
# unpack data
d = _getdecoder(self.mode, decoder_name, args)
d.setimage(self.im)
s = d.decode(data)
if s[0] >= 0:
raise ValueError("not enough image data")
if s[1] != 0:
raise ValueError("cannot decode image data")
def fromstring(self, *args, **kw):
"""Deprecated alias to frombytes.
.. deprecated:: 2.0
"""
warnings.warn('fromstring() is deprecated. Please call frombytes() instead.', DeprecationWarning)
return self.frombytes(*args, **kw)
def load(self):
"""
Allocates storage for the image and loads the pixel data. In
normal cases, you don't need to call this method, since the
Image class automatically loads an opened image when it is
accessed for the first time.
:returns: An image access object.
"""
if self.im and self.palette and self.palette.dirty:
# realize palette
self.im.putpalette(*self.palette.getdata())
self.palette.dirty = 0
self.palette.mode = "RGB"
self.palette.rawmode = None
if "transparency" in self.info:
if isinstance(self.info["transparency"], int):
self.im.putpalettealpha(self.info["transparency"], 0)
else:
self.im.putpalettealphas(self.info["transparency"])
self.palette.mode = "RGBA"
if self.im:
return self.im.pixel_access(self.readonly)
def verify(self):
"""
Verifies the contents of a file. For data read from a file, this
method attempts to determine if the file is broken, without
actually decoding the image data. If this method finds any
problems, it raises suitable exceptions. If you need to load
the image after using this method, you must reopen the image
file.
"""
pass
def convert(self, mode=None, matrix=None, dither=None,
palette=WEB, colors=256):
"""
Returns a converted copy of this image. For the "P" mode, this
method translates pixels through the palette. If mode is
omitted, a mode is chosen so that all information in the image
and the palette can be represented without a palette.
The current version supports all possible conversions between
"L", "RGB" and "CMYK." The **matrix** argument only supports "L"
and "RGB".
When translating a color image to black and white (mode "L"),
the library uses the ITU-R 601-2 luma transform::
L = R * 299/1000 + G * 587/1000 + B * 114/1000
The default method of converting a greyscale ("L") or "RGB"
image into a bilevel (mode "1") image uses Floyd-Steinberg
dither to approximate the original image luminosity levels. If
dither is NONE, all non-zero values are set to 255 (white). To
use other thresholds, use the :py:meth:`~PIL.Image.Image.point`
method.
:param mode: The requested mode.
:param matrix: An optional conversion matrix. If given, this
should be a 4- or 12-tuple containing floating point values.
:param dither: Dithering method, used when converting from
mode "RGB" to "P" or from "RGB" or "L" to "1".
Available methods are NONE or FLOYDSTEINBERG (default).
:param palette: Palette to use when converting from mode "RGB"
to "P". Available palettes are WEB or ADAPTIVE.
:param colors: Number of colors to use for the ADAPTIVE palette.
Defaults to 256.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if not mode:
# determine default mode
if self.mode == "P":
self.load()
if self.palette:
mode = self.palette.mode
else:
mode = "RGB"
else:
return self.copy()
self.load()
if matrix:
# matrix conversion
if mode not in ("L", "RGB"):
raise ValueError("illegal conversion")
im = self.im.convert_matrix(mode, matrix)
return self._new(im)
if mode == "P" and palette == ADAPTIVE:
im = self.im.quantize(colors)
return self._new(im)
# colorspace conversion
if dither is None:
dither = FLOYDSTEINBERG
# Use transparent conversion to promote from transparent color to an alpha channel.
if self.mode in ("L", "RGB") and mode == "RGBA" and "transparency" in self.info:
return self._new(self.im.convert_transparent(mode, self.info['transparency']))
try:
im = self.im.convert(mode, dither)
except ValueError:
try:
# normalize source image and try again
im = self.im.convert(getmodebase(self.mode))
im = im.convert(mode, dither)
except KeyError:
raise ValueError("illegal conversion")
return self._new(im)
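# Illustrative sketch: converting to greyscale and then to a bilevel image without
# dithering. Assumes "photo.jpg" is an existing RGB image.
from PIL import Image
im = Image.open("photo.jpg")
grey = im.convert("L")                         # ITU-R 601-2 luma transform
bw = grey.convert("1", dither=Image.NONE)      # plain threshold instead of Floyd-Steinberg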
def quantize(self, colors=256, method=0, kmeans=0, palette=None):
# methods:
# 0 = median cut
# 1 = maximum coverage
# 2 = fast octree
# NOTE: this functionality will be moved to the extended
# quantizer interface in a later version of PIL.
self.load()
if palette:
# use palette from reference image
palette.load()
if palette.mode != "P":
raise ValueError("bad mode for palette image")
if self.mode != "RGB" and self.mode != "L":
raise ValueError(
"only RGB or L mode images can be quantized to a palette"
)
im = self.im.convert("P", 1, palette.im)
return self._makeself(im)
im = self.im.quantize(colors, method, kmeans)
return self._new(im)
def copy(self):
"""
Copies this image. Use this method if you wish to paste things
into an image, but still retain the original.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
im = self.im.copy()
return self._new(im)
def crop(self, box=None):
"""
Returns a rectangular region from this image. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate.
This is a lazy operation. Changes to the source image may or
may not be reflected in the cropped image. To break the
connection, call the :py:meth:`~PIL.Image.Image.load` method on
the cropped copy.
:param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
if box is None:
return self.copy()
# lazy operation
return _ImageCrop(self, box)
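# Illustrative sketch: crop() is lazy, so call load() on the result to detach it
# from the source image. The box coordinates are placeholders.
from PIL import Image
im = Image.open("photo.jpg")
region = im.crop((10, 10, 110, 110))    # left, upper, right, lower
region.load()                           # break the link to the original pixels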
def draft(self, mode, size):
"""
Configures the image file loader so it returns a version of the
image that as closely as possible matches the given mode and
size. For example, you can use this method to convert a color
JPEG to greyscale while loading it, or to extract a 128x192
version from a PCD file.
Note that this method modifies the :py:class:`~PIL.Image.Image` object
in place. If the image has already been loaded, this method has no
effect.
:param mode: The requested mode.
:param size: The requested size.
"""
pass
def _expand(self, xmargin, ymargin=None):
if ymargin is None:
ymargin = xmargin
self.load()
return self._new(self.im.expand(xmargin, ymargin, 0))
def filter(self, filter):
"""
Filters this image using the given filter. For a list of
available filters, see the :py:mod:`~PIL.ImageFilter` module.
:param filter: Filter kernel.
:returns: An :py:class:`~PIL.Image.Image` object. """
self.load()
if isinstance(filter, collections.Callable):
filter = filter()
if not hasattr(filter, "filter"):
raise TypeError("filter argument should be ImageFilter.Filter instance or class")
if self.im.bands == 1:
return self._new(filter.filter(self.im))
# fix to handle multiband images since _imaging doesn't
ims = []
for c in range(self.im.bands):
ims.append(self._new(filter.filter(self.im.getband(c))))
return merge(self.mode, ims)
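# Illustrative sketch: applying predefined kernels from the ImageFilter module.
from PIL import Image, ImageFilter
im = Image.open("photo.jpg")
blurred = im.filter(ImageFilter.BLUR)
sharpened = im.filter(ImageFilter.SHARPEN)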
def getbands(self):
"""
Returns a tuple containing the name of each band in this image.
For example, **getbands** on an RGB image returns ("R", "G", "B").
:returns: A tuple containing band names.
:rtype: tuple
"""
return ImageMode.getmode(self.mode).bands
def getbbox(self):
"""
Calculates the bounding box of the non-zero regions in the
image.
:returns: The bounding box is returned as a 4-tuple defining the
left, upper, right, and lower pixel coordinate. If the image
is completely empty, this method returns None.
"""
self.load()
return self.im.getbbox()
def getcolors(self, maxcolors=256):
"""
Returns a list of colors used in this image.
:param maxcolors: Maximum number of colors. If this number is
exceeded, this method returns None. The default limit is
256 colors.
:returns: An unsorted list of (count, pixel) values.
"""
self.load()
if self.mode in ("1", "L", "P"):
h = self.im.histogram()
out = []
for i in range(256):
if h[i]:
out.append((h[i], i))
if len(out) > maxcolors:
return None
return out
return self.im.getcolors(maxcolors)
def getdata(self, band = None):
"""
Returns the contents of this image as a sequence object
containing pixel values. The sequence object is flattened, so
that values for line one follow directly after the values of
line zero, and so on.
Note that the sequence object returned by this method is an
internal PIL data type, which only supports certain sequence
operations. To convert it to an ordinary sequence (e.g. for
printing), use **list(im.getdata())**.
:param band: What band to return. The default is to return
all bands. To return a single band, pass in the index
value (e.g. 0 to get the "R" band from an "RGB" image).
:returns: A sequence-like object.
"""
self.load()
if band is not None:
return self.im.getband(band)
return self.im # could be abused
def getextrema(self):
"""
Gets the minimum and maximum pixel values for each band in
the image.
:returns: For a single-band image, a 2-tuple containing the
minimum and maximum pixel value. For a multi-band image,
a tuple containing one 2-tuple for each band.
"""
self.load()
if self.im.bands > 1:
extrema = []
for i in range(self.im.bands):
extrema.append(self.im.getband(i).getextrema())
return tuple(extrema)
return self.im.getextrema()
def getim(self):
"""
Returns a capsule that points to the internal image memory.
:returns: A capsule object.
"""
self.load()
return self.im.ptr
def getpalette(self):
"""
Returns the image palette as a list.
:returns: A list of color values [r, g, b, ...], or None if the
image has no palette.
"""
self.load()
try:
if bytes is str:
return [i8(c) for c in self.im.getpalette()]
else:
return list(self.im.getpalette())
except ValueError:
return None # no palette
def getpixel(self, xy):
"""
Returns the pixel value at a given position.
:param xy: The coordinate, given as (x, y).
:returns: The pixel value. If the image is a multi-layer image,
this method returns a tuple.
"""
self.load()
return self.im.getpixel(xy)
def getprojection(self):
"""
Get projection to x and y axes
:returns: Two sequences, indicating where there are non-zero
pixels along the X-axis and the Y-axis, respectively.
"""
self.load()
x, y = self.im.getprojection()
return [i8(c) for c in x], [i8(c) for c in y]
def histogram(self, mask=None, extrema=None):
"""
Returns a histogram for the image. The histogram is returned as
a list of pixel counts, one for each pixel value in the source
image. If the image has more than one band, the histograms for
all bands are concatenated (for example, the histogram for an
"RGB" image contains 768 values).
A bilevel image (mode "1") is treated as a greyscale ("L") image
by this method.
If a mask is provided, the method returns a histogram for those
parts of the image where the mask image is non-zero. The mask
image must have the same size as the image, and be either a
bi-level image (mode "1") or a greyscale image ("L").
:param mask: An optional mask.
:returns: A list containing pixel counts.
"""
self.load()
if mask:
mask.load()
return self.im.histogram((0, 0), mask.im)
if self.mode in ("I", "F"):
if extrema is None:
extrema = self.getextrema()
return self.im.histogram(extrema)
return self.im.histogram()
def offset(self, xoffset, yoffset=None):
"""
.. deprecated:: 2.0
.. note:: New code should use :py:func:`PIL.ImageChops.offset`.
Returns a copy of the image where the data has been offset by the given
distances. Data wraps around the edges. If **yoffset** is omitted, it
is assumed to be equal to **xoffset**.
:param xoffset: The horizontal distance.
:param yoffset: The vertical distance. If omitted, both
distances are set to the same value.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if warnings:
warnings.warn(
"'offset' is deprecated; use 'ImageChops.offset' instead",
DeprecationWarning, stacklevel=2
)
from PIL import ImageChops
return ImageChops.offset(self, xoffset, yoffset)
def paste(self, im, box=None, mask=None):
"""
Pastes another image into this image. The box argument is either
a 2-tuple giving the upper left corner, a 4-tuple defining the
left, upper, right, and lower pixel coordinate, or None (same as
(0, 0)). If a 4-tuple is given, the size of the pasted image
must match the size of the region.
If the modes don't match, the pasted image is converted to the mode of
this image (see the :py:meth:`~PIL.Image.Image.convert` method for
details).
Instead of an image, the source can be an integer or tuple
containing pixel values. The method then fills the region
with the given color. When creating RGB images, you can
also use color strings as supported by the ImageColor module.
If a mask is given, this method updates only the regions
indicated by the mask. You can use either "1", "L" or "RGBA"
images (in the latter case, the alpha band is used as mask).
Where the mask is 255, the given image is copied as is. Where
the mask is 0, the current value is preserved. Intermediate
values can be used for transparency effects.
Note that if you paste an "RGBA" image, the alpha band is
ignored. You can work around this by using the same image as
both source image and mask.
:param im: Source image or pixel value (integer or tuple).
:param box: An optional 4-tuple giving the region to paste into.
If a 2-tuple is used instead, it's treated as the upper left
corner. If omitted or None, the source is pasted into the
upper left corner.
If an image is given as the second argument and there is no
third, the box defaults to (0, 0), and the second argument
is interpreted as a mask image.
:param mask: An optional mask image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if isImageType(box) and mask is None:
# abbreviated paste(im, mask) syntax
mask = box; box = None
if box is None:
# cover all of self
box = (0, 0) + self.size
if len(box) == 2:
# upper left corner given; get size from image or mask
if isImageType(im):
size = im.size
elif isImageType(mask):
size = mask.size
else:
# FIXME: use self.size here?
raise ValueError(
"cannot determine region size; use 4-item box"
)
box = box + (box[0]+size[0], box[1]+size[1])
if isStringType(im):
from PIL import ImageColor
im = ImageColor.getcolor(im, self.mode)
elif isImageType(im):
im.load()
if self.mode != im.mode:
if self.mode != "RGB" or im.mode not in ("RGBA", "RGBa"):
# should use an adapter for this!
im = im.convert(self.mode)
im = im.im
self.load()
if self.readonly:
self._copy()
if mask:
mask.load()
self.im.paste(im, box, mask.im)
else:
self.im.paste(im, box)
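# Illustrative sketch: pasting with the source reused as its own mask, the
# documented workaround for keeping the alpha band of an "RGBA" source.
# "badge.png" is a placeholder file assumed to have an alpha channel.
from PIL import Image
background = Image.new("RGB", (200, 200), "white")
overlay = Image.open("badge.png").convert("RGBA")
background.paste(overlay, (20, 20), overlay)    # alpha band used as the mask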
def point(self, lut, mode=None):
"""
Maps this image through a lookup table or function.
:param lut: A lookup table, containing 256 (or 65536 if
self.mode=="I" and mode == "L") values per band in the
image. A function can be used instead, it should take a
single argument. The function is called once for each
possible pixel value, and the resulting table is applied to
all bands of the image.
:param mode: Output mode (default is same as input). In the
current version, this can only be used if the source image
has mode "L" or "P", and the output has mode "1" or the
source image mode is "I" and the output mode is "L".
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
if isinstance(lut, ImagePointHandler):
return lut.point(self)
if callable(lut):
# if it isn't a list, it should be a function
if self.mode in ("I", "I;16", "F"):
# check if the function can be used with point_transform
# UNDONE wiredfool -- I think this prevents us from ever doing
# a gamma function point transform on > 8bit images.
scale, offset = _getscaleoffset(lut)
return self._new(self.im.point_transform(scale, offset))
# for other modes, convert the function to a table
lut = [lut(i) for i in range(256)] * self.im.bands
if self.mode == "F":
# FIXME: _imaging returns a confusing error message for this case
raise ValueError("point operation not supported for this mode")
return self._new(self.im.point(lut, mode))
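# Illustrative sketch: point() evaluates the function once per possible pixel
# value and applies the resulting table to every band of the image.
from PIL import Image
im = Image.open("photo.jpg").convert("L")
brighter = im.point(lambda i: min(255, i + 40))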
def putalpha(self, alpha):
"""
Adds or replaces the alpha layer in this image. If the image
does not have an alpha layer, it's converted to "LA" or "RGBA".
The new layer must be either "L" or "1".
:param alpha: The new alpha layer. This can either be an "L" or "1"
image having the same size as this image, or an integer or
other color value.
"""
self.load()
if self.readonly:
self._copy()
if self.mode not in ("LA", "RGBA"):
# attempt to promote self to a matching alpha mode
try:
mode = getmodebase(self.mode) + "A"
try:
self.im.setmode(mode)
except (AttributeError, ValueError):
# do things the hard way
im = self.im.convert(mode)
if im.mode not in ("LA", "RGBA"):
raise ValueError # sanity check
self.im = im
self.mode = self.im.mode
except (KeyError, ValueError):
raise ValueError("illegal image mode")
if self.mode == "LA":
band = 1
else:
band = 3
if isImageType(alpha):
# alpha layer
if alpha.mode not in ("1", "L"):
raise ValueError("illegal image mode")
alpha.load()
if alpha.mode == "1":
alpha = alpha.convert("L")
else:
# constant alpha
try:
self.im.fillband(band, alpha)
except (AttributeError, ValueError):
# do things the hard way
alpha = new("L", self.size, alpha)
else:
return
self.im.putband(alpha.im, band)
def putdata(self, data, scale=1.0, offset=0.0):
"""
Copies pixel data to this image. This method copies data from a
sequence object into the image, starting at the upper left
corner (0, 0), and continuing until either the image or the
sequence ends. The scale and offset values are used to adjust
the sequence values: **pixel = value*scale + offset**.
:param data: A sequence object.
:param scale: An optional scale value. The default is 1.0.
:param offset: An optional offset value. The default is 0.0.
"""
self.load()
if self.readonly:
self._copy()
self.im.putdata(data, scale, offset)
def putpalette(self, data, rawmode="RGB"):
"""
Attaches a palette to this image. The image must be a "P" or
"L" image, and the palette sequence must contain 768 integer
values, where each group of three values represent the red,
green, and blue values for the corresponding pixel
index. Instead of an integer sequence, you can use an 8-bit
string.
:param data: A palette sequence (either a list or a string).
"""
from PIL import ImagePalette
if self.mode not in ("L", "P"):
raise ValueError("illegal image mode")
self.load()
if isinstance(data, ImagePalette.ImagePalette):
palette = ImagePalette.raw(data.rawmode, data.palette)
else:
if not isinstance(data, bytes):
if bytes is str:
data = "".join(chr(x) for x in data)
else:
data = bytes(data)
palette = ImagePalette.raw(rawmode, data)
self.mode = "P"
self.palette = palette
self.palette.mode = "RGB"
self.load() # install new palette
def putpixel(self, xy, value):
"""
Modifies the pixel at the given position. The color is given as
a single numerical value for single-band images, and a tuple for
multi-band images.
Note that this method is relatively slow. For more extensive changes,
use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw`
module instead.
See:
* :py:meth:`~PIL.Image.Image.paste`
* :py:meth:`~PIL.Image.Image.putdata`
* :py:mod:`~PIL.ImageDraw`
:param xy: The pixel coordinate, given as (x, y).
:param value: The pixel value.
"""
self.load()
if self.readonly:
self._copy()
return self.im.putpixel(xy, value)
def resize(self, size, resample=NEAREST):
"""
Returns a resized copy of this image.
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
:param resample: An optional resampling filter. This can be
one of :py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), :py:attr:`PIL.Image.BICUBIC` (cubic spline
interpolation in a 4x4 environment), or
:py:attr:`PIL.Image.ANTIALIAS` (a high-quality downsampling filter).
If omitted, or if the image has mode "1" or "P", it is
set :py:attr:`PIL.Image.NEAREST`.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if resample not in (NEAREST, BILINEAR, BICUBIC, ANTIALIAS):
raise ValueError("unknown resampling filter")
self.load()
if self.mode in ("1", "P"):
resample = NEAREST
if self.mode == 'RGBA':
return self.convert('RGBa').resize(size, resample).convert('RGBA')
if resample == ANTIALIAS:
# requires stretch support (imToolkit & PIL 1.1.3)
try:
im = self.im.stretch(size, resample)
except AttributeError:
raise ValueError("unsupported resampling filter")
else:
im = self.im.resize(size, resample)
return self._new(im)
def rotate(self, angle, resample=NEAREST, expand=0):
"""
Returns a rotated copy of this image. This method returns a
copy of this image, rotated the given number of degrees counter
clockwise around its centre.
:param angle: In degrees counter clockwise.
:param resample: An optional resampling filter. This can be
one of :py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), or :py:attr:`PIL.Image.BICUBIC`
(cubic spline interpolation in a 4x4 environment).
If omitted, or if the image has mode "1" or "P", it is
set :py:attr:`PIL.Image.NEAREST`.
:param expand: Optional expansion flag. If true, expands the output
image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the
input image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if expand:
import math
angle = -angle * math.pi / 180
matrix = [
math.cos(angle), math.sin(angle), 0.0,
-math.sin(angle), math.cos(angle), 0.0
]
def transform(x, y, matrix=matrix):
(a, b, c, d, e, f) = matrix
return a*x + b*y + c, d*x + e*y + f
# calculate output size
w, h = self.size
xx = []
yy = []
for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
x, y = transform(x, y)
xx.append(x)
yy.append(y)
w = int(math.ceil(max(xx)) - math.floor(min(xx)))
h = int(math.ceil(max(yy)) - math.floor(min(yy)))
# adjust center
x, y = transform(w / 2.0, h / 2.0)
matrix[2] = self.size[0] / 2.0 - x
matrix[5] = self.size[1] / 2.0 - y
return self.transform((w, h), AFFINE, matrix, resample)
if resample not in (NEAREST, BILINEAR, BICUBIC):
raise ValueError("unknown resampling filter")
self.load()
if self.mode in ("1", "P"):
resample = NEAREST
return self._new(self.im.rotate(angle, resample))
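# Illustrative sketch: rotating with and without expanding the output canvas.
from PIL import Image
im = Image.open("photo.jpg")
clipped = im.rotate(45, Image.BILINEAR)             # same size, corners clipped
enlarged = im.rotate(45, Image.BILINEAR, expand=1)  # canvas grows to fit the result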
def save(self, fp, format=None, **params):
"""
Saves this image under the given filename. If no format is
specified, the format to use is determined from the filename
extension, if possible.
Keyword options can be used to provide additional instructions
to the writer. If a writer doesn't recognise an option, it is
silently ignored. The available options are described later in
this handbook.
You can use a file object instead of a filename. In this case,
you must always specify the format. The file object must
implement the **seek**, **tell**, and **write**
methods, and be opened in binary mode.
:param fp: File name or file object.
:param format: Optional format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
:param options: Extra parameters to the image writer.
:returns: None
:exception KeyError: If the output format could not be determined
from the file name. Use the format option to solve this.
:exception IOError: If the file could not be written. The file
may have been created, and may contain partial data.
"""
if isPath(fp):
filename = fp
else:
if hasattr(fp, "name") and isPath(fp.name):
filename = fp.name
else:
filename = ""
# may mutate self!
self.load()
self.encoderinfo = params
self.encoderconfig = ()
preinit()
ext = os.path.splitext(filename)[1].lower()
if not format:
try:
format = EXTENSION[ext]
except KeyError:
init()
try:
format = EXTENSION[ext]
except KeyError:
raise KeyError(ext) # unknown extension
try:
save_handler = SAVE[format.upper()]
except KeyError:
init()
save_handler = SAVE[format.upper()] # unknown format
if isPath(fp):
fp = builtins.open(fp, "wb")
close = 1
else:
close = 0
try:
save_handler(self, fp, filename)
finally:
# do what we can to clean up
if close:
fp.close()
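# Illustrative sketch: when saving to a file object the format cannot be guessed
# from a filename extension, so it must be given explicitly.
import io
from PIL import Image
im = Image.open("photo.jpg")
buf = io.BytesIO()
im.save(buf, "PNG")     # buf now holds the encoded PNG data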
def seek(self, frame):
"""
Seeks to the given frame in this sequence file. If you seek
beyond the end of the sequence, the method raises an
**EOFError** exception. When a sequence file is opened, the
library automatically seeks to frame 0.
Note that in the current version of the library, most sequence
formats only allow you to seek to the next frame.
See :py:meth:`~PIL.Image.Image.tell`.
:param frame: Frame number, starting at 0.
:exception EOFError: If the call attempts to seek beyond the end
of the sequence.
"""
# overridden by file handlers
if frame != 0:
raise EOFError
def show(self, title=None, command=None):
"""
Displays this image. This method is mainly intended for
debugging purposes.
On Unix platforms, this method saves the image to a temporary
PPM file, and calls the **xv** utility.
On Windows, it saves the image to a temporary BMP file, and uses
the standard BMP display utility to show it (usually Paint).
:param title: Optional title to use for the image window,
where possible.
:param command: command used to show the image
"""
_show(self, title=title, command=command)
def split(self):
"""
Split this image into individual bands. This method returns a
tuple of individual image bands from an image. For example,
splitting an "RGB" image creates three new images each
containing a copy of one of the original bands (red, green,
blue).
:returns: A tuple containing bands.
"""
self.load()
if self.im.bands == 1:
ims = [self.copy()]
else:
ims = []
for i in range(self.im.bands):
ims.append(self._new(self.im.getband(i)))
return tuple(ims)
def tell(self):
"""
Returns the current frame number. See :py:meth:`~PIL.Image.Image.seek`.
:returns: Frame number, starting with 0.
"""
return 0
def thumbnail(self, size, resample=NEAREST):
"""
Make this image into a thumbnail. This method modifies the
image to contain a thumbnail version of itself, no larger than
the given size. This method calculates an appropriate thumbnail
size to preserve the aspect of the image, calls the
:py:meth:`~PIL.Image.Image.draft` method to configure the file reader
(where applicable), and finally resizes the image.
Note that the bilinear and bicubic filters in the current
version of PIL are not well-suited for thumbnail generation.
You should use :py:attr:`PIL.Image.ANTIALIAS` unless speed is much more
important than quality.
Also note that this function modifies the :py:class:`~PIL.Image.Image`
object in place. If you need to use the full resolution image as well, apply
this method to a :py:meth:`~PIL.Image.Image.copy` of the original image.
:param size: Requested size.
:param resample: Optional resampling filter. This can be one
of :py:attr:`PIL.Image.NEAREST`, :py:attr:`PIL.Image.BILINEAR`,
:py:attr:`PIL.Image.BICUBIC`, or :py:attr:`PIL.Image.ANTIALIAS`
(best quality). If omitted, it defaults to
:py:attr:`PIL.Image.NEAREST` (this will be changed to ANTIALIAS in a
future version).
:returns: None
"""
# FIXME: the default resampling filter will be changed
# to ANTIALIAS in future versions
# preserve aspect ratio
x, y = self.size
if x > size[0]: y = int(max(y * size[0] / x, 1)); x = int(size[0])
if y > size[1]: x = int(max(x * size[1] / y, 1)); y = int(size[1])
size = x, y
if size == self.size:
return
self.draft(None, size)
self.load()
try:
im = self.resize(size, resample)
except ValueError:
if resample != ANTIALIAS:
raise
im = self.resize(size, NEAREST) # fallback
self.im = im.im
self.mode = im.mode
self.size = size
self.readonly = 0
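# Illustrative sketch: thumbnail() modifies the image in place, so copy first if
# the full-resolution image is still needed; ANTIALIAS gives the best quality.
from PIL import Image
im = Image.open("photo.jpg")
thumb = im.copy()
thumb.thumbnail((128, 128), Image.ANTIALIAS)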
# FIXME: the different transform methods need further explanation
# instead of bloating the method docs, add a separate chapter.
def transform(self, size, method, data=None, resample=NEAREST, fill=1):
"""
Transforms this image. This method creates a new image with the
given size, and the same mode as the original, and copies data
to the new image using the given transform.
:param size: The output size.
:param method: The transformation method. This is one of
:py:attr:`PIL.Image.EXTENT` (cut out a rectangular subregion),
:py:attr:`PIL.Image.AFFINE` (affine transform),
:py:attr:`PIL.Image.PERSPECTIVE` (perspective transform),
:py:attr:`PIL.Image.QUAD` (map a quadrilateral to a rectangle), or
:py:attr:`PIL.Image.MESH` (map a number of source quadrilaterals
in one operation).
:param data: Extra data to the transformation method.
:param resample: Optional resampling filter. It can be one of
:py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), or :py:attr:`PIL.Image.BICUBIC` (cubic spline
interpolation in a 4x4 environment). If omitted, or if the image
has mode "1" or "P", it is set to :py:attr:`PIL.Image.NEAREST`.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if self.mode == 'RGBA':
return self.convert('RGBa').transform(size, method, data, resample, fill).convert('RGBA')
if isinstance(method, ImageTransformHandler):
return method.transform(size, self, resample=resample, fill=fill)
if hasattr(method, "getdata"):
# compatibility w. old-style transform objects
method, data = method.getdata()
if data is None:
raise ValueError("missing method data")
im = new(self.mode, size, None)
if method == MESH:
# list of quads
for box, quad in data:
im.__transformer(box, self, QUAD, quad, resample, fill)
else:
im.__transformer((0, 0)+size, self, method, data, resample, fill)
return im
def __transformer(self, box, image, method, data,
resample=NEAREST, fill=1):
# FIXME: this should be turned into a lazy operation (?)
w = box[2]-box[0]
h = box[3]-box[1]
if method == AFFINE:
# change argument order to match implementation
data = (data[2], data[0], data[1],
data[5], data[3], data[4])
elif method == EXTENT:
# convert extent to an affine transform
x0, y0, x1, y1 = data
xs = float(x1 - x0) / w
ys = float(y1 - y0) / h
method = AFFINE
data = (x0 + xs/2, xs, 0, y0 + ys/2, 0, ys)
elif method == PERSPECTIVE:
# change argument order to match implementation
data = (data[2], data[0], data[1],
data[5], data[3], data[4],
data[6], data[7])
elif method == QUAD:
# quadrilateral warp. data specifies the four corners
# given as NW, SW, SE, and NE.
nw = data[0:2]; sw = data[2:4]; se = data[4:6]; ne = data[6:8]
x0, y0 = nw; As = 1.0 / w; At = 1.0 / h
data = (x0, (ne[0]-x0)*As, (sw[0]-x0)*At,
(se[0]-sw[0]-ne[0]+x0)*As*At,
y0, (ne[1]-y0)*As, (sw[1]-y0)*At,
(se[1]-sw[1]-ne[1]+y0)*As*At)
else:
raise ValueError("unknown transformation method")
if resample not in (NEAREST, BILINEAR, BICUBIC):
raise ValueError("unknown resampling filter")
image.load()
self.load()
if image.mode in ("1", "P"):
resample = NEAREST
self.im.transform2(box, image.im, method, data, resample, fill)
def transpose(self, method):
"""
Transpose image (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, or :py:attr:`PIL.Image.ROTATE_270`.
:returns: Returns a flipped or rotated copy of this image.
"""
self.load()
im = self.im.transpose(method)
return self._new(im)
# --------------------------------------------------------------------
# Lazy operations
class _ImageCrop(Image):
def __init__(self, im, box):
Image.__init__(self)
x0, y0, x1, y1 = box
if x1 < x0:
x1 = x0
if y1 < y0:
y1 = y0
self.mode = im.mode
self.size = x1-x0, y1-y0
self.__crop = x0, y0, x1, y1
self.im = im.im
def load(self):
# lazy evaluation!
if self.__crop:
self.im = self.im.crop(self.__crop)
self.__crop = None
if self.im:
return self.im.pixel_access(self.readonly)
# FIXME: future versions should optimize crop/paste
# sequences!
# --------------------------------------------------------------------
# Abstract handlers.
class ImagePointHandler:
# used as a mixin by point transforms (for use with im.point)
pass
class ImageTransformHandler:
# used as a mixin by geometry transforms (for use with im.transform)
pass
# --------------------------------------------------------------------
# Factories
#
# Debugging
def _wedge():
"Create greyscale wedge (for debugging only)"
return Image()._new(core.wedge("L"))
def new(mode, size, color=0):
"""
Creates a new image with the given mode and size.
:param mode: The mode to use for the new image.
:param size: A 2-tuple, containing (width, height) in pixels.
:param color: What color to use for the image. Default is black.
If given, this should be a single integer or floating point value
for single-band modes, and a tuple for multi-band modes (one value
per band). When creating RGB images, you can also use color
strings as supported by the ImageColor module. If the color is
None, the image is not initialised.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if color is None:
# don't initialize
return Image()._new(core.new(mode, size))
if isStringType(color):
# css3-style specifier
from PIL import ImageColor
color = ImageColor.getcolor(color, mode)
return Image()._new(core.fill(mode, size, color))
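# Illustrative sketch: creating images with a numeric colour, a CSS3-style colour
# string, and with no initialisation at all.
from PIL import Image
grey = Image.new("L", (64, 64), 128)
red = Image.new("RGB", (64, 64), "red")     # resolved through ImageColor
blank = Image.new("RGB", (64, 64), None)    # pixel memory left uninitialised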
def frombytes(mode, size, data, decoder_name="raw", *args):
"""
Creates a copy of an image memory from pixel data in a buffer.
In its simplest form, this function takes three arguments
(mode, size, and unpacked pixel data).
You can also use any pixel decoder supported by PIL. For more
information on available decoders, see the section
**Writing Your Own File Decoder**.
Note that this function decodes pixel data only, not entire images.
If you have an entire image in a string, wrap it in a
:py:class:`~io.BytesIO` object, and use :py:func:`~PIL.Image.open` to load
it.
:param mode: The image mode.
:param size: The image size.
:param data: A byte buffer containing raw data for the given mode.
:param decoder_name: What decoder to use.
:param args: Additional parameters for the given decoder.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if decoder_name == "raw" and args == ():
args = mode
im = new(mode, size)
im.frombytes(data, decoder_name, args)
return im
def fromstring(*args, **kw):
"""Deprecated alias to frombytes.
.. deprecated:: 2.0
"""
warnings.warn(
'fromstring() is deprecated. Please call frombytes() instead.',
DeprecationWarning,
stacklevel=2
)
return frombytes(*args, **kw)
def frombuffer(mode, size, data, decoder_name="raw", *args):
"""
Creates an image memory referencing pixel data in a byte buffer.
This function is similar to :py:func:`~PIL.Image.frombytes`, but uses data
in the byte buffer, where possible. This means that changes to the
original buffer object are reflected in this image. Not all modes can
share memory; supported modes include "L", "RGBX", "RGBA", and "CMYK".
Note that this function decodes pixel data only, not entire images.
If you have an entire image file in a string, wrap it in a
**BytesIO** object, and use :py:func:`~PIL.Image.open` to load it.
In the current version, the default parameters used for the "raw" decoder
differ from those used for :py:func:`~PIL.Image.fromstring`. This is a
bug, and will probably be fixed in a future release. The current release
issues a warning if you do this; to disable the warning, you should provide
the full set of parameters. See below for details.
:param mode: The image mode.
:param size: The image size.
:param data: A bytes or other buffer object containing raw
data for the given mode.
:param decoder_name: What decoder to use.
:param args: Additional parameters for the given decoder. For the
default encoder ("raw"), it's recommended that you provide the
full set of parameters::
frombuffer(mode, size, data, "raw", mode, 0, 1)
:returns: An :py:class:`~PIL.Image.Image` object.
.. versionadded:: 1.1.4
"""
"Load image from bytes or buffer"
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if decoder_name == "raw":
if args == ():
if warnings:
warnings.warn(
"the frombuffer defaults may change in a future release; "
"for portability, change the call to read:\n"
" frombuffer(mode, size, data, 'raw', mode, 0, 1)",
RuntimeWarning, stacklevel=2
)
args = mode, 0, -1 # may change to (mode, 0, 1) post-1.1.6
if args[0] in _MAPMODES:
im = new(mode, (1,1))
im = im._new(
core.map_buffer(data, size, decoder_name, None, 0, args)
)
im.readonly = 1
return im
return frombytes(mode, size, data, decoder_name, args)
def fromarray(obj, mode=None):
"""
Creates an image memory from an object exporting the array interface
(using the buffer protocol).
If obj is not contiguous, then the tobytes method is called
and :py:func:`~PIL.Image.frombuffer` is used.
:param obj: Object with array interface
:param mode: Mode to use (will be determined from type if None)
:returns: An image memory.
.. versionadded:: 1.1.6
"""
arr = obj.__array_interface__
shape = arr['shape']
ndim = len(shape)
try:
strides = arr['strides']
except KeyError:
strides = None
if mode is None:
try:
typekey = (1, 1) + shape[2:], arr['typestr']
mode, rawmode = _fromarray_typemap[typekey]
except KeyError:
# print typekey
raise TypeError("Cannot handle this data type")
else:
rawmode = mode
if mode in ["1", "L", "I", "P", "F"]:
ndmax = 2
elif mode == "RGB":
ndmax = 3
else:
ndmax = 4
if ndim > ndmax:
raise ValueError("Too many dimensions.")
size = shape[1], shape[0]
if strides is not None:
if hasattr(obj, 'tobytes'):
obj = obj.tobytes()
else:
obj = obj.tostring()
return frombuffer(mode, size, obj, "raw", rawmode, 0, 1)
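# Illustrative sketch: building an image from a numpy array via the array
# interface. Assumes numpy is installed; this dtype/shape maps to mode "L".
import numpy
from PIL import Image
arr = numpy.zeros((32, 32), dtype=numpy.uint8)
im = Image.fromarray(arr)    # 32x32 greyscale image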
_fromarray_typemap = {
# (shape, typestr) => mode, rawmode
# first two members of shape are set to one
# ((1, 1), "|b1"): ("1", "1"), # broken
((1, 1), "|u1"): ("L", "L"),
((1, 1), "|i1"): ("I", "I;8"),
((1, 1), "<i2"): ("I", "I;16"),
((1, 1), ">i2"): ("I", "I;16B"),
((1, 1), "<i4"): ("I", "I;32"),
((1, 1), ">i4"): ("I", "I;32B"),
((1, 1), "<f4"): ("F", "F;32F"),
((1, 1), ">f4"): ("F", "F;32BF"),
((1, 1), "<f8"): ("F", "F;64F"),
((1, 1), ">f8"): ("F", "F;64BF"),
((1, 1, 3), "|u1"): ("RGB", "RGB"),
((1, 1, 4), "|u1"): ("RGBA", "RGBA"),
}
# shortcuts
_fromarray_typemap[((1, 1), _ENDIAN + "i4")] = ("I", "I")
_fromarray_typemap[((1, 1), _ENDIAN + "f4")] = ("F", "F")
def open(fp, mode="r"):
"""
Opens and identifies the given image file.
This is a lazy operation; this function identifies the file, but the
actual image data is not read from the file until you try to process
the data (or call the :py:meth:`~PIL.Image.Image.load` method).
See :py:func:`~PIL.Image.new`.
:param file: A filename (string) or a file object. The file object
must implement :py:meth:`~file.read`, :py:meth:`~file.seek`, and
:py:meth:`~file.tell` methods, and be opened in binary mode.
:param mode: The mode. If given, this argument must be "r".
:returns: An :py:class:`~PIL.Image.Image` object.
:exception IOError: If the file cannot be found, or the image cannot be
opened and identified.
"""
if mode != "r":
raise ValueError("bad mode")
if isPath(fp):
filename = fp
fp = builtins.open(fp, "rb")
else:
filename = ""
prefix = fp.read(16)
preinit()
for i in ID:
try:
factory, accept = OPEN[i]
if not accept or accept(prefix):
fp.seek(0)
return factory(fp, filename)
except (SyntaxError, IndexError, TypeError):
#import traceback
#traceback.print_exc()
pass
if init():
for i in ID:
try:
factory, accept = OPEN[i]
if not accept or accept(prefix):
fp.seek(0)
return factory(fp, filename)
except (SyntaxError, IndexError, TypeError):
#import traceback
#traceback.print_exc()
pass
raise IOError("cannot identify image file")
#
# Image processing.
def alpha_composite(im1, im2):
"""
Alpha composite im2 over im1.
:param im1: The first image.
:param im2: The second image. Must have the same mode and size as
the first image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
im1.load()
im2.load()
return im1._new(core.alpha_composite(im1.im, im2.im))
def blend(im1, im2, alpha):
"""
Creates a new image by interpolating between two input images, using
a constant alpha.::
out = image1 * (1.0 - alpha) + image2 * alpha
:param im1: The first image.
:param im2: The second image. Must have the same mode and size as
the first image.
:param alpha: The interpolation alpha factor. If alpha is 0.0, a
copy of the first image is returned. If alpha is 1.0, a copy of
the second image is returned. There are no restrictions on the
alpha value. If necessary, the result is clipped to fit into
the allowed output range.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
im1.load()
im2.load()
return im1._new(core.blend(im1.im, im2.im, alpha))
def composite(image1, image2, mask):
"""
Create composite image by blending images using a transparency mask.
:param image1: The first image.
:param image2: The second image. Must have the same mode and
size as the first image.
:param mask: A mask image. This image can have mode
"1", "L", or "RGBA", and must have the same size as the
other two images.
"""
image = image2.copy()
image.paste(image1, None, mask)
return image
def eval(image, *args):
"""
Applies the function (which should take one argument) to each pixel
in the given image. If the image has more than one band, the same
function is applied to each band. Note that the function is
evaluated once for each possible pixel value, so you cannot use
random components or other generators.
:param image: The input image.
:param function: A function object, taking one integer argument.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
return image.point(args[0])
def merge(mode, bands):
"""
Merge a set of single band images into a new multiband image.
:param mode: The mode to use for the output image.
:param bands: A sequence containing one single-band image for
each band in the output image. All bands must have the
same size.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if getmodebands(mode) != len(bands) or "*" in mode:
raise ValueError("wrong number of bands")
for im in bands[1:]:
if im.mode != getmodetype(mode):
raise ValueError("mode mismatch")
if im.size != bands[0].size:
raise ValueError("size mismatch")
im = core.new(mode, bands[0].size)
for i in range(getmodebands(mode)):
bands[i].load()
im.putband(bands[i].im, i)
return bands[0]._new(im)
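# Illustrative sketch: splitting an image into bands and merging them back with
# the red and blue channels swapped.
from PIL import Image
im = Image.open("photo.jpg").convert("RGB")
r, g, b = im.split()
swapped = Image.merge("RGB", (b, g, r))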
# --------------------------------------------------------------------
# Plugin registry
def register_open(id, factory, accept=None):
"""
Register an image file plugin. This function should not be used
in application code.
:param id: An image format identifier.
:param factory: An image file factory method.
:param accept: An optional function that can be used to quickly
reject images having another format.
"""
id = id.upper()
ID.append(id)
OPEN[id] = factory, accept
def register_mime(id, mimetype):
"""
Registers an image MIME type. This function should not be used
in application code.
:param id: An image format identifier.
:param mimetype: The image MIME type for this format.
"""
MIME[id.upper()] = mimetype
def register_save(id, driver):
"""
Registers an image save function. This function should not be
used in application code.
:param id: An image format identifier.
:param driver: A function to save images in this format.
"""
SAVE[id.upper()] = driver
def register_extension(id, extension):
"""
Registers an image extension. This function should not be
used in application code.
:param id: An image format identifier.
:param extension: An extension used for this format.
"""
EXTENSION[extension.lower()] = id.upper()
# --------------------------------------------------------------------
# Simple display support. User code may override this.
def _show(image, **options):
# override me, as necessary
_showxv(image, **options)
def _showxv(image, title=None, **options):
from PIL import ImageShow
ImageShow.show(image, title, **options)
|
PYSEC-2014-22
|
PIL/IptcImagePlugin.py
|
@@ -172,8 +172,8 @@ def load(self):
self.fp.seek(offset)
# Copy image data to temporary file
- outfile = tempfile.mktemp()
- o = open(outfile, "wb")
+ o_fd, outfile = tempfile.mkstemp(text=False)
+ o = os.fdopen(o_fd)
if encoding == "raw":
# To simplify access to the extracted file,
# prepend a PPM header
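# Sketch of the temporary-file pattern the patch above moves to: tempfile.mkstemp()
# creates and opens the file atomically, avoiding the race in tempfile.mktemp(),
# which only returns a name that another process could claim first. Illustrative only.
import os, tempfile
fd, path = tempfile.mkstemp()
try:
    with os.fdopen(fd, "wb") as f:
        f.write(b"data")
finally:
    os.unlink(path)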
|
#
# The Python Imaging Library.
# $Id$
#
# IPTC/NAA file handling
#
# history:
# 1995-10-01 fl Created
# 1998-03-09 fl Cleaned up and added to PIL
# 2002-06-18 fl Added getiptcinfo helper
#
# Copyright (c) Secret Labs AB 1997-2002.
# Copyright (c) Fredrik Lundh 1995.
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
__version__ = "0.3"
from PIL import Image, ImageFile, _binary
import os, tempfile
i8 = _binary.i8
i16 = _binary.i16be
i32 = _binary.i32be
o8 = _binary.o8
COMPRESSION = {
1: "raw",
5: "jpeg"
}
PAD = o8(0) * 4
#
# Helpers
def i(c):
return i32((PAD + c)[-4:])
def dump(c):
for i in c:
print("%02x" % i8(i), end=' ')
print()
##
# Image plugin for IPTC/NAA datastreams. To read IPTC/NAA fields
# from TIFF and JPEG files, use the <b>getiptcinfo</b> function.
class IptcImageFile(ImageFile.ImageFile):
format = "IPTC"
format_description = "IPTC/NAA"
def getint(self, key):
return i(self.info[key])
def field(self):
#
# get a IPTC field header
s = self.fp.read(5)
if not len(s):
return None, 0
tag = i8(s[1]), i8(s[2])
# syntax
if i8(s[0]) != 0x1C or tag[0] < 1 or tag[0] > 9:
raise SyntaxError("invalid IPTC/NAA file")
# field size
size = i8(s[3])
if size > 132:
raise IOError("illegal field length in IPTC/NAA file")
elif size == 128:
size = 0
elif size > 128:
size = i(self.fp.read(size-128))
else:
size = i16(s[3:])
return tag, size
def _is_raw(self, offset, size):
#
# check if the file can be mapped
# DISABLED: the following only slows things down...
return 0
self.fp.seek(offset)
t, sz = self.field()
if sz != size[0]:
return 0
y = 1
while True:
self.fp.seek(sz, 1)
t, s = self.field()
if t != (8, 10):
break
if s != sz:
return 0
y = y + 1
return y == size[1]
def _open(self):
# load descriptive fields
while True:
offset = self.fp.tell()
tag, size = self.field()
if not tag or tag == (8,10):
break
if size:
tagdata = self.fp.read(size)
else:
tagdata = None
if tag in list(self.info.keys()):
if isinstance(self.info[tag], list):
self.info[tag].append(tagdata)
else:
self.info[tag] = [self.info[tag], tagdata]
else:
self.info[tag] = tagdata
# print tag, self.info[tag]
# mode
layers = i8(self.info[(3,60)][0])
component = i8(self.info[(3,60)][1])
if (3,65) in self.info:
id = i8(self.info[(3,65)][0])-1
else:
id = 0
if layers == 1 and not component:
self.mode = "L"
elif layers == 3 and component:
self.mode = "RGB"[id]
elif layers == 4 and component:
self.mode = "CMYK"[id]
# size
self.size = self.getint((3,20)), self.getint((3,30))
# compression
try:
compression = COMPRESSION[self.getint((3,120))]
except KeyError:
raise IOError("Unknown IPTC image compression")
# tile
if tag == (8,10):
if compression == "raw" and self._is_raw(offset, self.size):
self.tile = [(compression, (offset, size + 5, -1),
(0, 0, self.size[0], self.size[1]))]
else:
self.tile = [("iptc", (compression, offset),
(0, 0, self.size[0], self.size[1]))]
def load(self):
if len(self.tile) != 1 or self.tile[0][0] != "iptc":
return ImageFile.ImageFile.load(self)
type, tile, box = self.tile[0]
encoding, offset = tile
self.fp.seek(offset)
# Copy image data to temporary file
outfile = tempfile.mktemp()
o = open(outfile, "wb")
if encoding == "raw":
# To simplify access to the extracted file,
# prepend a PPM header
o.write("P5\n%d %d\n255\n" % self.size)
while True:
type, size = self.field()
if type != (8, 10):
break
while size > 0:
s = self.fp.read(min(size, 8192))
if not s:
break
o.write(s)
size = size - len(s)
o.close()
try:
try:
# fast
self.im = Image.core.open_ppm(outfile)
except:
# slightly slower
im = Image.open(outfile)
im.load()
self.im = im.im
finally:
try: os.unlink(outfile)
except: pass
Image.register_open("IPTC", IptcImageFile)
Image.register_extension("IPTC", ".iim")
##
# Get IPTC information from TIFF, JPEG, or IPTC file.
#
# @param im An image containing IPTC data.
# @return A dictionary containing IPTC information, or None if
# no IPTC information block was found.
def getiptcinfo(im):
from PIL import TiffImagePlugin, JpegImagePlugin
import io
data = None
if isinstance(im, IptcImageFile):
# return info dictionary right away
return im.info
elif isinstance(im, JpegImagePlugin.JpegImageFile):
# extract the IPTC/NAA resource
try:
app = im.app["APP13"]
if app[:14] == "Photoshop 3.0\x00":
app = app[14:]
# parse the image resource block
offset = 0
while app[offset:offset+4] == "8BIM":
offset = offset + 4
# resource code
code = JpegImagePlugin.i16(app, offset)
offset = offset + 2
# resource name (usually empty)
name_len = i8(app[offset])
name = app[offset+1:offset+1+name_len]
offset = 1 + offset + name_len
if offset & 1:
offset = offset + 1
# resource data block
size = JpegImagePlugin.i32(app, offset)
offset = offset + 4
if code == 0x0404:
# 0x0404 contains IPTC/NAA data
data = app[offset:offset+size]
break
offset = offset + size
if offset & 1:
offset = offset + 1
except (AttributeError, KeyError):
pass
elif isinstance(im, TiffImagePlugin.TiffImageFile):
# get raw data from the IPTC/NAA tag (PhotoShop tags the data
# as 4-byte integers, so we cannot use the get method...)
try:
data = im.tag.tagdata[TiffImagePlugin.IPTC_NAA_CHUNK]
except (AttributeError, KeyError):
pass
if data is None:
return None # no properties
# create an IptcImagePlugin object without initializing it
class FakeImage:
pass
im = FakeImage()
im.__class__ = IptcImageFile
# parse the IPTC information chunk
im.info = {}
im.fp = io.BytesIO(data)
try:
im._open()
except (IndexError, KeyError):
pass # expected failure
return im.info
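# Illustrative sketch: extracting IPTC/NAA metadata from a JPEG or TIFF file.
# "photo.jpg" is a placeholder path.
from PIL import Image, IptcImagePlugin
im = Image.open("photo.jpg")
info = IptcImagePlugin.getiptcinfo(im)   # dict keyed by (record, dataset) tags, or None
if info is not None:
    print(info.get((2, 120)))            # caption/abstract field, if present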
|
PYSEC-2014-22
|
PIL/JpegImagePlugin.py
|
@@ -344,13 +344,17 @@ def load_djpeg(self):
# ALTERNATIVE: handle JPEGs via the IJG command line utilities
import tempfile, os
- file = tempfile.mktemp()
- os.system("djpeg %s >%s" % (self.filename, file))
+ f, path = tempfile.mkstemp()
+ os.close(f)
+ if os.path.exists(self.filename):
+ os.system("djpeg '%s' >'%s'" % (self.filename, path))
+ else:
+ raise ValueError("Invalid Filename")
try:
- self.im = Image.core.open_ppm(file)
+ self.im = Image.core.open_ppm(path)
finally:
- try: os.unlink(file)
+ try: os.unlink(path)
except: pass
self.mode = self.im.mode
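# Sketch of a shell-injection-free way to invoke an external decoder such as
# djpeg: pass an argument vector directly instead of interpolating the filename
# into a shell command line. Assumes "djpeg" is on PATH; paths are placeholders.
import subprocess
with open("out.ppm", "wb") as out:
    subprocess.check_call(["djpeg", "photo.jpg"], stdout=out)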
|
#
# The Python Imaging Library.
# $Id$
#
# JPEG (JFIF) file handling
#
# See "Digital Compression and Coding of Continous-Tone Still Images,
# Part 1, Requirements and Guidelines" (CCITT T.81 / ISO 10918-1)
#
# History:
# 1995-09-09 fl Created
# 1995-09-13 fl Added full parser
# 1996-03-25 fl Added hack to use the IJG command line utilities
# 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug
# 1996-05-28 fl Added draft support, JFIF version (0.1)
# 1996-12-30 fl Added encoder options, added progression property (0.2)
# 1997-08-27 fl Save mode 1 images as BW (0.3)
# 1998-07-12 fl Added YCbCr to draft and save methods (0.4)
# 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1)
# 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2)
# 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3)
# 2003-04-25 fl Added experimental EXIF decoder (0.5)
# 2003-06-06 fl Added experimental EXIF GPSinfo decoder
# 2003-09-13 fl Extract COM markers
# 2009-09-06 fl Added icc_profile support (from Florian Hoech)
# 2009-03-06 fl Changed CMYK handling; always use Adobe polarity (0.6)
# 2009-03-08 fl Added subsampling support (from Justin Huff).
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-1996 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.6"
import array, struct
from PIL import Image, ImageFile, _binary
from PIL.JpegPresets import presets
from PIL._util import isStringType
i8 = _binary.i8
o8 = _binary.o8
i16 = _binary.i16be
i32 = _binary.i32be
#
# Parser
def Skip(self, marker):
n = i16(self.fp.read(2))-2
ImageFile._safe_read(self.fp, n)
def APP(self, marker):
#
# Application marker. Store these in the APP dictionary.
# Also look for well-known application markers.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
app = "APP%d" % (marker&15)
self.app[app] = s # compatibility
self.applist.append((app, s))
if marker == 0xFFE0 and s[:4] == b"JFIF":
# extract JFIF information
self.info["jfif"] = version = i16(s, 5) # version
self.info["jfif_version"] = divmod(version, 256)
# extract JFIF properties
try:
jfif_unit = i8(s[7])
jfif_density = i16(s, 8), i16(s, 10)
except:
pass
else:
if jfif_unit == 1:
self.info["dpi"] = jfif_density
self.info["jfif_unit"] = jfif_unit
self.info["jfif_density"] = jfif_density
elif marker == 0xFFE1 and s[:5] == b"Exif\0":
# extract Exif information (incomplete)
self.info["exif"] = s # FIXME: value will change
elif marker == 0xFFE2 and s[:5] == b"FPXR\0":
# extract FlashPix information (incomplete)
self.info["flashpix"] = s # FIXME: value will change
elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0":
# Since an ICC profile can be larger than the maximum size of
# a JPEG marker (64K), we need provisions to split it into
# multiple markers. The format defined by the ICC specifies
# one or more APP2 markers containing the following data:
# Identifying string ASCII "ICC_PROFILE\0" (12 bytes)
# Marker sequence number 1, 2, etc (1 byte)
# Number of markers Total of APP2's used (1 byte)
# Profile data (remainder of APP2 data)
# Decoders should use the marker sequence numbers to
# reassemble the profile, rather than assuming that the APP2
# markers appear in the correct sequence.
self.icclist.append(s)
elif marker == 0xFFEE and s[:5] == b"Adobe":
self.info["adobe"] = i16(s, 5)
# extract Adobe custom properties
try:
adobe_transform = i8(s[1])
except:
pass
else:
self.info["adobe_transform"] = adobe_transform
def COM(self, marker):
#
# Comment marker. Store these in the APP dictionary.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
self.app["COM"] = s # compatibility
self.applist.append(("COM", s))
def SOF(self, marker):
#
# Start of frame marker. Defines the size and mode of the
# image. JPEG is colour blind, so we use some simple
# heuristics to map the number of layers to an appropriate
# mode. Note that this could be made a bit brighter, by
# looking for JFIF and Adobe APP markers.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
self.size = i16(s[3:]), i16(s[1:])
self.bits = i8(s[0])
if self.bits != 8:
raise SyntaxError("cannot handle %d-bit layers" % self.bits)
self.layers = i8(s[5])
if self.layers == 1:
self.mode = "L"
elif self.layers == 3:
self.mode = "RGB"
elif self.layers == 4:
self.mode = "CMYK"
else:
raise SyntaxError("cannot handle %d-layer images" % self.layers)
if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]:
self.info["progressive"] = self.info["progression"] = 1
if self.icclist:
# fixup icc profile
self.icclist.sort() # sort by sequence number
if i8(self.icclist[0][13]) == len(self.icclist):
profile = []
for p in self.icclist:
profile.append(p[14:])
icc_profile = b"".join(profile)
else:
icc_profile = None # wrong number of fragments
self.info["icc_profile"] = icc_profile
self.icclist = None
for i in range(6, len(s), 3):
t = s[i:i+3]
# 4-tuples: id, vsamp, hsamp, qtable
self.layer.append((t[0], i8(t[1])//16, i8(t[1])&15, i8(t[2])))
def DQT(self, marker):
#
# Define quantization table. Support baseline 8-bit tables
# only. Note that there might be more than one table in
# each marker.
# FIXME: The quantization tables can be used to estimate the
# compression quality.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
while len(s):
if len(s) < 65:
raise SyntaxError("bad quantization table marker")
v = i8(s[0])
if v//16 == 0:
self.quantization[v&15] = array.array("b", s[1:65])
s = s[65:]
else:
return # FIXME: add code to read 16-bit tables!
# raise SyntaxError, "bad quantization table element size"
#
# JPEG marker table
MARKER = {
0xFFC0: ("SOF0", "Baseline DCT", SOF),
0xFFC1: ("SOF1", "Extended Sequential DCT", SOF),
0xFFC2: ("SOF2", "Progressive DCT", SOF),
0xFFC3: ("SOF3", "Spatial lossless", SOF),
0xFFC4: ("DHT", "Define Huffman table", Skip),
0xFFC5: ("SOF5", "Differential sequential DCT", SOF),
0xFFC6: ("SOF6", "Differential progressive DCT", SOF),
0xFFC7: ("SOF7", "Differential spatial", SOF),
0xFFC8: ("JPG", "Extension", None),
0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF),
0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF),
0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF),
0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip),
0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF),
0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF),
0xFFCF: ("SOF15", "Differential spatial (AC)", SOF),
0xFFD0: ("RST0", "Restart 0", None),
0xFFD1: ("RST1", "Restart 1", None),
0xFFD2: ("RST2", "Restart 2", None),
0xFFD3: ("RST3", "Restart 3", None),
0xFFD4: ("RST4", "Restart 4", None),
0xFFD5: ("RST5", "Restart 5", None),
0xFFD6: ("RST6", "Restart 6", None),
0xFFD7: ("RST7", "Restart 7", None),
0xFFD8: ("SOI", "Start of image", None),
0xFFD9: ("EOI", "End of image", None),
0xFFDA: ("SOS", "Start of scan", Skip),
0xFFDB: ("DQT", "Define quantization table", DQT),
0xFFDC: ("DNL", "Define number of lines", Skip),
0xFFDD: ("DRI", "Define restart interval", Skip),
0xFFDE: ("DHP", "Define hierarchical progression", SOF),
0xFFDF: ("EXP", "Expand reference component", Skip),
0xFFE0: ("APP0", "Application segment 0", APP),
0xFFE1: ("APP1", "Application segment 1", APP),
0xFFE2: ("APP2", "Application segment 2", APP),
0xFFE3: ("APP3", "Application segment 3", APP),
0xFFE4: ("APP4", "Application segment 4", APP),
0xFFE5: ("APP5", "Application segment 5", APP),
0xFFE6: ("APP6", "Application segment 6", APP),
0xFFE7: ("APP7", "Application segment 7", APP),
0xFFE8: ("APP8", "Application segment 8", APP),
0xFFE9: ("APP9", "Application segment 9", APP),
0xFFEA: ("APP10", "Application segment 10", APP),
0xFFEB: ("APP11", "Application segment 11", APP),
0xFFEC: ("APP12", "Application segment 12", APP),
0xFFED: ("APP13", "Application segment 13", APP),
0xFFEE: ("APP14", "Application segment 14", APP),
0xFFEF: ("APP15", "Application segment 15", APP),
0xFFF0: ("JPG0", "Extension 0", None),
0xFFF1: ("JPG1", "Extension 1", None),
0xFFF2: ("JPG2", "Extension 2", None),
0xFFF3: ("JPG3", "Extension 3", None),
0xFFF4: ("JPG4", "Extension 4", None),
0xFFF5: ("JPG5", "Extension 5", None),
0xFFF6: ("JPG6", "Extension 6", None),
0xFFF7: ("JPG7", "Extension 7", None),
0xFFF8: ("JPG8", "Extension 8", None),
0xFFF9: ("JPG9", "Extension 9", None),
0xFFFA: ("JPG10", "Extension 10", None),
0xFFFB: ("JPG11", "Extension 11", None),
0xFFFC: ("JPG12", "Extension 12", None),
0xFFFD: ("JPG13", "Extension 13", None),
0xFFFE: ("COM", "Comment", COM)
}
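# Accept test used when this plugin is registered with Image.register_open() below:
# JPEG data always starts with a 0xFF marker byte.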
def _accept(prefix):
return prefix[0:1] == b"\377"
##
# Image plugin for JPEG and JFIF images.
class JpegImageFile(ImageFile.ImageFile):
format = "JPEG"
format_description = "JPEG (ISO 10918)"
def _open(self):
s = self.fp.read(1)
if i8(s[0]) != 255:
raise SyntaxError("not a JPEG file")
# Create attributes
self.bits = self.layers = 0
# JPEG specifics (internal)
self.layer = []
self.huffman_dc = {}
self.huffman_ac = {}
self.quantization = {}
self.app = {} # compatibility
self.applist = []
self.icclist = []
while True:
s = s + self.fp.read(1)
i = i16(s)
if i in MARKER:
name, description, handler = MARKER[i]
# print hex(i), name, description
if handler is not None:
handler(self, i)
if i == 0xFFDA: # start of scan
rawmode = self.mode
if self.mode == "CMYK":
rawmode = "CMYK;I" # assume adobe conventions
self.tile = [("jpeg", (0,0) + self.size, 0, (rawmode, ""))]
# self.__offset = self.fp.tell()
break
s = self.fp.read(1)
elif i == 0 or i == 65535:
# padded marker or junk; move on
s = "\xff"
else:
raise SyntaxError("no marker found")
def draft(self, mode, size):
if len(self.tile) != 1:
return
d, e, o, a = self.tile[0]
scale = 0
if a[0] == "RGB" and mode in ["L", "YCbCr"]:
self.mode = mode
a = mode, ""
if size:
scale = max(self.size[0] // size[0], self.size[1] // size[1])
for s in [8, 4, 2, 1]:
if scale >= s:
break
e = e[0], e[1], (e[2]-e[0]+s-1)//s+e[0], (e[3]-e[1]+s-1)//s+e[1]
self.size = ((self.size[0]+s-1)//s, (self.size[1]+s-1)//s)
scale = s
self.tile = [(d, e, o, a)]
self.decoderconfig = (scale, 1)
return self
def load_djpeg(self):
# ALTERNATIVE: handle JPEGs via the IJG command line utilities
import tempfile, os
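# Note: tempfile.mktemp() only reserves a name without creating the file; the file
# is then created by the shell redirection below, which leaves a temporary-file race
# (this appears to be the weakness tracked as PYSEC-2014-22 below).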
file = tempfile.mktemp()
os.system("djpeg %s >%s" % (self.filename, file))
try:
self.im = Image.core.open_ppm(file)
finally:
try: os.unlink(file)
except: pass
self.mode = self.im.mode
self.size = self.im.size
self.tile = []
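# Thin wrapper so im._getexif() delegates to the module-level helper defined below.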
def _getexif(self):
return _getexif(self)
def _getexif(self):
# Extract EXIF information. This method is highly experimental,
# and is likely to be replaced with something better in a future
# version.
from PIL import TiffImagePlugin
import io
def fixup(value):
if len(value) == 1:
return value[0]
return value
# The EXIF record consists of a TIFF file embedded in a JPEG
# application marker (!).
try:
data = self.info["exif"]
except KeyError:
return None
file = io.BytesIO(data[6:])
head = file.read(8)
exif = {}
# process dictionary
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file)
for key, value in info.items():
exif[key] = fixup(value)
# get exif extension
try:
file.seek(exif[0x8769])
except KeyError:
pass
else:
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file)
for key, value in info.items():
exif[key] = fixup(value)
# get gpsinfo extension
try:
file.seek(exif[0x8825])
except KeyError:
pass
else:
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file)
exif[0x8825] = gps = {}
for key, value in info.items():
gps[key] = fixup(value)
return exif
# --------------------------------------------------------------------
# stuff to save JPEG files
RAWMODE = {
"1": "L",
"L": "L",
"RGB": "RGB",
"RGBA": "RGB",
"RGBX": "RGB",
"CMYK": "CMYK;I", # assume adobe conventions
"YCbCr": "YCbCr",
}
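# Standard JPEG zigzag traversal order of the 64 DCT coefficients, used below when
# reordering quantization tables.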
zigzag_index = ( 0, 1, 5, 6, 14, 15, 27, 28,
2, 4, 7, 13, 16, 26, 29, 42,
3, 8, 12, 17, 25, 30, 41, 43,
9, 11, 18, 24, 31, 40, 44, 53,
10, 19, 23, 32, 39, 45, 52, 54,
20, 22, 33, 38, 46, 51, 55, 60,
21, 34, 37, 47, 50, 56, 59, 61,
35, 36, 48, 49, 57, 58, 62, 63)
samplings = {
(1, 1, 1, 1, 1, 1): 0,
(2, 1, 1, 1, 1, 1): 1,
(2, 2, 1, 1, 1, 1): 2,
}
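# Turn a dict of quantization tables (keyed by table id) into a list and reorder
# each table using the zigzag index above.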
def convert_dict_qtables(qtables):
qtables = [qtables[key] for key in xrange(len(qtables)) if qtables.has_key(key)]
for idx, table in enumerate(qtables):
qtables[idx] = [table[i] for i in zigzag_index]
return qtables
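# Derive the encoder subsampling setting from the sampling factors of the image's
# first three components; unrecognized layouts map to -1.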
def get_sampling(im):
sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3]
return samplings.get(sampling, -1)
def _save(im, fp, filename):
try:
rawmode = RAWMODE[im.mode]
except KeyError:
raise IOError("cannot write mode %s as JPEG" % im.mode)
info = im.encoderinfo
dpi = info.get("dpi", (0, 0))
quality = info.get("quality", 0)
subsampling = info.get("subsampling", -1)
qtables = info.get("qtables")
if quality == "keep":
quality = 0
subsampling = "keep"
qtables = "keep"
elif quality in presets:
preset = presets[quality]
quality = 0
subsampling = preset.get('subsampling', -1)
qtables = preset.get('quantization')
elif not isinstance(quality, int):
raise ValueError("Invalid quality setting")
else:
if subsampling in presets:
subsampling = presets[subsampling].get('subsampling', -1)
if qtables in presets:
qtables = presets[qtables].get('quantization')
if subsampling == "4:4:4":
subsampling = 0
elif subsampling == "4:2:2":
subsampling = 1
elif subsampling == "4:1:1":
subsampling = 2
elif subsampling == "keep":
if im.format != "JPEG":
raise ValueError("Cannot use 'keep' when original image is not a JPEG")
subsampling = get_sampling(im)
def validate_qtables(qtables):
if qtables is None:
return qtables
if isStringType(qtables):
try:
lines = [int(num) for line in qtables.splitlines()
for num in line.split('#', 1)[0].split()]
except ValueError:
raise ValueError("Invalid quantization table")
else:
qtables = [lines[s:s+64] for s in xrange(0, len(lines), 64)]
if isinstance(qtables, (tuple, list, dict)):
if isinstance(qtables, dict):
qtables = convert_dict_qtables(qtables)
elif isinstance(qtables, tuple):
qtables = list(qtables)
if not (0 < len(qtables) < 5):
raise ValueError("None or too many quantization tables")
for idx, table in enumerate(qtables):
try:
if len(table) != 64:
raise
table = array.array('b', table)
except TypeError:
raise ValueError("Invalid quantization table")
else:
qtables[idx] = list(table)
return qtables
if qtables == "keep":
if im.format != "JPEG":
raise ValueError("Cannot use 'keep' when original image is not a JPEG")
qtables = getattr(im, "quantization", None)
qtables = validate_qtables(qtables)
extra = b""
icc_profile = info.get("icc_profile")
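# An ICC profile that does not fit in a single APP2 marker (65533 bytes including
# overhead) is split across consecutive ICC_PROFILE markers, numbered 1..n.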
if icc_profile:
ICC_OVERHEAD_LEN = 14
MAX_BYTES_IN_MARKER = 65533
MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN
markers = []
while icc_profile:
markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER])
icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:]
i = 1
for marker in markers:
size = struct.pack(">H", 2 + ICC_OVERHEAD_LEN + len(marker))
extra = extra + (b"\xFF\xE2" + size + b"ICC_PROFILE\0" + o8(i) + o8(len(markers)) + marker)
i = i + 1
# get keyword arguments
im.encoderconfig = (
quality,
# "progressive" is the official name, but older documentation
# says "progression"
# FIXME: issue a warning if the wrong form is used (post-1.1.7)
"progressive" in info or "progression" in info,
info.get("smooth", 0),
"optimize" in info,
info.get("streamtype", 0),
dpi[0], dpi[1],
subsampling,
qtables,
extra,
info.get("exif", b"")
)
# If we optimize, libjpeg needs a buffer big enough to hold the whole image in one shot.
# Guessing on the size, at im.size bytes (raw pixel size is channels*size); this
# is a value that's been used in a django patch.
# https://github.com/jdriscoll/django-imagekit/issues/50
bufsize=0
if "optimize" in info or "progressive" in info or "progression" in info:
bufsize = im.size[0]*im.size[1]
# The exif info needs to be written as one block, + APP1, + one spare byte.
# Ensure that our buffer is big enough
bufsize = max(ImageFile.MAXBLOCK, bufsize, len(info.get("exif",b"")) + 5 )
ImageFile._save(im, fp, [("jpeg", (0,0)+im.size, 0, rawmode)], bufsize)
def _save_cjpeg(im, fp, filename):
# ALTERNATIVE: handle JPEGs via the IJG command line utilities.
import os
file = im._dump()
os.system("cjpeg %s >%s" % (file, filename))
try: os.unlink(file)
except: pass
# --------------------------------------------------------------------
# Registry stuff
Image.register_open("JPEG", JpegImageFile, _accept)
Image.register_save("JPEG", _save)
Image.register_extension("JPEG", ".jfif")
Image.register_extension("JPEG", ".jpe")
Image.register_extension("JPEG", ".jpg")
Image.register_extension("JPEG", ".jpeg")
Image.register_mime("JPEG", "image/jpeg")
|
PYSEC-2014-22
|
tensorflow/python/kernel_tests/shape_ops_test.py
|
@@ -723,6 +723,17 @@ def testShapeFunctionEdgeCases(self):
inp, array_ops.placeholder(
dtypes.int32, shape=[3]))
+ def testLargeTensor(self):
+      # Test case for GitHub issue 46911.
+ if test_util.is_xla_enabled():
+ # The following test fails with XLA enabled.
+ return
+ with self.assertRaises(errors_impl.InternalError):
+ with self.cached_session():
+ tiled = array_ops.tile(
+ np.ones((1, 1, 1)), [100000000, 100000000, 100000000])
+ self.evaluate(tiled)
+
if __name__ == "__main__":
test.main()
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for various tensorflow.ops.tf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
class ShapeOpsTest(test.TestCase):
def _compareShape(self, x, use_gpu=False):
np_ans = np.array(np.shape(x))
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.shape(x)
tf_ans_64 = array_ops.shape(x, out_type=dtypes.int64)
result = self.evaluate(tf_ans)
result_64 = self.evaluate(tf_ans_64)
self.assertAllEqual(np_ans, result)
self.assertAllEqual(np_ans, result_64)
self.assertShapeEqual(np_ans, tf_ans)
def _compareShapeSparse(self, x_np, use_gpu=False):
np_ans = np.array(np.shape(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.shape(x_tf)
result = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareShapeN(self, x, use_gpu=False):
np_ans = np.array(np.shape(x))
with self.cached_session(use_gpu=use_gpu) as sess:
tf_ans = array_ops.shape_n([x, x, x])
tf_ans_64 = array_ops.shape_n([x, x, x], out_type=dtypes.int64)
result = self.evaluate(tf_ans)
result_64 = self.evaluate(tf_ans_64)
for i in range(3):
self.assertAllEqual(np_ans, result[i])
self.assertAllEqual(np_ans, result_64[i])
self.assertShapeEqual(np_ans, tf_ans[i])
def _compareRank(self, x, use_gpu=False):
np_ans = np.asarray(np.ndim(x))
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.rank(x)
result = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareRankSparse(self, x_np, use_gpu=False):
np_ans = np.asarray(np.ndim(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.rank(x_tf)
result = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareSize(self, x, use_gpu=False):
np_ans = np.asarray(np.size(x))
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.size(x)
result = self.evaluate(tf_ans)
tf_ans_64 = array_ops.size(x, out_type=dtypes.int64)
result_64 = self.evaluate(tf_ans_64)
self.assertAllEqual(np_ans, result)
self.assertAllEqual(np_ans, result_64)
self.assertShapeEqual(np_ans, tf_ans)
def _compareSizeSparse(self, x_np, use_gpu=False):
np_ans = np.asarray(np.size(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.size(x_tf)
result = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _testCpu(self, x):
self._compareShape(x, use_gpu=False)
self._compareShapeN(x, use_gpu=False)
self._compareRank(x, use_gpu=False)
self._compareSize(x, use_gpu=False)
self._compareShapeSparse(x, use_gpu=False)
self._compareRankSparse(x, use_gpu=False)
self._compareSizeSparse(x, use_gpu=False)
def _testGpu(self, x):
self._compareShape(x, use_gpu=True)
self._compareShapeN(x, use_gpu=True)
self._compareRank(x, use_gpu=True)
self._compareSize(x, use_gpu=True)
self._compareShapeSparse(x, use_gpu=True)
self._compareRankSparse(x, use_gpu=True)
self._compareSizeSparse(x, use_gpu=True)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testBasic(self):
self._testAll(np.random.randn(2))
self._testAll(np.random.randn(2, 3))
self._testAll(np.random.randn(2, 3, 5))
self._testAll(np.random.randn(2, 3, 5, 7))
self._testAll(np.random.randn(2, 3, 5, 7, 11))
self._testAll(np.random.randn(2, 3, 5, 7, 11, 13))
def testBool(self):
self._testAll(np.random.choice((False, True), size=(2,)))
self._testAll(np.random.choice((False, True), size=(2, 3)))
self._testAll(np.random.choice((False, True), size=(2, 3, 5)))
self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7)))
self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7, 11)))
self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7, 11, 13)))
# Disabled because it takes too long to run, but manually verified
# as passing at time of writing.
def _test64BitOutput(self):
with self.cached_session():
inp = array_ops.zeros([2**31])
num_elements = array_ops.size_internal(
inp, optimize=False, out_type=dtypes.int64)
self.assertEqual(2**31, self.evaluate(num_elements))
# Too large for tf.int32 output.
with self.assertRaises(errors_impl.InvalidArgumentError):
with self.cached_session():
inp = array_ops.zeros([2**31])
num_elements = array_ops.size_internal(
inp, optimize=False, out_type=dtypes.int32)
self.assertEqual(2**31, self.evaluate(num_elements))
def _compareExpandDims(self, x, dim, use_gpu):
np_ans = np.expand_dims(x, axis=dim)
with self.cached_session(use_gpu=use_gpu):
tensor = array_ops.expand_dims(x, dim)
tf_ans = self.evaluate(tensor)
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareExpandDimsAll(self, x, dim):
self._compareExpandDims(x, dim, False)
self._compareExpandDims(x, dim, True)
def testExpandDims(self):
self._compareExpandDimsAll(np.zeros([2]), 0)
self._compareExpandDimsAll(np.zeros([2]), 1)
self._compareExpandDimsAll(np.zeros([2]), -1)
self._compareExpandDimsAll(np.zeros([2, 3]), 0)
self._compareExpandDimsAll(np.zeros([2, 3]), 1)
self._compareExpandDimsAll(np.zeros([2, 3]), 2)
self._compareExpandDimsAll(np.zeros([2, 3]), -1)
self._compareExpandDimsAll(np.zeros([2, 3]), -2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 0)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 1)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 3)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -1)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -3)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -4)
def testExpandDimsBool(self):
choice = lambda s: np.random.choice((False, True), size=s)
self._compareExpandDimsAll(choice([2]), 0)
self._compareExpandDimsAll(choice([2]), 1)
self._compareExpandDimsAll(choice([2]), -1)
self._compareExpandDimsAll(choice([2, 3]), 0)
self._compareExpandDimsAll(choice([2, 3]), 1)
self._compareExpandDimsAll(choice([2, 3]), 2)
self._compareExpandDimsAll(choice([2, 3]), -1)
self._compareExpandDimsAll(choice([2, 3]), -2)
self._compareExpandDimsAll(choice([2, 3, 5]), 0)
self._compareExpandDimsAll(choice([2, 3, 5]), 1)
self._compareExpandDimsAll(choice([2, 3, 5]), 2)
self._compareExpandDimsAll(choice([2, 3, 5]), 3)
self._compareExpandDimsAll(choice([2, 3, 5]), -1)
self._compareExpandDimsAll(choice([2, 3, 5]), -2)
self._compareExpandDimsAll(choice([2, 3, 5]), -3)
self._compareExpandDimsAll(choice([2, 3, 5]), -4)
@test_util.run_deprecated_v1
def testExpandDimsErrors(self):
with self.cached_session():
self.assertRaises(ValueError, array_ops.expand_dims,
np.zeros([2, 3, 5]), -5)
self.assertRaises(ValueError, array_ops.expand_dims,
[False, True, True], -5)
self.assertRaises(ValueError, array_ops.expand_dims,
np.zeros([2, 3, 5]), 4)
self.assertRaises(ValueError, array_ops.expand_dims,
[False, True, True], 4)
@test_util.run_deprecated_v1
def testExpandDimsGradient(self):
with self.cached_session():
inp = constant_op.constant(
np.random.rand(4, 2).astype("f"), dtype=dtypes.float32)
squeezed = array_ops.expand_dims(inp, 1)
err = gradient_checker.compute_gradient_error(inp, [4, 2], squeezed,
[4, 1, 2])
self.assertLess(err, 1e-3)
@test_util.run_deprecated_v1
def testExpandDimsScalar(self):
with self.cached_session():
inp = constant_op.constant(7)
self.assertAllEqual([7], array_ops.expand_dims(inp, 0))
self.assertAllEqual([7], array_ops.expand_dims(inp, -1))
inp = constant_op.constant(True)
self.assertAllEqual([True], array_ops.expand_dims(inp, 0))
self.assertAllEqual([True], array_ops.expand_dims(inp, -1))
def testExpandDimsDimType(self):
for dtype in [dtypes.int32, dtypes.int64]:
x = np.zeros([2])
np_ans = np.expand_dims(x, axis=0)
with self.cached_session():
tensor = array_ops.expand_dims(x, constant_op.constant(0, dtype))
tf_ans = self.evaluate(tensor)
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareSqueeze(self, x, squeeze_dims, use_gpu):
with self.cached_session(use_gpu=use_gpu):
if squeeze_dims:
np_ans = np.squeeze(x, axis=tuple(squeeze_dims))
tensor = array_ops.squeeze(x, squeeze_dims)
tf_ans = self.evaluate(tensor)
else:
np_ans = np.squeeze(x)
tensor = array_ops.squeeze(x)
tf_ans = self.evaluate(tensor)
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareSqueezeAll(self, x, squeeze_dims=None):
if squeeze_dims is None:
squeeze_dims = []
self._compareSqueeze(x, squeeze_dims, False)
self._compareSqueeze(x, squeeze_dims, True)
def testSqueeze(self):
# Nothing to squeeze.
self._compareSqueezeAll(np.zeros([2]))
self._compareSqueezeAll(np.zeros([2, 3]))
# Squeeze the middle element away.
self._compareSqueezeAll(np.zeros([2, 1, 2]))
# Squeeze on both ends.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]))
def testSqueezeBool(self):
choice = lambda s: np.random.choice((False, True), size=s)
# Nothing to squeeze.
self._compareSqueezeAll(choice([2]))
self._compareSqueezeAll(choice([2, 3]))
# Squeeze the middle element away.
self._compareSqueezeAll(choice([2, 1, 2]))
# Squeeze on both ends.
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]))
def testSqueezeSpecificDimension(self):
# Positive squeeze dim index.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [2, 4])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0, 4, 2])
# Negative squeeze dim index.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-1])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5, -1])
def testSqueezeSpecificDimensionBool(self):
choice = lambda s: np.random.choice((False, True), size=s)
# Positive squeeze dim index.
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [0])
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [2, 4])
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [0, 4, 2])
# Negative squeeze dim index.
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-1])
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-3, -5])
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-3, -5, -1])
def testSqueezeAllOnes(self):
# Numpy squeezes a 1 element tensor into a zero dimensional tensor.
# Verify that we do the same.
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
tensor = array_ops.squeeze(np.zeros([1, 1, 1]), [])
self.assertEqual(np.shape(1), tensor.get_shape())
tf_ans = self.evaluate(tensor)
self.assertEqual(np.shape(1), tf_ans.shape)
def testSqueezeAllOnesBool(self):
# Numpy squeezes a 1 element tensor into a zero dimensional tensor.
# Verify that we do the same.
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
tensor = array_ops.squeeze([[[False]]], [])
self.assertEqual(np.shape(1), tensor.get_shape())
tf_ans = self.evaluate(tensor)
self.assertEqual(np.shape(1), tf_ans.shape)
@test_util.run_deprecated_v1
def testSqueezeOnlyOnes(self):
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
input_1x1x3 = np.zeros([1, 1, 3])
self._compareSqueezeAll(input_1x1x3)
self._compareSqueezeAll(input_1x1x3, [0])
self._compareSqueezeAll(input_1x1x3, [1])
self.assertRaises(ValueError, array_ops.squeeze, input_1x1x3, [2])
@test_util.run_deprecated_v1
def testSqueezeErrors(self):
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [-4])
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [0, -4])
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [3])
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [2, 3])
@test_util.run_deprecated_v1
def testSqueezeGradient(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = array_ops.reshape(inp, [4, 1, 2])
squeezed = array_ops.squeeze(a, [])
err = gradient_checker.compute_gradient_error(a, [4, 1, 2], squeezed,
[4, 2])
self.assertLess(err, 1e-3)
@test_util.run_deprecated_v1
def testSqueezeGradientWithSqueezeDims(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = array_ops.reshape(inp, [4, 1, 2, 1])
squeezed = array_ops.squeeze(a, [1])
err = gradient_checker.compute_gradient_error(a, [4, 1, 2, 1], squeezed,
[4, 2, 1])
self.assertLess(err, 1e-3)
@test_util.run_deprecated_v1
def testSqueezeWithUnknownShape(self):
with self.cached_session():
a = array_ops.placeholder(dtypes.float32, shape=[2, None])
squeezed = array_ops.squeeze(a, [1])
self.assertEqual([2], squeezed.get_shape().as_list())
squeezed = array_ops.squeeze(a)
self.assertEqual(None, squeezed.get_shape())
self.assertRaises(ValueError, array_ops.squeeze, a, [0])
self.assertRaises(ValueError, array_ops.squeeze, a, [100])
class TileTest(test.TestCase, parameterized.TestCase):
def testScalar(self):
for use_gpu in False, True:
with self.cached_session(use_gpu=use_gpu):
a = constant_op.constant(7, shape=[], dtype=dtypes.float32)
tiled = array_ops.tile(a, [])
result = self.evaluate(tiled)
self.assertEqual(result.shape, ())
self.assertEqual([], tiled.get_shape())
self.assertEqual(7, result)
def testSimple(self):
# multiples could be int32 or int64
for dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session():
inp = np.random.rand(4, 1).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, constant_op.constant([1, 4], dtype=dtype))
result = self.evaluate(tiled)
self.assertEqual(result.shape, (4, 4))
self.assertEqual([4, 4], tiled.get_shape())
self.assertTrue((result == np.tile(inp, (1, 4))).all())
def testIdentityTileAndGrad(self):
with self.cached_session():
inp = np.random.rand(4, 1).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, [1, 1])
result = self.evaluate(tiled)
self.assertEqual(result.shape, (4, 1))
self.assertEqual([4, 1], tiled.get_shape())
self.assertTrue((result == np.tile(inp, (1, 1))).all())
def testEmpty(self):
with self.cached_session():
inp = np.random.rand(2, 3).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, [5, 0])
result = self.evaluate(tiled)
self.assertEqual(result.shape, (10, 0))
self.assertEqual([10, 0], tiled.get_shape())
@test_util.run_deprecated_v1
def testUnknownInputShape(self):
"""Importing can call _TileShape without shape of <multiples> known."""
with self.cached_session():
inp = array_ops.placeholder(dtypes.float32) # unknown shape
multiples = constant_op.constant([1, 2, 3, 4], dtype=np.int32)
tiled = array_ops.tile(inp, multiples)
gdef = tiled.graph.as_graph_def()
# Move the tile op to the start of the graph so that shapes of its inputs
# are not available when the shape function runs on import.
swapped = False
for i, n in enumerate(gdef.node):
if n.op == "Tile":
# Swap tile op to be first in gdef.node
assert i != 0
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(gdef.node[i])
gdef.node[i].CopyFrom(gdef.node[0])
gdef.node[0].CopyFrom(new_node)
swapped = True
assert swapped
tiled_imported, = importer.import_graph_def(
gdef, return_elements=[tiled.name])
self.assertEqual(4, tiled_imported.get_shape().ndims)
def testTypes(self):
types_to_test = {
"bool": (dtypes.bool, bool),
"float32": (dtypes.float32, float),
"float64": (dtypes.float64, float),
"complex64": (dtypes.complex64, complex),
"complex128": (dtypes.complex128, complex),
"uint8": (dtypes.uint8, int),
"int8": (dtypes.int8, int),
"int16": (dtypes.int16, int),
"int32": (dtypes.int32, int),
"int64": (dtypes.int64, int),
"uint32": (dtypes.uint32, int),
"uint64": (dtypes.uint64, int),
bytes: (dtypes.string, bytes)
}
for dtype_np, (dtype_tf, cast) in types_to_test.items():
with self.cached_session():
inp = np.random.rand(4, 1).astype(dtype_np)
a = constant_op.constant(
[cast(x) for x in inp.ravel(order="C")],
shape=[4, 1],
dtype=dtype_tf)
tiled = array_ops.tile(a, [1, 4])
result = self.evaluate(tiled)
self.assertEqual(result.shape, (4, 4))
self.assertEqual([4, 4], tiled.get_shape())
self.assertAllEqual(result, np.tile(inp, (1, 4)))
@test_util.run_deprecated_v1
def testInvalidDim(self):
with self.cached_session():
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=[4, 1],
dtype=dtypes.float32)
# Wrong length of multiples.
with self.assertRaises(ValueError):
array_ops.tile(a, [1, 4, 2])
# Wrong rank for multiples.
with self.assertRaises(ValueError):
array_ops.tile(a, [[2, 3], [3, 4]]).eval()
def _RunAndVerifyResult(self, rank, use_gpu):
with self.cached_session(use_gpu=use_gpu):
# Random dims of given rank
input_shape = np.random.randint(1, 4, size=rank)
inp = np.random.rand(*input_shape).astype("f")
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=input_shape,
dtype=dtypes.float32)
multiples = np.random.randint(1, 4, size=rank).astype(np.int32)
tiled = array_ops.tile(a, multiples)
result = self.evaluate(tiled)
self.assertTrue((np.array(multiples) * np.array(inp.shape) == np.array(
result.shape)).all())
self.assertAllEqual(result, np.tile(inp, tuple(multiples)))
self.assertShapeEqual(result, tiled)
def testRandom(self):
# test low rank, like 5
for _ in range(5):
self._RunAndVerifyResult(5, use_gpu=False)
for _ in range(5):
self._RunAndVerifyResult(5, use_gpu=True)
# test high rank, like 10
for _ in range(5):
self._RunAndVerifyResult(10, use_gpu=False)
for _ in range(5):
self._RunAndVerifyResult(10, use_gpu=True)
@parameterized.parameters(dtypes.int32, dtypes.int64)
@test_util.run_deprecated_v1
def testGradientSimpleReduction(self, multiples_dtype):
with self.cached_session():
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
multiples = constant_op.constant([1, 4], dtype=multiples_dtype)
tiled = array_ops.tile(a, multiples)
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
self.assertShapeEqual(inp, grad)
result = self.evaluate(grad)
self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
@test_util.run_deprecated_v1
def testGradientStridedReduction(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
self.assertShapeEqual(inp, grad)
result = self.evaluate(grad)
expected_shape = [4, 2]
expected = np.zeros(expected_shape)
expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
self.assertTrue((np.abs(expected - result) < 1e-3).all())
@test_util.run_deprecated_v1
def testGradientSimpleReductionOnGPU(self):
with self.session():
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 4])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
result = self.evaluate(grad)
self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
@test_util.run_deprecated_v1
def testGradientStridedReductionOnGPU(self):
with self.session():
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
result = self.evaluate(grad)
expected_shape = [4, 2]
expected = np.zeros(expected_shape)
expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
self.assertAllClose(expected, result, 1e-3)
def _RunAndVerifyGradientResult(self, input_shape, multiples):
for use_gpu in False, True:
with self.cached_session(use_gpu=use_gpu):
# Random values
inp = np.asarray(np.random.rand(*input_shape))
a = constant_op.constant(inp, dtype=dtypes.float64)
tiled = array_ops.tile(a, multiples)
grad_shape = list(np.array(multiples) * np.array(inp.shape))
err = gradient_checker.compute_gradient_error(
a, list(input_shape), tiled, grad_shape, x_init_value=inp)
print("tile(float) error = ", err)
self.assertLess(err, 1e-3)
@test_util.run_deprecated_v1
def testGradientRandomScalar(self):
self._RunAndVerifyGradientResult([], [])
@test_util.run_deprecated_v1
def testGradientRandom(self):
self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 1, 1, 1, 1])
self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 2, 1, 3, 1])
self._RunAndVerifyGradientResult([2, 3, 1, 1, 3], [3, 1, 1, 2, 2])
self._RunAndVerifyGradientResult([2, 1, 3, 3, 2], [1, 3, 3, 1, 2])
@test_util.run_deprecated_v1
def testGradientStridedReductionGC(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
err = gradient_checker.compute_gradient_error(a, [4, 2], tiled, [4, 4])
self.assertLess(err, 1e-3)
@parameterized.parameters(dtypes.int32, dtypes.int64)
@test_util.run_deprecated_v1
def testGradientWithSparseGradWithRank1(self, multiples_dtype):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0],
dtype=dtypes.float32)
multiples = constant_op.constant([3], dtype=dtypes.int64)
outputs = array_ops.gather(array_ops.tile(inputs, multiples),
[1, 5, 9, 3, 7, 2, 2, 2])
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testGradientWithSparseGradWithRank3(self):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0],
dtype=dtypes.float32)
inputs = array_ops.reshape(inputs, [-1, 1, 1])
outputs = array_ops.gather(array_ops.tile(inputs, [3, 4, 2]),
[1, 5, 9, 3, 7, 2, 2, 2])
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# Unknown multiples shape.
inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])
tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
# Unknown input shape.
inp = array_ops.placeholder(dtypes.float32)
tiled = array_ops.tile(inp, [2, 2, 2, 2])
self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
# Unknown input and multiples shape.
inp = array_ops.placeholder(dtypes.float32)
tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
self.assertIs(None, tiled.get_shape().ndims)
# Known input and partially known multiples.
inp = constant_op.constant(0.0, shape=[1, 1])
tiled = array_ops.tile(inp, [array_ops.placeholder(dtypes.int32), 7])
self.assertEqual([None, 7], tiled.get_shape().as_list())
# Mismatched input rank and multiples length.
inp = array_ops.placeholder(dtypes.float32, shape=[None, None])
with self.assertRaises(ValueError):
tiled = array_ops.tile(
inp, array_ops.placeholder(
dtypes.int32, shape=[3]))
if __name__ == "__main__":
test.main()
|
PYSEC-2021-391
|
httpie/client.py
|
@@ -44,6 +44,7 @@ def collect_messages(
httpie_session_headers = None
if args.session or args.session_read_only:
httpie_session = get_httpie_session(
+ env=env,
config_dir=env.config.directory,
session_name=args.session or args.session_read_only,
host=args.headers.get('Host'),
@@ -130,10 +131,7 @@ def collect_messages(
if httpie_session:
if httpie_session.is_new() or not args.session_read_only:
httpie_session.cookies = requests_session.cookies
- httpie_session.remove_cookies(
- # TODO: take path & domain into account?
- cookie['name'] for cookie in expired_cookies
- )
+ httpie_session.remove_cookies(expired_cookies)
httpie_session.save()
|
import argparse
import http.client
import json
import sys
from contextlib import contextmanager
from time import monotonic
from typing import Any, Dict, Callable, Iterable
from urllib.parse import urlparse, urlunparse
import requests
# noinspection PyPackageRequirements
import urllib3
from . import __version__
from .adapters import HTTPieHTTPAdapter
from .context import Environment
from .cli.constants import EMPTY_STRING
from .cli.dicts import HTTPHeadersDict, NestedJSONArray
from .encoding import UTF8
from .models import RequestsMessage
from .plugins.registry import plugin_manager
from .sessions import get_httpie_session
from .ssl_ import AVAILABLE_SSL_VERSION_ARG_MAPPING, HTTPieCertificate, HTTPieHTTPSAdapter
from .uploads import (
compress_request, prepare_request_body,
get_multipart_data_and_content_type,
)
from .utils import get_expired_cookies, repr_dict
urllib3.disable_warnings()
FORM_CONTENT_TYPE = f'application/x-www-form-urlencoded; charset={UTF8}'
JSON_CONTENT_TYPE = 'application/json'
JSON_ACCEPT = f'{JSON_CONTENT_TYPE}, */*;q=0.5'
DEFAULT_UA = f'HTTPie/{__version__}'
def collect_messages(
env: Environment,
args: argparse.Namespace,
request_body_read_callback: Callable[[bytes], None] = None,
) -> Iterable[RequestsMessage]:
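# Build the request from the parsed CLI arguments, yield it, and (unless --offline)
# send it and yield each response, following redirects when --follow is given.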
httpie_session = None
httpie_session_headers = None
if args.session or args.session_read_only:
httpie_session = get_httpie_session(
config_dir=env.config.directory,
session_name=args.session or args.session_read_only,
host=args.headers.get('Host'),
url=args.url,
)
httpie_session_headers = httpie_session.headers
request_kwargs = make_request_kwargs(
env,
args=args,
base_headers=httpie_session_headers,
request_body_read_callback=request_body_read_callback
)
send_kwargs = make_send_kwargs(args)
send_kwargs_mergeable_from_env = make_send_kwargs_mergeable_from_env(args)
requests_session = build_requests_session(
ssl_version=args.ssl_version,
ciphers=args.ciphers,
verify=bool(send_kwargs_mergeable_from_env['verify'])
)
if httpie_session:
httpie_session.update_headers(request_kwargs['headers'])
requests_session.cookies = httpie_session.cookies
if args.auth_plugin:
# Save auth from CLI to HTTPie session.
httpie_session.auth = {
'type': args.auth_plugin.auth_type,
'raw_auth': args.auth_plugin.raw_auth,
}
elif httpie_session.auth:
# Apply auth from HTTPie session
request_kwargs['auth'] = httpie_session.auth
if args.debug:
# TODO: reflect the split between request and send kwargs.
dump_request(request_kwargs)
request = requests.Request(**request_kwargs)
prepared_request = requests_session.prepare_request(request)
apply_missing_repeated_headers(prepared_request, request.headers)
if args.path_as_is:
prepared_request.url = ensure_path_as_is(
orig_url=args.url,
prepped_url=prepared_request.url,
)
if args.compress and prepared_request.body:
compress_request(
request=prepared_request,
always=args.compress > 1,
)
response_count = 0
expired_cookies = []
while prepared_request:
yield prepared_request
if not args.offline:
send_kwargs_merged = requests_session.merge_environment_settings(
url=prepared_request.url,
**send_kwargs_mergeable_from_env,
)
with max_headers(args.max_headers):
response = requests_session.send(
request=prepared_request,
**send_kwargs_merged,
**send_kwargs,
)
response._httpie_headers_parsed_at = monotonic()
expired_cookies += get_expired_cookies(
response.headers.get('Set-Cookie', '')
)
response_count += 1
if response.next:
if args.max_redirects and response_count == args.max_redirects:
raise requests.TooManyRedirects
if args.follow:
prepared_request = response.next
if args.all:
yield response
continue
yield response
break
if httpie_session:
if httpie_session.is_new() or not args.session_read_only:
httpie_session.cookies = requests_session.cookies
httpie_session.remove_cookies(
# TODO: take path & domain into account?
cookie['name'] for cookie in expired_cookies
)
httpie_session.save()
# noinspection PyProtectedMember
@contextmanager
def max_headers(limit):
# <https://github.com/httpie/httpie/issues/802>
# noinspection PyUnresolvedReferences
orig = http.client._MAXHEADERS
http.client._MAXHEADERS = limit or float('Inf')
try:
yield
finally:
http.client._MAXHEADERS = orig
def build_requests_session(
verify: bool,
ssl_version: str = None,
ciphers: str = None,
) -> requests.Session:
requests_session = requests.Session()
# Install our adapter.
http_adapter = HTTPieHTTPAdapter()
https_adapter = HTTPieHTTPSAdapter(
ciphers=ciphers,
verify=verify,
ssl_version=(
AVAILABLE_SSL_VERSION_ARG_MAPPING[ssl_version]
if ssl_version else None
),
)
requests_session.mount('http://', http_adapter)
requests_session.mount('https://', https_adapter)
# Install adapters from plugins.
for plugin_cls in plugin_manager.get_transport_plugins():
transport_plugin = plugin_cls()
requests_session.mount(
prefix=transport_plugin.prefix,
adapter=transport_plugin.get_adapter(),
)
return requests_session
def dump_request(kwargs: dict):
sys.stderr.write(
f'\n>>> requests.request(**{repr_dict(kwargs)})\n\n')
def finalize_headers(headers: HTTPHeadersDict) -> HTTPHeadersDict:
final_headers = HTTPHeadersDict()
for name, value in headers.items():
if value is not None:
# “leading or trailing LWS MAY be removed without
# changing the semantics of the field value”
# <https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html>
# Also, requests raises `InvalidHeader` for leading spaces.
value = value.strip()
if isinstance(value, str):
# See <https://github.com/httpie/httpie/issues/212>
value = value.encode()
final_headers.add(name, value)
return final_headers
def apply_missing_repeated_headers(
prepared_request: requests.PreparedRequest,
original_headers: HTTPHeadersDict
) -> None:
"""Update the given `prepared_request`'s headers with the original
ones. This allows the requests to be prepared as usual, and then later
merged with headers that are specified multiple times."""
new_headers = HTTPHeadersDict(prepared_request.headers)
for prepared_name, prepared_value in prepared_request.headers.items():
if prepared_name not in original_headers:
continue
original_keys, original_values = zip(*filter(
lambda item: item[0].casefold() == prepared_name.casefold(),
original_headers.items()
))
if prepared_value not in original_values:
# If the current value is not among the initial values
# set for this field, then it means that this field got
# overridden on the way, and we should preserve it.
continue
new_headers.popone(prepared_name)
new_headers.update(zip(original_keys, original_values))
prepared_request.headers = new_headers
def make_default_headers(args: argparse.Namespace) -> HTTPHeadersDict:
default_headers = HTTPHeadersDict({
'User-Agent': DEFAULT_UA
})
auto_json = args.data and not args.form
if args.json or auto_json:
default_headers['Accept'] = JSON_ACCEPT
if args.json or (auto_json and args.data):
default_headers['Content-Type'] = JSON_CONTENT_TYPE
elif args.form and not args.files:
# If sending files, `requests` will set
# the `Content-Type` for us.
default_headers['Content-Type'] = FORM_CONTENT_TYPE
return default_headers
def make_send_kwargs(args: argparse.Namespace) -> dict:
return {
'timeout': args.timeout or None,
'allow_redirects': False,
}
def make_send_kwargs_mergeable_from_env(args: argparse.Namespace) -> dict:
cert = None
if args.cert:
cert = args.cert
if args.cert_key:
# Having a client certificate key passphrase is not supported
# by requests. So we are using our own transportation structure
# which is compatible with their format (a tuple of minimum two
# items).
#
# See: https://github.com/psf/requests/issues/2519
cert = HTTPieCertificate(cert, args.cert_key, args.cert_key_pass.value)
return {
'proxies': {p.key: p.value for p in args.proxy},
'stream': True,
'verify': {
'yes': True,
'true': True,
'no': False,
'false': False,
}.get(args.verify.lower(), args.verify),
'cert': cert,
}
def json_dict_to_request_body(data: Dict[str, Any]) -> str:
# Propagate the top-level list if there is only one
# item in the object, with an empty key.
if len(data) == 1:
[(key, value)] = data.items()
if isinstance(value, NestedJSONArray):
assert key == EMPTY_STRING
data = value
if data:
data = json.dumps(data)
else:
# We need to set data to an empty string to prevent requests
# from assigning an empty list to `response.request.data`.
data = ''
return data
def make_request_kwargs(
env: Environment,
args: argparse.Namespace,
base_headers: HTTPHeadersDict = None,
request_body_read_callback=lambda chunk: chunk
) -> dict:
"""
Translate our `args` into `requests.Request` keyword arguments.
"""
files = args.files
# Serialize JSON data, if needed.
data = args.data
auto_json = data and not args.form
if (args.json or auto_json) and isinstance(data, dict):
data = json_dict_to_request_body(data)
# Finalize headers.
headers = make_default_headers(args)
if base_headers:
headers.update(base_headers)
headers.update(args.headers)
if args.offline and args.chunked and 'Transfer-Encoding' not in headers:
# When online, we let requests set the header instead to be able to more
# easily verify chunking is taking place.
headers['Transfer-Encoding'] = 'chunked'
headers = finalize_headers(headers)
if (args.form and files) or args.multipart:
data, headers['Content-Type'] = get_multipart_data_and_content_type(
data=args.multipart_data,
boundary=args.boundary,
content_type=args.headers.get('Content-Type'),
)
return {
'method': args.method.lower(),
'url': args.url,
'headers': headers,
'data': prepare_request_body(
env,
data,
body_read_callback=request_body_read_callback,
chunked=args.chunked,
offline=args.offline,
content_length_header_value=headers.get('Content-Length'),
),
'auth': args.auth,
'params': args.params.items(),
}
def ensure_path_as_is(orig_url: str, prepped_url: str) -> str:
"""
Handle `--path-as-is` by replacing the path component of the prepared
URL with the path component from the original URL. Other parts stay
untouched because other (welcome) processing on the URL might have
taken place.
<https://github.com/httpie/httpie/issues/895>
<https://ec.haxx.se/http/http-basics#path-as-is>
<https://curl.haxx.se/libcurl/c/CURLOPT_PATH_AS_IS.html>
>>> ensure_path_as_is('http://foo/../', 'http://foo/?foo=bar')
'http://foo/../?foo=bar'
"""
parsed_orig, parsed_prepped = urlparse(orig_url), urlparse(prepped_url)
final_dict = {
# noinspection PyProtectedMember
**parsed_prepped._asdict(),
'path': parsed_orig.path,
}
return urlunparse(tuple(final_dict.values()))
|
PYSEC-2022-167
|
httpie/config.py
|
@@ -1,7 +1,7 @@
import json
import os
from pathlib import Path
-from typing import Union
+from typing import Any, Dict, Union
from . import __version__
from .compat import is_windows
@@ -62,6 +62,21 @@ class ConfigFileError(Exception):
pass
+def read_raw_config(config_type: str, path: Path) -> Dict[str, Any]:
+ try:
+ with path.open(encoding=UTF8) as f:
+ try:
+ return json.load(f)
+ except ValueError as e:
+ raise ConfigFileError(
+ f'invalid {config_type} file: {e} [{path}]'
+ )
+ except FileNotFoundError:
+ pass
+ except OSError as e:
+ raise ConfigFileError(f'cannot read {config_type} file: {e}')
+
+
class BaseConfigDict(dict):
name = None
helpurl = None
@@ -77,26 +92,25 @@ def ensure_directory(self):
def is_new(self) -> bool:
return not self.path.exists()
+ def pre_process_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
+ """Hook for processing the incoming config data."""
+ return data
+
+ def post_process_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
+ """Hook for processing the outgoing config data."""
+ return data
+
def load(self):
config_type = type(self).__name__.lower()
- try:
- with self.path.open(encoding=UTF8) as f:
- try:
- data = json.load(f)
- except ValueError as e:
- raise ConfigFileError(
- f'invalid {config_type} file: {e} [{self.path}]'
- )
- self.update(data)
- except FileNotFoundError:
- pass
- except OSError as e:
- raise ConfigFileError(f'cannot read {config_type} file: {e}')
-
- def save(self):
- self['__meta__'] = {
- 'httpie': __version__
- }
+ data = read_raw_config(config_type, self.path)
+ if data is not None:
+ data = self.pre_process_data(data)
+ self.update(data)
+
+ def save(self, *, bump_version: bool = False):
+ self.setdefault('__meta__', {})
+ if bump_version or 'httpie' not in self['__meta__']:
+ self['__meta__']['httpie'] = __version__
if self.helpurl:
self['__meta__']['help'] = self.helpurl
@@ -106,13 +120,19 @@ def save(self):
self.ensure_directory()
json_string = json.dumps(
- obj=self,
+ obj=self.post_process_data(self),
indent=4,
sort_keys=True,
ensure_ascii=True,
)
self.path.write_text(json_string + '\n', encoding=UTF8)
+ @property
+ def version(self):
+ return self.get(
+ '__meta__', {}
+ ).get('httpie', __version__)
+
class Config(BaseConfigDict):
FILENAME = 'config.json'
|
import json
import os
from pathlib import Path
from typing import Union
from . import __version__
from .compat import is_windows
from .encoding import UTF8
ENV_XDG_CONFIG_HOME = 'XDG_CONFIG_HOME'
ENV_HTTPIE_CONFIG_DIR = 'HTTPIE_CONFIG_DIR'
DEFAULT_CONFIG_DIRNAME = 'httpie'
DEFAULT_RELATIVE_XDG_CONFIG_HOME = Path('.config')
DEFAULT_RELATIVE_LEGACY_CONFIG_DIR = Path('.httpie')
DEFAULT_WINDOWS_CONFIG_DIR = Path(
os.path.expandvars('%APPDATA%')) / DEFAULT_CONFIG_DIRNAME
def get_default_config_dir() -> Path:
"""
Return the path to the httpie configuration directory.
This directory isn't guaranteed to exist, nor are any of its
ancestors (only the legacy ~/.httpie, if returned, is guaranteed to exist).
XDG Base Directory Specification support:
<https://wiki.archlinux.org/index.php/XDG_Base_Directory>
$XDG_CONFIG_HOME is supported; $XDG_CONFIG_DIRS is not
"""
# 1. explicitly set through env
env_config_dir = os.environ.get(ENV_HTTPIE_CONFIG_DIR)
if env_config_dir:
return Path(env_config_dir)
# 2. Windows
if is_windows:
return DEFAULT_WINDOWS_CONFIG_DIR
home_dir = Path.home()
# 3. legacy ~/.httpie
legacy_config_dir = home_dir / DEFAULT_RELATIVE_LEGACY_CONFIG_DIR
if legacy_config_dir.exists():
return legacy_config_dir
# 4. XDG
xdg_config_home_dir = os.environ.get(
ENV_XDG_CONFIG_HOME, # 4.1. explicit
home_dir / DEFAULT_RELATIVE_XDG_CONFIG_HOME # 4.2. default
)
return Path(xdg_config_home_dir) / DEFAULT_CONFIG_DIRNAME
DEFAULT_CONFIG_DIR = get_default_config_dir()
class ConfigFileError(Exception):
pass
class BaseConfigDict(dict):
name = None
helpurl = None
about = None
def __init__(self, path: Path):
super().__init__()
self.path = path
def ensure_directory(self):
self.path.parent.mkdir(mode=0o700, parents=True, exist_ok=True)
def is_new(self) -> bool:
return not self.path.exists()
def load(self):
config_type = type(self).__name__.lower()
try:
with self.path.open(encoding=UTF8) as f:
try:
data = json.load(f)
except ValueError as e:
raise ConfigFileError(
f'invalid {config_type} file: {e} [{self.path}]'
)
self.update(data)
except FileNotFoundError:
pass
except OSError as e:
raise ConfigFileError(f'cannot read {config_type} file: {e}')
def save(self):
self['__meta__'] = {
'httpie': __version__
}
if self.helpurl:
self['__meta__']['help'] = self.helpurl
if self.about:
self['__meta__']['about'] = self.about
self.ensure_directory()
json_string = json.dumps(
obj=self,
indent=4,
sort_keys=True,
ensure_ascii=True,
)
self.path.write_text(json_string + '\n', encoding=UTF8)
class Config(BaseConfigDict):
FILENAME = 'config.json'
DEFAULTS = {
'default_options': []
}
def __init__(self, directory: Union[str, Path] = DEFAULT_CONFIG_DIR):
self.directory = Path(directory)
super().__init__(path=self.directory / self.FILENAME)
self.update(self.DEFAULTS)
@property
def default_options(self) -> list:
return self['default_options']
@property
def plugins_dir(self) -> Path:
return Path(self.get('plugins_dir', self.directory / 'plugins')).resolve()
|
PYSEC-2022-167
|
httpie/manager/cli.py
|
@@ -2,6 +2,15 @@
from httpie.cli.argparser import HTTPieManagerArgumentParser
from httpie import __version__
+CLI_SESSION_UPGRADE_FLAGS = [
+ {
+ 'variadic': ['--bind-cookies'],
+ 'action': 'store_true',
+ 'default': False,
+ 'help': 'Bind domainless cookies to the host that session belongs.'
+ }
+]
+
COMMANDS = {
'plugins': {
'help': 'Manage HTTPie plugins.',
@@ -34,6 +43,34 @@
'List all installed HTTPie plugins.'
],
},
+ 'cli': {
+ 'help': 'Manage HTTPie for Terminal',
+ 'sessions': {
+ 'help': 'Manage HTTPie sessions',
+ 'upgrade': [
+ 'Upgrade the given HTTPie session with the latest '
+ 'layout. A list of changes between different session versions '
+ 'can be found in the official documentation.',
+ {
+ 'dest': 'hostname',
+ 'metavar': 'HOSTNAME',
+ 'help': 'The host this session belongs.'
+ },
+ {
+ 'dest': 'session',
+ 'metavar': 'SESSION_NAME_OR_PATH',
+ 'help': 'The name or the path for the session that will be upgraded.'
+ },
+ *CLI_SESSION_UPGRADE_FLAGS
+ ],
+ 'upgrade-all': [
+ 'Upgrade all named sessions with the latest layout. A list of '
+ 'changes between different session versions can be found in the official '
+ 'documentation.',
+ *CLI_SESSION_UPGRADE_FLAGS
+ ],
+ }
+ }
}
@@ -54,6 +91,8 @@ def generate_subparsers(root, parent_parser, definitions):
)
for command, properties in definitions.items():
is_subparser = isinstance(properties, dict)
+ properties = properties.copy()
+
descr = properties.pop('help', None) if is_subparser else properties.pop(0)
command_parser = actions.add_parser(command, description=descr)
command_parser.root = root
@@ -62,7 +101,9 @@ def generate_subparsers(root, parent_parser, definitions):
continue
for argument in properties:
- command_parser.add_argument(**argument)
+ argument = argument.copy()
+ variadic = argument.pop('variadic', [])
+ command_parser.add_argument(*variadic, **argument)
parser = HTTPieManagerArgumentParser(
|
from textwrap import dedent
from httpie.cli.argparser import HTTPieManagerArgumentParser
from httpie import __version__
COMMANDS = {
'plugins': {
'help': 'Manage HTTPie plugins.',
'install': [
'Install the given targets from PyPI '
'or from local paths.',
{
'dest': 'targets',
'nargs': '+',
'help': 'targets to install'
}
],
'upgrade': [
'Upgrade the given plugins',
{
'dest': 'targets',
'nargs': '+',
'help': 'targets to upgrade'
}
],
'uninstall': [
'Uninstall the given HTTPie plugins.',
{
'dest': 'targets',
'nargs': '+',
'help': 'targets to install'
}
],
'list': [
'List all installed HTTPie plugins.'
],
},
}
def missing_subcommand(*args) -> str:
base = COMMANDS
for arg in args:
base = base[arg]
assert isinstance(base, dict)
subcommands = ', '.join(map(repr, base.keys()))
return f'Please specify one of these: {subcommands}'
def generate_subparsers(root, parent_parser, definitions):
action_dest = '_'.join(parent_parser.prog.split()[1:] + ['action'])
actions = parent_parser.add_subparsers(
dest=action_dest
)
for command, properties in definitions.items():
is_subparser = isinstance(properties, dict)
descr = properties.pop('help', None) if is_subparser else properties.pop(0)
command_parser = actions.add_parser(command, description=descr)
command_parser.root = root
if is_subparser:
generate_subparsers(root, command_parser, properties)
continue
for argument in properties:
command_parser.add_argument(**argument)
parser = HTTPieManagerArgumentParser(
prog='httpie',
description=dedent(
'''
Managing interface for the HTTPie itself. <https://httpie.io/docs#manager>
Be aware that you might be looking for http/https commands for sending
HTTP requests. This command is only available for managing the HTTPie
plugins and the configuration around it.
'''
),
)
parser.add_argument(
'--debug',
action='store_true',
default=False,
help='''
Prints the exception traceback should one occur, as well as other
information useful for debugging HTTPie itself and for reporting bugs.
'''
)
parser.add_argument(
'--traceback',
action='store_true',
default=False,
help='''
Prints the exception traceback should one occur.
'''
)
parser.add_argument(
'--version',
action='version',
version=__version__,
help='''
Show version and exit.
'''
)
generate_subparsers(parser, parser, COMMANDS)
|
PYSEC-2022-167
|
httpie/manager/core.py
|
@@ -1,9 +1,11 @@
import argparse
+from typing import Optional
from httpie.context import Environment
from httpie.manager.plugins import PluginInstaller
from httpie.status import ExitStatus
from httpie.manager.cli import missing_subcommand, parser
+from httpie.manager.tasks import CLI_TASKS
MSG_COMMAND_CONFUSION = '''\
This command is only for managing HTTPie plugins.
@@ -22,12 +24,21 @@
'''.rstrip("\n").format(args='POST pie.dev/post hello=world')
+def dispatch_cli_task(env: Environment, action: Optional[str], args: argparse.Namespace) -> ExitStatus:
+ if action is None:
+ parser.error(missing_subcommand('cli'))
+
+ return CLI_TASKS[action](env, args)
+
+
def program(args: argparse.Namespace, env: Environment) -> ExitStatus:
if args.action is None:
parser.error(MSG_NAKED_INVOCATION)
if args.action == 'plugins':
plugins = PluginInstaller(env, debug=args.debug)
return plugins.run(args.plugins_action, args)
+ elif args.action == 'cli':
+ return dispatch_cli_task(env, args.cli_action, args)
return ExitStatus.SUCCESS
|
import argparse
from httpie.context import Environment
from httpie.manager.plugins import PluginInstaller
from httpie.status import ExitStatus
from httpie.manager.cli import missing_subcommand, parser
MSG_COMMAND_CONFUSION = '''\
This command is only for managing HTTPie plugins.
To send a request, please use the http/https commands:
$ http {args}
$ https {args}
'''
# noinspection PyStringFormat
MSG_NAKED_INVOCATION = f'''\
{missing_subcommand()}
{MSG_COMMAND_CONFUSION}
'''.rstrip("\n").format(args='POST pie.dev/post hello=world')
def program(args: argparse.Namespace, env: Environment) -> ExitStatus:
if args.action is None:
parser.error(MSG_NAKED_INVOCATION)
if args.action == 'plugins':
plugins = PluginInstaller(env, debug=args.debug)
return plugins.run(args.plugins_action, args)
return ExitStatus.SUCCESS
|
PYSEC-2022-167
|
httpie/sessions.py
|
@@ -6,15 +6,17 @@
import re
from http.cookies import SimpleCookie
+from http.cookiejar import Cookie
from pathlib import Path
-from typing import Iterable, Optional, Union
-from urllib.parse import urlsplit
+from typing import Any, Dict, Optional, Union
from requests.auth import AuthBase
-from requests.cookies import RequestsCookieJar, create_cookie
+from requests.cookies import RequestsCookieJar, remove_cookie_by_name
+from .context import Environment
from .cli.dicts import HTTPHeadersDict
from .config import BaseConfigDict, DEFAULT_CONFIG_DIR
+from .utils import url_as_host
from .plugins.registry import plugin_manager
@@ -26,27 +28,88 @@
# <https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#Requests>
SESSION_IGNORED_HEADER_PREFIXES = ['Content-', 'If-']
+# Cookie related options
+KEPT_COOKIE_OPTIONS = ['name', 'expires', 'path', 'value', 'domain', 'secure']
+DEFAULT_COOKIE_PATH = '/'
+
+INSECURE_COOKIE_JAR_WARNING = '''\
+Outdated layout detected for the current session. Please consider updating it,
+in order to not get affected by potential security problems.
+
+For fixing the current session:
+
+ With binding all cookies to the current host (secure):
+ $ httpie cli sessions upgrade --bind-cookies {hostname} {session_id}
+
+ Without binding cookies (leaving them as is) (insecure):
+ $ httpie cli sessions upgrade {hostname} {session_id}
+'''
+
+INSECURE_COOKIE_JAR_WARNING_FOR_NAMED_SESSIONS = '''\
+
+For fixing all named sessions:
+
+ With binding all cookies to the current host (secure):
+ $ httpie cli sessions upgrade-all --bind-cookies
+
+ Without binding cookies (leaving them as is) (insecure):
+ $ httpie cli sessions upgrade-all
+
+See https://pie.co/docs/security for more information.
+'''
+
+
+def is_anonymous_session(session_name: str) -> bool:
+ return os.path.sep in session_name
+
+
+def materialize_cookie(cookie: Cookie) -> Dict[str, Any]:
+ materialized_cookie = {
+ option: getattr(cookie, option)
+ for option in KEPT_COOKIE_OPTIONS
+ }
+
+ if (
+ cookie._rest.get('is_explicit_none')
+ and materialized_cookie['domain'] == ''
+ ):
+ materialized_cookie['domain'] = None
+
+ return materialized_cookie
+
def get_httpie_session(
+ env: Environment,
config_dir: Path,
session_name: str,
host: Optional[str],
url: str,
+ *,
+ refactor_mode: bool = False
) -> 'Session':
- if os.path.sep in session_name:
+ bound_hostname = host or url_as_host(url)
+ if not bound_hostname:
+ # HACK/FIXME: httpie-unixsocket's URLs have no hostname.
+ bound_hostname = 'localhost'
+
+ # host:port => host_port
+ hostname = bound_hostname.replace(':', '_')
+ if is_anonymous_session(session_name):
path = os.path.expanduser(session_name)
+ session_id = path
else:
- hostname = host or urlsplit(url).netloc.split('@')[-1]
- if not hostname:
- # HACK/FIXME: httpie-unixsocket's URLs have no hostname.
- hostname = 'localhost'
-
- # host:port => host_port
- hostname = hostname.replace(':', '_')
path = (
config_dir / SESSIONS_DIR_NAME / hostname / f'{session_name}.json'
)
- session = Session(path)
+ session_id = session_name
+
+ session = Session(
+ path,
+ env=env,
+ session_id=session_id,
+ bound_host=bound_hostname.split(':')[0],
+ refactor_mode=refactor_mode
+ )
session.load()
return session
@@ -55,15 +118,86 @@ class Session(BaseConfigDict):
helpurl = 'https://httpie.io/docs#sessions'
about = 'HTTPie session file'
- def __init__(self, path: Union[str, Path]):
+ def __init__(
+ self,
+ path: Union[str, Path],
+ env: Environment,
+ bound_host: str,
+ session_id: str,
+ refactor_mode: bool = False,
+ ):
super().__init__(path=Path(path))
self['headers'] = {}
- self['cookies'] = {}
+ self['cookies'] = []
self['auth'] = {
'type': None,
'username': None,
'password': None
}
+ self.env = env
+ self.cookie_jar = RequestsCookieJar()
+ self.session_id = session_id
+ self.bound_host = bound_host
+ self.refactor_mode = refactor_mode
+
+ def pre_process_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
+ cookies = data.get('cookies')
+ if isinstance(cookies, dict):
+ normalized_cookies = [
+ {
+ 'name': key,
+ **value
+ }
+ for key, value in cookies.items()
+ ]
+ elif isinstance(cookies, list):
+ normalized_cookies = cookies
+ else:
+ normalized_cookies = []
+
+ should_issue_warning = False
+ for cookie in normalized_cookies:
+ domain = cookie.get('domain', '')
+ if domain == '' and isinstance(cookies, dict):
+ should_issue_warning = True
+ elif domain is None:
+ # domain = None means explicitly lack of cookie, though
+ # requests requires domain to be string so we'll cast it
+ # manually.
+ cookie['domain'] = ''
+ cookie['rest'] = {'is_explicit_none': True}
+
+ self.cookie_jar.set(**cookie)
+
+ if should_issue_warning and not self.refactor_mode:
+ warning = INSECURE_COOKIE_JAR_WARNING.format(hostname=self.bound_host, session_id=self.session_id)
+ if not is_anonymous_session(self.session_id):
+ warning += INSECURE_COOKIE_JAR_WARNING_FOR_NAMED_SESSIONS
+
+ self.env.log_error(
+ warning,
+ level='warning'
+ )
+
+ return data
+
+ def post_process_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
+ cookies = data.get('cookies')
+ # Save in the old-style fashion
+
+ normalized_cookies = [
+ materialize_cookie(cookie)
+ for cookie in self.cookie_jar
+ ]
+ if isinstance(cookies, dict):
+ data['cookies'] = {
+ cookie.pop('name'): cookie
+ for cookie in normalized_cookies
+ }
+ else:
+ data['cookies'] = normalized_cookies
+
+ return data
def update_headers(self, request_headers: HTTPHeadersDict):
"""
@@ -73,10 +207,10 @@ def update_headers(self, request_headers: HTTPHeadersDict):
"""
headers = self.headers
for name, value in request_headers.copy().items():
-
if value is None:
continue # Ignore explicitly unset headers
+ original_value = value
if type(value) is not str:
value = value.decode()
@@ -85,8 +219,15 @@ def update_headers(self, request_headers: HTTPHeadersDict):
if name.lower() == 'cookie':
for cookie_name, morsel in SimpleCookie(value).items():
- self['cookies'][cookie_name] = {'value': morsel.value}
- del request_headers[name]
+ if not morsel['path']:
+ morsel['path'] = DEFAULT_COOKIE_PATH
+ self.cookie_jar.set(cookie_name, morsel)
+
+ all_cookie_headers = request_headers.getall(name)
+ if len(all_cookie_headers) > 1:
+ all_cookie_headers.remove(original_value)
+ else:
+ request_headers.popall(name)
continue
for prefix in SESSION_IGNORED_HEADER_PREFIXES:
@@ -103,23 +244,21 @@ def headers(self) -> HTTPHeadersDict:
@property
def cookies(self) -> RequestsCookieJar:
- jar = RequestsCookieJar()
- for name, cookie_dict in self['cookies'].items():
- jar.set_cookie(create_cookie(
- name, cookie_dict.pop('value'), **cookie_dict))
- jar.clear_expired_cookies()
- return jar
+ self.cookie_jar.clear_expired_cookies()
+ return self.cookie_jar
@cookies.setter
def cookies(self, jar: RequestsCookieJar):
- # <https://docs.python.org/3/library/cookielib.html#cookie-objects>
- stored_attrs = ['value', 'path', 'secure', 'expires']
- self['cookies'] = {}
- for cookie in jar:
- self['cookies'][cookie.name] = {
- attname: getattr(cookie, attname)
- for attname in stored_attrs
- }
+ self.cookie_jar = jar
+
+ def remove_cookies(self, cookies: Dict[str, str]):
+ for cookie in cookies:
+ remove_cookie_by_name(
+ self.cookie_jar,
+ cookie['name'],
+ domain=cookie.get('domain', None),
+ path=cookie.get('path', None)
+ )
@property
def auth(self) -> Optional[AuthBase]:
@@ -154,8 +293,3 @@ def auth(self) -> Optional[AuthBase]:
def auth(self, auth: dict):
assert {'type', 'raw_auth'} == auth.keys()
self['auth'] = auth
-
- def remove_cookies(self, names: Iterable[str]):
- for name in names:
- if name in self['cookies']:
- del self['cookies'][name]
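To summarize the data-layout change this patch makes, here is a hedged, illustrative pair of session cookie snippets (the cookie value and domain are made up). pre_process_data() above accepts both shapes, and post_process_data() writes back whichever shape was loaded.

# Old-style layout: a dict keyed by cookie name, with no host binding.
# A missing or empty 'domain' in this layout is what triggers
# INSECURE_COOKIE_JAR_WARNING on load.
OLD_STYLE_COOKIES = {
    'foo': {'value': 'bar'},
}

# New-style layout: a list of dicts carrying the KEPT_COOKIE_OPTIONS fields,
# so each cookie can be bound to a specific host.
NEW_STYLE_COOKIES = [
    {
        'name': 'foo',
        'value': 'bar',
        'domain': 'pie.dev',   # made-up host
        'path': '/',
        'secure': False,
        'expires': None,
    },
]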
|
"""
Persistent, JSON-serialized sessions.
"""
import os
import re
from http.cookies import SimpleCookie
from pathlib import Path
from typing import Iterable, Optional, Union
from urllib.parse import urlsplit
from requests.auth import AuthBase
from requests.cookies import RequestsCookieJar, create_cookie
from .cli.dicts import HTTPHeadersDict
from .config import BaseConfigDict, DEFAULT_CONFIG_DIR
from .plugins.registry import plugin_manager
SESSIONS_DIR_NAME = 'sessions'
DEFAULT_SESSIONS_DIR = DEFAULT_CONFIG_DIR / SESSIONS_DIR_NAME
VALID_SESSION_NAME_PATTERN = re.compile('^[a-zA-Z0-9_.-]+$')
# Request headers starting with these prefixes won't be stored in sessions.
# They are specific to each request.
# <https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#Requests>
SESSION_IGNORED_HEADER_PREFIXES = ['Content-', 'If-']
def get_httpie_session(
config_dir: Path,
session_name: str,
host: Optional[str],
url: str,
) -> 'Session':
if os.path.sep in session_name:
path = os.path.expanduser(session_name)
else:
hostname = host or urlsplit(url).netloc.split('@')[-1]
if not hostname:
# HACK/FIXME: httpie-unixsocket's URLs have no hostname.
hostname = 'localhost'
# host:port => host_port
hostname = hostname.replace(':', '_')
path = (
config_dir / SESSIONS_DIR_NAME / hostname / f'{session_name}.json'
)
session = Session(path)
session.load()
return session
class Session(BaseConfigDict):
helpurl = 'https://httpie.io/docs#sessions'
about = 'HTTPie session file'
def __init__(self, path: Union[str, Path]):
super().__init__(path=Path(path))
self['headers'] = {}
self['cookies'] = {}
self['auth'] = {
'type': None,
'username': None,
'password': None
}
def update_headers(self, request_headers: HTTPHeadersDict):
"""
Update the session headers with the request ones while ignoring
certain name prefixes.
"""
headers = self.headers
for name, value in request_headers.copy().items():
if value is None:
continue # Ignore explicitly unset headers
if type(value) is not str:
value = value.decode()
if name.lower() == 'user-agent' and value.startswith('HTTPie/'):
continue
if name.lower() == 'cookie':
for cookie_name, morsel in SimpleCookie(value).items():
self['cookies'][cookie_name] = {'value': morsel.value}
del request_headers[name]
continue
for prefix in SESSION_IGNORED_HEADER_PREFIXES:
if name.lower().startswith(prefix.lower()):
break
else:
headers[name] = value
self['headers'] = dict(headers)
@property
def headers(self) -> HTTPHeadersDict:
return HTTPHeadersDict(self['headers'])
@property
def cookies(self) -> RequestsCookieJar:
jar = RequestsCookieJar()
for name, cookie_dict in self['cookies'].items():
jar.set_cookie(create_cookie(
name, cookie_dict.pop('value'), **cookie_dict))
jar.clear_expired_cookies()
return jar
@cookies.setter
def cookies(self, jar: RequestsCookieJar):
# <https://docs.python.org/3/library/cookielib.html#cookie-objects>
stored_attrs = ['value', 'path', 'secure', 'expires']
self['cookies'] = {}
for cookie in jar:
self['cookies'][cookie.name] = {
attname: getattr(cookie, attname)
for attname in stored_attrs
}
@property
def auth(self) -> Optional[AuthBase]:
auth = self.get('auth', None)
if not auth or not auth['type']:
return
plugin = plugin_manager.get_auth_plugin(auth['type'])()
credentials = {'username': None, 'password': None}
try:
# New style
plugin.raw_auth = auth['raw_auth']
except KeyError:
# Old style
credentials = {
'username': auth['username'],
'password': auth['password'],
}
else:
if plugin.auth_parse:
from .cli.argtypes import parse_auth
parsed = parse_auth(plugin.raw_auth)
credentials = {
'username': parsed.key,
'password': parsed.value,
}
return plugin.get_auth(**credentials)
@auth.setter
def auth(self, auth: dict):
assert {'type', 'raw_auth'} == auth.keys()
self['auth'] = auth
def remove_cookies(self, names: Iterable[str]):
for name in names:
if name in self['cookies']:
del self['cookies'][name]
|
PYSEC-2022-167
|
httpie/utils.py
|
@@ -9,6 +9,7 @@
from http.cookiejar import parse_ns_headers
from pathlib import Path
from pprint import pformat
+from urllib.parse import urlsplit
from typing import Any, List, Optional, Tuple, Callable, Iterable, TypeVar
import requests.auth
@@ -237,3 +238,7 @@ def unwrap_context(exc: Exception) -> Optional[Exception]:
return unwrap_context(context)
else:
return exc
+
+
+def url_as_host(url: str) -> str:
+ return urlsplit(url).netloc.split('@')[-1]
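A couple of quick, hedged examples of what the new url_as_host() helper returns; the URLs are arbitrary and the function body is copied from the diff above for self-containment.

from urllib.parse import urlsplit

def url_as_host(url: str) -> str:
    return urlsplit(url).netloc.split('@')[-1]

assert url_as_host('https://example.org/path') == 'example.org'
assert url_as_host('http://user:[email protected]:8080/get') == 'pie.dev:8080'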
|
import json
import mimetypes
import re
import sys
import time
import sysconfig
from collections import OrderedDict
from http.cookiejar import parse_ns_headers
from pathlib import Path
from pprint import pformat
from typing import Any, List, Optional, Tuple, Callable, Iterable, TypeVar
import requests.auth
RE_COOKIE_SPLIT = re.compile(r', (?=[^ ;]+=)')
Item = Tuple[str, Any]
Items = List[Item]
T = TypeVar("T")
class JsonDictPreservingDuplicateKeys(OrderedDict):
"""A specialized JSON dict preserving duplicate keys."""
# Python versions prior to 3.8 suffer from an issue with multiple keys with the same name.
# `json.dumps(obj, indent=N, sort_keys=True)` will output sorted keys when they are unique, and
# duplicate keys will be outputted as they were defined in the original data.
# See <https://bugs.python.org/issue23493#msg400929> for the behavior change between Python versions.
SUPPORTS_SORTING = sys.version_info >= (3, 8)
def __init__(self, items: Items):
self._items = items
self._ensure_items_used()
def _ensure_items_used(self) -> None:
"""HACK: Force `json.dumps()` to use `self.items()` instead of an empty dict.
Two JSON encoders are available on CPython: pure-Python (1) and C (2) implementations.
(1) The pure-python implementation will do a simple `if not dict: return '{}'`,
and we could fake that check by implementing the `__bool__()` method.
Source:
- <https://github.com/python/cpython/blob/9d318ad/Lib/json/encoder.py#L334-L336>
(2) On the other hand, the C implementation will do a check on the number of
items contained inside the dict, using a verification on `dict->ma_used`, which
is updated only when an item is added/removed from the dict. For that case,
there is no workaround but to add an item into the dict.
Sources:
- <https://github.com/python/cpython/blob/9d318ad/Modules/_json.c#L1581-L1582>
- <https://github.com/python/cpython/blob/9d318ad/Include/cpython/dictobject.h#L53>
- <https://github.com/python/cpython/blob/9d318ad/Include/cpython/dictobject.h#L17-L18>
To please both implementations, we simply add one item to the dict.
"""
if self._items:
self['__hack__'] = '__hack__'
def items(self) -> Items:
"""Return all items, duplicate ones included.
"""
return self._items
def load_json_preserve_order_and_dupe_keys(s):
return json.loads(s, object_pairs_hook=JsonDictPreservingDuplicateKeys)
def repr_dict(d: dict) -> str:
return pformat(d)
def humanize_bytes(n, precision=2):
# Author: Doug Latornell
# Licence: MIT
# URL: https://code.activestate.com/recipes/577081/
"""Return a humanized string representation of a number of bytes.
>>> humanize_bytes(1)
'1 B'
>>> humanize_bytes(1024, precision=1)
'1.0 kB'
>>> humanize_bytes(1024 * 123, precision=1)
'123.0 kB'
>>> humanize_bytes(1024 * 12342, precision=1)
'12.1 MB'
>>> humanize_bytes(1024 * 12342, precision=2)
'12.05 MB'
>>> humanize_bytes(1024 * 1234, precision=2)
'1.21 MB'
>>> humanize_bytes(1024 * 1234 * 1111, precision=2)
'1.31 GB'
>>> humanize_bytes(1024 * 1234 * 1111, precision=1)
'1.3 GB'
"""
abbrevs = [
(1 << 50, 'PB'),
(1 << 40, 'TB'),
(1 << 30, 'GB'),
(1 << 20, 'MB'),
(1 << 10, 'kB'),
(1, 'B')
]
if n == 1:
return '1 B'
for factor, suffix in abbrevs:
if n >= factor:
break
# noinspection PyUnboundLocalVariable
return f'{n / factor:.{precision}f} {suffix}'
class ExplicitNullAuth(requests.auth.AuthBase):
"""Forces requests to ignore the ``.netrc``.
<https://github.com/psf/requests/issues/2773#issuecomment-174312831>
"""
def __call__(self, r):
return r
def get_content_type(filename):
"""
Return the content type for ``filename`` in format appropriate
for Content-Type headers, or ``None`` if the file type is unknown
to ``mimetypes``.
"""
return mimetypes.guess_type(filename, strict=False)[0]
def split_cookies(cookies):
"""
When ``requests`` stores cookies in ``response.headers['Set-Cookie']``
it concatenates all of them through ``, ``.
This function splits cookies apart, being careful not to
split on ``, `` when it is part of a cookie value.
"""
if not cookies:
return []
return RE_COOKIE_SPLIT.split(cookies)
def get_expired_cookies(
cookies: str,
now: float = None
) -> List[dict]:
now = now or time.time()
def is_expired(expires: Optional[float]) -> bool:
return expires is not None and expires <= now
attr_sets: List[Tuple[str, str]] = parse_ns_headers(
split_cookies(cookies)
)
cookies = [
# The first attr name is the cookie name.
dict(attrs[1:], name=attrs[0][0])
for attrs in attr_sets
]
_max_age_to_expires(cookies=cookies, now=now)
return [
{
'name': cookie['name'],
'path': cookie.get('path', '/')
}
for cookie in cookies
if is_expired(expires=cookie.get('expires'))
]
def _max_age_to_expires(cookies, now):
"""
Translate `max-age` into `expires` for Requests to take it into account.
HACK/FIXME: <https://github.com/psf/requests/issues/5743>
"""
for cookie in cookies:
if 'expires' in cookie:
continue
max_age = cookie.get('max-age')
if max_age and max_age.isdigit():
cookie['expires'] = now + float(max_age)
def parse_content_type_header(header):
"""Borrowed from requests."""
tokens = header.split(';')
content_type, params = tokens[0].strip(), tokens[1:]
params_dict = {}
items_to_strip = "\"' "
for param in params:
param = param.strip()
if param:
key, value = param, True
index_of_equals = param.find("=")
if index_of_equals != -1:
key = param[:index_of_equals].strip(items_to_strip)
value = param[index_of_equals + 1:].strip(items_to_strip)
params_dict[key.lower()] = value
return content_type, params_dict
def as_site(path: Path) -> Path:
site_packages_path = sysconfig.get_path(
'purelib',
vars={'base': str(path)}
)
return Path(site_packages_path)
def split(iterable: Iterable[T], key: Callable[[T], bool]) -> Tuple[List[T], List[T]]:
left, right = [], []
for item in iterable:
if key(item):
left.append(item)
else:
right.append(item)
return left, right
def unwrap_context(exc: Exception) -> Optional[Exception]:
context = exc.__context__
if isinstance(context, Exception):
return unwrap_context(context)
else:
return exc
|
PYSEC-2022-167
|
setup.py
|
@@ -11,6 +11,7 @@
tests_require = [
'pytest',
'pytest-httpbin>=0.0.6',
+ 'pytest-lazy-fixture>=0.0.6',
'responses',
]
dev_require = [
|
# This is purely the result of trial and error.
import sys
from setuptools import setup, find_packages
import httpie
# Note: keep requirements here to ease distributions packaging
tests_require = [
'pytest',
'pytest-httpbin>=0.0.6',
'responses',
]
dev_require = [
*tests_require,
'flake8',
'flake8-comprehensions',
'flake8-deprecated',
'flake8-mutable',
'flake8-tuple',
'pyopenssl',
'pytest-cov',
'pyyaml',
'twine',
'wheel',
'Jinja2'
]
install_requires = [
'charset_normalizer>=2.0.0',
'defusedxml>=0.6.0',
'requests[socks]>=2.22.0',
'Pygments>=2.5.2',
'requests-toolbelt>=0.9.1',
'multidict>=4.7.0',
'setuptools',
'importlib-metadata>=1.4.0; python_version < "3.8"',
]
install_requires_win_only = [
'colorama>=0.2.4',
]
# Conditional dependencies:
# sdist
if 'bdist_wheel' not in sys.argv:
if 'win32' in str(sys.platform).lower():
# Terminal colors for Windows
install_requires.extend(install_requires_win_only)
# bdist_wheel
extras_require = {
'dev': dev_require,
'test': tests_require,
# https://wheel.readthedocs.io/en/latest/#defining-conditional-dependencies
':sys_platform == "win32"': install_requires_win_only,
}
def long_description():
with open('README.md', encoding='utf-8') as f:
return f.read()
setup(
name='httpie',
version=httpie.__version__,
description=httpie.__doc__.strip(),
long_description=long_description(),
long_description_content_type='text/markdown',
url='https://httpie.io/',
download_url=f'https://github.com/httpie/httpie/archive/{httpie.__version__}.tar.gz',
author=httpie.__author__,
author_email='[email protected]',
license=httpie.__licence__,
packages=find_packages(include=['httpie', 'httpie.*']),
entry_points={
'console_scripts': [
'http = httpie.__main__:main',
'https = httpie.__main__:main',
'httpie = httpie.manager.__main__:main',
],
},
python_requires='>=3.7',
extras_require=extras_require,
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development',
'Topic :: System :: Networking',
'Topic :: Terminals',
'Topic :: Text Processing',
'Topic :: Utilities'
],
project_urls={
'GitHub': 'https://github.com/httpie/httpie',
'Twitter': 'https://twitter.com/httpie',
'Discord': 'https://httpie.io/discord',
'Documentation': 'https://httpie.io/docs',
'Online Demo': 'https://httpie.io/run',
},
)
|
PYSEC-2022-167
|
tests/conftest.py
|
@@ -4,7 +4,11 @@
import pytest
from pytest_httpbin import certs
-from .utils import HTTPBIN_WITH_CHUNKED_SUPPORT_DOMAIN, HTTPBIN_WITH_CHUNKED_SUPPORT
+from .utils import ( # noqa
+ HTTPBIN_WITH_CHUNKED_SUPPORT_DOMAIN,
+ HTTPBIN_WITH_CHUNKED_SUPPORT,
+ mock_env
+)
from .utils.plugins_cli import ( # noqa
broken_plugin,
dummy_plugin,
|
import os
import socket
import pytest
from pytest_httpbin import certs
from .utils import HTTPBIN_WITH_CHUNKED_SUPPORT_DOMAIN, HTTPBIN_WITH_CHUNKED_SUPPORT
from .utils.plugins_cli import ( # noqa
broken_plugin,
dummy_plugin,
dummy_plugins,
httpie_plugins,
httpie_plugins_success,
interface,
)
from .utils.http_server import http_server # noqa
@pytest.fixture(scope='function', autouse=True)
def httpbin_add_ca_bundle(monkeypatch):
"""
Make pytest-httpbin's CA trusted by default.
(Same as `httpbin_ca_bundle`, just auto-used.).
"""
monkeypatch.setenv('REQUESTS_CA_BUNDLE', certs.where())
@pytest.fixture(scope='function')
def httpbin_secure_untrusted(monkeypatch, httpbin_secure):
"""
Like the `httpbin_secure` fixture, but without the
make-CA-trusted-by-default.
"""
monkeypatch.delenv('REQUESTS_CA_BUNDLE')
return httpbin_secure
@pytest.fixture(scope='session')
def _httpbin_with_chunked_support_available():
try:
socket.gethostbyname(HTTPBIN_WITH_CHUNKED_SUPPORT_DOMAIN)
return True
except OSError:
return False
@pytest.fixture(scope='function')
def httpbin_with_chunked_support(_httpbin_with_chunked_support_available):
if _httpbin_with_chunked_support_available:
return HTTPBIN_WITH_CHUNKED_SUPPORT
pytest.skip(f'{HTTPBIN_WITH_CHUNKED_SUPPORT_DOMAIN} not resolvable')
@pytest.fixture(autouse=True, scope='session')
def pyopenssl_inject():
"""
Injects `pyOpenSSL` module to make sure `requests` will use it.
<https://github.com/psf/requests/pull/5443#issuecomment-645740394>
"""
if os.getenv('HTTPIE_TEST_WITH_PYOPENSSL', '0') == '1':
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ModuleNotFoundError:
pytest.fail('Missing "pyopenssl" module.')
yield
|
PYSEC-2022-167
|
tests/fixtures/__init__.py
|
@@ -1,6 +1,9 @@
"""Test data"""
+import json
from pathlib import Path
+from typing import Optional, Dict, Any
+import httpie
from httpie.encoding import UTF8
from httpie.output.formatters.xml import pretty_xml, parse_xml
@@ -19,10 +22,20 @@ def patharg(path):
JSON_FILE_PATH = FIXTURES_ROOT / 'test.json'
JSON_WITH_DUPE_KEYS_FILE_PATH = FIXTURES_ROOT / 'test_with_dupe_keys.json'
BIN_FILE_PATH = FIXTURES_ROOT / 'test.bin'
+
XML_FILES_PATH = FIXTURES_ROOT / 'xmldata'
XML_FILES_VALID = list((XML_FILES_PATH / 'valid').glob('*_raw.xml'))
XML_FILES_INVALID = list((XML_FILES_PATH / 'invalid').glob('*.xml'))
+SESSION_FILES_PATH = FIXTURES_ROOT / 'session_data'
+SESSION_FILES_OLD = sorted((SESSION_FILES_PATH / 'old').glob('*.json'))
+SESSION_FILES_NEW = sorted((SESSION_FILES_PATH / 'new').glob('*.json'))
+
+SESSION_VARIABLES = {
+ '__version__': httpie.__version__,
+ '__host__': 'null',
+}
+
FILE_PATH_ARG = patharg(FILE_PATH)
BIN_FILE_PATH_ARG = patharg(BIN_FILE_PATH)
JSON_FILE_PATH_ARG = patharg(JSON_FILE_PATH)
@@ -40,3 +53,14 @@ def patharg(path):
UNICODE = FILE_CONTENT
XML_DATA_RAW = '<?xml version="1.0" encoding="utf-8"?><root><e>text</e></root>'
XML_DATA_FORMATTED = pretty_xml(parse_xml(XML_DATA_RAW))
+
+
+def read_session_file(session_file: Path, *, extra_variables: Optional[Dict[str, str]] = None) -> Any:
+ with open(session_file) as stream:
+ data = stream.read()
+
+ session_vars = {**SESSION_VARIABLES, **(extra_variables or {})}
+ for variable, value in session_vars.items():
+ data = data.replace(variable, value)
+
+ return json.loads(data)
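A hedged usage sketch for the read_session_file() helper added above; the fixture path and the substituted host value are assumptions, not part of the patch.

# Assumes at least one old-style fixture exists under tests/fixtures/session_data/old/.
data = read_session_file(
    SESSION_FILES_OLD[0],
    extra_variables={'__host__': '"pie.dev"'},  # made-up host, quoted so the file stays valid JSON
)
# Placeholders such as __version__ and __host__ are substituted before json.loads() runs.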
|
"""Test data"""
from pathlib import Path
from httpie.encoding import UTF8
from httpie.output.formatters.xml import pretty_xml, parse_xml
def patharg(path):
"""
Back slashes need to be escaped in ITEM args,
even in Windows paths.
"""
return str(path).replace('\\', '\\\\\\')
FIXTURES_ROOT = Path(__file__).parent
FILE_PATH = FIXTURES_ROOT / 'test.txt'
JSON_FILE_PATH = FIXTURES_ROOT / 'test.json'
JSON_WITH_DUPE_KEYS_FILE_PATH = FIXTURES_ROOT / 'test_with_dupe_keys.json'
BIN_FILE_PATH = FIXTURES_ROOT / 'test.bin'
XML_FILES_PATH = FIXTURES_ROOT / 'xmldata'
XML_FILES_VALID = list((XML_FILES_PATH / 'valid').glob('*_raw.xml'))
XML_FILES_INVALID = list((XML_FILES_PATH / 'invalid').glob('*.xml'))
FILE_PATH_ARG = patharg(FILE_PATH)
BIN_FILE_PATH_ARG = patharg(BIN_FILE_PATH)
JSON_FILE_PATH_ARG = patharg(JSON_FILE_PATH)
# Strip because we don't want new lines in the data so that we can
# easily count occurrences also when embedded in JSON (where the new
# line would be escaped).
FILE_CONTENT = FILE_PATH.read_text(encoding=UTF8).strip()
ASCII_FILE_CONTENT = "random text" * 10
JSON_FILE_CONTENT = JSON_FILE_PATH.read_text(encoding=UTF8)
BIN_FILE_CONTENT = BIN_FILE_PATH.read_bytes()
UNICODE = FILE_CONTENT
XML_DATA_RAW = '<?xml version="1.0" encoding="utf-8"?><root><e>text</e></root>'
XML_DATA_FORMATTED = pretty_xml(parse_xml(XML_DATA_RAW))
|
PYSEC-2022-167
|
tests/test_plugins_cli.py
|
@@ -1,7 +1,6 @@
import pytest
from httpie.status import ExitStatus
-from tests.utils import httpie
from tests.utils.plugins_cli import parse_listing
@@ -149,45 +148,3 @@ def test_broken_plugins(httpie_plugins, httpie_plugins_success, dummy_plugin, br
# No warning now, since it is uninstalled.
data = parse_listing(httpie_plugins_success('list'))
assert len(data) == 1
-
-
[email protected]_installation
-def test_plugins_cli_error_message_without_args():
- # No arguments
- result = httpie(no_debug=True)
- assert result.exit_status == ExitStatus.ERROR
- assert 'usage: ' in result.stderr
- assert 'specify one of these' in result.stderr
- assert 'please use the http/https commands:' in result.stderr
-
-
[email protected](
- 'example', [
- 'pie.dev/get',
- 'DELETE localhost:8000/delete',
- 'POST pie.dev/post header:value a=b header_2:value x:=1'
- ]
-)
[email protected]_installation
-def test_plugins_cli_error_messages_with_example(example):
- result = httpie(*example.split(), no_debug=True)
- assert result.exit_status == ExitStatus.ERROR
- assert 'usage: ' in result.stderr
- assert f'http {example}' in result.stderr
- assert f'https {example}' in result.stderr
-
-
[email protected](
- 'example', [
- 'plugins unknown',
- 'plugins unknown.com A:B c=d',
- 'unknown.com UNPARSABLE????SYNTAX',
- ]
-)
[email protected]_installation
-def test_plugins_cli_error_messages_invalid_example(example):
- result = httpie(*example.split(), no_debug=True)
- assert result.exit_status == ExitStatus.ERROR
- assert 'usage: ' in result.stderr
- assert f'http {example}' not in result.stderr
- assert f'https {example}' not in result.stderr
|
import pytest
from httpie.status import ExitStatus
from tests.utils import httpie
from tests.utils.plugins_cli import parse_listing
@pytest.mark.requires_installation
def test_plugins_installation(httpie_plugins_success, interface, dummy_plugin):
lines = httpie_plugins_success('install', dummy_plugin.path)
assert lines[0].startswith(
f'Installing {dummy_plugin.path}'
)
assert f'Successfully installed {dummy_plugin.name}-{dummy_plugin.version}' in lines
assert interface.is_installed(dummy_plugin.name)
@pytest.mark.requires_installation
def test_plugin_installation_with_custom_config(httpie_plugins_success, interface, dummy_plugin):
interface.environment.config['default_options'] = ['--session-read-only', 'some-path.json', 'other', 'args']
interface.environment.config.save()
lines = httpie_plugins_success('install', dummy_plugin.path)
assert lines[0].startswith(
f'Installing {dummy_plugin.path}'
)
assert f'Successfully installed {dummy_plugin.name}-{dummy_plugin.version}' in lines
assert interface.is_installed(dummy_plugin.name)
@pytest.mark.requires_installation
def test_plugins_listing(httpie_plugins_success, interface, dummy_plugin):
httpie_plugins_success('install', dummy_plugin.path)
data = parse_listing(httpie_plugins_success('list'))
assert data == {
dummy_plugin.name: dummy_plugin.dump()
}
@pytest.mark.requires_installation
def test_plugins_listing_multiple(interface, httpie_plugins_success, dummy_plugins):
paths = [plugin.path for plugin in dummy_plugins]
httpie_plugins_success('install', *paths)
data = parse_listing(httpie_plugins_success('list'))
assert data == {
plugin.name: plugin.dump()
for plugin in dummy_plugins
}
@pytest.mark.requires_installation
def test_plugins_uninstall(interface, httpie_plugins_success, dummy_plugin):
httpie_plugins_success('install', dummy_plugin.path)
httpie_plugins_success('uninstall', dummy_plugin.name)
assert not interface.is_installed(dummy_plugin.name)
@pytest.mark.requires_installation
def test_plugins_listing_after_uninstall(interface, httpie_plugins_success, dummy_plugin):
httpie_plugins_success('install', dummy_plugin.path)
httpie_plugins_success('uninstall', dummy_plugin.name)
data = parse_listing(httpie_plugins_success('list'))
assert len(data) == 0
@pytest.mark.requires_installation
def test_plugins_uninstall_specific(interface, httpie_plugins_success):
new_plugin_1 = interface.make_dummy_plugin()
new_plugin_2 = interface.make_dummy_plugin()
target_plugin = interface.make_dummy_plugin()
httpie_plugins_success('install', new_plugin_1.path, new_plugin_2.path, target_plugin.path)
httpie_plugins_success('uninstall', target_plugin.name)
assert interface.is_installed(new_plugin_1.name)
assert interface.is_installed(new_plugin_2.name)
assert not interface.is_installed(target_plugin.name)
@pytest.mark.requires_installation
def test_plugins_installation_failed(httpie_plugins, interface):
plugin = interface.make_dummy_plugin(build=False)
result = httpie_plugins('install', plugin.path)
assert result.exit_status == ExitStatus.ERROR
assert result.stderr.splitlines()[-1].strip().startswith("Can't install")
@pytest.mark.requires_installation
def test_plugins_uninstall_non_existent(httpie_plugins, interface):
plugin = interface.make_dummy_plugin(build=False)
result = httpie_plugins('uninstall', plugin.name)
assert result.exit_status == ExitStatus.ERROR
assert (
result.stderr.splitlines()[-1].strip()
== f"Can't uninstall '{plugin.name}': package is not installed"
)
@pytest.mark.requires_installation
def test_plugins_double_uninstall(httpie_plugins, httpie_plugins_success, dummy_plugin):
httpie_plugins_success("install", dummy_plugin.path)
httpie_plugins_success("uninstall", dummy_plugin.name)
result = httpie_plugins("uninstall", dummy_plugin.name)
assert result.exit_status == ExitStatus.ERROR
assert (
result.stderr.splitlines()[-1].strip()
== f"Can't uninstall '{dummy_plugin.name}': package is not installed"
)
@pytest.mark.requires_installation
def test_plugins_upgrade(httpie_plugins, httpie_plugins_success, dummy_plugin):
httpie_plugins_success("install", dummy_plugin.path)
# Make a new version of the plugin
dummy_plugin.version = '2.0.0'
dummy_plugin.build()
httpie_plugins_success("upgrade", dummy_plugin.path)
data = parse_listing(httpie_plugins_success('list'))
assert data[dummy_plugin.name]['version'] == '2.0.0'
@pytest.mark.requires_installation
def test_broken_plugins(httpie_plugins, httpie_plugins_success, dummy_plugin, broken_plugin):
httpie_plugins_success("install", dummy_plugin.path, broken_plugin.path)
with pytest.warns(
UserWarning,
match=(
f'While loading "{broken_plugin.name}", an error'
' ocurred: broken plugin'
)
):
data = parse_listing(httpie_plugins_success('list'))
assert len(data) == 2
# We load before the uninstallation, so it will warn again.
with pytest.warns(UserWarning):
httpie_plugins_success("uninstall", broken_plugin.name)
# No warning now, since it is uninstalled.
data = parse_listing(httpie_plugins_success('list'))
assert len(data) == 1
@pytest.mark.requires_installation
def test_plugins_cli_error_message_without_args():
# No arguments
result = httpie(no_debug=True)
assert result.exit_status == ExitStatus.ERROR
assert 'usage: ' in result.stderr
assert 'specify one of these' in result.stderr
assert 'please use the http/https commands:' in result.stderr
@pytest.mark.parametrize(
'example', [
'pie.dev/get',
'DELETE localhost:8000/delete',
'POST pie.dev/post header:value a=b header_2:value x:=1'
]
)
@pytest.mark.requires_installation
def test_plugins_cli_error_messages_with_example(example):
result = httpie(*example.split(), no_debug=True)
assert result.exit_status == ExitStatus.ERROR
assert 'usage: ' in result.stderr
assert f'http {example}' in result.stderr
assert f'https {example}' in result.stderr
@pytest.mark.parametrize(
'example', [
'plugins unknown',
'plugins unknown.com A:B c=d',
'unknown.com UNPARSABLE????SYNTAX',
]
)
@pytest.mark.requires_installation
def test_plugins_cli_error_messages_invalid_example(example):
result = httpie(*example.split(), no_debug=True)
assert result.exit_status == ExitStatus.ERROR
assert 'usage: ' in result.stderr
assert f'http {example}' not in result.stderr
assert f'https {example}' not in result.stderr
|
PYSEC-2022-167
|
tests/test_sessions.py
|
@@ -1,20 +1,24 @@
import json
import os
import shutil
+from contextlib import contextmanager
from datetime import datetime
from unittest import mock
+from pathlib import Path
+from typing import Iterator
import pytest
from .fixtures import FILE_PATH_ARG, UNICODE
+from httpie.context import Environment
from httpie.encoding import UTF8
from httpie.plugins import AuthPlugin
from httpie.plugins.builtin import HTTPBasicAuth
from httpie.plugins.registry import plugin_manager
from httpie.sessions import Session
from httpie.utils import get_expired_cookies
from .test_auth_plugins import basic_auth
-from .utils import HTTP_OK, MockEnvironment, http, mk_config_dir
+from .utils import DUMMY_HOST, HTTP_OK, MockEnvironment, http, mk_config_dir
from base64 import b64encode
@@ -203,9 +207,9 @@ def test_session_with_cookie_followed_by_another_header(self, httpbin):
"""
self.start_session(httpbin)
session_data = {
- "headers": {
- "cookie": "...",
- "zzz": "..."
+ 'headers': {
+ 'cookie': '...',
+ 'zzz': '...'
}
}
session_path = self.config_dir / 'session-data.json'
@@ -307,7 +311,7 @@ class Plugin(AuthPlugin):
auth_type = 'test-prompted'
def get_auth(self, username=None, password=None):
- basic_auth_header = "Basic " + b64encode(self.raw_auth.encode()).strip().decode('latin1')
+ basic_auth_header = 'Basic ' + b64encode(self.raw_auth.encode()).strip().decode('latin1')
return basic_auth(basic_auth_header)
plugin_manager.register(Plugin)
@@ -359,7 +363,7 @@ def get_auth(self, username=None, password=None):
)
updated_session = json.loads(self.session_path.read_text(encoding=UTF8))
assert updated_session['auth']['type'] == 'test-saved'
- assert updated_session['auth']['raw_auth'] == "user:password"
+ assert updated_session['auth']['raw_auth'] == 'user:password'
plugin_manager.unregister(Plugin)
@@ -368,12 +372,12 @@ class TestExpiredCookies(CookieTestBase):
@pytest.mark.parametrize(
'initial_cookie, expired_cookie',
[
- ({'id': {'value': 123}}, 'id'),
- ({'id': {'value': 123}}, 'token')
+ ({'id': {'value': 123}}, {'name': 'id'}),
+ ({'id': {'value': 123}}, {'name': 'token'})
]
)
- def test_removes_expired_cookies_from_session_obj(self, initial_cookie, expired_cookie, httpbin):
- session = Session(self.config_dir)
+ def test_removes_expired_cookies_from_session_obj(self, initial_cookie, expired_cookie, httpbin, mock_env):
+ session = Session(self.config_dir, env=mock_env, session_id=None, bound_host=None)
session['cookies'] = initial_cookie
session.remove_cookies([expired_cookie])
assert expired_cookie not in session.cookies
@@ -524,3 +528,165 @@ def test_cookie_storage_priority(self, cli_cookie, set_cookie, expected, httpbin
updated_session = json.loads(self.session_path.read_text(encoding=UTF8))
assert updated_session['cookies']['cookie1']['value'] == expected
+
+
[email protected]
+def basic_session(httpbin, tmp_path):
+ session_path = tmp_path / 'session.json'
+ http(
+ '--session', str(session_path),
+ httpbin + '/get'
+ )
+ return session_path
+
+
+@contextmanager
+def open_session(path: Path, env: Environment, read_only: bool = False) -> Iterator[Session]:
+ session = Session(path, env, session_id='test', bound_host=DUMMY_HOST)
+ session.load()
+ yield session
+ if not read_only:
+ session.save()
+
+
+@contextmanager
+def open_raw_session(path: Path, read_only: bool = False) -> None:
+ with open(path) as stream:
+ raw_session = json.load(stream)
+
+ yield raw_session
+
+ if not read_only:
+ with open(path, 'w') as stream:
+ json.dump(raw_session, stream)
+
+
+def read_stderr(env: Environment) -> bytes:
+ env.stderr.seek(0)
+ stderr_data = env.stderr.read()
+ if isinstance(stderr_data, str):
+ return stderr_data.encode()
+ else:
+ return stderr_data
+
+
+def test_old_session_version_saved_as_is(basic_session, mock_env):
+ with open_session(basic_session, mock_env) as session:
+ session['__meta__'] = {'httpie': '0.0.1'}
+
+ with open_session(basic_session, mock_env, read_only=True) as session:
+ assert session['__meta__']['httpie'] == '0.0.1'
+
+
+def test_old_session_cookie_layout_warning(basic_session, mock_env):
+ with open_session(basic_session, mock_env) as session:
+ # Use the old layout & set a cookie
+ session['cookies'] = {}
+ session.cookies.set('foo', 'bar')
+
+ assert read_stderr(mock_env) == b''
+
+ with open_session(basic_session, mock_env, read_only=True) as session:
+ assert b'Outdated layout detected' in read_stderr(mock_env)
+
+
[email protected]('cookies, expect_warning', [
+ # Old-style cookie format
+ (
+ # Without 'domain' set
+ {'foo': {'value': 'bar'}},
+ True
+ ),
+ (
+ # With 'domain' set to empty string
+ {'foo': {'value': 'bar', 'domain': ''}},
+ True
+ ),
+ (
+ # With 'domain' set to null
+ {'foo': {'value': 'bar', 'domain': None}},
+ False,
+ ),
+ (
+ # With 'domain' set to a URL
+ {'foo': {'value': 'bar', 'domain': DUMMY_HOST}},
+ False,
+ ),
+ # New style cookie format
+ (
+ # Without 'domain' set
+ [{'name': 'foo', 'value': 'bar'}],
+ False
+ ),
+ (
+ # With 'domain' set to empty string
+ [{'name': 'foo', 'value': 'bar', 'domain': ''}],
+ False
+ ),
+ (
+ # With 'domain' set to null
+ [{'name': 'foo', 'value': 'bar', 'domain': None}],
+ False,
+ ),
+ (
+ # With 'domain' set to a URL
+ [{'name': 'foo', 'value': 'bar', 'domain': DUMMY_HOST}],
+ False,
+ ),
+])
+def test_cookie_security_warnings_on_raw_cookies(basic_session, mock_env, cookies, expect_warning):
+ with open_raw_session(basic_session) as raw_session:
+ raw_session['cookies'] = cookies
+
+ with open_session(basic_session, mock_env, read_only=True):
+ warning = b'Outdated layout detected'
+ stderr = read_stderr(mock_env)
+
+ if expect_warning:
+ assert warning in stderr
+ else:
+ assert warning not in stderr
+
+
+def test_old_session_cookie_layout_loading(basic_session, httpbin, mock_env):
+ with open_session(basic_session, mock_env) as session:
+ # Use the old layout & set a cookie
+ session['cookies'] = {}
+ session.cookies.set('foo', 'bar')
+
+ response = http(
+ '--session', str(basic_session),
+ httpbin + '/cookies'
+ )
+ assert response.json['cookies'] == {'foo': 'bar'}
+
+
[email protected]('layout_type', [
+ dict, list
+])
+def test_session_cookie_layout_preservance(basic_session, mock_env, layout_type):
+ with open_session(basic_session, mock_env) as session:
+ session['cookies'] = layout_type()
+ session.cookies.set('foo', 'bar')
+ session.save()
+
+ with open_session(basic_session, mock_env, read_only=True) as session:
+ assert isinstance(session['cookies'], layout_type)
+
+
[email protected]('layout_type', [
+ dict, list
+])
+def test_session_cookie_layout_preservance_on_new_cookies(basic_session, httpbin, mock_env, layout_type):
+ with open_session(basic_session, mock_env) as session:
+ session['cookies'] = layout_type()
+ session.cookies.set('foo', 'bar')
+ session.save()
+
+ http(
+ '--session', str(basic_session),
+ httpbin + '/cookies/set/baz/quux'
+ )
+
+ with open_session(basic_session, mock_env, read_only=True) as session:
+ assert isinstance(session['cookies'], layout_type)
|
import json
import os
import shutil
from datetime import datetime
from unittest import mock
import pytest
from .fixtures import FILE_PATH_ARG, UNICODE
from httpie.encoding import UTF8
from httpie.plugins import AuthPlugin
from httpie.plugins.builtin import HTTPBasicAuth
from httpie.plugins.registry import plugin_manager
from httpie.sessions import Session
from httpie.utils import get_expired_cookies
from .test_auth_plugins import basic_auth
from .utils import HTTP_OK, MockEnvironment, http, mk_config_dir
from base64 import b64encode
class SessionTestBase:
def start_session(self, httpbin):
"""Create and reuse a unique config dir for each test."""
self.config_dir = mk_config_dir()
def teardown_method(self, method):
shutil.rmtree(self.config_dir)
def env(self):
"""
Return an environment.
Each environment created within a test method
will share the same config_dir. This is necessary
for session files to be reused.
"""
return MockEnvironment(config_dir=self.config_dir)
class CookieTestBase:
def setup_method(self, method):
self.config_dir = mk_config_dir()
orig_session = {
'cookies': {
'cookie1': {
'value': 'foo',
},
'cookie2': {
'value': 'foo',
}
}
}
self.session_path = self.config_dir / 'test-session.json'
self.session_path.write_text(json.dumps(orig_session), encoding=UTF8)
def teardown_method(self, method):
shutil.rmtree(self.config_dir)
class TestSessionFlow(SessionTestBase):
"""
These tests start with an existing session created in `setup_method()`.
"""
def start_session(self, httpbin):
"""
Start a full-blown session with a custom request header,
authorization, and response cookies.
"""
super().start_session(httpbin)
r1 = http(
'--follow',
'--session=test',
'--auth=username:password',
'GET',
httpbin.url + '/cookies/set?hello=world',
'Hello:World',
env=self.env()
)
assert HTTP_OK in r1
def test_session_created_and_reused(self, httpbin):
self.start_session(httpbin)
# Verify that the session created in setup_method() has been used.
r2 = http('--session=test',
'GET', httpbin.url + '/get', env=self.env())
assert HTTP_OK in r2
assert r2.json['headers']['Hello'] == 'World'
assert r2.json['headers']['Cookie'] == 'hello=world'
assert 'Basic ' in r2.json['headers']['Authorization']
def test_session_update(self, httpbin):
self.start_session(httpbin)
# Get a response to a request from the original session.
r2 = http('--session=test', 'GET', httpbin.url + '/get',
env=self.env())
assert HTTP_OK in r2
# Make a request modifying the session data.
r3 = http('--follow', '--session=test', '--auth=username:password2',
'GET', httpbin.url + '/cookies/set?hello=world2',
'Hello:World2',
env=self.env())
assert HTTP_OK in r3
# Get a response to a request from the updated session.
r4 = http('--session=test', 'GET', httpbin.url + '/get',
env=self.env())
assert HTTP_OK in r4
assert r4.json['headers']['Hello'] == 'World2'
assert r4.json['headers']['Cookie'] == 'hello=world2'
assert (r2.json['headers']['Authorization']
!= r4.json['headers']['Authorization'])
def test_session_read_only(self, httpbin):
self.start_session(httpbin)
# Get a response from the original session.
r2 = http('--session=test', 'GET', httpbin.url + '/get',
env=self.env())
assert HTTP_OK in r2
# Make a request modifying the session data but
# with --session-read-only.
r3 = http('--follow', '--session-read-only=test',
'--auth=username:password2', 'GET',
httpbin.url + '/cookies/set?hello=world2', 'Hello:World2',
env=self.env())
assert HTTP_OK in r3
# Get a response from the updated session.
r4 = http('--session=test', 'GET', httpbin.url + '/get',
env=self.env())
assert HTTP_OK in r4
# Origin can differ on Travis.
del r2.json['origin'], r4.json['origin']
# Different for each request.
# Should be the same as before r3.
assert r2.json == r4.json
def test_session_overwrite_header(self, httpbin):
self.start_session(httpbin)
r2 = http('--session=test', 'GET', httpbin.url + '/get',
'Hello:World2', env=self.env())
assert HTTP_OK in r2
assert r2.json['headers']['Hello'] == 'World2'
r3 = http('--session=test', 'GET', httpbin.url + '/get',
'Hello:World2', 'Hello:World3', env=self.env())
assert HTTP_OK in r3
assert r3.json['headers']['Hello'] == 'World2,World3'
r3 = http('--session=test', 'GET', httpbin.url + '/get',
'Hello:', 'Hello:World3', env=self.env())
assert HTTP_OK in r3
assert 'Hello' not in r3.json['headers']['Hello']
class TestSession(SessionTestBase):
"""Stand-alone session tests."""
def test_session_ignored_header_prefixes(self, httpbin):
self.start_session(httpbin)
r1 = http('--session=test', 'GET', httpbin.url + '/get',
'Content-Type: text/plain',
'If-Unmodified-Since: Sat, 29 Oct 1994 19:43:31 GMT',
env=self.env())
assert HTTP_OK in r1
r2 = http('--session=test', 'GET', httpbin.url + '/get',
env=self.env())
assert HTTP_OK in r2
assert 'Content-Type' not in r2.json['headers']
assert 'If-Unmodified-Since' not in r2.json['headers']
def test_session_with_upload(self, httpbin):
self.start_session(httpbin)
r = http('--session=test', '--form', '--verbose', 'POST', httpbin.url + '/post',
f'test-file@{FILE_PATH_ARG}', 'foo=bar', env=self.env())
assert HTTP_OK in r
def test_session_by_path(self, httpbin):
self.start_session(httpbin)
session_path = self.config_dir / 'session-by-path.json'
r1 = http('--session', str(session_path), 'GET', httpbin.url + '/get',
'Foo:Bar', env=self.env())
assert HTTP_OK in r1
r2 = http('--session', str(session_path), 'GET', httpbin.url + '/get',
env=self.env())
assert HTTP_OK in r2
assert r2.json['headers']['Foo'] == 'Bar'
def test_session_with_cookie_followed_by_another_header(self, httpbin):
"""
Make sure headers don’t get mutated — <https://github.com/httpie/httpie/issues/1126>
"""
self.start_session(httpbin)
session_data = {
"headers": {
"cookie": "...",
"zzz": "..."
}
}
session_path = self.config_dir / 'session-data.json'
session_path.write_text(json.dumps(session_data))
r = http('--session', str(session_path), 'GET', httpbin.url + '/get',
env=self.env())
assert HTTP_OK in r
assert 'Zzz' in r
def test_session_unicode(self, httpbin):
self.start_session(httpbin)
r1 = http('--session=test', f'--auth=test:{UNICODE}',
'GET', httpbin.url + '/get', f'Test:{UNICODE}',
env=self.env())
assert HTTP_OK in r1
r2 = http('--session=test', '--verbose', 'GET',
httpbin.url + '/get', env=self.env())
assert HTTP_OK in r2
# FIXME: Authorization *sometimes* is not present
assert (r2.json['headers']['Authorization']
== HTTPBasicAuth.make_header('test', UNICODE))
# httpbin doesn't interpret UTF-8 headers
assert UNICODE in r2
def test_session_default_header_value_overwritten(self, httpbin):
self.start_session(httpbin)
# https://github.com/httpie/httpie/issues/180
r1 = http('--session=test',
httpbin.url + '/headers', 'User-Agent:custom',
env=self.env())
assert HTTP_OK in r1
assert r1.json['headers']['User-Agent'] == 'custom'
r2 = http('--session=test', httpbin.url + '/headers', env=self.env())
assert HTTP_OK in r2
assert r2.json['headers']['User-Agent'] == 'custom'
def test_download_in_session(self, tmp_path, httpbin):
# https://github.com/httpie/httpie/issues/412
self.start_session(httpbin)
cwd = os.getcwd()
os.chdir(tmp_path)
try:
http('--session=test', '--download',
httpbin.url + '/get', env=self.env())
finally:
os.chdir(cwd)
@pytest.mark.parametrize(
'auth_require_param, auth_parse_param',
[
(False, False),
(False, True),
(True, False)
]
)
def test_auth_type_reused_in_session(self, auth_require_param, auth_parse_param, httpbin):
self.start_session(httpbin)
session_path = self.config_dir / 'test-session.json'
header = 'Custom dXNlcjpwYXNzd29yZA'
class Plugin(AuthPlugin):
auth_type = 'test-reused'
auth_require = auth_require_param
auth_parse = auth_parse_param
def get_auth(self, username=None, password=None):
return basic_auth(header=f'{header}==')
plugin_manager.register(Plugin)
r1 = http(
'--session', str(session_path),
httpbin + '/basic-auth/user/password',
'--auth-type',
Plugin.auth_type,
'--auth', 'user:password',
'--print=H',
)
r2 = http(
'--session', str(session_path),
httpbin + '/basic-auth/user/password',
'--print=H',
)
assert f'Authorization: {header}' in r1
assert f'Authorization: {header}' in r2
plugin_manager.unregister(Plugin)
def test_auth_plugin_prompt_password_in_session(self, httpbin):
self.start_session(httpbin)
session_path = self.config_dir / 'test-session.json'
class Plugin(AuthPlugin):
auth_type = 'test-prompted'
def get_auth(self, username=None, password=None):
basic_auth_header = "Basic " + b64encode(self.raw_auth.encode()).strip().decode('latin1')
return basic_auth(basic_auth_header)
plugin_manager.register(Plugin)
with mock.patch(
'httpie.cli.argtypes.AuthCredentials._getpass',
new=lambda self, prompt: 'password'
):
r1 = http(
'--session', str(session_path),
httpbin + '/basic-auth/user/password',
'--auth-type',
Plugin.auth_type,
'--auth', 'user',
)
r2 = http(
'--session', str(session_path),
httpbin + '/basic-auth/user/password',
)
assert HTTP_OK in r1
assert HTTP_OK in r2
# additional test for issue: https://github.com/httpie/httpie/issues/1098
with open(session_path) as session_file:
session_file_lines = ''.join(session_file.readlines())
assert "\"type\": \"test-prompted\"" in session_file_lines
assert "\"raw_auth\": \"user:password\"" in session_file_lines
plugin_manager.unregister(Plugin)
def test_auth_type_stored_in_session_file(self, httpbin):
self.config_dir = mk_config_dir()
self.session_path = self.config_dir / 'test-session.json'
class Plugin(AuthPlugin):
auth_type = 'test-saved'
auth_require = True
def get_auth(self, username=None, password=None):
return basic_auth()
plugin_manager.register(Plugin)
http('--session', str(self.session_path),
httpbin + '/basic-auth/user/password',
'--auth-type',
Plugin.auth_type,
'--auth', 'user:password',
)
updated_session = json.loads(self.session_path.read_text(encoding=UTF8))
assert updated_session['auth']['type'] == 'test-saved'
assert updated_session['auth']['raw_auth'] == "user:password"
plugin_manager.unregister(Plugin)
class TestExpiredCookies(CookieTestBase):
@pytest.mark.parametrize(
'initial_cookie, expired_cookie',
[
({'id': {'value': 123}}, 'id'),
({'id': {'value': 123}}, 'token')
]
)
def test_removes_expired_cookies_from_session_obj(self, initial_cookie, expired_cookie, httpbin):
session = Session(self.config_dir)
session['cookies'] = initial_cookie
session.remove_cookies([expired_cookie])
assert expired_cookie not in session.cookies
def test_expired_cookies(self, httpbin):
r = http(
'--session', str(self.session_path),
'--print=H',
httpbin.url + '/cookies/delete?cookie2',
)
assert 'Cookie: cookie1=foo; cookie2=foo' in r
updated_session = json.loads(self.session_path.read_text(encoding=UTF8))
assert 'cookie1' in updated_session['cookies']
assert 'cookie2' not in updated_session['cookies']
def test_get_expired_cookies_using_max_age(self):
cookies = 'one=two; Max-Age=0; path=/; domain=.tumblr.com; HttpOnly'
expected_expired = [
{'name': 'one', 'path': '/'}
]
assert get_expired_cookies(cookies, now=None) == expected_expired
@pytest.mark.parametrize(
'cookies, now, expected_expired',
[
(
'hello=world; Path=/; Expires=Thu, 01-Jan-1970 00:00:00 GMT; HttpOnly',
None,
[
{
'name': 'hello',
'path': '/'
}
]
),
(
(
'hello=world; Path=/; Expires=Thu, 01-Jan-1970 00:00:00 GMT; HttpOnly, '
'pea=pod; Path=/ab; Expires=Thu, 01-Jan-1970 00:00:00 GMT; HttpOnly'
),
None,
[
{'name': 'hello', 'path': '/'},
{'name': 'pea', 'path': '/ab'}
]
),
(
# Checks we gracefully ignore expires date in invalid format.
# <https://github.com/httpie/httpie/issues/963>
'pfg=; Expires=Sat, 19-Sep-2020 06:58:14 GMT+0000; Max-Age=0; path=/; domain=.tumblr.com; secure; HttpOnly',
None,
[]
),
(
'hello=world; Path=/; Expires=Fri, 12 Jun 2020 12:28:55 GMT; HttpOnly',
datetime(2020, 6, 11).timestamp(),
[]
),
]
)
def test_get_expired_cookies_manages_multiple_cookie_headers(self, cookies, now, expected_expired):
assert get_expired_cookies(cookies, now=now) == expected_expired
class TestCookieStorage(CookieTestBase):
@pytest.mark.parametrize(
'new_cookies, new_cookies_dict, expected',
[(
'new=bar',
{'new': 'bar'},
'cookie1=foo; cookie2=foo; new=bar'
),
(
'new=bar;chocolate=milk',
{'new': 'bar', 'chocolate': 'milk'},
'chocolate=milk; cookie1=foo; cookie2=foo; new=bar'
),
(
'new=bar; chocolate=milk',
{'new': 'bar', 'chocolate': 'milk'},
'chocolate=milk; cookie1=foo; cookie2=foo; new=bar'
),
(
'new=bar;; chocolate=milk;;;',
{'new': 'bar', 'chocolate': 'milk'},
'cookie1=foo; cookie2=foo; new=bar'
),
(
'new=bar; chocolate=milk;;;',
{'new': 'bar', 'chocolate': 'milk'},
'chocolate=milk; cookie1=foo; cookie2=foo; new=bar'
)
]
)
def test_existing_and_new_cookies_sent_in_request(self, new_cookies, new_cookies_dict, expected, httpbin):
r = http(
'--session', str(self.session_path),
'--print=H',
httpbin.url,
'Cookie:' + new_cookies,
)
# Note: cookies in response are in alphabetical order
assert f'Cookie: {expected}' in r
updated_session = json.loads(self.session_path.read_text(encoding=UTF8))
for name, value in new_cookies_dict.items():
assert name, value in updated_session['cookies']
assert 'Cookie' not in updated_session['headers']
@pytest.mark.parametrize(
'cli_cookie, set_cookie, expected',
[(
'',
'/cookies/set/cookie1/bar',
'bar'
),
(
'cookie1=not_foo',
'/cookies/set/cookie1/bar',
'bar'
),
(
'cookie1=not_foo',
'',
'not_foo'
),
(
'',
'',
'foo'
)
]
)
def test_cookie_storage_priority(self, cli_cookie, set_cookie, expected, httpbin):
"""
Expected order of priority for cookie storage in session file:
1. set-cookie (from server)
2. command line arg
3. cookie already stored in session file
"""
http(
'--session', str(self.session_path),
httpbin.url + set_cookie,
'Cookie:' + cli_cookie,
)
updated_session = json.loads(self.session_path.read_text(encoding=UTF8))
assert updated_session['cookies']['cookie1']['value'] == expected
|
PYSEC-2022-167
|
tests/utils/__init__.py
|
@@ -6,6 +6,8 @@
import json
import tempfile
import warnings
+import pytest
+from contextlib import suppress
from io import BytesIO
from pathlib import Path
from typing import Any, Optional, Union, List, Iterable
@@ -16,6 +18,7 @@
from httpie.status import ExitStatus
from httpie.config import Config
from httpie.context import Environment
+from httpie.utils import url_as_host
# pytest-httpbin currently does not support chunked requests:
@@ -39,6 +42,7 @@
)
DUMMY_URL = 'http://this-should.never-resolve' # Note: URL never fetched
+DUMMY_HOST = url_as_host(DUMMY_URL)
def strip_colors(colorized_msg: str) -> str:
@@ -187,6 +191,13 @@ class ExitStatusError(Exception):
pass
[email protected]
+def mock_env() -> MockEnvironment:
+ env = MockEnvironment(stdout_mode='')
+ yield env
+ env.cleanup()
+
+
def normalize_args(args: Iterable[Any]) -> List[str]:
return [str(arg) for arg in args]
@@ -201,7 +212,7 @@ def httpie(
status.
"""
- env = kwargs.setdefault('env', MockEnvironment())
+ env = kwargs.setdefault('env', MockEnvironment(stdout_mode=''))
cli_args = ['httpie']
if not kwargs.pop('no_debug', False):
cli_args.append('--debug')
@@ -214,7 +225,16 @@ def httpie(
env.stdout.seek(0)
env.stderr.seek(0)
try:
- response = StrCLIResponse(env.stdout.read())
+ output = env.stdout.read()
+ if isinstance(output, bytes):
+ with suppress(UnicodeDecodeError):
+ output = output.decode()
+
+ if isinstance(output, bytes):
+ response = BytesCLIResponse(output)
+ else:
+ response = StrCLIResponse(output)
+
response.stderr = env.stderr.read()
response.exit_status = exit_status
response.args = cli_args
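The output handling added above follows a decode-or-fall-back pattern; below is a minimal standalone sketch of that pattern (the plain bytes/str returns stand in for BytesCLIResponse/StrCLIResponse).

from contextlib import suppress

def wrap_output(output):
    # Try to decode byte output; keep it as bytes if it is not valid UTF-8.
    if isinstance(output, bytes):
        with suppress(UnicodeDecodeError):
            output = output.decode()
    if isinstance(output, bytes):
        return bytes(output)   # stands in for BytesCLIResponse
    return str(output)         # stands in for StrCLIResponse

assert wrap_output(b'{"ok": true}') == '{"ok": true}'
assert isinstance(wrap_output(b'\xff\xfe'), bytes)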
|
"""Utilities for HTTPie test suite."""
import re
import shlex
import sys
import time
import json
import tempfile
import warnings
from io import BytesIO
from pathlib import Path
from typing import Any, Optional, Union, List, Iterable
import httpie.core as core
import httpie.manager.__main__ as manager
from httpie.status import ExitStatus
from httpie.config import Config
from httpie.context import Environment
# pytest-httpbin currently does not support chunked requests:
# <https://github.com/kevin1024/pytest-httpbin/issues/33>
# <https://github.com/kevin1024/pytest-httpbin/issues/28>
HTTPBIN_WITH_CHUNKED_SUPPORT_DOMAIN = 'pie.dev'
HTTPBIN_WITH_CHUNKED_SUPPORT = 'http://' + HTTPBIN_WITH_CHUNKED_SUPPORT_DOMAIN
TESTS_ROOT = Path(__file__).parent.parent
CRLF = '\r\n'
COLOR = '\x1b['
COLOR_RE = re.compile(r'\x1b\[\d+(;\d+)*?m', re.MULTILINE)
HTTP_OK = '200 OK'
# noinspection GrazieInspection
HTTP_OK_COLOR = (
'HTTP\x1b[39m\x1b[38;5;245m/\x1b[39m\x1b'
'[38;5;37m1.1\x1b[39m\x1b[38;5;245m \x1b[39m\x1b[38;5;37m200'
'\x1b[39m\x1b[38;5;245m \x1b[39m\x1b[38;5;136mOK'
)
DUMMY_URL = 'http://this-should.never-resolve' # Note: URL never fetched
def strip_colors(colorized_msg: str) -> str:
return COLOR_RE.sub('', colorized_msg)
def mk_config_dir() -> Path:
dirname = tempfile.mkdtemp(prefix='httpie_config_')
return Path(dirname)
def add_auth(url, auth):
proto, rest = url.split('://', 1)
return f'{proto}://{auth}@{rest}'
class StdinBytesIO(BytesIO):
"""To be used for `MockEnvironment.stdin`"""
len = 0 # See `prepare_request_body()`
class MockEnvironment(Environment):
"""Environment subclass with reasonable defaults for testing."""
colors = 0 # For easier debugging
stdin_isatty = True
stdout_isatty = True
is_windows = False
def __init__(self, create_temp_config_dir=True, *, stdout_mode='b', **kwargs):
if 'stdout' not in kwargs:
kwargs['stdout'] = tempfile.TemporaryFile(
mode=f'w+{stdout_mode}',
prefix='httpie_stdout'
)
if 'stderr' not in kwargs:
kwargs['stderr'] = tempfile.TemporaryFile(
mode='w+t',
prefix='httpie_stderr'
)
super().__init__(**kwargs)
self._create_temp_config_dir = create_temp_config_dir
self._delete_config_dir = False
self._temp_dir = Path(tempfile.gettempdir())
@property
def config(self) -> Config:
if (self._create_temp_config_dir
and self._temp_dir not in self.config_dir.parents):
self.create_temp_config_dir()
return super().config
def create_temp_config_dir(self):
self.config_dir = mk_config_dir()
self._delete_config_dir = True
def cleanup(self):
self.stdout.close()
self.stderr.close()
warnings.resetwarnings()
if self._delete_config_dir:
assert self._temp_dir in self.config_dir.parents
from shutil import rmtree
rmtree(self.config_dir, ignore_errors=True)
def __del__(self):
# noinspection PyBroadException
try:
self.cleanup()
except Exception:
pass
class BaseCLIResponse:
"""
Represents the result of a simulated `$ http` invocation via `http()`.
Holds and provides access to:
- stdout output: print(self)
- stderr output: print(self.stderr)
- devnull output: print(self.devnull)
- exit_status output: print(self.exit_status)
"""
stderr: str = None
devnull: str = None
json: dict = None
exit_status: ExitStatus = None
command: str = None
args: List[str] = []
complete_args: List[str] = []
@property
def command(self):
cmd = ' '.join(shlex.quote(arg) for arg in ['http', *self.args])
# Replace the pytest-httpbin address with the real httpbin.org host.
return re.sub(r'127\.0\.0\.1:\d+', 'httpbin.org', cmd)
class BytesCLIResponse(bytes, BaseCLIResponse):
"""
Used as a fallback when a StrCLIResponse cannot be used.
E.g. when the output contains binary data or when it is colorized.
`.json` will always be None.
"""
class StrCLIResponse(str, BaseCLIResponse):
@property
def json(self) -> Optional[dict]:
"""
Return the deserialized request or response JSON body,
if one (and only one) is included in the output and is parsable.
"""
if not hasattr(self, '_json'):
self._json = None
# De-serialize JSON body if possible.
if COLOR in self:
# Colorized output cannot be parsed.
pass
elif self.strip().startswith('{'):
# Looks like JSON body.
self._json = json.loads(self)
elif self.count('Content-Type:') == 1:
# Looks like an HTTP message,
# try to extract JSON from its body.
try:
j = self.strip()[self.strip().rindex('\r\n\r\n'):]
except ValueError:
pass
else:
try:
# noinspection PyAttributeOutsideInit
self._json = json.loads(j)
except ValueError:
pass
return self._json
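# Illustrative usage sketch (not part of the original module): the `.json`
# property above only parses plain, non-colorized output.
def _json_property_example():
    assert StrCLIResponse('{"hello": "world"}').json == {'hello': 'world'}
    # Output containing ANSI color codes is left unparsed and yields None:
    assert StrCLIResponse(COLOR + '32m{"hello": "world"}').json is None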
class ExitStatusError(Exception):
pass
def normalize_args(args: Iterable[Any]) -> List[str]:
return [str(arg) for arg in args]
def httpie(
*args,
**kwargs
) -> StrCLIResponse:
"""
Run HTTPie manager command with the given
args/kwargs, and capture stderr/out and exit
status.
"""
env = kwargs.setdefault('env', MockEnvironment())
cli_args = ['httpie']
if not kwargs.pop('no_debug', False):
cli_args.append('--debug')
cli_args += normalize_args(args)
exit_status = manager.main(
args=cli_args,
**kwargs
)
env.stdout.seek(0)
env.stderr.seek(0)
try:
response = StrCLIResponse(env.stdout.read())
response.stderr = env.stderr.read()
response.exit_status = exit_status
response.args = cli_args
finally:
env.stdout.truncate(0)
env.stderr.truncate(0)
env.stdout.seek(0)
env.stderr.seek(0)
return response
def http(
*args,
program_name='http',
tolerate_error_exit_status=False,
**kwargs,
) -> Union[StrCLIResponse, BytesCLIResponse]:
# noinspection PyUnresolvedReferences
"""
Run HTTPie and capture stderr/out and exit status.
Content written to devnull will be captured only if
env.devnull is set manually.
Invoke `httpie.core.main()` with `args` and `kwargs`,
and return a `CLIResponse` subclass instance.
The return value is either a `StrCLIResponse`, or a `BytesCLIResponse`
if the output cannot be decoded. The devnull capture is a string when possible,
bytes otherwise.
The response has the following attributes:
`stdout` is represented by the instance itself (print r)
`stderr`: text written to stderr
`devnull`: text written to devnull.
`exit_status`: the exit status
`json`: decoded JSON (if possible) or `None`
Exceptions are propagated.
If you pass ``tolerate_error_exit_status=True``, then error exit statuses
won't result in an exception.
Example:
$ http --auth=user:password GET pie.dev/basic-auth/user/password
>>> httpbin = getfixture('httpbin')
>>> r = http('-a', 'user:pw', httpbin.url + '/basic-auth/user/pw')
>>> type(r) == StrCLIResponse
True
>>> r.exit_status is ExitStatus.SUCCESS
True
>>> r.stderr
''
>>> 'HTTP/1.1 200 OK' in r
True
>>> r.json == {'authenticated': True, 'user': 'user'}
True
"""
env = kwargs.get('env')
if not env:
env = kwargs['env'] = MockEnvironment()
stdout = env.stdout
stderr = env.stderr
devnull = env.devnull
args = list(args)
args_with_config_defaults = args + env.config.default_options
add_to_args = []
if '--debug' not in args_with_config_defaults:
if (not tolerate_error_exit_status
and '--traceback' not in args_with_config_defaults):
add_to_args.append('--traceback')
if not any('--timeout' in arg for arg in args_with_config_defaults):
add_to_args.append('--timeout=3')
complete_args = [program_name, *add_to_args, *args]
# print(' '.join(complete_args))
def dump_stderr():
stderr.seek(0)
sys.stderr.write(stderr.read())
try:
try:
exit_status = core.main(args=complete_args, **kwargs)
if '--download' in args:
# Let the progress reporter thread finish.
time.sleep(.5)
except SystemExit:
if tolerate_error_exit_status:
exit_status = ExitStatus.ERROR
else:
dump_stderr()
raise
except Exception:
stderr.seek(0)
sys.stderr.write(stderr.read())
raise
else:
if (not tolerate_error_exit_status
and exit_status != ExitStatus.SUCCESS):
dump_stderr()
raise ExitStatusError(
'httpie.core.main() unexpectedly returned'
f' a non-zero exit status: {exit_status}'
)
stdout.seek(0)
stderr.seek(0)
devnull.seek(0)
output = stdout.read()
devnull_output = devnull.read()
try:
output = output.decode()
except UnicodeDecodeError:
r = BytesCLIResponse(output)
else:
r = StrCLIResponse(output)
try:
devnull_output = devnull_output.decode()
except Exception:
pass
r.devnull = devnull_output
r.stderr = stderr.read()
r.exit_status = exit_status
r.args = args
r.complete_args = ' '.join(complete_args)
if r.exit_status != ExitStatus.SUCCESS:
sys.stderr.write(r.stderr)
# print(f'\n\n$ {r.command}\n')
return r
finally:
devnull.close()
stdout.close()
stderr.close()
env.cleanup()
|
PYSEC-2022-167
|
tests/utils/http_server.py
|
@@ -85,6 +85,19 @@ def status_custom_msg(handler):
handler.end_headers()
+@TestHandler.handler('GET', '/cookies/set-and-redirect')
+def set_cookie_and_redirect(handler):
+ handler.send_response(302)
+
+ redirect_to = handler.headers.get('X-Redirect-To', '/headers')
+ handler.send_header('Location', redirect_to)
+
+ raw_cookies = handler.headers.get('X-Cookies', 'a=b')
+ for cookie in raw_cookies.split(', '):
+ handler.send_header('Set-Cookie', cookie)
+ handler.end_headers()
+
+
@pytest.fixture(scope="function")
def http_server():
"""A custom HTTP server implementation for our tests, that is
|
import threading
from collections import defaultdict
from http import HTTPStatus
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse
import pytest
class TestHandler(BaseHTTPRequestHandler):
handlers = defaultdict(dict)
@classmethod
def handler(cls, method, path):
def inner(func):
cls.handlers[method][path] = func
return func
return inner
def do_generic(self):
parse_result = urlparse(self.path)
func = self.handlers[self.command].get(parse_result.path)
if func is None:
return self.send_error(HTTPStatus.NOT_FOUND)
return func(self)
do_GET = do_generic
do_POST = do_generic
@TestHandler.handler('GET', '/headers')
def get_headers(handler):
handler.send_response(200)
for key, value in handler.headers.items():
handler.send_header(key, value)
handler.send_header('Content-Length', 0)
handler.end_headers()
@TestHandler.handler('GET', '/drip')
def chunked_drip(handler):
handler.send_response(200)
accept = handler.headers.get('Accept')
if accept is not None:
handler.send_header('Content-Type', accept)
handler.send_header('Transfer-Encoding', 'chunked')
handler.end_headers()
for _ in range(3):
body = 'test\n'
handler.wfile.write(f'{len(body):X}\r\n{body}\r\n'.encode('utf-8'))
handler.wfile.write('0\r\n\r\n'.encode('utf-8'))
@TestHandler.handler('GET', '/stream/encoding/random')
def random_encoding(handler):
from tests.fixtures import ASCII_FILE_CONTENT, FILE_CONTENT as UNICODE_FILE_CONTENT
handler.send_response(200)
handler.send_header('Transfer-Encoding', 'chunked')
handler.end_headers()
for body in [
ASCII_FILE_CONTENT,
ASCII_FILE_CONTENT,
UNICODE_FILE_CONTENT,
UNICODE_FILE_CONTENT,
UNICODE_FILE_CONTENT,
]:
body += "\n"
handler.wfile.write(f'{len(body.encode()):X}\r\n{body}\r\n'.encode())
handler.wfile.write('0\r\n\r\n'.encode('utf-8'))
@TestHandler.handler('POST', '/status/msg')
def status_custom_msg(handler):
content_len = int(handler.headers.get('content-length', 0))
post_body = handler.rfile.read(content_len).decode()
handler.send_response(200, post_body)
handler.end_headers()
@pytest.fixture(scope="function")
def http_server():
"""A custom HTTP server implementation for our tests, that is
built on top of the http.server module. Handy when we need to
deal with details which httpbin can not capture."""
server = HTTPServer(('localhost', 0), TestHandler)
thread = threading.Thread(target=server.serve_forever)
thread.start()
yield '{}:{}'.format(*server.socket.getsockname())
server.shutdown()
thread.join(timeout=0.5)
|
PYSEC-2022-167
|
litestar/app.py
|
@@ -202,6 +202,7 @@ def __init__(
path: str | None = None,
plugins: Sequence[PluginProtocol] | None = None,
request_class: type[Request] | None = None,
+ request_max_body_size: int | None = 10_000_000,
response_cache_config: ResponseCacheConfig | None = None,
response_class: type[Response] | None = None,
response_cookies: ResponseCookies | None = None,
@@ -286,6 +287,8 @@ def __init__(
pdb_on_exception: Drop into the PDB when an exception occurs.
plugins: Sequence of plugins.
request_class: An optional subclass of :class:`Request <.connection.Request>` to use for http connections.
+ request_max_body_size: Maximum allowed size of the request body in bytes. If this size is exceeded, a
+ '413 - Request Entity Too Large' error response is returned.
response_class: A custom subclass of :class:`Response <.response.Response>` to be used as the app's default
response.
response_cookies: A sequence of :class:`Cookie <.datastructures.Cookie>`.
@@ -361,6 +364,7 @@ def __init__(
pdb_on_exception=pdb_on_exception,
plugins=self._get_default_plugins(list(plugins or [])),
request_class=request_class,
+ request_max_body_size=request_max_body_size,
response_cache_config=response_cache_config or ResponseCacheConfig(),
response_class=response_class,
response_cookies=response_cookies or [],
@@ -464,6 +468,7 @@ def __init__(
parameters=config.parameters,
path=config.path,
request_class=self.request_class,
+ request_max_body_size=request_max_body_size,
response_class=config.response_class,
response_cookies=config.response_cookies,
response_headers=config.response_headers,
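# Illustrative sketch (assumed, based on the diff above): with the new parameter,
# an application can cap the request body size; larger bodies are rejected with
# a '413 - Request Entity Too Large' response.
from litestar import Litestar, post

@post("/upload")
async def upload(data: dict) -> dict:
    return {"received_keys": len(data)}  # echo back how many top-level keys arrived

app = Litestar(
    route_handlers=[upload],
    request_max_body_size=1_000_000,  # ~1 MB instead of the 10 MB default
)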
|
from __future__ import annotations
import inspect
import logging
import os
import warnings
from contextlib import (
AbstractAsyncContextManager,
AsyncExitStack,
asynccontextmanager,
suppress,
)
from datetime import date, datetime, time, timedelta
from functools import partial
from itertools import chain
from pathlib import Path
from typing import TYPE_CHECKING, Any, AsyncGenerator, Callable, Iterable, Mapping, Sequence, TypedDict, cast
from litestar._asgi import ASGIRouter
from litestar._asgi.utils import get_route_handlers, wrap_in_exception_handler
from litestar._openapi.plugin import OpenAPIPlugin
from litestar._openapi.schema_generation import openapi_schema_plugins
from litestar.config.allowed_hosts import AllowedHostsConfig
from litestar.config.app import AppConfig, ExperimentalFeatures
from litestar.config.response_cache import ResponseCacheConfig
from litestar.connection import Request, WebSocket
from litestar.datastructures.state import State
from litestar.events.emitter import BaseEventEmitterBackend, SimpleEventEmitter
from litestar.exceptions import (
LitestarWarning,
MissingDependencyException,
NoRouteMatchFoundException,
)
from litestar.logging.config import LoggingConfig, get_logger_placeholder
from litestar.middleware._internal.cors import CORSMiddleware
from litestar.openapi.config import OpenAPIConfig
from litestar.plugins import (
CLIPluginProtocol,
InitPluginProtocol,
OpenAPISchemaPluginProtocol,
PluginProtocol,
PluginRegistry,
SerializationPluginProtocol,
)
from litestar.plugins.base import CLIPlugin
from litestar.router import Router
from litestar.routes import ASGIRoute, HTTPRoute, WebSocketRoute
from litestar.static_files.base import StaticFiles
from litestar.stores.registry import StoreRegistry
from litestar.types import Empty, TypeDecodersSequence
from litestar.types.internal_types import PathParameterDefinition, TemplateConfigType
from litestar.utils import deprecated, ensure_async_callable, join_paths, unique
from litestar.utils.dataclass import extract_dataclass_items
from litestar.utils.predicates import is_async_callable
from litestar.utils.warnings import warn_pdb_on_exception
if TYPE_CHECKING:
from typing_extensions import Self
from litestar.config.compression import CompressionConfig
from litestar.config.cors import CORSConfig
from litestar.config.csrf import CSRFConfig
from litestar.contrib.opentelemetry import OpenTelemetryPlugin
from litestar.datastructures import CacheControlHeader, ETag
from litestar.dto import AbstractDTO
from litestar.events.listener import EventListener
from litestar.logging.config import BaseLoggingConfig
from litestar.openapi.spec import SecurityRequirement
from litestar.openapi.spec.open_api import OpenAPI
from litestar.response import Response
from litestar.static_files.config import StaticFilesConfig
from litestar.stores.base import Store
from litestar.types import (
AfterExceptionHookHandler,
AfterRequestHookHandler,
AfterResponseHookHandler,
AnyCallable,
ASGIApp,
BeforeMessageSendHookHandler,
BeforeRequestHookHandler,
ControllerRouterHandler,
Dependencies,
EmptyType,
ExceptionHandlersMap,
GetLogger,
Guard,
LifeSpanReceive,
LifeSpanScope,
LifeSpanSend,
Logger,
Message,
Middleware,
OnAppInitHandler,
ParametersMap,
Receive,
ResponseCookies,
ResponseHeaders,
RouteHandlerType,
Scope,
Send,
TypeEncodersMap,
)
from litestar.types.callable_types import LifespanHook
__all__ = ("HandlerIndex", "Litestar", "DEFAULT_OPENAPI_CONFIG")
DEFAULT_OPENAPI_CONFIG = OpenAPIConfig(title="Litestar API", version="1.0.0")
"""The default OpenAPI config used if not configuration is explicitly passed to the
:class:`Litestar <.app.Litestar>` instance constructor.
"""
class HandlerIndex(TypedDict):
"""Map route handler names to a mapping of paths + route handler.
It's returned from the 'get_handler_index_by_name' utility method.
"""
paths: list[str]
"""Full route paths to the route handler."""
handler: RouteHandlerType
"""Route handler instance."""
identifier: str
"""Unique identifier of the handler.
Either equal to :attr:`__name__ <obj.__name__>` attribute or ``__str__`` value of the handler.
"""
class Litestar(Router):
"""The Litestar application.
``Litestar`` is the root level of the app - it has the base path of ``/`` and all root level Controllers, Routers
and Route Handlers should be registered on it.
"""
__slots__ = (
"_lifespan_managers",
"_server_lifespan_managers",
"_debug",
"_openapi_schema",
"_static_files_config",
"plugins",
"after_exception",
"allowed_hosts",
"asgi_handler",
"asgi_router",
"before_send",
"compression_config",
"cors_config",
"csrf_config",
"event_emitter",
"get_logger",
"logger",
"logging_config",
"multipart_form_part_limit",
"on_shutdown",
"on_startup",
"openapi_config",
"response_cache_config",
"route_map",
"state",
"stores",
"template_engine",
"pdb_on_exception",
"experimental_features",
)
def __init__(
self,
route_handlers: Sequence[ControllerRouterHandler] | None = None,
*,
after_exception: Sequence[AfterExceptionHookHandler] | None = None,
after_request: AfterRequestHookHandler | None = None,
after_response: AfterResponseHookHandler | None = None,
allowed_hosts: Sequence[str] | AllowedHostsConfig | None = None,
before_request: BeforeRequestHookHandler | None = None,
before_send: Sequence[BeforeMessageSendHookHandler] | None = None,
cache_control: CacheControlHeader | None = None,
compression_config: CompressionConfig | None = None,
cors_config: CORSConfig | None = None,
csrf_config: CSRFConfig | None = None,
dto: type[AbstractDTO] | None | EmptyType = Empty,
debug: bool | None = None,
dependencies: Dependencies | None = None,
etag: ETag | None = None,
event_emitter_backend: type[BaseEventEmitterBackend] = SimpleEventEmitter,
exception_handlers: ExceptionHandlersMap | None = None,
guards: Sequence[Guard] | None = None,
include_in_schema: bool | EmptyType = Empty,
listeners: Sequence[EventListener] | None = None,
logging_config: BaseLoggingConfig | EmptyType | None = Empty,
middleware: Sequence[Middleware] | None = None,
multipart_form_part_limit: int = 1000,
on_app_init: Sequence[OnAppInitHandler] | None = None,
on_shutdown: Sequence[LifespanHook] | None = None,
on_startup: Sequence[LifespanHook] | None = None,
openapi_config: OpenAPIConfig | None = DEFAULT_OPENAPI_CONFIG,
opt: Mapping[str, Any] | None = None,
parameters: ParametersMap | None = None,
path: str | None = None,
plugins: Sequence[PluginProtocol] | None = None,
request_class: type[Request] | None = None,
response_cache_config: ResponseCacheConfig | None = None,
response_class: type[Response] | None = None,
response_cookies: ResponseCookies | None = None,
response_headers: ResponseHeaders | None = None,
return_dto: type[AbstractDTO] | None | EmptyType = Empty,
security: Sequence[SecurityRequirement] | None = None,
signature_namespace: Mapping[str, Any] | None = None,
signature_types: Sequence[Any] | None = None,
state: State | None = None,
static_files_config: Sequence[StaticFilesConfig] | None = None,
stores: StoreRegistry | dict[str, Store] | None = None,
tags: Sequence[str] | None = None,
template_config: TemplateConfigType | None = None,
type_decoders: TypeDecodersSequence | None = None,
type_encoders: TypeEncodersMap | None = None,
websocket_class: type[WebSocket] | None = None,
lifespan: Sequence[Callable[[Litestar], AbstractAsyncContextManager] | AbstractAsyncContextManager]
| None = None,
pdb_on_exception: bool | None = None,
experimental_features: Iterable[ExperimentalFeatures] | None = None,
) -> None:
"""Initialize a ``Litestar`` application.
Args:
after_exception: A sequence of :class:`exception hook handlers <.types.AfterExceptionHookHandler>`. This
hook is called after an exception occurs. Unlike exception handlers, it is not meant to
return a response - only to process the exception (e.g. log it, send it to Sentry etc.).
after_request: A sync or async function executed after the route handler function returned and the response
object has been resolved. Receives the response object.
after_response: A sync or async function called after the response has been awaited. It receives the
:class:`Request <.connection.Request>` object and should not return any values.
allowed_hosts: A sequence of allowed hosts, or an
:class:`AllowedHostsConfig <.config.allowed_hosts.AllowedHostsConfig>` instance. Enables the builtin
allowed hosts middleware.
before_request: A sync or async function called immediately before calling the route handler. Receives the
:class:`Request <.connection.Request>` instance and any non-``None`` return value is used for the
response, bypassing the route handler.
before_send: A sequence of :class:`before send hook handlers <.types.BeforeMessageSendHookHandler>`. Called
when the ASGI send function is called.
cache_control: A ``cache-control`` header of type
:class:`CacheControlHeader <litestar.datastructures.CacheControlHeader>` to add to route handlers of
this app. Can be overridden by route handlers.
compression_config: Configures compression behaviour of the application; this enables a builtin or
user-defined Compression middleware.
cors_config: If set, configures CORS handling for the application.
csrf_config: If set, configures :class:`CSRFMiddleware <.middleware.csrf.CSRFMiddleware>`.
debug: If ``True``, app errors are rendered as HTML with a stack trace.
dependencies: A string keyed mapping of dependency :class:`Providers <.di.Provide>`.
dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for (de)serializing and
validation of request data.
etag: An ``etag`` header of type :class:`ETag <.datastructures.ETag>` to add to route handlers of this app.
Can be overridden by route handlers.
event_emitter_backend: A subclass of
:class:`BaseEventEmitterBackend <.events.emitter.BaseEventEmitterBackend>`.
exception_handlers: A mapping of status codes and/or exception types to handler functions.
guards: A sequence of :class:`Guard <.types.Guard>` callables.
include_in_schema: A boolean flag dictating whether the route handler should be documented in the OpenAPI schema.
lifespan: A list of callables returning async context managers, wrapping the lifespan of the ASGI application
listeners: A sequence of :class:`EventListener <.events.listener.EventListener>`.
logging_config: A subclass of :class:`BaseLoggingConfig <.logging.config.BaseLoggingConfig>`.
middleware: A sequence of :class:`Middleware <.types.Middleware>`.
multipart_form_part_limit: The maximum number of allowed parts in a multipart/formdata request. This limit
is intended to protect against DoS attacks.
on_app_init: A sequence of :class:`OnAppInitHandler <.types.OnAppInitHandler>` instances. Handlers receive
an instance of :class:`AppConfig <.config.app.AppConfig>` that will have been initially populated with
the parameters passed to :class:`Litestar <litestar.app.Litestar>`, and must return an instance of the same.
If more than one handler is registered they are called in the order they are provided.
on_shutdown: A sequence of :class:`LifespanHook <.types.LifespanHook>` called during application
shutdown.
on_startup: A sequence of :class:`LifespanHook <litestar.types.LifespanHook>` called during
application startup.
openapi_config: Defaults to :attr:`DEFAULT_OPENAPI_CONFIG`
opt: A string keyed mapping of arbitrary values that can be accessed in :class:`Guards <.types.Guard>` or
wherever you have access to :class:`Request <litestar.connection.request.Request>` or
:class:`ASGI Scope <.types.Scope>`.
parameters: A mapping of :class:`Parameter <.params.Parameter>` definitions available to all application
paths.
path: A path fragment that is prefixed to all route handlers, controllers and routers associated
with the application instance.
.. versionadded:: 2.8.0
pdb_on_exception: Drop into the PDB when an exception occurs.
plugins: Sequence of plugins.
request_class: An optional subclass of :class:`Request <.connection.Request>` to use for http connections.
response_class: A custom subclass of :class:`Response <.response.Response>` to be used as the app's default
response.
response_cookies: A sequence of :class:`Cookie <.datastructures.Cookie>`.
response_headers: A string keyed mapping of :class:`ResponseHeader <.datastructures.ResponseHeader>`
response_cache_config: Configures caching behavior of the application.
return_dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for serializing
outbound response data.
route_handlers: A sequence of route handlers, which can include instances of
:class:`Router <.router.Router>`, subclasses of :class:`Controller <.controller.Controller>` or any
callable decorated by the route handler decorators.
security: A sequence of dicts that will be added to the schema of all route handlers in the application.
See
:data:`SecurityRequirement <.openapi.spec.SecurityRequirement>` for details.
signature_namespace: A mapping of names to types for use in forward reference resolution during signature modeling.
signature_types: A sequence of types for use in forward reference resolution during signature modeling.
These types will be added to the signature namespace using their ``__name__`` attribute.
state: An optional :class:`State <.datastructures.State>` for application state.
static_files_config: A sequence of :class:`StaticFilesConfig <.static_files.StaticFilesConfig>`
stores: Central registry of :class:`Store <.stores.base.Store>` that will be available throughout the
application. If this is a dictionary, it will be passed to a
:class:`StoreRegistry <.stores.registry.StoreRegistry>`. If it is a
:class:`StoreRegistry <.stores.registry.StoreRegistry>`, this instance will be used directly.
tags: A sequence of string tags that will be appended to the schema of all route handlers under the
application.
template_config: An instance of :class:`TemplateConfig <.template.TemplateConfig>`
type_decoders: A sequence of tuples, each composed of a predicate testing for type identity and a msgspec
hook for deserialization.
type_encoders: A mapping of types to callables that transform them into types supported for serialization.
websocket_class: An optional subclass of :class:`WebSocket <.connection.WebSocket>` to use for websocket
connections.
experimental_features: An iterable of experimental features to enable
"""
if logging_config is Empty:
logging_config = LoggingConfig()
if debug is None:
debug = os.getenv("LITESTAR_DEBUG", "0") == "1"
if pdb_on_exception is None:
pdb_on_exception = os.getenv("LITESTAR_PDB", "0") == "1"
config = AppConfig(
after_exception=list(after_exception or []),
after_request=after_request,
after_response=after_response,
allowed_hosts=allowed_hosts if isinstance(allowed_hosts, AllowedHostsConfig) else list(allowed_hosts or []),
before_request=before_request,
before_send=list(before_send or []),
cache_control=cache_control,
compression_config=compression_config,
cors_config=cors_config,
csrf_config=csrf_config,
debug=debug,
dependencies=dict(dependencies or {}),
dto=dto,
etag=etag,
event_emitter_backend=event_emitter_backend,
exception_handlers=exception_handlers or {},
guards=list(guards or []),
include_in_schema=include_in_schema,
lifespan=list(lifespan or []),
listeners=list(listeners or []),
logging_config=logging_config,
middleware=list(middleware or []),
multipart_form_part_limit=multipart_form_part_limit,
on_shutdown=list(on_shutdown or []),
on_startup=list(on_startup or []),
openapi_config=openapi_config,
opt=dict(opt or {}),
path=path or "",
parameters=parameters or {},
pdb_on_exception=pdb_on_exception,
plugins=self._get_default_plugins(list(plugins or [])),
request_class=request_class,
response_cache_config=response_cache_config or ResponseCacheConfig(),
response_class=response_class,
response_cookies=response_cookies or [],
response_headers=response_headers or [],
return_dto=return_dto,
route_handlers=list(route_handlers) if route_handlers is not None else [],
security=list(security or []),
signature_namespace=dict(signature_namespace or {}),
signature_types=list(signature_types or []),
state=state or State(),
static_files_config=list(static_files_config or []),
stores=stores,
tags=list(tags or []),
template_config=template_config,
type_encoders=type_encoders,
type_decoders=type_decoders,
websocket_class=websocket_class,
experimental_features=list(experimental_features or []),
)
config.plugins.extend([OpenAPIPlugin(self), *openapi_schema_plugins])
for handler in chain(
on_app_init or [],
(p.on_app_init for p in config.plugins if isinstance(p, InitPluginProtocol)),
[self._patch_opentelemetry_middleware],
):
config = handler(config) # pyright: ignore
self.plugins = PluginRegistry(config.plugins)
self._openapi_schema: OpenAPI | None = None
self._debug: bool = True
self.stores: StoreRegistry = (
config.stores if isinstance(config.stores, StoreRegistry) else StoreRegistry(config.stores)
)
self._lifespan_managers = config.lifespan
for store in self.stores._stores.values():
self._lifespan_managers.append(store)
self._server_lifespan_managers = [p.server_lifespan for p in config.plugins or [] if isinstance(p, CLIPlugin)]
self.experimental_features = frozenset(config.experimental_features or [])
if ExperimentalFeatures.DTO_CODEGEN in self.experimental_features:
warnings.warn(
"Use of redundant experimental feature flag DTO_CODEGEN. "
"DTO codegen backend is enabled by default since Litestar 2.8. The "
"DTO_CODEGEN feature flag can be safely removed from the configuration "
"and will be removed in version 3.0.",
category=LitestarWarning,
stacklevel=2,
)
self.get_logger: GetLogger = get_logger_placeholder
self.logger: Logger | None = None
self.routes: list[HTTPRoute | ASGIRoute | WebSocketRoute] = []
self.after_exception = [ensure_async_callable(h) for h in config.after_exception]
self.allowed_hosts = cast("AllowedHostsConfig | None", config.allowed_hosts)
self.before_send = [ensure_async_callable(h) for h in config.before_send]
self.compression_config = config.compression_config
self.cors_config = config.cors_config
self.csrf_config = config.csrf_config
self.event_emitter = config.event_emitter_backend(listeners=config.listeners)
self.logging_config = config.logging_config
self.multipart_form_part_limit = config.multipart_form_part_limit
self.on_shutdown = config.on_shutdown
self.on_startup = config.on_startup
self.openapi_config = config.openapi_config
self.request_class: type[Request] = config.request_class or Request
self.response_cache_config = config.response_cache_config
self.state = config.state
self._static_files_config = config.static_files_config
self.template_engine = config.template_config.engine_instance if config.template_config else None
self.websocket_class: type[WebSocket] = config.websocket_class or WebSocket
self.debug = config.debug
self.pdb_on_exception: bool = config.pdb_on_exception
self.include_in_schema = include_in_schema
if self.pdb_on_exception:
warn_pdb_on_exception()
try:
from starlette.exceptions import HTTPException as StarletteHTTPException
from litestar.middleware._internal.exceptions.middleware import _starlette_exception_handler
config.exception_handlers.setdefault(StarletteHTTPException, _starlette_exception_handler)
except ImportError:
pass
super().__init__(
after_request=config.after_request,
after_response=config.after_response,
before_request=config.before_request,
cache_control=config.cache_control,
dependencies=config.dependencies,
dto=config.dto,
etag=config.etag,
exception_handlers=config.exception_handlers,
guards=config.guards,
middleware=config.middleware,
opt=config.opt,
parameters=config.parameters,
path=config.path,
request_class=self.request_class,
response_class=config.response_class,
response_cookies=config.response_cookies,
response_headers=config.response_headers,
return_dto=config.return_dto,
# route handlers are registered below
route_handlers=[],
security=config.security,
signature_namespace=config.signature_namespace,
signature_types=config.signature_types,
tags=config.tags,
type_encoders=config.type_encoders,
type_decoders=config.type_decoders,
include_in_schema=config.include_in_schema,
websocket_class=self.websocket_class,
)
self.asgi_router = ASGIRouter(app=self)
for route_handler in config.route_handlers:
self.register(route_handler)
if self.logging_config:
self.get_logger = self.logging_config.configure()
self.logger = self.get_logger("litestar")
for static_config in self._static_files_config:
self.register(static_config.to_static_files_app())
self.asgi_handler = self._create_asgi_handler()
@staticmethod
def _patch_opentelemetry_middleware(config: AppConfig) -> AppConfig:
# workaround to support otel middleware priority. Should be replaced by regular
# middleware priorities once available
try:
from litestar.contrib.opentelemetry import OpenTelemetryPlugin
if not any(isinstance(p, OpenTelemetryPlugin) for p in config.plugins):
config.middleware, otel_middleware = OpenTelemetryPlugin._pop_otel_middleware(config.middleware)
if otel_middleware:
otel_plugin = OpenTelemetryPlugin()
otel_plugin._middleware = otel_middleware
config.plugins = [*config.plugins, otel_plugin]
except ImportError:
pass
return config
@property
@deprecated(version="2.6.0", kind="property", info="Use create_static_files router instead")
def static_files_config(self) -> list[StaticFilesConfig]:
return self._static_files_config
@property
@deprecated(version="2.0", alternative="Litestar.plugins.cli", kind="property")
def cli_plugins(self) -> list[CLIPluginProtocol]:
return list(self.plugins.cli)
@property
@deprecated(version="2.0", alternative="Litestar.plugins.openapi", kind="property")
def openapi_schema_plugins(self) -> list[OpenAPISchemaPluginProtocol]:
return list(self.plugins.openapi)
@property
@deprecated(version="2.0", alternative="Litestar.plugins.serialization", kind="property")
def serialization_plugins(self) -> list[SerializationPluginProtocol]:
return list(self.plugins.serialization)
@staticmethod
def _get_default_plugins(plugins: list[PluginProtocol]) -> list[PluginProtocol]:
from litestar.plugins.core import MsgspecDIPlugin
plugins.append(MsgspecDIPlugin())
with suppress(MissingDependencyException):
from litestar.plugins.pydantic import (
PydanticDIPlugin,
PydanticInitPlugin,
PydanticPlugin,
PydanticSchemaPlugin,
)
pydantic_plugin_found = any(isinstance(plugin, PydanticPlugin) for plugin in plugins)
pydantic_init_plugin_found = any(isinstance(plugin, PydanticInitPlugin) for plugin in plugins)
pydantic_schema_plugin_found = any(isinstance(plugin, PydanticSchemaPlugin) for plugin in plugins)
pydantic_serialization_plugin_found = any(isinstance(plugin, PydanticDIPlugin) for plugin in plugins)
if not pydantic_plugin_found and not pydantic_init_plugin_found and not pydantic_schema_plugin_found:
plugins.append(PydanticPlugin())
elif not pydantic_plugin_found and pydantic_init_plugin_found and not pydantic_schema_plugin_found:
plugins.append(PydanticSchemaPlugin())
elif not pydantic_plugin_found and not pydantic_init_plugin_found:
plugins.append(PydanticInitPlugin())
if not pydantic_plugin_found and not pydantic_serialization_plugin_found:
plugins.append(PydanticDIPlugin())
with suppress(MissingDependencyException):
from litestar.contrib.attrs import AttrsSchemaPlugin
pre_configured = any(isinstance(plugin, AttrsSchemaPlugin) for plugin in plugins)
if not pre_configured:
plugins.append(AttrsSchemaPlugin())
return plugins
@property
def debug(self) -> bool:
return self._debug
@debug.setter
def debug(self, value: bool) -> None:
"""Sets the debug logging level for the application.
When possible, it calls the `self.logging_config.set_level` method. This allows for implementation specific code and APIs to be called.
"""
if self.logger and self.logging_config:
self.logging_config.set_level(self.logger, logging.DEBUG if value else logging.INFO)
elif self.logger and hasattr(self.logger, "setLevel"): # pragma: no cover
self.logger.setLevel(logging.DEBUG if value else logging.INFO) # pragma: no cover
if isinstance(self.logging_config, LoggingConfig):
self.logging_config.loggers["litestar"]["level"] = "DEBUG" if value else "INFO"
self._debug = value
async def __call__(
self,
scope: Scope | LifeSpanScope,
receive: Receive | LifeSpanReceive,
send: Send | LifeSpanSend,
) -> None:
"""Application entry point.
Lifespan events (startup / shutdown) are sent to the lifespan handler, otherwise the ASGI handler is used
Args:
scope: The ASGI connection scope.
receive: The ASGI receive function.
send: The ASGI send function.
Returns:
None
"""
if scope["type"] == "lifespan":
await self.asgi_router.lifespan(receive=receive, send=send) # type: ignore[arg-type]
return
scope["app"] = self
scope.setdefault("state", {})
await self.asgi_handler(scope, receive, self._wrap_send(send=send, scope=scope)) # type: ignore[arg-type]
async def _call_lifespan_hook(self, hook: LifespanHook) -> None:
ret = hook(self) if inspect.signature(hook).parameters else hook() # type: ignore[call-arg]
if is_async_callable(hook): # pyright: ignore[reportGeneralTypeIssues]
await ret
@asynccontextmanager
async def lifespan(self) -> AsyncGenerator[None, None]:
"""Context manager handling the ASGI lifespan.
It will be entered when the ``lifespan`` message has been received from the
server, and exit after the ``asgi.shutdown`` message. During this period, it is
responsible for calling the ``on_startup``, ``on_shutdown`` hooks, as well as
custom lifespan managers.
"""
async with AsyncExitStack() as exit_stack:
for hook in self.on_shutdown[::-1]:
exit_stack.push_async_callback(partial(self._call_lifespan_hook, hook))
await exit_stack.enter_async_context(self.event_emitter)
for manager in self._lifespan_managers:
if not isinstance(manager, AbstractAsyncContextManager):
manager = manager(self)
await exit_stack.enter_async_context(manager)
for hook in self.on_startup:
await self._call_lifespan_hook(hook)
yield
@property
def openapi_schema(self) -> OpenAPI:
"""Access the OpenAPI schema of the application.
Returns:
The :class:`OpenAPI <pydantic_openapi_schema.open_api.OpenAPI>` instance of the application.
Raises:
ImproperlyConfiguredException: If the application ``openapi_config`` attribute is ``None``.
"""
return self.plugins.get(OpenAPIPlugin).provide_openapi()
@classmethod
def from_config(cls, config: AppConfig) -> Self:
"""Initialize a ``Litestar`` application from a configuration instance.
Args:
config: An instance of :class:`AppConfig <.config.AppConfig>`
Returns:
An instance of ``Litestar`` application.
"""
return cls(**dict(extract_dataclass_items(config)))
def register(self, value: ControllerRouterHandler) -> None: # type: ignore[override]
"""Register a route handler on the app.
This method can be used to dynamically add endpoints to an application.
Args:
value: An instance of :class:`Router <.router.Router>`, a subclass of
:class:`Controller <.controller.Controller>` or any function decorated by the route handler decorators.
Returns:
None
"""
routes = super().register(value=value)
for route in routes:
route_handlers = get_route_handlers(route)
for route_handler in route_handlers:
route_handler.on_registration(self)
if isinstance(route, HTTPRoute):
route.create_handler_map()
elif isinstance(route, WebSocketRoute):
handler = route.route_handler
route.handler_parameter_model = handler.create_kwargs_model(path_parameters=route.path_parameters)
for plugin in self.plugins.receive_route:
plugin.receive_route(route)
self.asgi_router.construct_routing_trie()
def get_handler_index_by_name(self, name: str) -> HandlerIndex | None:
"""Receives a route handler name and returns an optional dictionary containing the route handler instance and
list of paths sorted lexically.
Examples:
.. code-block:: python
from litestar import Litestar, get
@get("/", name="my-handler")
def handler() -> None:
pass
app = Litestar(route_handlers=[handler])
handler_index = app.get_handler_index_by_name("my-handler")
# { "paths": ["/"], "handler" ... }
Args:
name: A route handler unique name.
Returns:
A :class:`HandlerIndex <.app.HandlerIndex>` instance or ``None``.
"""
handler = self.asgi_router.route_handler_index.get(name)
if not handler:
return None
identifier = handler.name or str(handler)
routes = self.asgi_router.route_mapping[identifier]
paths = sorted(unique([route.path for route in routes]))
return HandlerIndex(handler=handler, paths=paths, identifier=identifier)
def route_reverse(self, name: str, **path_parameters: Any) -> str:
"""Receives a route handler name, path parameter values and returns url path to the handler with filled path
parameters.
Examples:
.. code-block:: python
from litestar import Litestar, get
@get("/group/{group_id:int}/user/{user_id:int}", name="get_membership_details")
def get_membership_details(group_id: int, user_id: int) -> None:
pass
app = Litestar(route_handlers=[get_membership_details])
path = app.route_reverse("get_membership_details", user_id=100, group_id=10)
# /group/10/user/100
Args:
name: A route handler unique name.
**path_parameters: Actual values for path parameters in the route.
Raises:
NoRouteMatchFoundException: If route with 'name' does not exist, path parameters are missing in
``**path_parameters`` or have the wrong type.
Returns:
A fully formatted url path.
"""
handler_index = self.get_handler_index_by_name(name)
if handler_index is None:
raise NoRouteMatchFoundException(f"Route {name} can not be found")
allow_str_instead = {datetime, date, time, timedelta, float, Path}
routes = sorted(
self.asgi_router.route_mapping[handler_index["identifier"]],
key=lambda r: len(r.path_parameters),
reverse=True,
)
passed_parameters = set(path_parameters.keys())
selected_route = next(
(route for route in routes if passed_parameters.issuperset(route.path_parameters)),
routes[-1],
)
output: list[str] = []
for component in selected_route.path_components:
if isinstance(component, PathParameterDefinition):
val = path_parameters.get(component.name)
if not isinstance(val, component.type) and (
component.type not in allow_str_instead or not isinstance(val, str)
):
raise NoRouteMatchFoundException(
f"Received type for path parameter {component.name} doesn't match declared type {component.type}"
)
output.append(str(val))
else:
output.append(component)
return join_paths(output)
@deprecated(
"2.6.0", info="Use create_static_files router instead of StaticFilesConfig, which works with route_reverse"
)
def url_for_static_asset(self, name: str, file_path: str) -> str:
"""Receives a static files handler name, an asset file path and returns resolved url path to the asset.
Examples:
.. code-block:: python
from litestar import Litestar
from litestar.static_files.config import StaticFilesConfig
app = Litestar(
static_files_config=[
StaticFilesConfig(directories=["css"], path="/static/css", name="css")
]
)
path = app.url_for_static_asset("css", "main.css")
# /static/css/main.css
Args:
name: A static handler unique name.
file_path: a string containing path to an asset.
Raises:
NoRouteMatchFoundException: If static files handler with ``name`` does not exist.
Returns:
A url path to the asset.
"""
handler_index = self.get_handler_index_by_name(name)
if handler_index is None:
raise NoRouteMatchFoundException(f"Static handler {name} can not be found")
handler_fn = cast("AnyCallable", handler_index["handler"].fn)
if not isinstance(handler_fn, StaticFiles):
raise NoRouteMatchFoundException(f"Handler with name {name} is not a static files handler")
return join_paths([handler_index["paths"][0], file_path])
@property
def route_handler_method_view(self) -> dict[str, list[str]]:
"""Map route handlers to paths.
Returns:
A dictionary of router handlers and lists of paths as strings
"""
route_map: dict[str, list[str]] = {
handler: [route.path for route in routes] for handler, routes in self.asgi_router.route_mapping.items()
}
return route_map
def _create_asgi_handler(self) -> ASGIApp:
"""Create an ASGIApp that wraps the ASGI router inside an exception handler.
If CORS or TrustedHost configs are provided to the constructor, they will wrap the router as well.
"""
asgi_handler = wrap_in_exception_handler(app=self.asgi_router)
if self.cors_config:
asgi_handler = CORSMiddleware(app=asgi_handler, config=self.cors_config)
try:
otel_plugin: OpenTelemetryPlugin = self.plugins.get("OpenTelemetryPlugin")
asgi_handler = otel_plugin.middleware(app=asgi_handler)
except KeyError:
pass
return asgi_handler
def _wrap_send(self, send: Send, scope: Scope) -> Send:
"""Wrap the ASGI send and handles any 'before send' hooks.
Args:
send: The ASGI send function.
scope: The ASGI scope.
Returns:
An ASGI send function.
"""
if self.before_send:
async def wrapped_send(message: Message) -> None:
for hook in self.before_send:
await hook(message, scope)
await send(message)
return wrapped_send
return send
def update_openapi_schema(self) -> None:
"""Update the OpenAPI schema to reflect the route handlers registered on the app.
Returns:
None
"""
self.plugins.get(OpenAPIPlugin)._build_openapi()
def emit(self, event_id: str, *args: Any, **kwargs: Any) -> None:
"""Emit an event to all attached listeners.
Args:
event_id: The ID of the event to emit, e.g ``my_event``.
args: args to pass to the listener(s).
kwargs: kwargs to pass to the listener(s)
Returns:
None
"""
self.event_emitter.emit(event_id, *args, **kwargs)
|
PYSEC-2024-178
|
litestar/config/app.py
|
@@ -163,6 +163,9 @@ class AppConfig:
"""List of :class:`SerializationPluginProtocol <.plugins.SerializationPluginProtocol>`."""
request_class: type[Request] | None = field(default=None)
"""An optional subclass of :class:`Request <.connection.Request>` to use for http connections."""
+ request_max_body_size: int | None | EmptyType = Empty
+ """Maximum allowed size of the request body in bytes. If this size is exceeded, a '413 - Request Entity Too Large'
+ error response is returned."""
response_class: type[Response] | None = field(default=None)
"""A custom subclass of :class:`Response <.response.Response>` to be used as the app's default response."""
response_cookies: ResponseCookies = field(default_factory=list)
|
from __future__ import annotations
import enum
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Callable
from litestar.config.allowed_hosts import AllowedHostsConfig
from litestar.config.response_cache import ResponseCacheConfig
from litestar.datastructures import State
from litestar.events.emitter import SimpleEventEmitter
from litestar.types.empty import Empty
if TYPE_CHECKING:
from contextlib import AbstractAsyncContextManager
from litestar import Litestar, Response
from litestar.config.compression import CompressionConfig
from litestar.config.cors import CORSConfig
from litestar.config.csrf import CSRFConfig
from litestar.connection import Request, WebSocket
from litestar.datastructures import CacheControlHeader, ETag
from litestar.di import Provide
from litestar.dto import AbstractDTO
from litestar.events.emitter import BaseEventEmitterBackend
from litestar.events.listener import EventListener
from litestar.logging.config import BaseLoggingConfig
from litestar.openapi.config import OpenAPIConfig
from litestar.openapi.spec import SecurityRequirement
from litestar.plugins import PluginProtocol
from litestar.static_files.config import StaticFilesConfig
from litestar.stores.base import Store
from litestar.stores.registry import StoreRegistry
from litestar.types import (
AfterExceptionHookHandler,
AfterRequestHookHandler,
AfterResponseHookHandler,
AnyCallable,
BeforeMessageSendHookHandler,
BeforeRequestHookHandler,
ControllerRouterHandler,
ExceptionHandlersMap,
Guard,
Middleware,
ParametersMap,
ResponseCookies,
ResponseHeaders,
TypeEncodersMap,
)
from litestar.types.callable_types import LifespanHook
from litestar.types.composite_types import TypeDecodersSequence
from litestar.types.empty import EmptyType
from litestar.types.internal_types import TemplateConfigType
__all__ = (
"AppConfig",
"ExperimentalFeatures",
)
@dataclass
class AppConfig:
"""The parameters provided to the ``Litestar`` app are used to instantiate an instance, and then the instance is
passed to any callbacks registered to ``on_app_init`` in the order they are provided.
The final attribute values are used to instantiate the application object.
"""
after_exception: list[AfterExceptionHookHandler] = field(default_factory=list)
"""An application level :class:`exception hook handler <.types.AfterExceptionHookHandler>` or list thereof.
This hook is called after an exception occurs. Unlike exception handlers, it is not meant to return a
response - only to process the exception (e.g. log it, send it to Sentry etc.).
"""
after_request: AfterRequestHookHandler | None = field(default=None)
"""A sync or async function executed after the route handler function returned and the response object has been
resolved.
Receives the response object which may be any subclass of :class:`Response <.response.Response>`.
"""
after_response: AfterResponseHookHandler | None = field(default=None)
"""A sync or async function called after the response has been awaited. It receives the
:class:`Request <.connection.Request>` object and should not return any values.
"""
allowed_hosts: list[str] | AllowedHostsConfig | None = field(default=None)
"""If set enables the builtin allowed hosts middleware."""
before_request: BeforeRequestHookHandler | None = field(default=None)
"""A sync or async function called immediately before calling the route handler. Receives the
:class:`Request <.connection.Request>` instance and any non-``None`` return value is used for the response,
bypassing the route handler.
"""
before_send: list[BeforeMessageSendHookHandler] = field(default_factory=list)
"""An application level :class:`before send hook handler <.types.BeforeMessageSendHookHandler>` or list thereof.
This hook is called when the ASGI send function is called.
"""
cache_control: CacheControlHeader | None = field(default=None)
"""A ``cache-control`` header of type :class:`CacheControlHeader <.datastructures.CacheControlHeader>` to add to
route handlers of this app.
Can be overridden by route handlers.
"""
compression_config: CompressionConfig | None = field(default=None)
"""Configures compression behaviour of the application, this enabled a builtin or user defined Compression
middleware.
"""
cors_config: CORSConfig | None = field(default=None)
"""If set this enables the builtin CORS middleware."""
csrf_config: CSRFConfig | None = field(default=None)
"""If set this enables the builtin CSRF middleware."""
debug: bool = field(default=False)
"""If ``True``, app errors rendered as HTML with a stack trace."""
dependencies: dict[str, Provide | AnyCallable] = field(default_factory=dict)
"""A string keyed dictionary of dependency :class:`Provider <.di.Provide>` instances."""
dto: type[AbstractDTO] | None | EmptyType = field(default=Empty)
""":class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for (de)serializing and validation of request data."""
etag: ETag | None = field(default=None)
"""An ``etag`` header of type :class:`ETag <.datastructures.ETag>` to add to route handlers of this app.
Can be overridden by route handlers.
"""
event_emitter_backend: type[BaseEventEmitterBackend] = field(default=SimpleEventEmitter)
"""A subclass of :class:`BaseEventEmitterBackend <.events.emitter.BaseEventEmitterBackend>`."""
exception_handlers: ExceptionHandlersMap = field(default_factory=dict)
"""A dictionary that maps handler functions to status codes and/or exception types."""
guards: list[Guard] = field(default_factory=list)
"""A list of :class:`Guard <.types.Guard>` callables."""
include_in_schema: bool | EmptyType = field(default=Empty)
"""A boolean flag dictating whether the route handler should be documented in the OpenAPI schema"""
lifespan: list[Callable[[Litestar], AbstractAsyncContextManager] | AbstractAsyncContextManager] = field(
default_factory=list
)
"""A list of callables returning async context managers, wrapping the lifespan of the ASGI application"""
listeners: list[EventListener] = field(default_factory=list)
"""A list of :class:`EventListener <.events.listener.EventListener>`."""
logging_config: BaseLoggingConfig | None = field(default=None)
"""An instance of :class:`BaseLoggingConfig <.logging.config.BaseLoggingConfig>` subclass."""
middleware: list[Middleware] = field(default_factory=list)
"""A list of :class:`Middleware <.types.Middleware>`."""
on_shutdown: list[LifespanHook] = field(default_factory=list)
"""A list of :class:`LifespanHook <.types.LifespanHook>` called during application shutdown."""
on_startup: list[LifespanHook] = field(default_factory=list)
"""A list of :class:`LifespanHook <.types.LifespanHook>` called during application startup."""
openapi_config: OpenAPIConfig | None = field(default=None)
"""Defaults to :data:`DEFAULT_OPENAPI_CONFIG <litestar.app.DEFAULT_OPENAPI_CONFIG>`"""
opt: dict[str, Any] = field(default_factory=dict)
"""A string keyed dictionary of arbitrary values that can be accessed in :class:`Guards <.types.Guard>` or
wherever you have access to :class:`Request <.connection.Request>` or :class:`ASGI Scope <litestar.types.Scope>`.
Can be overridden by routers and router handlers.
"""
parameters: ParametersMap = field(default_factory=dict)
"""A mapping of :class:`Parameter <.params.Parameter>` definitions available to all application paths."""
path: str = field(default="")
"""A base path that prefixed to all route handlers, controllers and routers associated with the
application instance.
.. versionadded:: 2.8.0
"""
pdb_on_exception: bool = field(default=False)
"""Drop into the PDB on an exception"""
plugins: list[PluginProtocol] = field(default_factory=list)
"""List of :class:`SerializationPluginProtocol <.plugins.SerializationPluginProtocol>`."""
request_class: type[Request] | None = field(default=None)
"""An optional subclass of :class:`Request <.connection.Request>` to use for http connections."""
response_class: type[Response] | None = field(default=None)
"""A custom subclass of :class:`Response <.response.Response>` to be used as the app's default response."""
response_cookies: ResponseCookies = field(default_factory=list)
"""A list of :class:`Cookie <.datastructures.Cookie>`."""
response_headers: ResponseHeaders = field(default_factory=list)
"""A string keyed dictionary mapping :class:`ResponseHeader <.datastructures.ResponseHeader>`."""
response_cache_config: ResponseCacheConfig = field(default_factory=ResponseCacheConfig)
"""Configures caching behavior of the application."""
return_dto: type[AbstractDTO] | None | EmptyType = field(default=Empty)
""":class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for serializing outbound response
data.
"""
route_handlers: list[ControllerRouterHandler] = field(default_factory=list)
"""A required list of route handlers, which can include instances of :class:`Router <.router.Router>`,
subclasses of :class:`Controller <.controller.Controller>` or any function decorated by the route handler
decorators.
"""
security: list[SecurityRequirement] = field(default_factory=list)
"""A list of dictionaries that will be added to the schema of all route handlers in the application. See
:data:`SecurityRequirement <.openapi.spec.SecurityRequirement>` for details.
"""
signature_namespace: dict[str, Any] = field(default_factory=dict)
"""A mapping of names to types for use in forward reference resolution during signature modeling."""
signature_types: list[Any] = field(default_factory=list)
"""A sequence of types for use in forward reference resolution during signature modeling.
These types will be added to the signature namespace using their ``__name__`` attribute.
"""
state: State = field(default_factory=State)
"""A :class:`State` <.datastructures.State>` instance holding application state."""
static_files_config: list[StaticFilesConfig] = field(default_factory=list)
"""An instance or list of :class:`StaticFilesConfig <.static_files.StaticFilesConfig>`."""
stores: StoreRegistry | dict[str, Store] | None = None
"""Central registry of :class:`Store <.stores.base.Store>` to be made available and be used throughout the
application. Can be either a dictionary mapping strings to :class:`Store <.stores.base.Store>` instances, or an
instance of :class:`StoreRegistry <.stores.registry.StoreRegistry>`.
"""
tags: list[str] = field(default_factory=list)
"""A list of string tags that will be appended to the schema of all route handlers under the application."""
template_config: TemplateConfigType | None = field(default=None)
"""An instance of :class:`TemplateConfig <.template.TemplateConfig>`."""
type_decoders: TypeDecodersSequence | None = field(default=None)
"""A sequence of tuples, each composed of a predicate testing for type identity and a msgspec hook for deserialization."""
type_encoders: TypeEncodersMap | None = field(default=None)
"""A mapping of types to callables that transform them into types supported for serialization."""
websocket_class: type[WebSocket] | None = field(default=None)
"""An optional subclass of :class:`WebSocket <.connection.WebSocket>` to use for websocket connections."""
multipart_form_part_limit: int = field(default=1000)
"""The maximal number of allowed parts in a multipart/formdata request. This limit is intended to protect from
DoS attacks."""
experimental_features: list[ExperimentalFeatures] | None = None
def __post_init__(self) -> None:
"""Normalize the allowed hosts to be a config or None.
Returns:
Optional config.
"""
if self.allowed_hosts and isinstance(self.allowed_hosts, list):
self.allowed_hosts = AllowedHostsConfig(allowed_hosts=self.allowed_hosts)
class ExperimentalFeatures(str, enum.Enum):
DTO_CODEGEN = "DTO_CODEGEN"
|
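The AppConfig fields above map onto the keyword arguments of the Litestar constructor, and __post_init__ wraps a plain list passed as allowed_hosts into an AllowedHostsConfig. A minimal sketch of that configuration (the handler, host names and limit values are illustrative, not taken from the source):

from litestar import Litestar, get


@get("/health")
async def health() -> dict[str, str]:
    # trivial route so the app has at least one handler
    return {"status": "ok"}


# Passing a plain list for allowed_hosts is fine: per __post_init__ above it is
# normalized into an AllowedHostsConfig before the app is assembled.
app = Litestar(
    route_handlers=[health],
    allowed_hosts=["example.com", "*.example.com"],
    multipart_form_part_limit=500,  # tighten the multipart part cap (default 1000)
)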
PYSEC-2024-178
|
litestar/connection/request.py
|
@@ -1,7 +1,8 @@
from __future__ import annotations
+import math
import warnings
-from typing import TYPE_CHECKING, Any, AsyncGenerator, Generic
+from typing import TYPE_CHECKING, Any, AsyncGenerator, Generic, cast
from litestar._multipart import parse_content_header, parse_multipart_form
from litestar._parsers import parse_url_encoded_form_data
@@ -17,12 +18,14 @@
from litestar.datastructures.multi_dicts import FormMultiDict
from litestar.enums import ASGIExtension, RequestEncodingType
from litestar.exceptions import (
+ ClientException,
InternalServerException,
LitestarException,
LitestarWarning,
)
+from litestar.exceptions.http_exceptions import RequestEntityTooLarge
from litestar.serialization import decode_json, decode_msgpack
-from litestar.types import Empty
+from litestar.types import Empty, HTTPReceiveMessage
__all__ = ("Request",)
@@ -52,6 +55,7 @@ class Request(Generic[UserT, AuthT, StateT], ASGIConnection["HTTPRouteHandler",
"_msgpack",
"_content_type",
"_accept",
+ "_content_length",
"is_connected",
"supports_push_promise",
)
@@ -79,6 +83,7 @@ def __init__(self, scope: Scope, receive: Receive = empty_receive, send: Send =
self._msgpack: Any = Empty
self._content_type: tuple[str, dict[str, str]] | EmptyType = Empty
self._accept: Accept | EmptyType = Empty
+ self._content_length: int | None | EmptyType = Empty
self.supports_push_promise = ASGIExtension.SERVER_PUSH in self._server_extensions
@property
@@ -152,6 +157,21 @@ async def msgpack(self) -> Any:
)
return self._msgpack
+ @property
+ def content_length(self) -> int | None:
+ cached_content_length = self._content_length
+ if cached_content_length is not Empty:
+ return cached_content_length
+
+ content_length_header = self.headers.get("content-length")
+ try:
+ content_length = self._content_length = (
+ int(content_length_header) if content_length_header is not None else None
+ )
+ except ValueError:
+ raise ClientException(f"Invalid content-length: {content_length_header!r}") from None
+ return content_length
+
async def stream(self) -> AsyncGenerator[bytes, None]:
"""Return an async generator that streams chunks of bytes.
@@ -164,10 +184,46 @@ async def stream(self) -> AsyncGenerator[bytes, None]:
if self._body is Empty:
if not self.is_connected:
raise InternalServerException("stream consumed")
- while event := await self.receive():
+
+ announced_content_length = self.content_length
+ # setting this to 'math.inf' as a micro-optimisation; Comparing against a
+ # float is slightly faster than checking if a value is 'None' and then
+ # comparing it to an int. since we expect a limit to be set most of the
+ # time, this is a bit more efficient
+ max_content_length = self.route_handler.resolve_request_max_body_size() or math.inf
+
+ # if the 'content-length' header is set, and exceeds the limit, we can bail
+ # out early before reading anything
+ if announced_content_length is not None and announced_content_length > max_content_length:
+ raise RequestEntityTooLarge
+
+ total_bytes_streamed: int = 0
+ while event := cast("HTTPReceiveMessage", await self.receive()):
if event["type"] == "http.request":
- if event["body"]:
- yield event["body"]
+ body = event["body"]
+ if body:
+ total_bytes_streamed += len(body)
+
+ # if a 'content-length' header was set, check if we have
+ # received more bytes than specified. in most cases this should
+ # be caught before it hits the application layer and an ASGI
+ # server (e.g. uvicorn) will not allow this, but since it's not
+ # forbidden according to the HTTP or ASGI spec, we err on the
+ # side of caution and still perform this check.
+ #
+ # uvicorn documented behaviour for this case:
+ # https://github.com/encode/uvicorn/blob/fe3910083e3990695bc19c2ef671dd447262ae18/docs/server-behavior.md?plain=1#L11
+ if announced_content_length:
+ if total_bytes_streamed > announced_content_length:
+ raise ClientException("Malformed request")
+
+ # we don't have a 'content-length' header, likely a chunked
+ # transfer. we don't really care and simply check if we have
+ # received more bytes than allowed
+ elif total_bytes_streamed > max_content_length:
+ raise RequestEntityTooLarge
+
+ yield body
if not event.get("more_body", False):
break
|
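The patch above makes Request.stream() enforce a per-handler body size limit: it rejects oversized requests early using the announced content-length and keeps counting streamed bytes for chunked uploads. A short sketch of the resulting behaviour, assuming the request_max_body_size handler parameter introduced by the companion decorator changes (handler name, limit and byte counts are illustrative):

from litestar import post
from litestar.testing import create_test_client


@post("/upload", request_max_body_size=1024)
async def upload(body: bytes) -> str:
    # 'body' receives the raw request body as bytes
    return f"received {len(body)} bytes"


with create_test_client([upload]) as client:
    assert client.post("/upload", content=b"x" * 10).status_code == 201
    # exceeding the limit is rejected with 413 before the body is fully read
    assert client.post("/upload", content=b"x" * 4096).status_code == 413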
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Any, AsyncGenerator, Generic
from litestar._multipart import parse_content_header, parse_multipart_form
from litestar._parsers import parse_url_encoded_form_data
from litestar.connection.base import (
ASGIConnection,
AuthT,
StateT,
UserT,
empty_receive,
empty_send,
)
from litestar.datastructures.headers import Accept
from litestar.datastructures.multi_dicts import FormMultiDict
from litestar.enums import ASGIExtension, RequestEncodingType
from litestar.exceptions import (
InternalServerException,
LitestarException,
LitestarWarning,
)
from litestar.serialization import decode_json, decode_msgpack
from litestar.types import Empty
__all__ = ("Request",)
if TYPE_CHECKING:
from litestar.handlers.http_handlers import HTTPRouteHandler # noqa: F401
from litestar.types.asgi_types import HTTPScope, Method, Receive, Scope, Send
from litestar.types.empty import EmptyType
SERVER_PUSH_HEADERS = {
"accept",
"accept-encoding",
"accept-language",
"cache-control",
"user-agent",
}
class Request(Generic[UserT, AuthT, StateT], ASGIConnection["HTTPRouteHandler", UserT, AuthT, StateT]):
"""The Litestar Request class."""
__slots__ = (
"_json",
"_form",
"_body",
"_msgpack",
"_content_type",
"_accept",
"is_connected",
"supports_push_promise",
)
scope: HTTPScope # pyright: ignore
"""The ASGI scope attached to the connection."""
receive: Receive
"""The ASGI receive function."""
send: Send
"""The ASGI send function."""
def __init__(self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send) -> None:
"""Initialize ``Request``.
Args:
scope: The ASGI connection scope.
receive: The ASGI receive function.
send: The ASGI send function.
"""
super().__init__(scope, receive, send)
self.is_connected: bool = True
self._body: bytes | EmptyType = Empty
self._form: FormMultiDict | EmptyType = Empty
self._json: Any = Empty
self._msgpack: Any = Empty
self._content_type: tuple[str, dict[str, str]] | EmptyType = Empty
self._accept: Accept | EmptyType = Empty
self.supports_push_promise = ASGIExtension.SERVER_PUSH in self._server_extensions
@property
def method(self) -> Method:
"""Return the request method.
Returns:
The request :class:`Method <litestar.types.Method>`
"""
return self.scope["method"]
@property
def content_type(self) -> tuple[str, dict[str, str]]:
"""Parse the request's 'Content-Type' header, returning the header value and any options as a dictionary.
Returns:
A tuple with the parsed value and a dictionary containing any options sent in it.
"""
if self._content_type is Empty:
if (content_type := self._connection_state.content_type) is not Empty:
self._content_type = content_type
else:
self._content_type = self._connection_state.content_type = parse_content_header(
self.headers.get("Content-Type", "")
)
return self._content_type
@property
def accept(self) -> Accept:
"""Parse the request's 'Accept' header, returning an :class:`Accept <litestar.datastructures.headers.Accept>` instance.
Returns:
An :class:`Accept <litestar.datastructures.headers.Accept>` instance, representing the list of acceptable media types.
"""
if self._accept is Empty:
if (accept := self._connection_state.accept) is not Empty:
self._accept = accept
else:
self._accept = self._connection_state.accept = Accept(self.headers.get("Accept", "*/*"))
return self._accept
async def json(self) -> Any:
"""Retrieve the json request body from the request.
Returns:
An arbitrary value
"""
if self._json is Empty:
if (json_ := self._connection_state.json) is not Empty:
self._json = json_
else:
body = await self.body()
self._json = self._connection_state.json = decode_json(
body or b"null", type_decoders=self.route_handler.resolve_type_decoders()
)
return self._json
async def msgpack(self) -> Any:
"""Retrieve the MessagePack request body from the request.
Returns:
An arbitrary value
"""
if self._msgpack is Empty:
if (msgpack := self._connection_state.msgpack) is not Empty:
self._msgpack = msgpack
else:
body = await self.body()
self._msgpack = self._connection_state.msgpack = decode_msgpack(
body or b"\xc0", type_decoders=self.route_handler.resolve_type_decoders()
)
return self._msgpack
async def stream(self) -> AsyncGenerator[bytes, None]:
"""Return an async generator that streams chunks of bytes.
Returns:
An async generator.
Raises:
RuntimeError: if the stream is already consumed
"""
if self._body is Empty:
if not self.is_connected:
raise InternalServerException("stream consumed")
while event := await self.receive():
if event["type"] == "http.request":
if event["body"]:
yield event["body"]
if not event.get("more_body", False):
break
if event["type"] == "http.disconnect":
raise InternalServerException("client disconnected prematurely")
self.is_connected = False
yield b""
else:
yield self._body
yield b""
return
async def body(self) -> bytes:
"""Return the body of the request.
Returns:
A byte-string representing the body of the request.
"""
if self._body is Empty:
if (body := self._connection_state.body) is not Empty:
self._body = body
else:
self._body = self._connection_state.body = b"".join([c async for c in self.stream()])
return self._body
async def form(self) -> FormMultiDict:
"""Retrieve form data from the request. If the request is either a 'multipart/form-data' or an
'application/x-www-form-urlencoded', return a FormMultiDict instance populated with the values sent in the
request, otherwise, an empty instance.
Returns:
A FormMultiDict instance
"""
if self._form is Empty:
if (form_data := self._connection_state.form) is Empty:
content_type, options = self.content_type
if content_type == RequestEncodingType.MULTI_PART:
form_data = parse_multipart_form(
body=await self.body(),
boundary=options.get("boundary", "").encode(),
multipart_form_part_limit=self.app.multipart_form_part_limit,
)
elif content_type == RequestEncodingType.URL_ENCODED:
form_data = parse_url_encoded_form_data(
await self.body(),
)
else:
form_data = {}
self._connection_state.form = form_data
# form_data is a dict[str, list[str] | str | UploadFile]. Convert it to a
# list[tuple[str, str | UploadFile]] before passing it to FormMultiDict so
# multi-keys can be accessed properly
items = []
for k, v in form_data.items():
if isinstance(v, list):
for sv in v:
items.append((k, sv))
else:
items.append((k, v))
self._form = FormMultiDict(items)
return self._form
async def send_push_promise(self, path: str, raise_if_unavailable: bool = False) -> None:
"""Send a push promise.
This method requires the `http.response.push` extension to be sent from the ASGI server.
Args:
path: Path to send the promise to.
raise_if_unavailable: Raise an exception if server push is not supported by
the server
Returns:
None
"""
if not self.supports_push_promise:
if raise_if_unavailable:
raise LitestarException("Attempted to send a push promise but the server does not support it")
warnings.warn(
"Attempted to send a push promise but the server does not support it. In a future version, this will "
"raise an exception. To enable this behaviour in the current version, set raise_if_unavailable=True. "
"To prevent this behaviour, make sure that the server you are using supports the 'http.response.push' "
"ASGI extension, or check this dynamically via "
":attr:`~litestar.connection.Request.supports_push_promise`",
stacklevel=2,
category=LitestarWarning,
)
return
raw_headers = [
(header_name.encode("latin-1"), value.encode("latin-1"))
for header_name in (self.headers.keys() & SERVER_PUSH_HEADERS)
for value in self.headers.getall(header_name, [])
]
await self.send({"type": "http.response.push", "path": path, "headers": raw_headers})
|
PYSEC-2024-178
|
litestar/controller.py
|
@@ -64,6 +64,7 @@ class Controller:
"parameters",
"path",
"request_class",
+ "request_max_body_size",
"response_class",
"response_cookies",
"response_headers",
@@ -136,6 +137,11 @@ class Controller:
"""A custom subclass of :class:`Request <.connection.Request>` to be used as the default request for all route
handlers under the controller.
"""
+ request_max_body_size: int | None | EmptyType
+ """
+ Maximum allowed size of the request body in bytes. If this size is exceeded, a '413 - Request Entity Too Large'
+ error response is returned."""
+
response_class: type[Response] | None
"""A custom subclass of :class:`Response <.response.Response>` to be used as the default response for all route
handlers under the controller.
@@ -191,6 +197,9 @@ def __init__(self, owner: Router) -> None:
if not hasattr(self, "include_in_schema"):
self.include_in_schema = Empty
+ if not hasattr(self, "request_max_body_size"):
+ self.request_max_body_size = Empty
+
self.signature_namespace = add_types_to_signature_namespace(
getattr(self, "signature_types", []), getattr(self, "signature_namespace", {})
)
@@ -235,6 +244,7 @@ def as_router(self) -> Router:
type_encoders=self.type_encoders,
type_decoders=self.type_decoders,
websocket_class=self.websocket_class,
+ request_max_body_size=self.request_max_body_size,
)
router.owner = self.owner
return router
|
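With the controller slot added above, a body size limit can be declared once on a Controller and inherited by all of its route handlers; as_router() forwards it to the generated Router. A small sketch (the path and the 10 MiB figure are illustrative):

from litestar import Controller, post


class UploadController(Controller):
    path = "/files"
    # applies to every handler on this controller unless a handler overrides it
    request_max_body_size = 10 * 1024 * 1024  # 10 MiB

    @post("/archive")
    async def upload_archive(self, body: bytes) -> int:
        return len(body)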
from __future__ import annotations
import types
from collections import defaultdict
from copy import deepcopy
from operator import attrgetter
from typing import TYPE_CHECKING, Any, Mapping, Sequence, cast
from litestar._layers.utils import narrow_response_cookies, narrow_response_headers
from litestar.exceptions import ImproperlyConfiguredException
from litestar.handlers.base import BaseRouteHandler
from litestar.handlers.http_handlers import HTTPRouteHandler
from litestar.handlers.websocket_handlers import WebsocketRouteHandler
from litestar.types.empty import Empty
from litestar.utils import normalize_path
from litestar.utils.signature import add_types_to_signature_namespace
__all__ = ("Controller",)
if TYPE_CHECKING:
from litestar.connection import Request, WebSocket
from litestar.datastructures import CacheControlHeader, ETag
from litestar.dto import AbstractDTO
from litestar.openapi.spec import SecurityRequirement
from litestar.response import Response
from litestar.router import Router
from litestar.types import (
AfterRequestHookHandler,
AfterResponseHookHandler,
BeforeRequestHookHandler,
Dependencies,
ExceptionHandlersMap,
Guard,
Middleware,
ParametersMap,
ResponseCookies,
TypeEncodersMap,
)
from litestar.types.composite_types import ResponseHeaders, TypeDecodersSequence
from litestar.types.empty import EmptyType
class Controller:
"""The Litestar Controller class.
Subclass this class to create 'view' like components and utilize OOP.
"""
__slots__ = (
"after_request",
"after_response",
"before_request",
"cache_control",
"dependencies",
"dto",
"etag",
"exception_handlers",
"guards",
"include_in_schema",
"middleware",
"opt",
"owner",
"parameters",
"path",
"request_class",
"response_class",
"response_cookies",
"response_headers",
"return_dto",
"security",
"signature_namespace",
"signature_types",
"tags",
"type_encoders",
"type_decoders",
"websocket_class",
)
after_request: AfterRequestHookHandler | None
"""A sync or async function executed before a :class:`Request <.connection.Request>` is passed to any route handler.
If this function returns a value, the request will not reach the route handler, and instead this value will be used.
"""
after_response: AfterResponseHookHandler | None
"""A sync or async function called after the response has been awaited.
It receives the :class:`Request <.connection.Request>` instance and should not return any values.
"""
before_request: BeforeRequestHookHandler | None
"""A sync or async function called immediately before calling the route handler.
It receives the :class:`Request <.connection.Request>` instance and any non-``None`` return value is used for the
response, bypassing the route handler.
"""
cache_control: CacheControlHeader | None
"""A :class:`CacheControlHeader <.datastructures.CacheControlHeader>` header to add to route handlers of this
controller.
Can be overridden by route handlers.
"""
dependencies: Dependencies | None
"""A string keyed dictionary of dependency :class:`Provider <.di.Provide>` instances."""
dto: type[AbstractDTO] | None | EmptyType
""":class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for (de)serializing and validation of request data."""
etag: ETag | None
"""An ``etag`` header of type :class:`ETag <.datastructures.ETag>` to add to route handlers of this controller.
Can be overridden by route handlers.
"""
exception_handlers: ExceptionHandlersMap | None
"""A map of handler functions to status codes and/or exception types."""
guards: Sequence[Guard] | None
"""A sequence of :class:`Guard <.types.Guard>` callables."""
include_in_schema: bool | EmptyType
"""A boolean flag dictating whether the route handler should be documented in the OpenAPI schema"""
middleware: Sequence[Middleware] | None
"""A sequence of :class:`Middleware <.types.Middleware>`."""
opt: Mapping[str, Any] | None
"""A string key mapping of arbitrary values that can be accessed in :class:`Guards <.types.Guard>` or wherever you
have access to :class:`Request <.connection.Request>` or :class:`ASGI Scope <.types.Scope>`.
"""
owner: Router
"""The :class:`Router <.router.Router>` or :class:`Litestar <litestar.app.Litestar>` app that owns the controller.
This value is set internally by Litestar and it should not be set when subclassing the controller.
"""
parameters: ParametersMap | None
"""A mapping of :class:`Parameter <.params.Parameter>` definitions available to all application paths."""
path: str
"""A path fragment for the controller.
All route handlers under the controller will have the fragment appended to them. If not set it defaults to ``/``.
"""
request_class: type[Request] | None
"""A custom subclass of :class:`Request <.connection.Request>` to be used as the default request for all route
handlers under the controller.
"""
response_class: type[Response] | None
"""A custom subclass of :class:`Response <.response.Response>` to be used as the default response for all route
handlers under the controller.
"""
response_cookies: ResponseCookies | None
"""A list of :class:`Cookie <.datastructures.Cookie>` instances."""
response_headers: ResponseHeaders | None
"""A string keyed dictionary mapping :class:`ResponseHeader <.datastructures.ResponseHeader>` instances."""
return_dto: type[AbstractDTO] | None | EmptyType
""":class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for serializing outbound response
data.
"""
tags: Sequence[str] | None
"""A sequence of string tags that will be appended to the schema of all route handlers under the controller."""
security: Sequence[SecurityRequirement] | None
"""A sequence of dictionaries that to the schema of all route handlers under the controller."""
signature_namespace: dict[str, Any]
"""A mapping of names to types for use in forward reference resolution during signature modeling."""
signature_types: Sequence[Any]
"""A sequence of types for use in forward reference resolution during signature modeling.
These types will be added to the signature namespace using their ``__name__`` attribute.
"""
type_decoders: TypeDecodersSequence | None
"""A sequence of tuples, each composed of a predicate testing for type identity and a msgspec hook for deserialization."""
type_encoders: TypeEncodersMap | None
"""A mapping of types to callables that transform them into types supported for serialization."""
websocket_class: type[WebSocket] | None
"""A custom subclass of :class:`WebSocket <.connection.WebSocket>` to be used as the default websocket for all route
handlers under the controller.
"""
def __init__(self, owner: Router) -> None:
"""Initialize a controller.
Should only be called by routers as part of controller registration.
Args:
owner: An instance of :class:`Router <.router.Router>`
"""
# Since functions set on classes are bound, we need to replace the bound instance with the class version
for key in ("after_request", "after_response", "before_request"):
cls_value = getattr(type(self), key, None)
if callable(cls_value):
setattr(self, key, cls_value)
if not hasattr(self, "dto"):
self.dto = Empty
if not hasattr(self, "return_dto"):
self.return_dto = Empty
if not hasattr(self, "include_in_schema"):
self.include_in_schema = Empty
self.signature_namespace = add_types_to_signature_namespace(
getattr(self, "signature_types", []), getattr(self, "signature_namespace", {})
)
for key in self.__slots__:
if not hasattr(self, key):
setattr(self, key, None)
self.response_cookies = narrow_response_cookies(self.response_cookies)
self.response_headers = narrow_response_headers(self.response_headers)
self.path = normalize_path(self.path or "/")
self.owner = owner
def as_router(self) -> Router:
from litestar.router import Router
router = Router(
path=self.path,
route_handlers=self.get_route_handlers(),
after_request=self.after_request,
after_response=self.after_response,
before_request=self.before_request,
cache_control=self.cache_control,
dependencies=self.dependencies,
dto=self.dto,
etag=self.etag,
exception_handlers=self.exception_handlers,
guards=self.guards,
include_in_schema=self.include_in_schema,
middleware=self.middleware,
opt=self.opt,
parameters=self.parameters,
request_class=self.request_class,
response_class=self.response_class,
response_cookies=self.response_cookies,
response_headers=self.response_headers,
return_dto=self.return_dto,
security=self.security,
signature_types=self.signature_types,
signature_namespace=self.signature_namespace,
tags=self.tags,
type_encoders=self.type_encoders,
type_decoders=self.type_decoders,
websocket_class=self.websocket_class,
)
router.owner = self.owner
return router
def get_route_handlers(self) -> list[BaseRouteHandler]:
"""Get a controller's route handlers and set the controller as the handlers' owner.
Returns:
A list containing a copy of the route handlers defined on the controller
"""
route_handlers: list[BaseRouteHandler] = []
controller_names = set(dir(Controller))
self_handlers = [
getattr(self, name)
for name in dir(self)
if name not in controller_names and isinstance(getattr(self, name), BaseRouteHandler)
]
self_handlers.sort(key=attrgetter("handler_id"))
for self_handler in self_handlers:
route_handler = deepcopy(self_handler)
# at the point we get a reference to the handler function, it's unbound, so
# we replace it with a regular bound method here
route_handler._fn = types.MethodType(route_handler._fn, self)
route_handler.owner = self
route_handlers.append(route_handler)
self.validate_route_handlers(route_handlers=route_handlers)
return route_handlers
def validate_route_handlers(self, route_handlers: list[BaseRouteHandler]) -> None:
"""Validate that the combination of path and decorator method or type are unique on the controller.
Args:
route_handlers: The controller's route handlers.
Raises:
ImproperlyConfiguredException
Returns:
None
"""
paths: defaultdict[str, set[str]] = defaultdict(set)
for route_handler in route_handlers:
if isinstance(route_handler, HTTPRouteHandler):
methods: set[str] = cast("set[str]", route_handler.http_methods)
elif isinstance(route_handler, WebsocketRouteHandler):
methods = {"websocket"}
else:
methods = {"asgi"}
for path in route_handler.paths:
if (entry := paths[path]) and (intersection := entry.intersection(methods)):
raise ImproperlyConfiguredException(
f"the combination of path and method must be unique in a controller - "
f"the following methods {''.join(m.lower() for m in intersection)} for {type(self).__name__} "
f"controller path {path} are not unique"
)
paths[path].update(methods)
|
PYSEC-2024-178
|
litestar/exceptions/http_exceptions.py
|
@@ -10,6 +10,7 @@
HTTP_403_FORBIDDEN,
HTTP_404_NOT_FOUND,
HTTP_405_METHOD_NOT_ALLOWED,
+ HTTP_413_REQUEST_ENTITY_TOO_LARGE,
HTTP_429_TOO_MANY_REQUESTS,
HTTP_500_INTERNAL_SERVER_ERROR,
HTTP_503_SERVICE_UNAVAILABLE,
@@ -119,6 +120,11 @@ class MethodNotAllowedException(ClientException):
status_code = HTTP_405_METHOD_NOT_ALLOWED
+class RequestEntityTooLarge(ClientException):
+ status_code = HTTP_413_REQUEST_ENTITY_TOO_LARGE
+ detail = "Request Entity Too Large"
+
+
class TooManyRequestsException(ClientException):
"""Request limits have been exceeded."""
|
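RequestEntityTooLarge is a plain ClientException subclass carrying status 413, so beyond the framework-level streaming check it can also be raised directly from application code. A hedged sketch (the 1 MB guard and handler are illustrative, not from the source):

from litestar import post
from litestar.exceptions.http_exceptions import RequestEntityTooLarge


@post("/import")
async def import_data(body: bytes) -> int:
    # explicit guard; the streaming-level limit added in this change covers the common case
    if len(body) > 1_000_000:
        raise RequestEntityTooLarge(detail="payload exceeds 1 MB")
    return len(body)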
from __future__ import annotations
from http import HTTPStatus
from typing import Any
from litestar.exceptions.base_exceptions import LitestarException
from litestar.status_codes import (
HTTP_400_BAD_REQUEST,
HTTP_401_UNAUTHORIZED,
HTTP_403_FORBIDDEN,
HTTP_404_NOT_FOUND,
HTTP_405_METHOD_NOT_ALLOWED,
HTTP_429_TOO_MANY_REQUESTS,
HTTP_500_INTERNAL_SERVER_ERROR,
HTTP_503_SERVICE_UNAVAILABLE,
)
__all__ = (
"ClientException",
"HTTPException",
"ImproperlyConfiguredException",
"InternalServerException",
"MethodNotAllowedException",
"NoRouteMatchFoundException",
"NotAuthorizedException",
"NotFoundException",
"PermissionDeniedException",
"ServiceUnavailableException",
"TemplateNotFoundException",
"TooManyRequestsException",
"ValidationException",
)
class HTTPException(LitestarException):
"""Base exception for HTTP error responses.
These exceptions carry information to construct an HTTP response.
"""
status_code: int = HTTP_500_INTERNAL_SERVER_ERROR
"""Exception status code."""
detail: str
"""Exception details or message."""
headers: dict[str, str] | None
"""Headers to attach to the response."""
extra: dict[str, Any] | list[Any] | None
"""An extra mapping to attach to the exception."""
def __init__(
self,
*args: Any,
detail: str = "",
status_code: int | None = None,
headers: dict[str, str] | None = None,
extra: dict[str, Any] | list[Any] | None = None,
) -> None:
"""Initialize ``HTTPException``.
Set ``detail`` and ``args`` if not provided.
Args:
*args: if ``detail`` kwarg not provided, first arg should be error detail.
detail: Exception details or message. Will default to args[0] if not provided.
status_code: Exception HTTP status code.
headers: Headers to set on the response.
extra: An extra mapping to attach to the exception.
"""
super().__init__(*args, detail=detail)
self.status_code = status_code or self.status_code
self.extra = extra
self.headers = headers
if not self.detail:
self.detail = HTTPStatus(self.status_code).phrase
self.args = (f"{self.status_code}: {self.detail}", *self.args)
def __repr__(self) -> str:
return f"{self.status_code} - {self.__class__.__name__} - {self.detail}"
def __str__(self) -> str:
return " ".join(self.args).strip()
class ImproperlyConfiguredException(HTTPException, ValueError):
"""Application has improper configuration."""
class ClientException(HTTPException):
"""Client error."""
status_code: int = HTTP_400_BAD_REQUEST
class ValidationException(ClientException, ValueError):
"""Client data validation error."""
class NotAuthorizedException(ClientException):
"""Request lacks valid authentication credentials for the requested resource."""
status_code = HTTP_401_UNAUTHORIZED
class PermissionDeniedException(ClientException):
"""Request understood, but not authorized."""
status_code = HTTP_403_FORBIDDEN
class NotFoundException(ClientException, ValueError):
"""Cannot find the requested resource."""
status_code = HTTP_404_NOT_FOUND
class MethodNotAllowedException(ClientException):
"""Server knows the request method, but the target resource doesn't support this method."""
status_code = HTTP_405_METHOD_NOT_ALLOWED
class TooManyRequestsException(ClientException):
"""Request limits have been exceeded."""
status_code = HTTP_429_TOO_MANY_REQUESTS
class InternalServerException(HTTPException):
"""Server encountered an unexpected condition that prevented it from fulfilling the request."""
status_code: int = HTTP_500_INTERNAL_SERVER_ERROR
class ServiceUnavailableException(InternalServerException):
"""Server is not ready to handle the request."""
status_code = HTTP_503_SERVICE_UNAVAILABLE
class NoRouteMatchFoundException(InternalServerException):
"""A route with the given name could not be found."""
class TemplateNotFoundException(InternalServerException):
"""Referenced template could not be found."""
def __init__(self, *args: Any, template_name: str) -> None:
"""Initialize ``TemplateNotFoundException``.
Args:
*args (Any): Passed through to ``super().__init__()`` - should not include ``detail``.
template_name (str): Name of template that could not be found.
"""
super().__init__(*args, detail=f"Template {template_name} not found.")
|
PYSEC-2024-178
|
litestar/handlers/http_handlers/base.py
|
@@ -82,6 +82,7 @@ class HTTPRouteHandler(BaseRouteHandler):
"_resolved_request_class",
"_resolved_tags",
"_resolved_security",
+ "_resolved_request_max_body_size",
"after_request",
"after_response",
"background",
@@ -113,6 +114,7 @@ class HTTPRouteHandler(BaseRouteHandler):
"sync_to_thread",
"tags",
"template_name",
+ "request_max_body_size",
)
has_sync_callable: bool
@@ -139,6 +141,7 @@ def __init__(
name: str | None = None,
opt: Mapping[str, Any] | None = None,
request_class: type[Request] | None = None,
+ request_max_body_size: int | None | EmptyType = Empty,
response_class: type[Response] | None = None,
response_cookies: ResponseCookies | None = None,
response_headers: ResponseHeaders | None = None,
@@ -204,6 +207,8 @@ def __init__(
:class:`ASGI Scope <.types.Scope>`.
request_class: A custom subclass of :class:`Request <.connection.Request>` to be used as route handler's
default request.
+ request_max_body_size: Maximum allowed size of the request body in bytes. If this size is exceeded,
+ a '413 - Request Entity Too Large' error response is returned.
response_class: A custom subclass of :class:`Response <.response.Response>` to be used as route handler's
default response.
response_cookies: A sequence of :class:`Cookie <.datastructures.Cookie>` instances.
@@ -272,6 +277,7 @@ def __init__(
self.response_class = response_class
self.response_cookies: Sequence[Cookie] | None = narrow_response_cookies(response_cookies)
self.response_headers: Sequence[ResponseHeader] | None = narrow_response_headers(response_headers)
+ self.request_max_body_size = request_max_body_size
self.sync_to_thread = sync_to_thread
# OpenAPI related attributes
@@ -297,6 +303,7 @@ def __init__(
self._resolved_request_class: type[Request] | EmptyType = Empty
self._resolved_security: list[SecurityRequirement] | EmptyType = Empty
self._resolved_tags: list[str] | EmptyType = Empty
+ self._resolved_request_max_body_size: int | EmptyType | None = Empty
def __call__(self, fn: AnyCallable) -> HTTPRouteHandler:
"""Replace a function with itself."""
@@ -473,6 +480,25 @@ def resolve_tags(self) -> list[str]:
return self._resolved_tags
+ def resolve_request_max_body_size(self) -> int | None:
+ if (resolved_limits := self._resolved_request_max_body_size) is not Empty:
+ return resolved_limits
+
+ max_body_size = self._resolved_request_max_body_size = next( # pyright: ignore
+ (
+ max_body_size
+ for layer in reversed(self.ownership_layers)
+ if (max_body_size := layer.request_max_body_size) is not Empty
+ ),
+ Empty,
+ )
+ if max_body_size is Empty:
+ raise ImproperlyConfiguredException(
+ "'request_max_body_size' set to 'Empty' on all layers. To omit a limit, "
+ "set 'request_max_body_size=None'"
+ )
+ return max_body_size
+
def get_response_handler(self, is_response_type_data: bool = False) -> Callable[[Any], Awaitable[ASGIApp]]:
"""Resolve the response_handler function for the route handler.
|
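resolve_request_max_body_size() walks the ownership layers from the handler outwards and memoizes the first non-Empty value, so the innermost layer that sets a limit wins. A sketch of that layering, assuming the matching request_max_body_size parameter on Router from the companion changes (paths and sizes are illustrative):

from litestar import Router, post


@post("/thumbnail", request_max_body_size=256 * 1024)  # handler value overrides the router default
async def thumbnail(body: bytes) -> int:
    return len(body)


@post("/video")
async def video(body: bytes) -> int:
    return len(body)


# Router-level default: /thumbnail resolves to 256 KiB, /video to 50 MiB.
media_router = Router(path="/media", route_handlers=[thumbnail, video], request_max_body_size=50 * 1024 * 1024)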
from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING, AnyStr, Mapping, Sequence, TypedDict, cast
from litestar._layers.utils import narrow_response_cookies, narrow_response_headers
from litestar.connection import Request
from litestar.datastructures.cookie import Cookie
from litestar.datastructures.response_header import ResponseHeader
from litestar.enums import HttpMethod, MediaType
from litestar.exceptions import (
HTTPException,
ImproperlyConfiguredException,
)
from litestar.handlers.base import BaseRouteHandler
from litestar.handlers.http_handlers._utils import (
create_data_handler,
create_generic_asgi_response_handler,
create_response_handler,
get_default_status_code,
is_empty_response_annotation,
normalize_http_method,
)
from litestar.openapi.spec import Operation
from litestar.response import Response
from litestar.status_codes import HTTP_204_NO_CONTENT, HTTP_304_NOT_MODIFIED
from litestar.types import (
AfterRequestHookHandler,
AfterResponseHookHandler,
AnyCallable,
ASGIApp,
BeforeRequestHookHandler,
CacheKeyBuilder,
Dependencies,
Empty,
EmptyType,
ExceptionHandlersMap,
Guard,
Method,
Middleware,
ResponseCookies,
ResponseHeaders,
TypeEncodersMap,
)
from litestar.utils import ensure_async_callable
from litestar.utils.predicates import is_async_callable
from litestar.utils.warnings import warn_implicit_sync_to_thread, warn_sync_to_thread_with_async_callable
if TYPE_CHECKING:
from typing import Any, Awaitable, Callable
from litestar.app import Litestar
from litestar.background_tasks import BackgroundTask, BackgroundTasks
from litestar.config.response_cache import CACHE_FOREVER
from litestar.datastructures import CacheControlHeader, ETag
from litestar.dto import AbstractDTO
from litestar.openapi.datastructures import ResponseSpec
from litestar.openapi.spec import SecurityRequirement
from litestar.types.callable_types import AsyncAnyCallable, OperationIDCreator
from litestar.types.composite_types import TypeDecodersSequence
__all__ = ("HTTPRouteHandler", "route")
class ResponseHandlerMap(TypedDict):
default_handler: Callable[[Any], Awaitable[ASGIApp]] | EmptyType
response_type_handler: Callable[[Any], Awaitable[ASGIApp]] | EmptyType
class HTTPRouteHandler(BaseRouteHandler):
"""HTTP Route Decorator.
Use this decorator to decorate an HTTP handler with multiple methods.
"""
__slots__ = (
"_resolved_after_response",
"_resolved_before_request",
"_response_handler_mapping",
"_resolved_include_in_schema",
"_resolved_response_class",
"_resolved_request_class",
"_resolved_tags",
"_resolved_security",
"after_request",
"after_response",
"background",
"before_request",
"cache",
"cache_control",
"cache_key_builder",
"content_encoding",
"content_media_type",
"deprecated",
"description",
"etag",
"has_sync_callable",
"http_methods",
"include_in_schema",
"media_type",
"operation_class",
"operation_id",
"raises",
"request_class",
"response_class",
"response_cookies",
"response_description",
"response_headers",
"responses",
"security",
"status_code",
"summary",
"sync_to_thread",
"tags",
"template_name",
)
has_sync_callable: bool
def __init__(
self,
path: str | Sequence[str] | None = None,
*,
after_request: AfterRequestHookHandler | None = None,
after_response: AfterResponseHookHandler | None = None,
background: BackgroundTask | BackgroundTasks | None = None,
before_request: BeforeRequestHookHandler | None = None,
cache: bool | int | type[CACHE_FOREVER] = False,
cache_control: CacheControlHeader | None = None,
cache_key_builder: CacheKeyBuilder | None = None,
dependencies: Dependencies | None = None,
dto: type[AbstractDTO] | None | EmptyType = Empty,
etag: ETag | None = None,
exception_handlers: ExceptionHandlersMap | None = None,
guards: Sequence[Guard] | None = None,
http_method: HttpMethod | Method | Sequence[HttpMethod | Method],
media_type: MediaType | str | None = None,
middleware: Sequence[Middleware] | None = None,
name: str | None = None,
opt: Mapping[str, Any] | None = None,
request_class: type[Request] | None = None,
response_class: type[Response] | None = None,
response_cookies: ResponseCookies | None = None,
response_headers: ResponseHeaders | None = None,
return_dto: type[AbstractDTO] | None | EmptyType = Empty,
status_code: int | None = None,
sync_to_thread: bool | None = None,
# OpenAPI related attributes
content_encoding: str | None = None,
content_media_type: str | None = None,
deprecated: bool = False,
description: str | None = None,
include_in_schema: bool | EmptyType = Empty,
operation_class: type[Operation] = Operation,
operation_id: str | OperationIDCreator | None = None,
raises: Sequence[type[HTTPException]] | None = None,
response_description: str | None = None,
responses: Mapping[int, ResponseSpec] | None = None,
signature_namespace: Mapping[str, Any] | None = None,
security: Sequence[SecurityRequirement] | None = None,
summary: str | None = None,
tags: Sequence[str] | None = None,
type_decoders: TypeDecodersSequence | None = None,
type_encoders: TypeEncodersMap | None = None,
**kwargs: Any,
) -> None:
"""Initialize ``HTTPRouteHandler``.
Args:
path: A path fragment for the route handler function or a sequence of path fragments.
If not given defaults to ``/``
after_request: A sync or async function executed before a :class:`Request <.connection.Request>` is passed
to any route handler. If this function returns a value, the request will not reach the route handler,
and instead this value will be used.
after_response: A sync or async function called after the response has been awaited. It receives the
:class:`Request <.connection.Request>` object and should not return any values.
background: A :class:`BackgroundTask <.background_tasks.BackgroundTask>` instance or
:class:`BackgroundTasks <.background_tasks.BackgroundTasks>` to execute after the response is finished.
Defaults to ``None``.
before_request: A sync or async function called immediately before calling the route handler. Receives
the :class:`Request <.connection.Request>` instance and any non-``None`` return value is used for the
response, bypassing the route handler.
cache: Enables response caching if configured on the application level. Valid values are ``True`` or a
number of seconds (e.g. ``120``) to cache the response.
cache_control: A ``cache-control`` header of type
:class:`CacheControlHeader <.datastructures.CacheControlHeader>` that will be added to the response.
cache_key_builder: A :class:`cache-key builder function <.types.CacheKeyBuilder>`. Allows for customization
of the cache key if caching is configured on the application level.
dependencies: A string keyed mapping of dependency :class:`Provider <.di.Provide>` instances.
dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for (de)serializing and
validation of request data.
etag: An ``etag`` header of type :class:`ETag <.datastructures.ETag>` that will be added to the response.
exception_handlers: A mapping of status codes and/or exception types to handler functions.
guards: A sequence of :class:`Guard <.types.Guard>` callables.
http_method: An :class:`http method string <.types.Method>`, a member of the enum
:class:`HttpMethod <.enums.HttpMethod>` or a list of these that correlates to the methods the route
handler function should handle.
media_type: A member of the :class:`MediaType <.enums.MediaType>` enum or a string with a valid IANA
Media-Type.
middleware: A sequence of :class:`Middleware <.types.Middleware>`.
name: A string identifying the route handler.
opt: A string keyed mapping of arbitrary values that can be accessed in :class:`Guards <.types.Guard>` or
wherever you have access to :class:`Request <.connection.Request>` or
:class:`ASGI Scope <.types.Scope>`.
request_class: A custom subclass of :class:`Request <.connection.Request>` to be used as route handler's
default request.
response_class: A custom subclass of :class:`Response <.response.Response>` to be used as route handler's
default response.
response_cookies: A sequence of :class:`Cookie <.datastructures.Cookie>` instances.
response_headers: A string keyed mapping of :class:`ResponseHeader <.datastructures.ResponseHeader>`
instances.
responses: A mapping of additional status codes and a description of their expected content.
This information will be included in the OpenAPI schema
return_dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for serializing
outbound response data.
signature_namespace: A mapping of names to types for use in forward reference resolution during signature modelling.
status_code: An http status code for the response. Defaults to ``200`` for ``GET``, ``PUT`` and ``PATCH``,
``201`` for ``POST`` and ``204`` for ``DELETE``. For mixed method requests it will check for ``POST`` and ``DELETE`` first
then defaults to ``200``.
sync_to_thread: A boolean dictating whether the handler function will be executed in a worker thread or the
main event loop. This has an effect only for sync handler functions. See using sync handler functions.
content_encoding: A string describing the encoding of the content, e.g. ``"base64"``.
content_media_type: A string designating the media-type of the content, e.g. ``"image/png"``.
deprecated: A boolean dictating whether this route should be marked as deprecated in the OpenAPI schema.
description: Text used for the route's schema description section.
include_in_schema: A boolean flag dictating whether the route handler should be documented in the OpenAPI schema.
operation_class: :class:`Operation <.openapi.spec.operation.Operation>` to be used with the route's OpenAPI schema.
operation_id: Either a string or a callable returning a string. An identifier used for the route's schema operationId.
raises: A list of exception classes extending from litestar.HttpException that is used for the OpenAPI documentation.
This list should describe all exceptions raised within the route handler's function/method. The Litestar
ValidationException will be added automatically for the schema if any validation is involved.
response_description: Text used for the route's response schema description section.
security: A sequence of dictionaries that contain information about which security scheme can be used on the endpoint.
summary: Text used for the route's schema summary section.
tags: A sequence of string tags that will be appended to the OpenAPI schema.
type_decoders: A sequence of tuples, each composed of a predicate testing for type identity and a msgspec hook for deserialization.
type_encoders: A mapping of types to callables that transform them into types supported for serialization.
**kwargs: Any additional kwarg - will be set in the opt dictionary.
"""
if not http_method:
raise ImproperlyConfiguredException("An http_method kwarg is required")
self.http_methods = normalize_http_method(http_methods=http_method)
self.status_code = status_code or get_default_status_code(http_methods=self.http_methods)
super().__init__(
path=path,
dependencies=dependencies,
dto=dto,
exception_handlers=exception_handlers,
guards=guards,
middleware=middleware,
name=name,
opt=opt,
return_dto=return_dto,
signature_namespace=signature_namespace,
type_decoders=type_decoders,
type_encoders=type_encoders,
**kwargs,
)
self.after_request = ensure_async_callable(after_request) if after_request else None # pyright: ignore
self.after_response = ensure_async_callable(after_response) if after_response else None
self.background = background
self.before_request = ensure_async_callable(before_request) if before_request else None
self.cache = cache
self.cache_control = cache_control
self.cache_key_builder = cache_key_builder
self.etag = etag
self.media_type: MediaType | str = media_type or ""
self.request_class = request_class
self.response_class = response_class
self.response_cookies: Sequence[Cookie] | None = narrow_response_cookies(response_cookies)
self.response_headers: Sequence[ResponseHeader] | None = narrow_response_headers(response_headers)
self.sync_to_thread = sync_to_thread
# OpenAPI related attributes
self.content_encoding = content_encoding
self.content_media_type = content_media_type
self.deprecated = deprecated
self.description = description
self.include_in_schema = include_in_schema
self.operation_class = operation_class
self.operation_id = operation_id
self.raises = raises
self.response_description = response_description
self.summary = summary
self.tags = tags
self.security = security
self.responses = responses
# memoized attributes, defaulted to Empty
self._resolved_after_response: AsyncAnyCallable | None | EmptyType = Empty
self._resolved_before_request: AsyncAnyCallable | None | EmptyType = Empty
self._response_handler_mapping: ResponseHandlerMap = {"default_handler": Empty, "response_type_handler": Empty}
self._resolved_include_in_schema: bool | EmptyType = Empty
self._resolved_response_class: type[Response] | EmptyType = Empty
self._resolved_request_class: type[Request] | EmptyType = Empty
self._resolved_security: list[SecurityRequirement] | EmptyType = Empty
self._resolved_tags: list[str] | EmptyType = Empty
def __call__(self, fn: AnyCallable) -> HTTPRouteHandler:
"""Replace a function with itself."""
if not is_async_callable(fn):
if self.sync_to_thread is None:
warn_implicit_sync_to_thread(fn, stacklevel=3)
elif self.sync_to_thread is not None:
warn_sync_to_thread_with_async_callable(fn, stacklevel=3)
super().__call__(fn)
return self
def resolve_request_class(self) -> type[Request]:
"""Return the closest custom Request class in the owner graph or the default Request class.
This method is memoized so the computation occurs only once.
Returns:
The default :class:`Request <.connection.Request>` class for the route handler.
"""
if self._resolved_request_class is Empty:
self._resolved_request_class = next(
(layer.request_class for layer in reversed(self.ownership_layers) if layer.request_class is not None),
Request,
)
return cast("type[Request]", self._resolved_request_class)
def resolve_response_class(self) -> type[Response]:
"""Return the closest custom Response class in the owner graph or the default Response class.
This method is memoized so the computation occurs only once.
Returns:
The default :class:`Response <.response.Response>` class for the route handler.
"""
if self._resolved_response_class is Empty:
self._resolved_response_class = next(
(layer.response_class for layer in reversed(self.ownership_layers) if layer.response_class is not None),
Response,
)
return cast("type[Response]", self._resolved_response_class)
def resolve_response_headers(self) -> frozenset[ResponseHeader]:
"""Return all header parameters in the scope of the handler function.
Returns:
A dictionary mapping keys to :class:`ResponseHeader <.datastructures.ResponseHeader>` instances.
"""
resolved_response_headers: dict[str, ResponseHeader] = {}
for layer in self.ownership_layers:
if layer_response_headers := layer.response_headers:
if isinstance(layer_response_headers, Mapping):
# this can't happen unless you manually set response_headers on an instance, which would result in a
# type-checking error on everything but the controller. We cover this case nevertheless
resolved_response_headers.update(
{name: ResponseHeader(name=name, value=value) for name, value in layer_response_headers.items()}
)
else:
resolved_response_headers.update({h.name: h for h in layer_response_headers})
for extra_header in ("cache_control", "etag"):
if header_model := getattr(layer, extra_header, None):
resolved_response_headers[header_model.HEADER_NAME] = ResponseHeader(
name=header_model.HEADER_NAME,
value=header_model.to_header(),
documentation_only=header_model.documentation_only,
)
return frozenset(resolved_response_headers.values())
def resolve_response_cookies(self) -> frozenset[Cookie]:
"""Return a list of Cookie instances. Filters the list to ensure each cookie key is unique.
Returns:
A list of :class:`Cookie <.datastructures.Cookie>` instances.
"""
response_cookies: set[Cookie] = set()
for layer in reversed(self.ownership_layers):
if layer_response_cookies := layer.response_cookies:
if isinstance(layer_response_cookies, Mapping):
# this can't happen unless you manually set response_cookies on an instance, which would result in a
# type-checking error on everything but the controller. We cover this case nevertheless
response_cookies.update(
{Cookie(key=key, value=value) for key, value in layer_response_cookies.items()}
)
else:
response_cookies.update(cast("set[Cookie]", layer_response_cookies))
return frozenset(response_cookies)
def resolve_before_request(self) -> AsyncAnyCallable | None:
"""Resolve the before_handler handler by starting from the route handler and moving up.
If a handler is found it is returned, otherwise None is set.
This method is memoized so the computation occurs only once.
Returns:
An optional :class:`before request lifecycle hook handler <.types.BeforeRequestHookHandler>`
"""
if self._resolved_before_request is Empty:
before_request_handlers = [layer.before_request for layer in self.ownership_layers if layer.before_request]
self._resolved_before_request = before_request_handlers[-1] if before_request_handlers else None
return cast("AsyncAnyCallable | None", self._resolved_before_request)
def resolve_after_response(self) -> AsyncAnyCallable | None:
"""Resolve the after_response handler by starting from the route handler and moving up.
If a handler is found it is returned, otherwise None is set.
This method is memoized so the computation occurs only once.
Returns:
An optional :class:`after response lifecycle hook handler <.types.AfterResponseHookHandler>`
"""
if self._resolved_after_response is Empty:
after_response_handlers: list[AsyncAnyCallable] = [
layer.after_response # type: ignore[misc]
for layer in self.ownership_layers
if layer.after_response
]
self._resolved_after_response = after_response_handlers[-1] if after_response_handlers else None
return cast("AsyncAnyCallable | None", self._resolved_after_response)
def resolve_include_in_schema(self) -> bool:
"""Resolve the 'include_in_schema' property by starting from the route handler and moving up.
If 'include_in_schema' is found in any of the ownership layers, the last value found is returned.
If not found in any layer, the default value ``True`` is returned.
Returns:
bool: The resolved 'include_in_schema' property.
"""
if self._resolved_include_in_schema is Empty:
include_in_schemas = [
i.include_in_schema for i in self.ownership_layers if isinstance(i.include_in_schema, bool)
]
self._resolved_include_in_schema = include_in_schemas[-1] if include_in_schemas else True
return self._resolved_include_in_schema
def resolve_security(self) -> list[SecurityRequirement]:
"""Resolve the security property by starting from the route handler and moving up.
Security requirements are additive, so the security requirements of the route handler are the sum of all
security requirements of the ownership layers.
Returns:
list[SecurityRequirement]: The resolved security property.
"""
if self._resolved_security is Empty:
self._resolved_security = []
for layer in self.ownership_layers:
if isinstance(layer.security, Sequence):
self._resolved_security.extend(layer.security)
return self._resolved_security
def resolve_tags(self) -> list[str]:
"""Resolve the tags property by starting from the route handler and moving up.
Tags are additive, so the tags of the route handler are the sum of all tags of the ownership layers.
Returns:
list[str]: A sorted list of unique tags.
"""
if self._resolved_tags is Empty:
tag_set = set()
for layer in self.ownership_layers:
for tag in layer.tags or []:
tag_set.add(tag)
self._resolved_tags = sorted(tag_set)
return self._resolved_tags
def get_response_handler(self, is_response_type_data: bool = False) -> Callable[[Any], Awaitable[ASGIApp]]:
"""Resolve the response_handler function for the route handler.
This method is memoized so the computation occurs only once.
Args:
is_response_type_data: Whether to return a handler for 'Response' instances.
Returns:
Async Callable to handle an HTTP Request
"""
if self._response_handler_mapping["default_handler"] is Empty:
after_request_handlers: list[AsyncAnyCallable] = [
layer.after_request # type: ignore[misc]
for layer in self.ownership_layers
if layer.after_request
]
after_request = cast(
"AfterRequestHookHandler | None",
after_request_handlers[-1] if after_request_handlers else None,
)
media_type = self.media_type.value if isinstance(self.media_type, Enum) else self.media_type
response_class = self.resolve_response_class()
headers = self.resolve_response_headers()
cookies = self.resolve_response_cookies()
type_encoders = self.resolve_type_encoders()
return_type = self.parsed_fn_signature.return_type
return_annotation = return_type.annotation
self._response_handler_mapping["response_type_handler"] = response_type_handler = create_response_handler(
after_request=after_request,
background=self.background,
cookies=cookies,
headers=headers,
media_type=media_type,
status_code=self.status_code,
type_encoders=type_encoders,
)
if return_type.is_subclass_of(Response):
self._response_handler_mapping["default_handler"] = response_type_handler
elif is_async_callable(return_annotation) or return_annotation is ASGIApp:
self._response_handler_mapping["default_handler"] = create_generic_asgi_response_handler(
after_request=after_request
)
else:
self._response_handler_mapping["default_handler"] = create_data_handler(
after_request=after_request,
background=self.background,
cookies=cookies,
headers=headers,
media_type=media_type,
response_class=response_class,
status_code=self.status_code,
type_encoders=type_encoders,
)
return cast(
"Callable[[Any], Awaitable[ASGIApp]]",
self._response_handler_mapping["response_type_handler"]
if is_response_type_data
else self._response_handler_mapping["default_handler"],
)
async def to_response(self, app: Litestar, data: Any, request: Request) -> ASGIApp:
"""Return a :class:`Response <.response.Response>` from the handler by resolving and calling it.
Args:
app: The :class:`Litestar <litestar.app.Litestar>` app instance
data: Either an instance of :class:`Response <.response.Response>` or an arbitrary value.
request: A :class:`Request <.connection.Request>` instance
Returns:
A Response instance
"""
if return_dto_type := self.resolve_return_dto():
data = return_dto_type(request).data_to_encodable_type(data)
response_handler = self.get_response_handler(is_response_type_data=isinstance(data, Response))
return await response_handler(app=app, data=data, request=request) # type: ignore[call-arg]
def on_registration(self, app: Litestar) -> None:
super().on_registration(app)
self.resolve_after_response()
self.resolve_include_in_schema()
self.has_sync_callable = not is_async_callable(self.fn)
if self.has_sync_callable and self.sync_to_thread:
self._fn = ensure_async_callable(self.fn)
self.has_sync_callable = False
def _validate_handler_function(self) -> None:
"""Validate the route handler function once it is set by inspecting its return annotations."""
super()._validate_handler_function()
return_type = self.parsed_fn_signature.return_type
if return_type.annotation is Empty:
raise ImproperlyConfiguredException(
f"A return value of a route handler function {self} should be type annotated. "
"If your function doesn't return a value, annotate it as returning 'None'."
)
if (
self.status_code < 200 or self.status_code in {HTTP_204_NO_CONTENT, HTTP_304_NOT_MODIFIED}
) and not is_empty_response_annotation(return_type):
raise ImproperlyConfiguredException(
"A status code 204, 304 or in the range below 200 does not support a response body. "
"If the function should return a value, change the route handler status code to an appropriate value.",
)
if not self.media_type:
if return_type.is_subclass_of((str, bytes)) or return_type.annotation is AnyStr:
self.media_type = MediaType.TEXT
elif not return_type.is_subclass_of(Response):
self.media_type = MediaType.JSON
if "socket" in self.parsed_fn_signature.parameters:
raise ImproperlyConfiguredException("The 'socket' kwarg is not supported with http handlers")
if "data" in self.parsed_fn_signature.parameters and "GET" in self.http_methods:
raise ImproperlyConfiguredException("'data' kwarg is unsupported for 'GET' request handlers")
if (body_param := self.parsed_fn_signature.parameters.get("body")) and not body_param.is_subclass_of(bytes):
raise ImproperlyConfiguredException(
f"Invalid type annotation for 'body' parameter in route handler {self}. 'body' will always receive the "
f"raw request body as bytes but was annotated with '{body_param.raw!r}'. If you want to receive "
"processed request data, use the 'data' parameter."
)
route = HTTPRouteHandler
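The _validate_handler_function checks above reject handlers with a missing return annotation, a non-bytes 'body' parameter, or a 'data' parameter on GET, and infer the media type from the return annotation. A minimal sketch of handlers that satisfy these checks (paths and field names are illustrative, not taken from the advisory):

from litestar import Litestar, get, post

@get("/ping")
async def ping() -> str:
    # a str return annotation lets the handler infer MediaType.TEXT
    return "pong"

@post("/echo")
async def echo(data: dict, body: bytes) -> dict:
    # 'data' is the parsed request payload; 'body' always receives the raw bytes
    return {"raw_length": len(body), "payload": data}

# building the app runs on_registration/_validate_handler_function for each handler
app = Litestar(route_handlers=[ping, echo])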
|
PYSEC-2024-178
|
litestar/handlers/http_handlers/decorators.py
|
@@ -628,6 +628,7 @@ def __init__(
name: str | None = None,
opt: Mapping[str, Any] | None = None,
request_class: type[Request] | None = None,
+ request_max_body_size: int | None | EmptyType = Empty,
response_class: type[Response] | None = None,
response_cookies: ResponseCookies | None = None,
response_headers: ResponseHeaders | None = None,
@@ -692,6 +693,8 @@ def __init__(
wherever you have access to :class:`Request <.connection.Request>` or :class:`ASGI Scope <.types.Scope>`.
request_class: A custom subclass of :class:`Request <.connection.Request>` to be used as route handler's
default request.
+ request_max_body_size: Maximum allowed size of the request body in bytes. If this size is exceeded,
+ a '413 - Request Entity Too Large' error response is returned.
response_class: A custom subclass of :class:`Response <.response.Response>` to be used as route handler's
default response.
response_cookies: A sequence of :class:`Cookie <.datastructures.Cookie>` instances.
@@ -755,6 +758,7 @@ def __init__(
path=path,
raises=raises,
request_class=request_class,
+ request_max_body_size=request_max_body_size,
response_class=response_class,
response_cookies=response_cookies,
response_description=response_description,
@@ -803,6 +807,7 @@ def __init__(
name: str | None = None,
opt: Mapping[str, Any] | None = None,
request_class: type[Request] | None = None,
+ request_max_body_size: int | None | EmptyType = Empty,
response_class: type[Response] | None = None,
response_cookies: ResponseCookies | None = None,
response_headers: ResponseHeaders | None = None,
@@ -867,6 +872,8 @@ def __init__(
wherever you have access to :class:`Request <.connection.Request>` or :class:`ASGI Scope <.types.Scope>`.
request_class: A custom subclass of :class:`Request <.connection.Request>` to be used as route handler's
default request.
+ request_max_body_size: Maximum allowed size of the request body in bytes. If this size is exceeded,
+ a '413 - Request Entity Too Large' error response is returned.
response_class: A custom subclass of :class:`Response <.response.Response>` to be used as route handler's
default response.
response_cookies: A sequence of :class:`Cookie <.datastructures.Cookie>` instances.
@@ -930,6 +937,7 @@ def __init__(
path=path,
raises=raises,
request_class=request_class,
+ request_max_body_size=request_max_body_size,
response_class=response_class,
response_cookies=response_cookies,
response_description=response_description,
@@ -978,6 +986,7 @@ def __init__(
name: str | None = None,
opt: Mapping[str, Any] | None = None,
request_class: type[Request] | None = None,
+ request_max_body_size: int | None | EmptyType = Empty,
response_class: type[Response] | None = None,
response_cookies: ResponseCookies | None = None,
response_headers: ResponseHeaders | None = None,
@@ -1042,6 +1051,8 @@ def __init__(
wherever you have access to :class:`Request <.connection.Request>` or :class:`ASGI Scope <.types.Scope>`.
request_class: A custom subclass of :class:`Request <.connection.Request>` to be used as route handler's
default request.
+ request_max_body_size: Maximum allowed size of the request body in bytes. If this size is exceeded,
+ a '413 - Request Entity Too Large' error response is returned.
response_class: A custom subclass of :class:`Response <.response.Response>` to be used as route handler's
default response.
response_cookies: A sequence of :class:`Cookie <.datastructures.Cookie>` instances.
@@ -1105,6 +1116,7 @@ def __init__(
path=path,
raises=raises,
request_class=request_class,
+ request_max_body_size=request_max_body_size,
response_class=response_class,
response_cookies=response_cookies,
response_description=response_description,
|
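The diff above threads a new request_max_body_size keyword through each decorator's signature. A hedged usage sketch, assuming the parameter behaves as described in the added docstring lines (a byte limit enforced with a '413 - Request Entity Too Large' response) and is available in a Litestar release that contains this patch:

from litestar import Litestar, post

@post("/upload", request_max_body_size=5 * 1024 * 1024)
async def upload(body: bytes) -> dict:
    # bodies larger than ~5 MiB are rejected before the handler runs
    return {"size": len(body)}

app = Litestar(route_handlers=[upload])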
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING
from litestar.enums import HttpMethod, MediaType
from litestar.exceptions import HTTPException, ImproperlyConfiguredException
from litestar.openapi.spec import Operation
from litestar.response.file import ASGIFileResponse, File
from litestar.types import Empty, TypeDecodersSequence
from litestar.utils import is_class_and_subclass
from ._utils import is_empty_response_annotation
from .base import HTTPRouteHandler
if TYPE_CHECKING:
from typing import Any, Mapping, Sequence
from litestar.background_tasks import BackgroundTask, BackgroundTasks
from litestar.config.response_cache import CACHE_FOREVER
from litestar.connection import Request
from litestar.datastructures import CacheControlHeader, ETag
from litestar.dto import AbstractDTO
from litestar.openapi.datastructures import ResponseSpec
from litestar.openapi.spec import SecurityRequirement
from litestar.response import Response
from litestar.types import (
AfterRequestHookHandler,
AfterResponseHookHandler,
BeforeRequestHookHandler,
CacheKeyBuilder,
Dependencies,
EmptyType,
ExceptionHandlersMap,
Guard,
Middleware,
ResponseCookies,
ResponseHeaders,
TypeEncodersMap,
)
from litestar.types.callable_types import OperationIDCreator
__all__ = ("get", "head", "post", "put", "patch", "delete")
MSG_SEMANTIC_ROUTE_HANDLER_WITH_HTTP = "semantic route handlers cannot define http_method"
def _subclass_warning() -> None:
warnings.warn(
"Semantic HTTP route handler classes are deprecated and will be replaced by "
"functional decorators in Litestar 3.0.",
category=DeprecationWarning,
stacklevel=2,
)
class delete(HTTPRouteHandler):
"""DELETE Route Decorator.
Use this decorator to decorate an HTTP handler for DELETE requests.
"""
def __init__(
self,
path: str | None | Sequence[str] = None,
*,
after_request: AfterRequestHookHandler | None = None,
after_response: AfterResponseHookHandler | None = None,
background: BackgroundTask | BackgroundTasks | None = None,
before_request: BeforeRequestHookHandler | None = None,
cache: bool | int | type[CACHE_FOREVER] = False,
cache_control: CacheControlHeader | None = None,
cache_key_builder: CacheKeyBuilder | None = None,
dependencies: Dependencies | None = None,
dto: type[AbstractDTO] | None | EmptyType = Empty,
etag: ETag | None = None,
exception_handlers: ExceptionHandlersMap | None = None,
guards: Sequence[Guard] | None = None,
media_type: MediaType | str | None = None,
middleware: Sequence[Middleware] | None = None,
name: str | None = None,
opt: Mapping[str, Any] | None = None,
request_class: type[Request] | None = None,
response_class: type[Response] | None = None,
response_cookies: ResponseCookies | None = None,
response_headers: ResponseHeaders | None = None,
return_dto: type[AbstractDTO] | None | EmptyType = Empty,
signature_namespace: Mapping[str, Any] | None = None,
status_code: int | None = None,
sync_to_thread: bool | None = None,
# OpenAPI related attributes
content_encoding: str | None = None,
content_media_type: str | None = None,
deprecated: bool = False,
description: str | None = None,
include_in_schema: bool | EmptyType = Empty,
operation_class: type[Operation] = Operation,
operation_id: str | OperationIDCreator | None = None,
raises: Sequence[type[HTTPException]] | None = None,
response_description: str | None = None,
responses: Mapping[int, ResponseSpec] | None = None,
security: Sequence[SecurityRequirement] | None = None,
summary: str | None = None,
tags: Sequence[str] | None = None,
type_decoders: TypeDecodersSequence | None = None,
type_encoders: TypeEncodersMap | None = None,
**kwargs: Any,
) -> None:
"""Initialize ``delete``
Args:
path: A path fragment for the route handler function or a sequence of path fragments.
If not given defaults to ``/``
after_request: A sync or async function executed before a :class:`Request <.connection.Request>` is passed
to any route handler. If this function returns a value, the request will not reach the route handler,
and instead this value will be used.
after_response: A sync or async function called after the response has been awaited. It receives the
:class:`Request <.connection.Request>` object and should not return any values.
background: A :class:`BackgroundTask <.background_tasks.BackgroundTask>` instance or
:class:`BackgroundTasks <.background_tasks.BackgroundTasks>` to execute after the response is finished.
Defaults to ``None``.
before_request: A sync or async function called immediately before calling the route handler. Receives
the :class:`.connection.Request` instance and any non-``None`` return value is used for the response,
bypassing the route handler.
cache: Enables response caching if configured on the application level. Valid values are ``True`` or a number
of seconds (e.g. ``120``) to cache the response.
cache_control: A ``cache-control`` header of type
:class:`CacheControlHeader <.datastructures.CacheControlHeader>` that will be added to the response.
cache_key_builder: A :class:`cache-key builder function <.types.CacheKeyBuilder>`. Allows for customization
of the cache key if caching is configured on the application level.
dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for (de)serializing and
validation of request data.
dependencies: A string keyed mapping of dependency :class:`Provider <.di.Provide>` instances.
etag: An ``etag`` header of type :class:`ETag <.datastructures.ETag>` that will be added to the response.
exception_handlers: A mapping of status codes and/or exception types to handler functions.
guards: A sequence of :class:`Guard <.types.Guard>` callables.
http_method: An :class:`http method string <.types.Method>`, a member of the enum
:class:`HttpMethod <litestar.enums.HttpMethod>` or a list of these that correlates to the methods the
route handler function should handle.
media_type: A member of the :class:`MediaType <.enums.MediaType>` enum or a string with a
valid IANA Media-Type.
middleware: A sequence of :class:`Middleware <.types.Middleware>`.
name: A string identifying the route handler.
opt: A string keyed mapping of arbitrary values that can be accessed in :class:`Guards <.types.Guard>` or
wherever you have access to :class:`Request <.connection.Request>` or :class:`ASGI Scope <.types.Scope>`.
request_class: A custom subclass of :class:`Request <.connection.Request>` to be used as route handler's
default request.
response_class: A custom subclass of :class:`Response <.response.Response>` to be used as route handler's
default response.
response_cookies: A sequence of :class:`Cookie <.datastructures.Cookie>` instances.
response_headers: A string keyed mapping of :class:`ResponseHeader <.datastructures.ResponseHeader>`
instances.
responses: A mapping of additional status codes and a description of their expected content.
This information will be included in the OpenAPI schema
return_dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for serializing
outbound response data.
signature_namespace: A mapping of names to types for use in forward reference resolution during signature modelling.
status_code: An http status code for the response. Defaults to ``200`` for mixed method or ``GET``, ``PUT``
and ``PATCH``, ``201`` for ``POST`` and ``204`` for ``DELETE``.
sync_to_thread: A boolean dictating whether the handler function will be executed in a worker thread or the
main event loop. This has an effect only for sync handler functions. See using sync handler functions.
content_encoding: A string describing the encoding of the content, e.g. ``base64``.
content_media_type: A string designating the media-type of the content, e.g. ``image/png``.
deprecated: A boolean dictating whether this route should be marked as deprecated in the OpenAPI schema.
description: Text used for the route's schema description section.
include_in_schema: A boolean flag dictating whether the route handler should be documented in the OpenAPI schema.
operation_class: :class:`Operation <.openapi.spec.operation.Operation>` to be used with the route's OpenAPI schema.
operation_id: Either a string or a callable returning a string. An identifier used for the route's schema operationId.
raises: A list of exception classes extending from litestar.HttpException that is used for the OpenAPI documentation.
This list should describe all exceptions raised within the route handler's function/method. The Litestar
ValidationException will be added automatically for the schema if any validation is involved.
response_description: Text used for the route's response schema description section.
security: A sequence of dictionaries that contain information about which security scheme can be used on the endpoint.
summary: Text used for the route's schema summary section.
tags: A sequence of string tags that will be appended to the OpenAPI schema.
type_decoders: A sequence of tuples, each composed of a predicate testing for type identity and a msgspec
hook for deserialization.
type_encoders: A mapping of types to callables that transform them into types supported for serialization.
**kwargs: Any additional kwarg - will be set in the opt dictionary.
"""
if "http_method" in kwargs:
raise ImproperlyConfiguredException(MSG_SEMANTIC_ROUTE_HANDLER_WITH_HTTP)
super().__init__(
after_request=after_request,
after_response=after_response,
background=background,
before_request=before_request,
cache=cache,
cache_control=cache_control,
cache_key_builder=cache_key_builder,
content_encoding=content_encoding,
content_media_type=content_media_type,
dependencies=dependencies,
deprecated=deprecated,
description=description,
dto=dto,
etag=etag,
exception_handlers=exception_handlers,
guards=guards,
http_method=HttpMethod.DELETE,
include_in_schema=include_in_schema,
media_type=media_type,
middleware=middleware,
name=name,
operation_class=operation_class,
operation_id=operation_id,
opt=opt,
path=path,
raises=raises,
request_class=request_class,
response_class=response_class,
response_cookies=response_cookies,
response_description=response_description,
response_headers=response_headers,
responses=responses,
return_dto=return_dto,
security=security,
signature_namespace=signature_namespace,
status_code=status_code,
summary=summary,
sync_to_thread=sync_to_thread,
tags=tags,
type_decoders=type_decoders,
type_encoders=type_encoders,
**kwargs,
)
def __init_subclass__(cls, **kwargs: Any) -> None:
_subclass_warning()
class get(HTTPRouteHandler):
"""GET Route Decorator.
Use this decorator to decorate an HTTP handler for GET requests.
"""
def __init__(
self,
path: str | None | Sequence[str] = None,
*,
after_request: AfterRequestHookHandler | None = None,
after_response: AfterResponseHookHandler | None = None,
background: BackgroundTask | BackgroundTasks | None = None,
before_request: BeforeRequestHookHandler | None = None,
cache: bool | int | type[CACHE_FOREVER] = False,
cache_control: CacheControlHeader | None = None,
cache_key_builder: CacheKeyBuilder | None = None,
dependencies: Dependencies | None = None,
dto: type[AbstractDTO] | None | EmptyType = Empty,
etag: ETag | None = None,
exception_handlers: ExceptionHandlersMap | None = None,
guards: Sequence[Guard] | None = None,
media_type: MediaType | str | None = None,
middleware: Sequence[Middleware] | None = None,
name: str | None = None,
opt: Mapping[str, Any] | None = None,
request_class: type[Request] | None = None,
response_class: type[Response] | None = None,
response_cookies: ResponseCookies | None = None,
response_headers: ResponseHeaders | None = None,
return_dto: type[AbstractDTO] | None | EmptyType = Empty,
signature_namespace: Mapping[str, Any] | None = None,
status_code: int | None = None,
sync_to_thread: bool | None = None,
# OpenAPI related attributes
content_encoding: str | None = None,
content_media_type: str | None = None,
deprecated: bool = False,
description: str | None = None,
include_in_schema: bool | EmptyType = Empty,
operation_class: type[Operation] = Operation,
operation_id: str | OperationIDCreator | None = None,
raises: Sequence[type[HTTPException]] | None = None,
response_description: str | None = None,
responses: Mapping[int, ResponseSpec] | None = None,
security: Sequence[SecurityRequirement] | None = None,
summary: str | None = None,
tags: Sequence[str] | None = None,
type_decoders: TypeDecodersSequence | None = None,
type_encoders: TypeEncodersMap | None = None,
**kwargs: Any,
) -> None:
"""Initialize ``get``.
Args:
path: A path fragment for the route handler function or a sequence of path fragments.
If not given defaults to ``/``
after_request: A sync or async function executed before a :class:`Request <.connection.Request>` is passed
to any route handler. If this function returns a value, the request will not reach the route handler,
and instead this value will be used.
after_response: A sync or async function called after the response has been awaited. It receives the
:class:`Request <.connection.Request>` object and should not return any values.
background: A :class:`BackgroundTask <.background_tasks.BackgroundTask>` instance or
:class:`BackgroundTasks <.background_tasks.BackgroundTasks>` to execute after the response is finished.
Defaults to ``None``.
before_request: A sync or async function called immediately before calling the route handler. Receives
the :class:`.connection.Request` instance and any non-``None`` return value is used for the response,
bypassing the route handler.
cache: Enables response caching if configured on the application level. Valid values are ``True`` or a number
of seconds (e.g. ``120``) to cache the response.
cache_control: A ``cache-control`` header of type
:class:`CacheControlHeader <.datastructures.CacheControlHeader>` that will be added to the response.
cache_key_builder: A :class:`cache-key builder function <.types.CacheKeyBuilder>`. Allows for customization
of the cache key if caching is configured on the application level.
dependencies: A string keyed mapping of dependency :class:`Provider <.di.Provide>` instances.
dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for (de)serializing and
validation of request data.
etag: An ``etag`` header of type :class:`ETag <.datastructures.ETag>` that will be added to the response.
exception_handlers: A mapping of status codes and/or exception types to handler functions.
guards: A sequence of :class:`Guard <.types.Guard>` callables.
http_method: An :class:`http method string <.types.Method>`, a member of the enum
:class:`HttpMethod <litestar.enums.HttpMethod>` or a list of these that correlates to the methods the
route handler function should handle.
media_type: A member of the :class:`MediaType <.enums.MediaType>` enum or a string with a
valid IANA Media-Type.
middleware: A sequence of :class:`Middleware <.types.Middleware>`.
name: A string identifying the route handler.
opt: A string keyed mapping of arbitrary values that can be accessed in :class:`Guards <.types.Guard>` or
wherever you have access to :class:`Request <.connection.Request>` or :class:`ASGI Scope <.types.Scope>`.
request_class: A custom subclass of :class:`Request <.connection.Request>` to be used as route handler's
default request.
response_class: A custom subclass of :class:`Response <.response.Response>` to be used as route handler's
default response.
response_cookies: A sequence of :class:`Cookie <.datastructures.Cookie>` instances.
response_headers: A string keyed mapping of :class:`ResponseHeader <.datastructures.ResponseHeader>`
instances.
responses: A mapping of additional status codes and a description of their expected content.
This information will be included in the OpenAPI schema
return_dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for serializing
outbound response data.
signature_namespace: A mapping of names to types for use in forward reference resolution during signature modelling.
status_code: An http status code for the response. Defaults to ``200`` for mixed method or ``GET``, ``PUT`` and
``PATCH``, ``201`` for ``POST`` and ``204`` for ``DELETE``.
sync_to_thread: A boolean dictating whether the handler function will be executed in a worker thread or the
main event loop. This has an effect only for sync handler functions. See using sync handler functions.
content_encoding: A string describing the encoding of the content, e.g. ``base64``.
content_media_type: A string designating the media-type of the content, e.g. ``image/png``.
deprecated: A boolean dictating whether this route should be marked as deprecated in the OpenAPI schema.
description: Text used for the route's schema description section.
include_in_schema: A boolean flag dictating whether the route handler should be documented in the OpenAPI schema.
operation_class: :class:`Operation <.openapi.spec.operation.Operation>` to be used with the route's OpenAPI schema.
operation_id: Either a string or a callable returning a string. An identifier used for the route's schema operationId.
raises: A list of exception classes extending from litestar.HttpException that is used for the OpenAPI documentation.
This list should describe all exceptions raised within the route handler's function/method. The Litestar
ValidationException will be added automatically for the schema if any validation is involved.
response_description: Text used for the route's response schema description section.
security: A sequence of dictionaries that contain information about which security scheme can be used on the endpoint.
summary: Text used for the route's schema summary section.
tags: A sequence of string tags that will be appended to the OpenAPI schema.
type_decoders: A sequence of tuples, each composed of a predicate testing for type identity and a msgspec
hook for deserialization.
type_encoders: A mapping of types to callables that transform them into types supported for serialization.
**kwargs: Any additional kwarg - will be set in the opt dictionary.
"""
if "http_method" in kwargs:
raise ImproperlyConfiguredException(MSG_SEMANTIC_ROUTE_HANDLER_WITH_HTTP)
super().__init__(
after_request=after_request,
after_response=after_response,
background=background,
before_request=before_request,
cache=cache,
cache_control=cache_control,
cache_key_builder=cache_key_builder,
content_encoding=content_encoding,
content_media_type=content_media_type,
dependencies=dependencies,
deprecated=deprecated,
description=description,
dto=dto,
etag=etag,
exception_handlers=exception_handlers,
guards=guards,
http_method=HttpMethod.GET,
include_in_schema=include_in_schema,
media_type=media_type,
middleware=middleware,
name=name,
operation_class=operation_class,
operation_id=operation_id,
opt=opt,
path=path,
raises=raises,
request_class=request_class,
response_class=response_class,
response_cookies=response_cookies,
response_description=response_description,
response_headers=response_headers,
responses=responses,
return_dto=return_dto,
security=security,
signature_namespace=signature_namespace,
status_code=status_code,
summary=summary,
sync_to_thread=sync_to_thread,
tags=tags,
type_decoders=type_decoders,
type_encoders=type_encoders,
**kwargs,
)
def __init_subclass__(cls, **kwargs: Any) -> None:
_subclass_warning()
class head(HTTPRouteHandler):
"""HEAD Route Decorator.
Use this decorator to decorate an HTTP handler for HEAD requests.
"""
def __init__(
self,
path: str | None | Sequence[str] = None,
*,
after_request: AfterRequestHookHandler | None = None,
after_response: AfterResponseHookHandler | None = None,
background: BackgroundTask | BackgroundTasks | None = None,
before_request: BeforeRequestHookHandler | None = None,
cache: bool | int | type[CACHE_FOREVER] = False,
cache_control: CacheControlHeader | None = None,
cache_key_builder: CacheKeyBuilder | None = None,
dependencies: Dependencies | None = None,
dto: type[AbstractDTO] | None | EmptyType = Empty,
etag: ETag | None = None,
exception_handlers: ExceptionHandlersMap | None = None,
guards: Sequence[Guard] | None = None,
media_type: MediaType | str | None = None,
middleware: Sequence[Middleware] | None = None,
name: str | None = None,
opt: Mapping[str, Any] | None = None,
request_class: type[Request] | None = None,
response_class: type[Response] | None = None,
response_cookies: ResponseCookies | None = None,
response_headers: ResponseHeaders | None = None,
signature_namespace: Mapping[str, Any] | None = None,
status_code: int | None = None,
sync_to_thread: bool | None = None,
# OpenAPI related attributes
content_encoding: str | None = None,
content_media_type: str | None = None,
deprecated: bool = False,
description: str | None = None,
include_in_schema: bool | EmptyType = Empty,
operation_class: type[Operation] = Operation,
operation_id: str | OperationIDCreator | None = None,
raises: Sequence[type[HTTPException]] | None = None,
response_description: str | None = None,
responses: Mapping[int, ResponseSpec] | None = None,
return_dto: type[AbstractDTO] | None | EmptyType = Empty,
security: Sequence[SecurityRequirement] | None = None,
summary: str | None = None,
tags: Sequence[str] | None = None,
type_decoders: TypeDecodersSequence | None = None,
type_encoders: TypeEncodersMap | None = None,
**kwargs: Any,
) -> None:
"""Initialize ``head``.
Notes:
- A response to a head request cannot include a body.
See: [MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/HEAD).
Args:
path: A path fragment for the route handler function or a sequence of path fragments.
If not given defaults to ``/``
after_request: A sync or async function executed before a :class:`Request <.connection.Request>` is passed
to any route handler. If this function returns a value, the request will not reach the route handler,
and instead this value will be used.
after_response: A sync or async function called after the response has been awaited. It receives the
:class:`Request <.connection.Request>` object and should not return any values.
background: A :class:`BackgroundTask <.background_tasks.BackgroundTask>` instance or
:class:`BackgroundTasks <.background_tasks.BackgroundTasks>` to execute after the response is finished.
Defaults to ``None``.
before_request: A sync or async function called immediately before calling the route handler. Receives
the :class:`.connection.Request` instance and any non-``None`` return value is used for the response,
bypassing the route handler.
cache: Enables response caching if configured on the application level. Valid values are ``True`` or a number
of seconds (e.g. ``120``) to cache the response.
cache_control: A ``cache-control`` header of type
:class:`CacheControlHeader <.datastructures.CacheControlHeader>` that will be added to the response.
cache_key_builder: A :class:`cache-key builder function <.types.CacheKeyBuilder>`. Allows for customization
of the cache key if caching is configured on the application level.
dependencies: A string keyed mapping of dependency :class:`Provider <.di.Provide>` instances.
dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for (de)serializing and
validation of request data.
etag: An ``etag`` header of type :class:`ETag <.datastructures.ETag>` that will be added to the response.
exception_handlers: A mapping of status codes and/or exception types to handler functions.
guards: A sequence of :class:`Guard <.types.Guard>` callables.
http_method: An :class:`http method string <.types.Method>`, a member of the enum
:class:`HttpMethod <litestar.enums.HttpMethod>` or a list of these that correlates to the methods the
route handler function should handle.
media_type: A member of the :class:`MediaType <.enums.MediaType>` enum or a string with a
valid IANA Media-Type.
middleware: A sequence of :class:`Middleware <.types.Middleware>`.
name: A string identifying the route handler.
opt: A string keyed mapping of arbitrary values that can be accessed in :class:`Guards <.types.Guard>` or
wherever you have access to :class:`Request <.connection.Request>` or :class:`ASGI Scope <.types.Scope>`.
request_class: A custom subclass of :class:`Request <.connection.Request>` to be used as route handler's
default request.
response_class: A custom subclass of :class:`Response <.response.Response>` to be used as route handler's
default response.
response_cookies: A sequence of :class:`Cookie <.datastructures.Cookie>` instances.
response_headers: A string keyed mapping of :class:`ResponseHeader <.datastructures.ResponseHeader>`
instances.
responses: A mapping of additional status codes and a description of their expected content.
This information will be included in the OpenAPI schema
return_dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for serializing
outbound response data.
signature_namespace: A mapping of names to types for use in forward reference resolution during signature modelling.
status_code: An http status code for the response. Defaults to ``200`` for mixed method or ``GET``, ``PUT`` and
``PATCH``, ``201`` for ``POST`` and ``204`` for ``DELETE``.
sync_to_thread: A boolean dictating whether the handler function will be executed in a worker thread or the
main event loop. This has an effect only for sync handler functions. See using sync handler functions.
content_encoding: A string describing the encoding of the content, e.g. ``base64``.
content_media_type: A string designating the media-type of the content, e.g. ``image/png``.
deprecated: A boolean dictating whether this route should be marked as deprecated in the OpenAPI schema.
description: Text used for the route's schema description section.
include_in_schema: A boolean flag dictating whether the route handler should be documented in the OpenAPI schema.
operation_class: :class:`Operation <.openapi.spec.operation.Operation>` to be used with the route's OpenAPI schema.
operation_id: Either a string or a callable returning a string. An identifier used for the route's schema operationId.
raises: A list of exception classes extending from litestar.HttpException that is used for the OpenAPI documentation.
This list should describe all exceptions raised within the route handler's function/method. The Litestar
ValidationException will be added automatically for the schema if any validation is involved.
response_description: Text used for the route's response schema description section.
security: A sequence of dictionaries that contain information about which security scheme can be used on the endpoint.
summary: Text used for the route's schema summary section.
tags: A sequence of string tags that will be appended to the OpenAPI schema.
type_decoders: A sequence of tuples, each composed of a predicate testing for type identity and a msgspec
hook for deserialization.
type_encoders: A mapping of types to callables that transform them into types supported for serialization.
**kwargs: Any additional kwarg - will be set in the opt dictionary.
"""
if "http_method" in kwargs:
raise ImproperlyConfiguredException(MSG_SEMANTIC_ROUTE_HANDLER_WITH_HTTP)
super().__init__(
after_request=after_request,
after_response=after_response,
background=background,
before_request=before_request,
cache=cache,
cache_control=cache_control,
cache_key_builder=cache_key_builder,
content_encoding=content_encoding,
content_media_type=content_media_type,
dependencies=dependencies,
deprecated=deprecated,
description=description,
dto=dto,
etag=etag,
exception_handlers=exception_handlers,
guards=guards,
http_method=HttpMethod.HEAD,
include_in_schema=include_in_schema,
media_type=media_type,
middleware=middleware,
name=name,
operation_class=operation_class,
operation_id=operation_id,
opt=opt,
path=path,
raises=raises,
request_class=request_class,
response_class=response_class,
response_cookies=response_cookies,
response_description=response_description,
response_headers=response_headers,
responses=responses,
return_dto=return_dto,
security=security,
signature_namespace=signature_namespace,
status_code=status_code,
summary=summary,
sync_to_thread=sync_to_thread,
tags=tags,
type_decoders=type_decoders,
type_encoders=type_encoders,
**kwargs,
)
def __init_subclass__(cls, **kwargs: Any) -> None:
_subclass_warning()
def _validate_handler_function(self) -> None:
"""Validate the route handler function once it is set by inspecting its return annotations."""
super()._validate_handler_function()
# File and ASGIFileResponse are allowed here because they have special handling for HEAD responses
field_definition = self.parsed_fn_signature.return_type
if not (
is_empty_response_annotation(field_definition)
or is_class_and_subclass(field_definition.annotation, File)
or is_class_and_subclass(field_definition.annotation, ASGIFileResponse)
):
raise ImproperlyConfiguredException(
f"{self}: Handlers for 'HEAD' requests must not return a value. Either return 'None' or a response type without a body."
)
class patch(HTTPRouteHandler):
"""PATCH Route Decorator.
Use this decorator to decorate an HTTP handler for PATCH requests.
"""
def __init__(
self,
path: str | None | Sequence[str] = None,
*,
after_request: AfterRequestHookHandler | None = None,
after_response: AfterResponseHookHandler | None = None,
background: BackgroundTask | BackgroundTasks | None = None,
before_request: BeforeRequestHookHandler | None = None,
cache: bool | int | type[CACHE_FOREVER] = False,
cache_control: CacheControlHeader | None = None,
cache_key_builder: CacheKeyBuilder | None = None,
dependencies: Dependencies | None = None,
dto: type[AbstractDTO] | None | EmptyType = Empty,
etag: ETag | None = None,
exception_handlers: ExceptionHandlersMap | None = None,
guards: Sequence[Guard] | None = None,
media_type: MediaType | str | None = None,
middleware: Sequence[Middleware] | None = None,
name: str | None = None,
opt: Mapping[str, Any] | None = None,
request_class: type[Request] | None = None,
response_class: type[Response] | None = None,
response_cookies: ResponseCookies | None = None,
response_headers: ResponseHeaders | None = None,
return_dto: type[AbstractDTO] | None | EmptyType = Empty,
signature_namespace: Mapping[str, Any] | None = None,
status_code: int | None = None,
sync_to_thread: bool | None = None,
# OpenAPI related attributes
content_encoding: str | None = None,
content_media_type: str | None = None,
deprecated: bool = False,
description: str | None = None,
include_in_schema: bool | EmptyType = Empty,
operation_class: type[Operation] = Operation,
operation_id: str | OperationIDCreator | None = None,
raises: Sequence[type[HTTPException]] | None = None,
response_description: str | None = None,
responses: Mapping[int, ResponseSpec] | None = None,
security: Sequence[SecurityRequirement] | None = None,
summary: str | None = None,
tags: Sequence[str] | None = None,
type_decoders: TypeDecodersSequence | None = None,
type_encoders: TypeEncodersMap | None = None,
**kwargs: Any,
) -> None:
"""Initialize ``patch``.
Args:
path: A path fragment for the route handler function or a sequence of path fragments.
If not given defaults to ``/``
after_request: A sync or async function executed before a :class:`Request <.connection.Request>` is passed
to any route handler. If this function returns a value, the request will not reach the route handler,
and instead this value will be used.
after_response: A sync or async function called after the response has been awaited. It receives the
:class:`Request <.connection.Request>` object and should not return any values.
background: A :class:`BackgroundTask <.background_tasks.BackgroundTask>` instance or
:class:`BackgroundTasks <.background_tasks.BackgroundTasks>` to execute after the response is finished.
Defaults to ``None``.
before_request: A sync or async function called immediately before calling the route handler. Receives
the :class:`.connection.Request` instance and any non-``None`` return value is used for the response,
bypassing the route handler.
cache: Enables response caching if configured on the application level. Valid values are ``True`` or a number
of seconds (e.g. ``120``) to cache the response.
cache_control: A ``cache-control`` header of type
:class:`CacheControlHeader <.datastructures.CacheControlHeader>` that will be added to the response.
cache_key_builder: A :class:`cache-key builder function <.types.CacheKeyBuilder>`. Allows for customization
of the cache key if caching is configured on the application level.
dependencies: A string keyed mapping of dependency :class:`Provider <.di.Provide>` instances.
dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for (de)serializing and
validation of request data.
etag: An ``etag`` header of type :class:`ETag <.datastructures.ETag>` that will be added to the response.
exception_handlers: A mapping of status codes and/or exception types to handler functions.
guards: A sequence of :class:`Guard <.types.Guard>` callables.
http_method: An :class:`http method string <.types.Method>`, a member of the enum
:class:`HttpMethod <litestar.enums.HttpMethod>` or a list of these that correlates to the methods the
route handler function should handle.
media_type: A member of the :class:`MediaType <.enums.MediaType>` enum or a string with a
valid IANA Media-Type.
middleware: A sequence of :class:`Middleware <.types.Middleware>`.
name: A string identifying the route handler.
opt: A string keyed mapping of arbitrary values that can be accessed in :class:`Guards <.types.Guard>` or
wherever you have access to :class:`Request <.connection.Request>` or :class:`ASGI Scope <.types.Scope>`.
request_class: A custom subclass of :class:`Request <.connection.Request>` to be used as route handler's
default request.
response_class: A custom subclass of :class:`Response <.response.Response>` to be used as route handler's
default response.
response_cookies: A sequence of :class:`Cookie <.datastructures.Cookie>` instances.
response_headers: A string keyed mapping of :class:`ResponseHeader <.datastructures.ResponseHeader>`
instances.
responses: A mapping of additional status codes and a description of their expected content.
This information will be included in the OpenAPI schema
return_dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for serializing
outbound response data.
signature_namespace: A mapping of names to types for use in forward reference resolution during signature modelling.
status_code: An http status code for the response. Defaults to ``200`` for mixed method or ``GET``, ``PUT`` and
``PATCH``, ``201`` for ``POST`` and ``204`` for ``DELETE``.
sync_to_thread: A boolean dictating whether the handler function will be executed in a worker thread or the
main event loop. This has an effect only for sync handler functions. See using sync handler functions.
content_encoding: A string describing the encoding of the content, e.g. ``base64``.
content_media_type: A string designating the media-type of the content, e.g. ``image/png``.
deprecated: A boolean dictating whether this route should be marked as deprecated in the OpenAPI schema.
description: Text used for the route's schema description section.
include_in_schema: A boolean flag dictating whether the route handler should be documented in the OpenAPI schema.
operation_class: :class:`Operation <.openapi.spec.operation.Operation>` to be used with the route's OpenAPI schema.
operation_id: Either a string or a callable returning a string. An identifier used for the route's schema operationId.
raises: A list of exception classes extending from litestar.HttpException that is used for the OpenAPI documentation.
This list should describe all exceptions raised within the route handler's function/method. The Litestar
ValidationException will be added automatically for the schema if any validation is involved.
response_description: Text used for the route's response schema description section.
security: A sequence of dictionaries that contain information about which security scheme can be used on the endpoint.
summary: Text used for the route's schema summary section.
tags: A sequence of string tags that will be appended to the OpenAPI schema.
type_decoders: A sequence of tuples, each composed of a predicate testing for type identity and a msgspec
hook for deserialization.
type_encoders: A mapping of types to callables that transform them into types supported for serialization.
**kwargs: Any additional kwarg - will be set in the opt dictionary.
"""
if "http_method" in kwargs:
raise ImproperlyConfiguredException(MSG_SEMANTIC_ROUTE_HANDLER_WITH_HTTP)
super().__init__(
after_request=after_request,
after_response=after_response,
background=background,
before_request=before_request,
cache=cache,
cache_control=cache_control,
cache_key_builder=cache_key_builder,
content_encoding=content_encoding,
content_media_type=content_media_type,
dependencies=dependencies,
deprecated=deprecated,
description=description,
dto=dto,
etag=etag,
exception_handlers=exception_handlers,
guards=guards,
http_method=HttpMethod.PATCH,
include_in_schema=include_in_schema,
media_type=media_type,
middleware=middleware,
name=name,
operation_class=operation_class,
operation_id=operation_id,
opt=opt,
path=path,
raises=raises,
request_class=request_class,
response_class=response_class,
response_cookies=response_cookies,
response_description=response_description,
response_headers=response_headers,
responses=responses,
return_dto=return_dto,
security=security,
signature_namespace=signature_namespace,
status_code=status_code,
summary=summary,
sync_to_thread=sync_to_thread,
tags=tags,
type_decoders=type_decoders,
type_encoders=type_encoders,
**kwargs,
)
def __init_subclass__(cls, **kwargs: Any) -> None:
_subclass_warning()
class post(HTTPRouteHandler):
"""POST Route Decorator.
Use this decorator to decorate an HTTP handler for POST requests.
"""
def __init__(
self,
path: str | None | Sequence[str] = None,
*,
after_request: AfterRequestHookHandler | None = None,
after_response: AfterResponseHookHandler | None = None,
background: BackgroundTask | BackgroundTasks | None = None,
before_request: BeforeRequestHookHandler | None = None,
cache: bool | int | type[CACHE_FOREVER] = False,
cache_control: CacheControlHeader | None = None,
cache_key_builder: CacheKeyBuilder | None = None,
dependencies: Dependencies | None = None,
dto: type[AbstractDTO] | None | EmptyType = Empty,
etag: ETag | None = None,
exception_handlers: ExceptionHandlersMap | None = None,
guards: Sequence[Guard] | None = None,
media_type: MediaType | str | None = None,
middleware: Sequence[Middleware] | None = None,
name: str | None = None,
opt: Mapping[str, Any] | None = None,
request_class: type[Request] | None = None,
response_class: type[Response] | None = None,
response_cookies: ResponseCookies | None = None,
response_headers: ResponseHeaders | None = None,
return_dto: type[AbstractDTO] | None | EmptyType = Empty,
signature_namespace: Mapping[str, Any] | None = None,
status_code: int | None = None,
sync_to_thread: bool | None = None,
# OpenAPI related attributes
content_encoding: str | None = None,
content_media_type: str | None = None,
deprecated: bool = False,
description: str | None = None,
include_in_schema: bool | EmptyType = Empty,
operation_class: type[Operation] = Operation,
operation_id: str | OperationIDCreator | None = None,
raises: Sequence[type[HTTPException]] | None = None,
response_description: str | None = None,
responses: Mapping[int, ResponseSpec] | None = None,
security: Sequence[SecurityRequirement] | None = None,
summary: str | None = None,
tags: Sequence[str] | None = None,
type_decoders: TypeDecodersSequence | None = None,
type_encoders: TypeEncodersMap | None = None,
**kwargs: Any,
) -> None:
"""Initialize ``post``
Args:
path: A path fragment for the route handler function or a sequence of path fragments.
If not given defaults to ``/``
after_request: A sync or async function executed before a :class:`Request <.connection.Request>` is passed
to any route handler. If this function returns a value, the request will not reach the route handler,
and instead this value will be used.
after_response: A sync or async function called after the response has been awaited. It receives the
:class:`Request <.connection.Request>` object and should not return any values.
background: A :class:`BackgroundTask <.background_tasks.BackgroundTask>` instance or
:class:`BackgroundTasks <.background_tasks.BackgroundTasks>` to execute after the response is finished.
Defaults to ``None``.
before_request: A sync or async function called immediately before calling the route handler. Receives
the :class:`.connection.Request` instance and any non-``None`` return value is used for the response,
bypassing the route handler.
cache: Enables response caching if configured on the application level. Valid values are ``True`` or a number
of seconds (e.g. ``120``) to cache the response.
cache_control: A ``cache-control`` header of type
:class:`CacheControlHeader <.datastructures.CacheControlHeader>` that will be added to the response.
cache_key_builder: A :class:`cache-key builder function <.types.CacheKeyBuilder>`. Allows for customization
of the cache key if caching is configured on the application level.
dependencies: A string keyed mapping of dependency :class:`Provider <.di.Provide>` instances.
dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for (de)serializing and
validation of request data.
etag: An ``etag`` header of type :class:`ETag <.datastructures.ETag>` that will be added to the response.
exception_handlers: A mapping of status codes and/or exception types to handler functions.
guards: A sequence of :class:`Guard <.types.Guard>` callables.
http_method: An :class:`http method string <.types.Method>`, a member of the enum
:class:`HttpMethod <litestar.enums.HttpMethod>` or a list of these that correlates to the methods the
route handler function should handle.
media_type: A member of the :class:`MediaType <.enums.MediaType>` enum or a string with a
valid IANA Media-Type.
middleware: A sequence of :class:`Middleware <.types.Middleware>`.
name: A string identifying the route handler.
opt: A string keyed mapping of arbitrary values that can be accessed in :class:`Guards <.types.Guard>` or
wherever you have access to :class:`Request <.connection.Request>` or :class:`ASGI Scope <.types.Scope>`.
request_class: A custom subclass of :class:`Request <.connection.Request>` to be used as route handler's
default request.
response_class: A custom subclass of :class:`Response <.response.Response>` to be used as route handler's
default response.
response_cookies: A sequence of :class:`Cookie <.datastructures.Cookie>` instances.
response_headers: A string keyed mapping of :class:`ResponseHeader <.datastructures.ResponseHeader>`
instances.
responses: A mapping of additional status codes and a description of their expected content.
This information will be included in the OpenAPI schema
return_dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for serializing
outbound response data.
signature_namespace: A mapping of names to types for use in forward reference resolution during signature modelling.
status_code: An http status code for the response. Defaults to ``200`` for mixed method or ``GET``, ``PUT`` and
``PATCH``, ``201`` for ``POST`` and ``204`` for ``DELETE``.
sync_to_thread: A boolean dictating whether the handler function will be executed in a worker thread or the
main event loop. This has an effect only for sync handler functions. See using sync handler functions.
content_encoding: A string describing the encoding of the content, e.g. ``base64``.
content_media_type: A string designating the media-type of the content, e.g. ``image/png``.
deprecated: A boolean dictating whether this route should be marked as deprecated in the OpenAPI schema.
description: Text used for the route's schema description section.
include_in_schema: A boolean flag dictating whether the route handler should be documented in the OpenAPI schema.
operation_class: :class:`Operation <.openapi.spec.operation.Operation>` to be used with the route's OpenAPI schema.
operation_id: Either a string or a callable returning a string. An identifier used for the route's schema operationId.
raises: A list of exception classes extending from litestar.HttpException that is used for the OpenAPI documentation.
This list should describe all exceptions raised within the route handler's function/method. The Litestar
ValidationException will be added automatically for the schema if any validation is involved.
response_description: Text used for the route's response schema description section.
security: A sequence of dictionaries that contain information about which security scheme can be used on the endpoint.
summary: Text used for the route's schema summary section.
tags: A sequence of string tags that will be appended to the OpenAPI schema.
type_decoders: A sequence of tuples, each composed of a predicate testing for type identity and a msgspec
hook for deserialization.
type_encoders: A mapping of types to callables that transform them into types supported for serialization.
**kwargs: Any additional kwarg - will be set in the opt dictionary.
"""
if "http_method" in kwargs:
raise ImproperlyConfiguredException(MSG_SEMANTIC_ROUTE_HANDLER_WITH_HTTP)
super().__init__(
after_request=after_request,
after_response=after_response,
background=background,
before_request=before_request,
cache=cache,
cache_control=cache_control,
cache_key_builder=cache_key_builder,
content_encoding=content_encoding,
content_media_type=content_media_type,
dependencies=dependencies,
deprecated=deprecated,
description=description,
dto=dto,
exception_handlers=exception_handlers,
etag=etag,
guards=guards,
http_method=HttpMethod.POST,
include_in_schema=include_in_schema,
media_type=media_type,
middleware=middleware,
name=name,
operation_class=operation_class,
operation_id=operation_id,
opt=opt,
path=path,
raises=raises,
request_class=request_class,
response_class=response_class,
response_cookies=response_cookies,
response_description=response_description,
response_headers=response_headers,
responses=responses,
return_dto=return_dto,
signature_namespace=signature_namespace,
security=security,
status_code=status_code,
summary=summary,
sync_to_thread=sync_to_thread,
tags=tags,
type_decoders=type_decoders,
type_encoders=type_encoders,
**kwargs,
)
def __init_subclass__(cls, **kwargs: Any) -> None:
_subclass_warning()
class put(HTTPRouteHandler):
"""PUT Route Decorator.
Use this decorator to decorate an HTTP handler for PUT requests.
"""
def __init__(
self,
path: str | None | Sequence[str] = None,
*,
after_request: AfterRequestHookHandler | None = None,
after_response: AfterResponseHookHandler | None = None,
background: BackgroundTask | BackgroundTasks | None = None,
before_request: BeforeRequestHookHandler | None = None,
cache: bool | int | type[CACHE_FOREVER] = False,
cache_control: CacheControlHeader | None = None,
cache_key_builder: CacheKeyBuilder | None = None,
dependencies: Dependencies | None = None,
dto: type[AbstractDTO] | None | EmptyType = Empty,
etag: ETag | None = None,
exception_handlers: ExceptionHandlersMap | None = None,
guards: Sequence[Guard] | None = None,
media_type: MediaType | str | None = None,
middleware: Sequence[Middleware] | None = None,
name: str | None = None,
opt: Mapping[str, Any] | None = None,
request_class: type[Request] | None = None,
response_class: type[Response] | None = None,
response_cookies: ResponseCookies | None = None,
response_headers: ResponseHeaders | None = None,
return_dto: type[AbstractDTO] | None | EmptyType = Empty,
signature_namespace: Mapping[str, Any] | None = None,
status_code: int | None = None,
sync_to_thread: bool | None = None,
# OpenAPI related attributes
content_encoding: str | None = None,
content_media_type: str | None = None,
deprecated: bool = False,
description: str | None = None,
include_in_schema: bool | EmptyType = Empty,
operation_class: type[Operation] = Operation,
operation_id: str | OperationIDCreator | None = None,
raises: Sequence[type[HTTPException]] | None = None,
response_description: str | None = None,
responses: Mapping[int, ResponseSpec] | None = None,
security: Sequence[SecurityRequirement] | None = None,
summary: str | None = None,
tags: Sequence[str] | None = None,
type_decoders: TypeDecodersSequence | None = None,
type_encoders: TypeEncodersMap | None = None,
**kwargs: Any,
) -> None:
"""Initialize ``put``
Args:
path: A path fragment for the route handler function or a sequence of path fragments.
If not given defaults to ``/``
after_request: A sync or async function executed before a :class:`Request <.connection.Request>` is passed
to any route handler. If this function returns a value, the request will not reach the route handler,
and instead this value will be used.
after_response: A sync or async function called after the response has been awaited. It receives the
:class:`Request <.connection.Request>` object and should not return any values.
background: A :class:`BackgroundTask <.background_tasks.BackgroundTask>` instance or
:class:`BackgroundTasks <.background_tasks.BackgroundTasks>` to execute after the response is finished.
Defaults to ``None``.
before_request: A sync or async function called immediately before calling the route handler. Receives
the :class:`.connection.Request` instance and any non-``None`` return value is used for the response,
bypassing the route handler.
cache: Enables response caching if configured on the application level. Valid values are ``True`` or a number
of seconds (e.g. ``120``) to cache the response.
cache_control: A ``cache-control`` header of type
:class:`CacheControlHeader <.datastructures.CacheControlHeader>` that will be added to the response.
cache_key_builder: A :class:`cache-key builder function <.types.CacheKeyBuilder>`. Allows for customization
of the cache key if caching is configured on the application level.
dependencies: A string keyed mapping of dependency :class:`Provider <.di.Provide>` instances.
dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for (de)serializing and
validation of request data.
etag: An ``etag`` header of type :class:`ETag <.datastructures.ETag>` that will be added to the response.
exception_handlers: A mapping of status codes and/or exception types to handler functions.
guards: A sequence of :class:`Guard <.types.Guard>` callables.
http_method: An :class:`http method string <.types.Method>`, a member of the enum
:class:`HttpMethod <litestar.enums.HttpMethod>` or a list of these that correlates to the methods the
route handler function should handle.
media_type: A member of the :class:`MediaType <.enums.MediaType>` enum or a string with a
valid IANA Media-Type.
middleware: A sequence of :class:`Middleware <.types.Middleware>`.
name: A string identifying the route handler.
opt: A string keyed mapping of arbitrary values that can be accessed in :class:`Guards <.types.Guard>` or
wherever you have access to :class:`Request <.connection.Request>` or :class:`ASGI Scope <.types.Scope>`.
request_class: A custom subclass of :class:`Request <.connection.Request>` to be used as route handler's
default request.
response_class: A custom subclass of :class:`Response <.response.Response>` to be used as route handler's
default response.
response_cookies: A sequence of :class:`Cookie <.datastructures.Cookie>` instances.
response_headers: A string keyed mapping of :class:`ResponseHeader <.datastructures.ResponseHeader>`
instances.
responses: A mapping of additional status codes and a description of their expected content.
This information will be included in the OpenAPI schema
return_dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for serializing
outbound response data.
signature_namespace: A mapping of names to types for use in forward reference resolution during signature modelling.
status_code: An http status code for the response. Defaults to ``200`` for mixed method or ``GET``, ``PUT`` and
``PATCH``, ``201`` for ``POST`` and ``204`` for ``DELETE``.
sync_to_thread: A boolean dictating whether the handler function will be executed in a worker thread or the
main event loop. This has an effect only for sync handler functions. See using sync handler functions.
content_encoding: A string describing the encoding of the content, e.g. ``base64``.
content_media_type: A string designating the media-type of the content, e.g. ``image/png``.
deprecated: A boolean dictating whether this route should be marked as deprecated in the OpenAPI schema.
description: Text used for the route's schema description section.
include_in_schema: A boolean flag dictating whether the route handler should be documented in the OpenAPI schema.
operation_class: :class:`Operation <.openapi.spec.operation.Operation>` to be used with the route's OpenAPI schema.
operation_id: Either a string or a callable returning a string. An identifier used for the route's schema operationId.
raises: A list of exception classes extending from litestar.HttpException that is used for the OpenAPI documentation.
This list should describe all exceptions raised within the route handler's function/method. The Litestar
ValidationException will be added automatically for the schema if any validation is involved.
response_description: Text used for the route's response schema description section.
security: A sequence of dictionaries that contain information about which security scheme can be used on the endpoint.
summary: Text used for the route's schema summary section.
tags: A sequence of string tags that will be appended to the OpenAPI schema.
type_decoders: A sequence of tuples, each composed of a predicate testing for type identity and a msgspec
hook for deserialization.
type_encoders: A mapping of types to callables that transform them into types supported for serialization.
**kwargs: Any additional kwarg - will be set in the opt dictionary.
"""
if "http_method" in kwargs:
raise ImproperlyConfiguredException(MSG_SEMANTIC_ROUTE_HANDLER_WITH_HTTP)
super().__init__(
after_request=after_request,
after_response=after_response,
background=background,
before_request=before_request,
cache=cache,
cache_control=cache_control,
cache_key_builder=cache_key_builder,
content_encoding=content_encoding,
content_media_type=content_media_type,
dependencies=dependencies,
deprecated=deprecated,
description=description,
dto=dto,
exception_handlers=exception_handlers,
etag=etag,
guards=guards,
http_method=HttpMethod.PUT,
include_in_schema=include_in_schema,
media_type=media_type,
middleware=middleware,
name=name,
operation_class=operation_class,
operation_id=operation_id,
opt=opt,
path=path,
raises=raises,
request_class=request_class,
response_class=response_class,
response_cookies=response_cookies,
response_description=response_description,
response_headers=response_headers,
responses=responses,
return_dto=return_dto,
security=security,
signature_namespace=signature_namespace,
status_code=status_code,
summary=summary,
sync_to_thread=sync_to_thread,
tags=tags,
type_decoders=type_decoders,
type_encoders=type_encoders,
**kwargs,
)
def __init_subclass__(cls, **kwargs: Any) -> None:
_subclass_warning()
|
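For orientation, a minimal sketch of a handler that uses a few of the decorator arguments documented above; the route, names and payload shape are illustrative and not taken from the advisory.

from litestar import Litestar, put


@put("/items/{item_id:int}", summary="Update an item", tags=["items"])
async def update_item(item_id: int, data: dict) -> dict:
    # Per the docstring above, PUT handlers default to a 200 status code.
    return {"id": item_id, **data}


app = Litestar(route_handlers=[update_item])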
PYSEC-2024-178
|
litestar/router.py
|
@@ -68,6 +68,7 @@ class Router:
"path",
"registered_route_handler_ids",
"request_class",
+ "request_max_body_size",
"response_class",
"response_cookies",
"response_headers",
@@ -111,6 +112,7 @@ def __init__(
type_decoders: TypeDecodersSequence | None = None,
type_encoders: TypeEncodersMap | None = None,
websocket_class: type[WebSocket] | None = None,
+ request_max_body_size: int | None | EmptyType = Empty,
) -> None:
"""Initialize a ``Router``.
@@ -143,6 +145,8 @@ def __init__(
with the router instance.
request_class: A custom subclass of :class:`Request <.connection.Request>` to be used as the default for
all route handlers, controllers and other routers associated with the router instance.
+ request_max_body_size: Maximum allowed size of the request body in bytes. If this size is exceeded,
+ a '413 - Request Entity Too Large' error response is returned.
response_class: A custom subclass of :class:`Response <.response.Response>` to be used as the default for
all route handlers, controllers and other routers associated with the router instance.
response_cookies: A sequence of :class:`Cookie <.datastructures.Cookie>` instances.
@@ -197,6 +201,7 @@ def __init__(
self.type_encoders = dict(type_encoders) if type_encoders is not None else None
self.type_decoders = list(type_decoders) if type_decoders is not None else None
self.websocket_class = websocket_class
+ self.request_max_body_size = request_max_body_size
for route_handler in route_handlers or []:
self.register(value=route_handler)
|
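A hedged sketch of what the patched signature above enables, assuming the post-fix Router: `request_max_body_size` caps request bodies (in bytes) for every handler registered under the router. The limit value and paths here are illustrative.

from litestar import Litestar, Router, post


@post("/upload")
async def upload(body: bytes) -> None:
    ...  # bodies above the router-level limit would be rejected with a 413 response


api = Router(path="/api", route_handlers=[upload], request_max_body_size=1024 * 1024)  # parameter added by the fix
app = Litestar(route_handlers=[api])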
from __future__ import annotations
from collections import defaultdict
from copy import copy, deepcopy
from typing import TYPE_CHECKING, Any, Mapping, Sequence, cast
from litestar._layers.utils import narrow_response_cookies, narrow_response_headers
from litestar.controller import Controller
from litestar.exceptions import ImproperlyConfiguredException
from litestar.handlers.asgi_handlers import ASGIRouteHandler
from litestar.handlers.http_handlers import HTTPRouteHandler
from litestar.handlers.websocket_handlers import WebsocketListener, WebsocketRouteHandler
from litestar.routes import ASGIRoute, HTTPRoute, WebSocketRoute
from litestar.types.empty import Empty
from litestar.utils import find_index, is_class_and_subclass, join_paths, normalize_path, unique
from litestar.utils.signature import add_types_to_signature_namespace
from litestar.utils.sync import ensure_async_callable
__all__ = ("Router",)
if TYPE_CHECKING:
from litestar.connection import Request, WebSocket
from litestar.datastructures import CacheControlHeader, ETag
from litestar.dto import AbstractDTO
from litestar.openapi.spec import SecurityRequirement
from litestar.response import Response
from litestar.routes import BaseRoute
from litestar.types import (
AfterRequestHookHandler,
AfterResponseHookHandler,
BeforeRequestHookHandler,
ControllerRouterHandler,
ExceptionHandlersMap,
Guard,
Middleware,
ParametersMap,
ResponseCookies,
RouteHandlerMapItem,
RouteHandlerType,
TypeEncodersMap,
)
from litestar.types.composite_types import Dependencies, ResponseHeaders, TypeDecodersSequence
from litestar.types.empty import EmptyType
class Router:
"""The Litestar Router class.
A Router instance is used to group controllers, routers and route handler functions under a shared path fragment.
"""
__slots__ = (
"after_request",
"after_response",
"before_request",
"cache_control",
"dependencies",
"dto",
"etag",
"exception_handlers",
"guards",
"include_in_schema",
"middleware",
"opt",
"owner",
"parameters",
"path",
"registered_route_handler_ids",
"request_class",
"response_class",
"response_cookies",
"response_headers",
"return_dto",
"routes",
"security",
"signature_namespace",
"tags",
"type_decoders",
"type_encoders",
"websocket_class",
)
def __init__(
self,
path: str,
*,
after_request: AfterRequestHookHandler | None = None,
after_response: AfterResponseHookHandler | None = None,
before_request: BeforeRequestHookHandler | None = None,
cache_control: CacheControlHeader | None = None,
dependencies: Dependencies | None = None,
dto: type[AbstractDTO] | None | EmptyType = Empty,
etag: ETag | None = None,
exception_handlers: ExceptionHandlersMap | None = None,
guards: Sequence[Guard] | None = None,
include_in_schema: bool | EmptyType = Empty,
middleware: Sequence[Middleware] | None = None,
opt: Mapping[str, Any] | None = None,
parameters: ParametersMap | None = None,
request_class: type[Request] | None = None,
response_class: type[Response] | None = None,
response_cookies: ResponseCookies | None = None,
response_headers: ResponseHeaders | None = None,
return_dto: type[AbstractDTO] | None | EmptyType = Empty,
route_handlers: Sequence[ControllerRouterHandler],
security: Sequence[SecurityRequirement] | None = None,
signature_namespace: Mapping[str, Any] | None = None,
signature_types: Sequence[Any] | None = None,
tags: Sequence[str] | None = None,
type_decoders: TypeDecodersSequence | None = None,
type_encoders: TypeEncodersMap | None = None,
websocket_class: type[WebSocket] | None = None,
) -> None:
"""Initialize a ``Router``.
Args:
after_request: A sync or async function executed before a :class:`Request <.connection.Request>` is passed
to any route handler. If this function returns a value, the request will not reach the route handler,
and instead this value will be used.
after_response: A sync or async function called after the response has been awaited. It receives the
:class:`Request <.connection.Request>` object and should not return any values.
before_request: A sync or async function called immediately before calling the route handler. Receives
the :class:`litestar.connection.Request` instance and any non-``None`` return value is used for the
response, bypassing the route handler.
cache_control: A ``cache-control`` header of type
:class:`CacheControlHeader <.datastructures.CacheControlHeader>` to add to route handlers of
this router. Can be overridden by route handlers.
dependencies: A string keyed mapping of dependency :class:`Provide <.di.Provide>` instances.
dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for (de)serializing and
validation of request data.
etag: An ``etag`` header of type :class:`ETag <.datastructures.ETag>` to add to route handlers of this app.
exception_handlers: A mapping of status codes and/or exception types to handler functions.
guards: A sequence of :data:`Guard <.types.Guard>` callables.
include_in_schema: A boolean flag dictating whether the route handler should be documented in the OpenAPI schema.
middleware: A sequence of :data:`Middleware <.types.Middleware>`.
opt: A string keyed mapping of arbitrary values that can be accessed in :data:`Guards <.types.Guard>` or
wherever you have access to :class:`Request <.connection.Request>` or
:data:`ASGI Scope <.types.Scope>`.
parameters: A mapping of :func:`Parameter <.params.Parameter>` definitions available to all application
paths.
path: A path fragment that is prefixed to all route handlers, controllers and other routers associated
with the router instance.
request_class: A custom subclass of :class:`Request <.connection.Request>` to be used as the default for
all route handlers, controllers and other routers associated with the router instance.
response_class: A custom subclass of :class:`Response <.response.Response>` to be used as the default for
all route handlers, controllers and other routers associated with the router instance.
response_cookies: A sequence of :class:`Cookie <.datastructures.Cookie>` instances.
response_headers: A string keyed mapping of :class:`ResponseHeader <.datastructures.ResponseHeader>`
instances.
return_dto: :class:`AbstractDTO <.dto.base_dto.AbstractDTO>` to use for serializing
outbound response data.
route_handlers: A required sequence of route handlers, which can include instances of
:class:`Router <.router.Router>`, subclasses of :class:`Controller <.controller.Controller>` or any
function decorated by the route handler decorators.
security: A sequence of dicts that will be added to the schema of all route handlers in the application.
See :data:`SecurityRequirement <.openapi.spec.SecurityRequirement>`
for details.
signature_namespace: A mapping of names to types for use in forward reference resolution during signature modeling.
signature_types: A sequence of types for use in forward reference resolution during signature modeling.
These types will be added to the signature namespace using their ``__name__`` attribute.
tags: A sequence of string tags that will be appended to the schema of all route handlers under the
application.
type_decoders: A sequence of tuples, each composed of a predicate testing for type identity and a msgspec hook for deserialization.
type_encoders: A mapping of types to callables that transform them into types supported for serialization.
websocket_class: A custom subclass of :class:`WebSocket <.connection.WebSocket>` to be used as the default for
all route handlers, controllers and other routers associated with the router instance.
"""
self.after_request = ensure_async_callable(after_request) if after_request else None # pyright: ignore
self.after_response = ensure_async_callable(after_response) if after_response else None
self.before_request = ensure_async_callable(before_request) if before_request else None
self.cache_control = cache_control
self.dto = dto
self.etag = etag
self.dependencies = dict(dependencies or {})
self.exception_handlers = dict(exception_handlers or {})
self.guards = list(guards or [])
self.include_in_schema = include_in_schema
self.middleware = list(middleware or [])
self.opt = dict(opt or {})
self.owner: Router | None = None
self.parameters = dict(parameters or {})
self.path = normalize_path(path)
self.request_class = request_class
self.response_class = response_class
self.response_cookies = narrow_response_cookies(response_cookies)
self.response_headers = narrow_response_headers(response_headers)
self.return_dto = return_dto
self.routes: list[HTTPRoute | ASGIRoute | WebSocketRoute] = []
self.security = list(security or [])
self.signature_namespace = add_types_to_signature_namespace(
signature_types or [], dict(signature_namespace or {})
)
self.tags = list(tags or [])
self.registered_route_handler_ids: set[int] = set()
self.type_encoders = dict(type_encoders) if type_encoders is not None else None
self.type_decoders = list(type_decoders) if type_decoders is not None else None
self.websocket_class = websocket_class
for route_handler in route_handlers or []:
self.register(value=route_handler)
def register(self, value: ControllerRouterHandler) -> list[BaseRoute]:
"""Register a Controller, Route instance or RouteHandler on the router.
Args:
value: a subclass or instance of Controller, an instance of :class:`Router` or a function/method that has
been decorated by any of the routing decorators, e.g. :class:`get <.handlers.get>`,
:class:`post <.handlers.post>`.
Returns:
Collection of handlers added to the router.
"""
validated_value = self._validate_registration_value(value)
routes: list[BaseRoute] = []
for route_path, handlers_map in self.get_route_handler_map(value=validated_value).items():
path = join_paths([self.path, route_path])
if http_handlers := unique(
[handler for handler in handlers_map.values() if isinstance(handler, HTTPRouteHandler)]
):
if existing_handlers := unique(
[
handler
for handler in self.route_handler_method_map.get(path, {}).values()
if isinstance(handler, HTTPRouteHandler)
]
):
http_handlers.extend(existing_handlers)
existing_route_index = find_index(self.routes, lambda x: x.path == path) # noqa: B023
if existing_route_index == -1: # pragma: no cover
raise ImproperlyConfiguredException("unable to find_index existing route index")
route: WebSocketRoute | ASGIRoute | HTTPRoute = HTTPRoute(
path=path,
route_handlers=http_handlers,
)
self.routes[existing_route_index] = route
else:
route = HTTPRoute(path=path, route_handlers=http_handlers)
self.routes.append(route)
routes.append(route)
if websocket_handler := handlers_map.get("websocket"):
route = WebSocketRoute(path=path, route_handler=cast("WebsocketRouteHandler", websocket_handler))
self.routes.append(route)
routes.append(route)
if asgi_handler := handlers_map.get("asgi"):
route = ASGIRoute(path=path, route_handler=cast("ASGIRouteHandler", asgi_handler))
self.routes.append(route)
routes.append(route)
return routes
@property
def route_handler_method_map(self) -> dict[str, RouteHandlerMapItem]:
"""Map route paths to :class:`RouteHandlerMapItem <litestar.types.internal_typ es.RouteHandlerMapItem>`
Returns:
A dictionary mapping paths to route handlers
"""
route_map: dict[str, RouteHandlerMapItem] = defaultdict(dict)
for route in self.routes:
if isinstance(route, HTTPRoute):
for route_handler in route.route_handlers:
for method in route_handler.http_methods:
route_map[route.path][method] = route_handler
else:
route_map[route.path]["websocket" if isinstance(route, WebSocketRoute) else "asgi"] = (
route.route_handler
)
return route_map
@classmethod
def get_route_handler_map(
cls,
value: RouteHandlerType | Router,
) -> dict[str, RouteHandlerMapItem]:
"""Map route handlers to HTTP methods."""
if isinstance(value, Router):
return value.route_handler_method_map
copied_value = copy(value)
if isinstance(value, HTTPRouteHandler):
return {path: {http_method: copied_value for http_method in value.http_methods} for path in value.paths}
return {
path: {"websocket" if isinstance(value, WebsocketRouteHandler) else "asgi": copied_value}
for path in value.paths
}
def _validate_registration_value(self, value: ControllerRouterHandler) -> RouteHandlerType | Router:
"""Ensure values passed to the register method are supported."""
if is_class_and_subclass(value, Controller):
return value(owner=self).as_router()
# this narrows down to an ABC, but we assume a non-abstract subclass of the ABC superclass
if is_class_and_subclass(value, WebsocketListener):
return value(owner=self).to_handler() # pyright: ignore
if isinstance(value, Router):
if value is self:
raise ImproperlyConfiguredException("Cannot register a router on itself")
router_copy = deepcopy(value)
router_copy.owner = self
return router_copy
if isinstance(value, (ASGIRouteHandler, HTTPRouteHandler, WebsocketRouteHandler)):
value.owner = self
return value
raise ImproperlyConfiguredException(
"Unsupported value passed to `Router.register`. "
"If you passed in a function or method, "
"make sure to decorate it first with one of the routing decorators"
)
|
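As a small usage sketch of `Router.register()` described above: handlers can be registered after construction and the routes created for them are returned. The handler and paths are illustrative.

from litestar import Router, get


@get("/health", sync_to_thread=False)
def health() -> dict:
    return {"status": "ok"}


internal = Router(path="/internal", route_handlers=[])
new_routes = internal.register(health)  # returns the BaseRoute instances created for the handler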
PYSEC-2024-178
|
tests/conftest.py
|
@@ -216,6 +216,7 @@ def inner(
"route_handler": route_handler,
"user": user,
"session": session,
+ "headers": [],
**kwargs,
}
return cast("Scope", scope)
|
from __future__ import annotations
import importlib.util
import logging
import os
import random
import shutil
import string
import sys
from datetime import datetime
from os import urandom
from pathlib import Path
from typing import TYPE_CHECKING, Any, AsyncGenerator, Callable, Generator, Union, cast
from unittest.mock import AsyncMock, MagicMock
import pytest
from pytest_lazy_fixtures import lf
from redis.asyncio import Redis as AsyncRedis
from redis.client import Redis
from time_machine import travel
from litestar.logging import LoggingConfig
from litestar.middleware.session import SessionMiddleware
from litestar.middleware.session.base import BaseSessionBackend
from litestar.middleware.session.client_side import ClientSideSessionBackend, CookieBackendConfig
from litestar.middleware.session.server_side import ServerSideSessionBackend, ServerSideSessionConfig
from litestar.openapi.config import OpenAPIConfig
from litestar.stores.base import Store
from litestar.stores.file import FileStore
from litestar.stores.memory import MemoryStore
from litestar.stores.redis import RedisStore
from litestar.testing import RequestFactory
from tests.helpers import not_none
if TYPE_CHECKING:
from types import ModuleType
from pytest import FixtureRequest, MonkeyPatch
from time_machine import Coordinates
from litestar import Litestar
from litestar.types import (
AnyIOBackend,
ASGIApp,
ASGIVersion,
GetLogger,
Receive,
RouteHandlerType,
Scope,
ScopeSession,
Send,
)
pytest_plugins = ["tests.docker_service_fixtures"]
@pytest.fixture
def mock() -> MagicMock:
return MagicMock()
@pytest.fixture()
def async_mock() -> AsyncMock:
return AsyncMock()
@pytest.fixture(params=[pytest.param("asyncio", id="asyncio"), pytest.param("trio", id="trio")])
def anyio_backend(request: pytest.FixtureRequest) -> str:
return request.param # type: ignore[no-any-return]
@pytest.fixture()
def mock_asgi_app() -> ASGIApp:
async def asgi_app(scope: Scope, receive: Receive, send: Send) -> None: ...
return asgi_app
@pytest.fixture()
def redis_store(redis_client: AsyncRedis) -> RedisStore:
return RedisStore(redis=redis_client)
@pytest.fixture()
def memory_store() -> MemoryStore:
return MemoryStore()
@pytest.fixture()
def file_store(tmp_path: Path) -> FileStore:
return FileStore(path=tmp_path)
@pytest.fixture()
def file_store_create_directories(tmp_path: Path) -> FileStore:
path = tmp_path / "subdir1" / "subdir2"
return FileStore(path=path, create_directories=True)
@pytest.fixture()
def file_store_create_directories_flag_false(tmp_path: Path) -> FileStore:
shutil.rmtree(tmp_path, ignore_errors=True) # in case the path was already created by different tests - we clean it
return FileStore(path=tmp_path.joinpath("subdir"), create_directories=False)
@pytest.fixture(
params=[pytest.param("redis_store", marks=pytest.mark.xdist_group("redis")), "memory_store", "file_store"]
)
def store(request: FixtureRequest) -> Store:
return cast("Store", request.getfixturevalue(request.param))
@pytest.fixture
def cookie_session_backend_config() -> CookieBackendConfig:
return CookieBackendConfig(secret=urandom(16))
@pytest.fixture()
def cookie_session_backend(cookie_session_backend_config: CookieBackendConfig) -> ClientSideSessionBackend:
return ClientSideSessionBackend(config=cookie_session_backend_config)
@pytest.fixture(
params=[
pytest.param(lf("cookie_session_backend_config"), id="cookie"),
pytest.param(lf("server_side_session_config"), id="server-side"),
]
)
def session_backend_config(request: pytest.FixtureRequest) -> ServerSideSessionConfig | CookieBackendConfig:
return cast("Union[ServerSideSessionConfig, CookieBackendConfig]", request.param)
@pytest.fixture()
def server_side_session_config() -> ServerSideSessionConfig:
return ServerSideSessionConfig()
@pytest.fixture()
def server_side_session_backend(server_side_session_config: ServerSideSessionConfig) -> ServerSideSessionBackend:
return ServerSideSessionBackend(config=server_side_session_config)
@pytest.fixture(
params=[
pytest.param("cookie_session_backend", id="cookie"),
pytest.param("server_side_session_backend", id="server-side"),
]
)
def session_backend(request: pytest.FixtureRequest) -> BaseSessionBackend:
return cast("BaseSessionBackend", request.getfixturevalue(request.param))
@pytest.fixture()
def session_backend_config_memory(memory_store: MemoryStore) -> ServerSideSessionConfig:
return ServerSideSessionConfig()
@pytest.fixture
def session_middleware(session_backend: BaseSessionBackend, mock_asgi_app: ASGIApp) -> SessionMiddleware[Any]:
return SessionMiddleware(app=mock_asgi_app, backend=session_backend)
@pytest.fixture
def cookie_session_middleware(
cookie_session_backend: ClientSideSessionBackend, mock_asgi_app: ASGIApp
) -> SessionMiddleware[ClientSideSessionBackend]:
return SessionMiddleware(app=mock_asgi_app, backend=cookie_session_backend)
@pytest.fixture
def test_client_backend(anyio_backend_name: str) -> AnyIOBackend:
return cast("AnyIOBackend", anyio_backend_name)
@pytest.fixture
def create_scope() -> Callable[..., Scope]:
def inner(
*,
type: str = "http",
app: Litestar | None = None,
asgi: ASGIVersion | None = None,
auth: Any = None,
client: tuple[str, int] | None = ("testclient", 50000),
extensions: dict[str, dict[object, object]] | None = None,
http_version: str = "1.1",
path: str = "/",
path_params: dict[str, str] | None = None,
query_string: str = "",
root_path: str = "",
route_handler: RouteHandlerType | None = None,
scheme: str = "http",
server: tuple[str, int | None] | None = ("testserver", 80),
session: ScopeSession | None = None,
state: dict[str, Any] | None = None,
user: Any = None,
**kwargs: dict[str, Any],
) -> Scope:
scope = {
"app": app,
"asgi": asgi or {"spec_version": "2.0", "version": "3.0"},
"auth": auth,
"type": type,
"path": path,
"raw_path": path.encode(),
"root_path": root_path,
"scheme": scheme,
"query_string": query_string.encode(),
"client": client,
"server": server,
"method": "GET",
"http_version": http_version,
"extensions": extensions or {"http.response.template": {}},
"state": state or {},
"path_params": path_params or {},
"route_handler": route_handler,
"user": user,
"session": session,
**kwargs,
}
return cast("Scope", scope)
return inner
@pytest.fixture
def scope(create_scope: Callable[..., Scope]) -> Scope:
return create_scope()
@pytest.fixture
def create_module(tmp_path: Path, monkeypatch: MonkeyPatch) -> Callable[[str], ModuleType]:
"""Utility fixture for dynamic module creation."""
def wrapped(source: str) -> ModuleType:
"""
Args:
source: Source code as a string.
Returns:
An imported module.
"""
def module_name_generator() -> str:
letters = string.ascii_lowercase
return "".join(random.choice(letters) for _ in range(10))
module_name = module_name_generator()
path = tmp_path / f"{module_name}.py"
path.write_text(source)
# https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
spec = not_none(importlib.util.spec_from_file_location(module_name, path))
module = not_none(importlib.util.module_from_spec(spec))
monkeypatch.setitem(sys.modules, module_name, module)
not_none(spec.loader).exec_module(module)
return module
return wrapped
@pytest.fixture()
def frozen_datetime() -> Generator[Coordinates, None, None]:
with travel(datetime.utcnow, tick=False) as frozen:
yield frozen
@pytest.fixture()
def request_factory() -> RequestFactory:
return RequestFactory()
@pytest.fixture()
def reset_httpx_logging() -> Generator[None, None, None]:
# ensure that httpx logging is not interfering with our test client
httpx_logger = logging.getLogger("httpx")
initial_level = httpx_logger.level
httpx_logger.setLevel(logging.WARNING)
yield
httpx_logger.setLevel(initial_level)
# the monkeypatch fixture does not work with session scoped dependencies
@pytest.fixture(autouse=True, scope="session")
def disable_warn_implicit_sync_to_thread() -> Generator[None, None, None]:
old_value = os.getenv("LITESTAR_WARN_IMPLICIT_SYNC_TO_THREAD")
os.environ["LITESTAR_WARN_IMPLICIT_SYNC_TO_THREAD"] = "0"
yield
if old_value is not None:
os.environ["LITESTAR_WARN_IMPLICIT_SYNC_TO_THREAD"] = old_value
@pytest.fixture()
def disable_warn_sync_to_thread_with_async(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setenv("LITESTAR_WARN_SYNC_TO_THREAD_WITH_ASYNC", "0")
@pytest.fixture()
def enable_warn_implicit_sync_to_thread(monkeypatch: MonkeyPatch) -> None:
monkeypatch.setenv("LITESTAR_WARN_IMPLICIT_SYNC_TO_THREAD", "1")
@pytest.fixture
def get_logger() -> GetLogger:
# due to the limitations of caplog we have to place this call here.
# we also have to allow propagation.
return LoggingConfig(
logging_module="logging",
loggers={
"litestar": {"level": "INFO", "handlers": ["queue_listener"], "propagate": True},
},
).configure()
@pytest.fixture()
async def redis_client(docker_ip: str, redis_service: None) -> AsyncGenerator[AsyncRedis, None]:
# this is to get around some weirdness with pytest-asyncio and redis interaction
# on 3.8 and 3.9
Redis(host=docker_ip, port=6397).flushall()
client: AsyncRedis = AsyncRedis(host=docker_ip, port=6397)
yield client
try:
await client.aclose() # type: ignore[attr-defined]
except RuntimeError:
pass
@pytest.fixture(autouse=True)
def _patch_openapi_config(monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr("litestar.app.DEFAULT_OPENAPI_CONFIG", OpenAPIConfig(title="Litestar API", version="1.0.0"))
|
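A brief sketch of how the `create_module` fixture defined above might be used from a test; the module source is illustrative.

from types import ModuleType
from typing import Callable


def test_create_module_sketch(create_module: Callable[[str], ModuleType]) -> None:
    module = create_module("ANSWER = 42\n")
    assert module.ANSWER == 42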
PYSEC-2024-178
|
tests/unit/test_connection/test_connection_caching.py
|
@@ -5,7 +5,7 @@
import pytest
-from litestar import Request
+from litestar import Request, post
from litestar.testing import RequestFactory
from litestar.types import Empty, HTTPReceiveMessage, Scope
from litestar.utils.scope.state import ScopeState
@@ -17,11 +17,15 @@ async def test_multiple_request_object_data_caching(create_scope: Callable[...,
https://github.com/litestar-org/litestar/issues/2727
"""
+ @post("/", request_max_body_size=None)
+ async def handler() -> None:
+ pass
+
async def test_receive() -> HTTPReceiveMessage:
mock()
return {"type": "http.request", "body": b"abc", "more_body": False}
- scope = create_scope()
+ scope = create_scope(route_handler=handler)
request_1 = Request[Any, Any, Any](scope, test_receive)
request_2 = Request[Any, Any, Any](scope, test_receive)
assert (await request_1.body()) == b"abc"
@@ -121,6 +125,8 @@ def check_get_mock() -> None:
get_mock.assert_has_calls([call(state_key), call("headers")])
elif state_key == "form":
get_mock.assert_has_calls([call(state_key), call("content_type")])
+ elif state_key == "body":
+ get_mock.assert_has_calls([call(state_key), call("headers")])
else:
get_mock.assert_called_once_with(state_key)
@@ -136,6 +142,8 @@ def check_set_mock() -> None:
set_mock.assert_has_calls([call("content_type", ANY), call(state_key, ANY)])
elif state_key in {"accept", "cookies", "content_type"}:
set_mock.assert_has_calls([call("headers", ANY), call(state_key, ANY)])
+ elif state_key == "body":
+ set_mock.assert_has_calls([call("headers", ANY), call(state_key, ANY)])
else:
set_mock.assert_called_once_with(state_key, ANY)
|
from __future__ import annotations
from typing import Any, Awaitable, Callable
from unittest.mock import ANY, MagicMock, call
import pytest
from litestar import Request
from litestar.testing import RequestFactory
from litestar.types import Empty, HTTPReceiveMessage, Scope
from litestar.utils.scope.state import ScopeState
async def test_multiple_request_object_data_caching(create_scope: Callable[..., Scope], mock: MagicMock) -> None:
"""Test that accessing the request data on multiple request objects only attempts to await `receive()` once.
https://github.com/litestar-org/litestar/issues/2727
"""
async def test_receive() -> HTTPReceiveMessage:
mock()
return {"type": "http.request", "body": b"abc", "more_body": False}
scope = create_scope()
request_1 = Request[Any, Any, Any](scope, test_receive)
request_2 = Request[Any, Any, Any](scope, test_receive)
assert (await request_1.body()) == b"abc"
assert (await request_2.body()) == b"abc"
assert mock.call_count == 1
@pytest.fixture(name="get_mock")
def get_mock_fixture() -> MagicMock:
return MagicMock()
@pytest.fixture(name="set_mock")
def set_mock_fixture() -> MagicMock:
return MagicMock()
@pytest.fixture(name="create_connection")
def create_connection_fixture(
get_mock: MagicMock, set_mock: MagicMock, monkeypatch: pytest.MonkeyPatch
) -> Callable[..., Request]:
class MockScopeState(ScopeState):
def __getattribute__(self, key: str) -> Any:
get_mock(key)
return object.__getattribute__(self, key)
def __setattr__(self, key: str, value: Any) -> None:
set_mock(key, value)
super().__setattr__(key, value)
def create_connection(body_type: str = "json") -> Request:
monkeypatch.setattr("litestar.connection.base.ScopeState", MockScopeState)
connection = RequestFactory().get()
async def fake_receive() -> HTTPReceiveMessage:
if body_type == "msgpack":
return {"type": "http.request", "body": b"\x81\xa3abc\xa3def", "more_body": False}
return {"type": "http.request", "body": b'{"abc":"def"}', "more_body": False}
monkeypatch.setattr(connection, "receive", fake_receive)
return connection
return create_connection
@pytest.fixture(name="get_value")
def get_value_fixture() -> Callable[[Request, str, bool], Awaitable[Any]]:
"""Fixture to get the value of a connection cached property.
Returns:
A function to get the value of a connection cached property.
"""
async def get_value_(connection: Request, prop_name: str, is_coro: bool) -> Any:
"""Helper to get the value of the tested cached property."""
value = getattr(connection, prop_name)
return await value() if is_coro else value
return get_value_
caching_tests = [
("url", "url", "_url", False),
("base_url", "base_url", "_base_url", False),
("parsed_query", "query_params", "_parsed_query", False),
("cookies", "cookies", "_cookies", False),
("body", "body", "_body", True),
("form", "form", "_form", True),
("msgpack", "msgpack", "_msgpack", True),
("json", "json", "_json", True),
("accept", "accept", "_accept", False),
("content_type", "content_type", "_content_type", False),
]
@pytest.mark.parametrize(("state_key", "prop_name", "cache_attr_name", "is_coro"), caching_tests)
async def test_connection_cached_properties_no_scope_or_connection_caching(
state_key: str,
prop_name: str,
cache_attr_name: str,
is_coro: bool,
create_connection: Callable[..., Request],
get_mock: MagicMock,
set_mock: MagicMock,
get_value: Callable[[Request, str, bool], Awaitable[Any]],
) -> None:
def check_get_mock() -> None:
"""Helper to check the get mock.
For certain properties, we call `get_litestar_scope_state()` twice, once for the property and once for the
body. For these cases, we check that the mock was called twice.
"""
if state_key in {"json", "msgpack"}:
get_mock.assert_has_calls([call(state_key), call("body")])
elif state_key in {"accept", "cookies", "content_type"}:
get_mock.assert_has_calls([call(state_key), call("headers")])
elif state_key == "form":
get_mock.assert_has_calls([call(state_key), call("content_type")])
else:
get_mock.assert_called_once_with(state_key)
def check_set_mock() -> None:
"""Helper to check the set mock.
For certain properties, we call `set_litestar_scope_state()` twice, once for the property and once for the
body. For these cases, we check that the mock was called twice.
"""
if state_key in {"json", "msgpack"}:
set_mock.assert_has_calls([call("body", ANY), call(state_key, ANY)])
elif state_key == "form":
set_mock.assert_has_calls([call("content_type", ANY), call(state_key, ANY)])
elif state_key in {"accept", "cookies", "content_type"}:
set_mock.assert_has_calls([call("headers", ANY), call(state_key, ANY)])
else:
set_mock.assert_called_once_with(state_key, ANY)
connection = create_connection("msgpack" if state_key == "msgpack" else "json")
connection_state = connection._connection_state
assert getattr(connection_state, state_key) is Empty
setattr(connection, cache_attr_name, Empty)
get_mock.reset_mock()
set_mock.reset_mock()
await get_value(connection, prop_name, is_coro)
check_get_mock()
check_set_mock()
@pytest.mark.parametrize(("state_key", "prop_name", "cache_attr_name", "is_coro"), caching_tests)
async def test_connection_cached_properties_cached_in_scope(
state_key: str,
prop_name: str,
cache_attr_name: str,
is_coro: bool,
create_connection: Callable[..., Request],
get_mock: MagicMock,
set_mock: MagicMock,
get_value: Callable[[Request, str, bool], Awaitable[Any]],
) -> None:
# set the value in the scope and ensure empty on connection
connection = create_connection()
connection_state = ScopeState.from_scope(connection.scope)
setattr(connection_state, state_key, {"not": "empty"})
setattr(connection, cache_attr_name, Empty)
get_mock.reset_mock()
set_mock.reset_mock()
await get_value(connection, prop_name, is_coro)
get_mock.assert_called_once_with(state_key)
set_mock.assert_not_called()
@pytest.mark.parametrize(("state_key", "prop_name", "cache_attr_name", "is_coro"), caching_tests)
async def test_connection_cached_properties_cached_on_connection(
state_key: str,
prop_name: str,
cache_attr_name: str,
is_coro: bool,
create_connection: Callable[..., Request],
get_mock: MagicMock,
set_mock: MagicMock,
get_value: Callable[[Request, str, bool], Awaitable[Any]],
) -> None:
connection = create_connection()
# set the value on the connection
setattr(connection, cache_attr_name, {"not": "empty"})
get_mock.reset_mock()
set_mock.reset_mock()
await get_value(connection, prop_name, is_coro)
get_mock.assert_not_called()
set_mock.assert_not_called()
|
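To make the scope-level caching these tests rely on concrete, a minimal sketch (not part of the original suite): values written through `ScopeState` are shared by every `ScopeState` obtained from the same scope, which is why a second `Request` built on the same scope never re-awaits `receive()`.

from typing import Callable

from litestar.types import Scope
from litestar.utils.scope.state import ScopeState


def test_scope_state_is_shared(create_scope: Callable[..., Scope]) -> None:
    scope = create_scope()
    ScopeState.from_scope(scope).body = b"cached"  # cache the body on the scope
    assert ScopeState.from_scope(scope).body == b"cached"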
PYSEC-2024-178
|
tests/unit/test_connection/test_request.py
|
@@ -11,7 +11,7 @@
import pytest
-from litestar import MediaType, Request, asgi, get, post
+from litestar import MediaType, Request, get, post
from litestar.connection.base import AuthT, StateT, UserT, empty_send
from litestar.datastructures import Address, Cookie, State
from litestar.exceptions import (
@@ -24,6 +24,7 @@
from litestar.response.base import ASGIResponse
from litestar.serialization import encode_json, encode_msgpack
from litestar.static_files.config import StaticFilesConfig
+from litestar.status_codes import HTTP_400_BAD_REQUEST, HTTP_413_REQUEST_ENTITY_TOO_LARGE
from litestar.testing import TestClient, create_test_client
if TYPE_CHECKING:
@@ -32,7 +33,7 @@
from litestar.types import ASGIApp, Receive, Scope, Send
-@get("/", sync_to_thread=False)
+@get("/", sync_to_thread=False, request_max_body_size=None)
def _route_handler() -> None:
pass
@@ -230,56 +231,51 @@ def test_request_client(
def test_request_body() -> None:
- async def app(scope: Scope, receive: Receive, send: Send) -> None:
- request = Request[Any, Any, State](scope, receive)
+ @post("/")
+ async def handler(request: Request) -> bytes:
body = await request.body()
- response = ASGIResponse(body=encode_json({"body": body.decode()}))
- await response(scope, receive, send)
-
- client = TestClient(app)
+ return encode_json({"body": body.decode()})
- response = client.get("/")
- assert response.json() == {"body": ""}
+ with create_test_client([handler]) as client:
+ response = client.post("/")
+ assert response.json() == {"body": ""}
- response = client.post("/", json={"a": "123"})
- assert response.json() == {"body": '{"a": "123"}'}
+ response = client.post("/", json={"a": "123"})
+ assert response.json() == {"body": '{"a": "123"}'}
- response = client.post("/", content="abc")
- assert response.json() == {"body": "abc"}
+ response = client.post("/", content="abc")
+ assert response.json() == {"body": "abc"}
def test_request_stream() -> None:
- async def app(scope: Scope, receive: Receive, send: Send) -> None:
- request = Request[Any, Any, State](scope, receive)
+ @post("/")
+ async def handler(request: Request) -> bytes:
body = b""
async for chunk in request.stream():
body += chunk
- response = ASGIResponse(body=encode_json({"body": body.decode()}))
- await response(scope, receive, send)
+ return encode_json({"body": body.decode()})
- client = TestClient(app)
+ with create_test_client([handler]) as client:
+ response = client.post("/")
+ assert response.json() == {"body": ""}
- response = client.get("/")
- assert response.json() == {"body": ""}
-
- response = client.post("/", json={"a": "123"})
- assert response.json() == {"body": '{"a": "123"}'}
+ response = client.post("/", json={"a": "123"})
+ assert response.json() == {"body": '{"a": "123"}'}
- response = client.post("/", content="abc")
- assert response.json() == {"body": "abc"}
+ response = client.post("/", content="abc")
+ assert response.json() == {"body": "abc"}
def test_request_form_urlencoded() -> None:
- async def app(scope: Scope, receive: Receive, send: Send) -> None:
- request = Request[Any, Any, State](scope, receive)
+ @post("/")
+ async def handler(request: Request) -> bytes:
form = await request.form()
- response = ASGIResponse(body=encode_json({"form": dict(form)}))
- await response(scope, receive, send)
- client = TestClient(app)
+ return encode_json({"form": dict(form)})
- response = client.post("/", data={"abc": "123 @"})
- assert response.json() == {"form": {"abc": "123 @"}}
+ with create_test_client([handler]) as client:
+ response = client.post("/", data={"abc": "123 @"})
+ assert response.json() == {"form": {"abc": "123 @"}}
def test_request_form_urlencoded_multi_keys() -> None:
@@ -301,19 +297,17 @@ async def handler(request: Request) -> int:
def test_request_body_then_stream() -> None:
- async def app(scope: Any, receive: Receive, send: Send) -> None:
- request = Request[Any, Any, State](scope, receive)
+ @post("/")
+ async def handler(request: Request) -> bytes:
body = await request.body()
chunks = b""
async for chunk in request.stream():
chunks += chunk
- response = ASGIResponse(body=encode_json({"body": body.decode(), "stream": chunks.decode()}))
- await response(scope, receive, send)
-
- client = TestClient(app)
+ return encode_json({"body": body.decode(), "stream": chunks.decode()})
- response = client.post("/", content="abc")
- assert response.json() == {"body": "abc", "stream": "abc"}
+ with create_test_client([handler]) as client:
+ response = client.post("/", content="abc")
+ assert response.json() == {"body": "abc", "stream": "abc"}
def test_request_stream_then_body() -> None:
@@ -329,19 +323,27 @@ async def app(scope: Scope, receive: Receive, send: Send) -> None:
response = ASGIResponse(body=encode_json({"body": body.decode(), "stream": chunks.decode()}))
await response(scope, receive, send)
- client = TestClient(app)
+ @post("/")
+ async def handler(request: Request) -> bytes:
+ chunks = b""
+ async for chunk in request.stream():
+ chunks += chunk
+ try:
+ body = await request.body()
+ except InternalServerException:
+ body = b"<stream consumed>"
+ return encode_json({"body": body.decode(), "stream": chunks.decode()})
- response = client.post("/", content="abc")
- assert response.json() == {"body": "<stream consumed>", "stream": "abc"}
+ with create_test_client([handler]) as client:
+ response = client.post("/", content="abc")
+ assert response.json() == {"body": "<stream consumed>", "stream": "abc"}
def test_request_json() -> None:
- @asgi("/")
- async def handler(scope: Scope, receive: Receive, send: Send) -> None:
- request = Request[Any, Any, State](scope, receive)
+ @post("/")
+ async def handler(request: Request) -> bytes:
data = await request.json()
- response = ASGIResponse(body=encode_json({"json": data}))
- await response(scope, receive, send)
+ return encode_json({"json": data})
with create_test_client(handler) as client:
response = client.post("/", json={"a": "123"})
@@ -361,10 +363,11 @@ async def app(scope: Scope, receive: Receive, send: Send) -> None:
assert response.text == "/he/llo, b'/he%2Fllo'"
-def test_request_without_setting_receive() -> None:
+def test_request_without_setting_receive(create_scope: Callable[..., Scope]) -> None:
"""If Request is instantiated without the 'receive' channel, then .body() is not available."""
async def app(scope: Scope, receive: Receive, send: Send) -> None:
+ scope.update(create_scope(route_handler=_route_handler)) # type: ignore[typeddict-item]
request = Request[Any, Any, State](scope)
try:
data = await request.json()
@@ -431,20 +434,19 @@ async def app(scope: Scope, receive: Receive, send: Send) -> None:
def test_chunked_encoding() -> None:
- async def app(scope: Scope, receive: Receive, send: Send) -> None:
- request = Request[Any, Any, State](scope, receive)
+ @post("/")
+ async def handler(request: Request) -> bytes:
body = await request.body()
- response = ASGIResponse(body=encode_json({"body": body.decode()}))
- await response(scope, receive, send)
+ return encode_json({"body": body.decode()})
- client = TestClient(app)
+ with create_test_client([handler]) as client:
- def post_body() -> Generator[bytes, None, None]:
- yield b"foo"
- yield b"bar"
+ def post_body() -> Generator[bytes, None, None]:
+ yield b"foo"
+ yield b"bar"
- response = client.post("/", content=post_body())
- assert response.json() == {"body": "foobar"}
+ response = client.post("/", content=post_body())
+ assert response.json() == {"body": "foobar"}
def test_request_send_push_promise() -> None:
@@ -548,3 +550,74 @@ async def get_state(request: Request[Any, Any, State]) -> dict[str, str]:
) as client:
response = client.get("/")
assert response.json() == {"state": 2}
+
+
+def test_request_body_exceeds_content_length() -> None:
+ @post("/")
+ def handler(body: bytes) -> None:
+ pass
+
+ with create_test_client([handler]) as client:
+ response = client.post("/", headers={"content-length": "1"}, content=b"ab")
+ assert response.status_code == HTTP_400_BAD_REQUEST
+ assert response.json() == {"status_code": 400, "detail": "Malformed request"}
+
+
+def test_request_body_exceeds_max_request_body_size() -> None:
+ @post("/one", request_max_body_size=1)
+ async def handler_one(request: Request) -> None:
+ await request.body()
+
+ @post("/two", request_max_body_size=1)
+ async def handler_two(body: bytes) -> None:
+ pass
+
+ with create_test_client([handler_one, handler_two]) as client:
+ response = client.post("/one", headers={"content-length": "2"}, content=b"ab")
+ assert response.status_code == HTTP_413_REQUEST_ENTITY_TOO_LARGE
+
+ response = client.post("/two", headers={"content-length": "2"}, content=b"ab")
+ assert response.status_code == HTTP_413_REQUEST_ENTITY_TOO_LARGE
+
+
+def test_request_body_exceeds_max_request_body_size_chunked() -> None:
+ @post("/one", request_max_body_size=1)
+ async def handler_one(request: Request) -> None:
+ assert request.headers["transfer-encoding"] == "chunked"
+ await request.body()
+
+ @post("/two", request_max_body_size=1)
+ async def handler_two(body: bytes, request: Request) -> None:
+ assert request.headers["transfer-encoding"] == "chunked"
+ await request.body()
+
+ def generator() -> Generator[bytes, None, None]:
+ yield b"1"
+ yield b"2"
+
+ with create_test_client([handler_one, handler_two]) as client:
+ response = client.post("/one", content=generator())
+ assert response.status_code == HTTP_413_REQUEST_ENTITY_TOO_LARGE
+
+ response = client.post("/two", content=generator())
+ assert response.status_code == HTTP_413_REQUEST_ENTITY_TOO_LARGE
+
+
+def test_request_content_length() -> None:
+ @post("/")
+ def handler(request: Request) -> dict:
+ return {"content-length": request.content_length}
+
+ with create_test_client([handler]) as client:
+ assert client.post("/", content=b"1").json() == {"content-length": 1}
+
+
+def test_request_invalid_content_length() -> None:
+ @post("/")
+ def handler(request: Request) -> dict:
+ return {"content-length": request.content_length}
+
+ with create_test_client([handler]) as client:
+ response = client.post("/", content=b"1", headers={"content-length": "a"})
+ assert response.status_code == HTTP_400_BAD_REQUEST
+ assert response.json() == {"detail": "Invalid content-length: 'a'", "status_code": 400}
|
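A condensed sketch of the behaviour the patched tests above establish: an integer `request_max_body_size` on a handler caps its request body, while `None` disables the limit. Values and paths are illustrative.

from litestar import post


@post("/capped", request_max_body_size=1024)
async def capped(body: bytes) -> None:
    ...  # bodies larger than 1024 bytes yield a 413 Request Entity Too Large response


@post("/unlimited", request_max_body_size=None)
async def unlimited(body: bytes) -> None:
    ...  # no per-handler body size limit is enforced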
"""A large part of the tests in this file were adapted from:
https://github.com/encode/starlette/blob/master/tests/test_requests.py, and are meant to ensure our compatibility with
their API.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Dict, Generator
from unittest.mock import patch
import pytest
from litestar import MediaType, Request, asgi, get, post
from litestar.connection.base import AuthT, StateT, UserT, empty_send
from litestar.datastructures import Address, Cookie, State
from litestar.exceptions import (
InternalServerException,
LitestarException,
LitestarWarning,
SerializationException,
)
from litestar.middleware import MiddlewareProtocol
from litestar.response.base import ASGIResponse
from litestar.serialization import encode_json, encode_msgpack
from litestar.static_files.config import StaticFilesConfig
from litestar.testing import TestClient, create_test_client
if TYPE_CHECKING:
from pathlib import Path
from litestar.types import ASGIApp, Receive, Scope, Send
@get("/", sync_to_thread=False)
def _route_handler() -> None:
pass
@pytest.fixture(name="scope")
def scope_fixture(create_scope: Callable[..., Scope]) -> Scope:
return create_scope(type="http", route_handler=_route_handler)
async def test_request_empty_body_to_json(anyio_backend: str, scope: Scope) -> None:
with patch.object(Request, "body", return_value=b""):
request_empty_payload: Request[Any, Any, State] = Request(scope=scope)
request_json = await request_empty_payload.json()
assert request_json is None
async def test_request_invalid_body_to_json(anyio_backend: str, scope: Scope) -> None:
with patch.object(Request, "body", return_value=b"invalid"), pytest.raises(SerializationException):
request_empty_payload: Request[Any, Any, State] = Request(scope=scope)
await request_empty_payload.json()
async def test_request_valid_body_to_json(anyio_backend: str, scope: Scope) -> None:
with patch.object(Request, "body", return_value=b'{"test": "valid"}'):
request_empty_payload: Request[Any, Any, State] = Request(scope=scope)
request_json = await request_empty_payload.json()
assert request_json == {"test": "valid"}
async def test_request_empty_body_to_msgpack(anyio_backend: str, scope: Scope) -> None:
with patch.object(Request, "body", return_value=b""):
request_empty_payload: Request[Any, Any, State] = Request(scope=scope)
request_msgpack = await request_empty_payload.msgpack()
assert request_msgpack is None
async def test_request_invalid_body_to_msgpack(anyio_backend: str, scope: Scope) -> None:
with patch.object(Request, "body", return_value=b"invalid"), pytest.raises(SerializationException):
request_empty_payload: Request[Any, Any, State] = Request(scope=scope)
await request_empty_payload.msgpack()
async def test_request_valid_body_to_msgpack(anyio_backend: str, scope: Scope) -> None:
with patch.object(Request, "body", return_value=encode_msgpack({"test": "valid"})):
request_empty_payload: Request[Any, Any, State] = Request(scope=scope)
request_msgpack = await request_empty_payload.msgpack()
assert request_msgpack == {"test": "valid"}
def test_request_url_for() -> None:
@get(path="/proxy", name="proxy")
def proxy() -> None:
pass
@get(path="/test", signature_namespace={"dict": Dict})
def root(request: Request[Any, Any, State]) -> dict[str, str]:
return {"url": request.url_for("proxy")}
@get(path="/test-none", signature_namespace={"dict": Dict})
def test_none(request: Request[Any, Any, State]) -> dict[str, str]:
return {"url": request.url_for("none")}
with create_test_client(route_handlers=[proxy, root, test_none]) as client:
response = client.get("/test")
assert response.json() == {"url": "http://testserver.local/proxy"}
response = client.get("/test-none")
assert response.status_code == 500
def test_request_asset_url(tmp_path: Path) -> None:
@get(path="/resolver", signature_namespace={"dict": Dict})
def resolver(request: Request[Any, Any, State]) -> dict[str, str]:
return {"url": request.url_for_static_asset("js", "main.js")}
@get(path="/resolver-none", signature_namespace={"dict": Dict})
def resolver_none(request: Request[Any, Any, State]) -> dict[str, str]:
return {"url": request.url_for_static_asset("none", "main.js")}
with create_test_client(
route_handlers=[resolver, resolver_none],
static_files_config=[StaticFilesConfig(path="/static/js", directories=[tmp_path], name="js")],
) as client:
response = client.get("/resolver")
assert response.json() == {"url": "http://testserver.local/static/js/main.js"}
response = client.get("/resolver-none")
assert response.status_code == 500
def test_route_handler_property() -> None:
value: Any = {}
@get("/")
def handler(request: Request[Any, Any, State]) -> None:
value["handler"] = request.route_handler
with create_test_client(route_handlers=[handler]) as client:
client.get("/")
assert str(value["handler"]) == str(handler)
def test_custom_request_class() -> None:
value: Any = {}
class MyRequest(Request[UserT, AuthT, StateT]):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.scope["called"] = True # type: ignore[typeddict-unknown-key]
@get("/", signature_types=[MyRequest])
def handler(request: MyRequest[Any, Any, State]) -> None:
value["called"] = request.scope.get("called")
with create_test_client(route_handlers=[handler], request_class=MyRequest) as client:
client.get("/")
assert value["called"]
def test_request_url() -> None:
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope, receive)
data = {"method": request.method, "url": str(request.url)}
response = ASGIResponse(body=encode_json(data))
await response(scope, receive, send)
client = TestClient(app)
response = client.get("/123?a=abc")
assert response.json() == {"method": "GET", "url": "http://testserver.local/123?a=abc"}
response = client.get("https://example.org:123/")
assert response.json() == {"method": "GET", "url": "https://example.org:123/"}
def test_request_query_params() -> None:
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope, receive)
params = dict(request.query_params)
response = ASGIResponse(body=encode_json({"params": params}))
await response(scope, receive, send)
client = TestClient(app)
response = client.get("/?a=123&b=456")
assert response.json() == {"params": {"a": "123", "b": "456"}}
def test_request_headers() -> None:
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope, receive)
headers = dict(request.headers)
response = ASGIResponse(body=encode_json({"headers": headers}))
await response(scope, receive, send)
client = TestClient(app)
response = client.get("/", headers={"host": "example.org"})
assert response.json() == {
"headers": {
"host": "example.org",
"user-agent": "testclient",
"accept-encoding": "gzip, deflate, br",
"accept": "*/*",
"connection": "keep-alive",
}
}
def test_request_accept_header() -> None:
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope, receive)
response = ASGIResponse(body=encode_json({"accepted_types": list(request.accept)}))
await response(scope, receive, send)
client = TestClient(app)
response = client.get("/", headers={"Accept": "text/plain, application/xml;q=0.7, text/html;p=test"})
assert response.json() == {"accepted_types": ["text/html;p=test", "text/plain", "application/xml;q=0.7"]}
@pytest.mark.parametrize(
"scope_values,expected_client",
(
({"type": "http", "route_handler": _route_handler, "client": ["client", 42]}, Address("client", 42)),
({"type": "http", "route_handler": _route_handler, "client": None}, None),
({"type": "http", "route_handler": _route_handler}, None),
),
)
def test_request_client(
scope_values: dict[str, Any], expected_client: Address | None, create_scope: Callable[..., Scope]
) -> None:
scope = create_scope()
scope.update(scope_values) # type: ignore[typeddict-item]
if "client" not in scope_values:
del scope["client"] # type: ignore[misc]
client = Request[Any, Any, State](scope).client
assert client == expected_client
def test_request_body() -> None:
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope, receive)
body = await request.body()
response = ASGIResponse(body=encode_json({"body": body.decode()}))
await response(scope, receive, send)
client = TestClient(app)
response = client.get("/")
assert response.json() == {"body": ""}
response = client.post("/", json={"a": "123"})
assert response.json() == {"body": '{"a": "123"}'}
response = client.post("/", content="abc")
assert response.json() == {"body": "abc"}
def test_request_stream() -> None:
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope, receive)
body = b""
async for chunk in request.stream():
body += chunk
response = ASGIResponse(body=encode_json({"body": body.decode()}))
await response(scope, receive, send)
client = TestClient(app)
response = client.get("/")
assert response.json() == {"body": ""}
response = client.post("/", json={"a": "123"})
assert response.json() == {"body": '{"a": "123"}'}
response = client.post("/", content="abc")
assert response.json() == {"body": "abc"}
def test_request_form_urlencoded() -> None:
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope, receive)
form = await request.form()
response = ASGIResponse(body=encode_json({"form": dict(form)}))
await response(scope, receive, send)
client = TestClient(app)
response = client.post("/", data={"abc": "123 @"})
assert response.json() == {"form": {"abc": "123 @"}}
def test_request_form_urlencoded_multi_keys() -> None:
@post("/")
async def handler(request: Request) -> Any:
return (await request.form()).getall("foo")
with create_test_client(handler) as client:
assert client.post("/", data={"foo": ["1", "2"]}).json() == ["1", "2"]
def test_request_form_multipart_multi_keys() -> None:
@post("/")
async def handler(request: Request) -> int:
return len((await request.form()).getall("foo"))
with create_test_client(handler) as client:
assert client.post("/", data={"foo": "1"}, files={"foo": b"a"}).json() == 2
def test_request_body_then_stream() -> None:
async def app(scope: Any, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope, receive)
body = await request.body()
chunks = b""
async for chunk in request.stream():
chunks += chunk
response = ASGIResponse(body=encode_json({"body": body.decode(), "stream": chunks.decode()}))
await response(scope, receive, send)
client = TestClient(app)
response = client.post("/", content="abc")
assert response.json() == {"body": "abc", "stream": "abc"}
def test_request_stream_then_body() -> None:
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope, receive)
chunks = b""
async for chunk in request.stream():
chunks += chunk
try:
body = await request.body()
except InternalServerException:
body = b"<stream consumed>"
response = ASGIResponse(body=encode_json({"body": body.decode(), "stream": chunks.decode()}))
await response(scope, receive, send)
client = TestClient(app)
response = client.post("/", content="abc")
assert response.json() == {"body": "<stream consumed>", "stream": "abc"}
def test_request_json() -> None:
@asgi("/")
async def handler(scope: Scope, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope, receive)
data = await request.json()
response = ASGIResponse(body=encode_json({"json": data}))
await response(scope, receive, send)
with create_test_client(handler) as client:
response = client.post("/", json={"a": "123"})
assert response.json() == {"json": {"a": "123"}}
def test_request_raw_path() -> None:
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope, receive)
path = str(request.scope["path"])
raw_path = str(request.scope["raw_path"])
response = ASGIResponse(body=f"{path}, {raw_path}".encode(), media_type=MediaType.TEXT)
await response(scope, receive, send)
client = TestClient(app)
response = client.get("/he%2Fllo")
assert response.text == "/he/llo, b'/he%2Fllo'"
def test_request_without_setting_receive() -> None:
"""If Request is instantiated without the 'receive' channel, then .body() is not available."""
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope)
try:
data = await request.json()
except RuntimeError:
data = "Receive channel not available"
response = ASGIResponse(body=encode_json({"json": data}))
await response(scope, receive, send)
client = TestClient(app)
response = client.post("/", json={"a": "123"})
assert response.json() == {"json": "Receive channel not available"}
async def test_request_disconnect(create_scope: Callable[..., Scope]) -> None:
"""If a client disconnect occurs while reading request body then InternalServerException should be raised."""
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope, receive)
await request.body()
async def receiver() -> dict[str, str]:
return {"type": "http.disconnect"}
with pytest.raises(InternalServerException):
await app(
create_scope(type="http", route_handler=_route_handler, method="POST", path="/"),
receiver, # type: ignore[arg-type]
empty_send,
)
def test_request_state() -> None:
@get("/", signature_namespace={"dict": Dict})
def handler(request: Request[Any, Any, State]) -> dict[Any, Any]:
request.state.test = 1
assert request.state.test == 1
return request.state.dict()
with create_test_client(handler) as client:
response = client.get("/")
assert response.json()["test"] == 1
def test_request_cookies() -> None:
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope, receive)
mycookie = request.cookies.get("mycookie")
if mycookie:
asgi_response = ASGIResponse(body=mycookie.encode("utf-8"), media_type="text/plain")
else:
asgi_response = ASGIResponse(
body=b"Hello, world!",
media_type="text/plain",
cookies=[Cookie(key="mycookie", value="Hello, cookies!")],
)
await asgi_response(scope, receive, send)
client = TestClient(app)
response = client.get("/")
assert response.text == "Hello, world!"
response = client.get("/")
assert response.text == "Hello, cookies!"
def test_chunked_encoding() -> None:
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope, receive)
body = await request.body()
response = ASGIResponse(body=encode_json({"body": body.decode()}))
await response(scope, receive, send)
client = TestClient(app)
def post_body() -> Generator[bytes, None, None]:
yield b"foo"
yield b"bar"
response = client.post("/", content=post_body())
assert response.json() == {"body": "foobar"}
def test_request_send_push_promise() -> None:
async def app(scope: Scope, receive: Receive, send: Send) -> None:
# the server is push-enabled
scope["extensions"]["http.response.push"] = {} # type: ignore[index]
request = Request[Any, Any, State](scope, receive, send)
await request.send_push_promise("/style.css")
response = ASGIResponse(body=encode_json({"json": "OK"}))
await response(scope, receive, send)
client = TestClient(app)
response = client.get("/")
assert response.json() == {"json": "OK"}
def test_request_send_push_promise_without_push_extension() -> None:
"""If server does not support the `http.response.push` extension,
.send_push_promise() does nothing.
"""
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope)
with pytest.warns(LitestarWarning, match="Attempted to send a push promise"):
await request.send_push_promise("/style.css")
response = ASGIResponse(body=encode_json({"json": "OK"}))
await response(scope, receive, send)
client = TestClient(app)
response = client.get("/")
assert response.json() == {"json": "OK"}
def test_request_send_push_promise_without_push_extension_raises() -> None:
"""If server does not support the `http.response.push` extension,
.send_push_promise() does nothing.
"""
async def app(scope: Scope, receive: Receive, send: Send) -> None:
request = Request[Any, Any, State](scope)
with pytest.raises(LitestarException, match="Attempted to send a push promise"):
await request.send_push_promise("/style.css", raise_if_unavailable=True)
response = ASGIResponse(body=encode_json({"json": "OK"}))
await response(scope, receive, send)
TestClient(app).get("/")
def test_request_send_push_promise_without_setting_send() -> None:
"""If Request is instantiated without the send channel, then.
.send_push_promise() is not available.
"""
async def app(scope: Scope, receive: Receive, send: Send) -> None:
# the server is push-enabled
scope["extensions"]["http.response.push"] = {} # type: ignore[index]
data = "OK"
request = Request[Any, Any, State](scope)
try:
await request.send_push_promise("/style.css")
except RuntimeError:
data = "Send channel not available"
response = ASGIResponse(body=encode_json({"json": data}))
await response(scope, receive, send)
client = TestClient(app)
response = client.get("/")
assert response.json() == {"json": "Send channel not available"}
class BeforeRequestMiddleWare(MiddlewareProtocol):
def __init__(self, app: ASGIApp) -> None:
self.app = app
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
scope["state"]["main"] = 1
await self.app(scope, receive, send)
def test_state() -> None:
def before_request(request: Request[Any, Any, State]) -> None:
assert request.state.main == 1
request.state.main = 2
@get(path="/", signature_namespace={"dict": Dict})
async def get_state(request: Request[Any, Any, State]) -> dict[str, str]:
return {"state": request.state.main}
with create_test_client(
route_handlers=[get_state], middleware=[BeforeRequestMiddleWare], before_request=before_request
) as client:
response = client.get("/")
assert response.json() == {"state": 2}
|
PYSEC-2024-178
|
aiohttp/web_middlewares.py
|
@@ -108,6 +108,7 @@ async def impl(request: Request, handler: _Handler) -> StreamResponse:
paths_to_check.append(merged_slashes[:-1])
for path in paths_to_check:
+ path = re.sub("^//+", "/", path) # SECURITY: GHSA-v6wp-4m6f-gcjg
resolves, request = await _check_request_resolves(request, path)
if resolves:
raise redirect_class(request.raw_path + query)
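Editorial note on the one-line fix (not part of the advisory record): each candidate path can end up verbatim in the 308 Location header, and a value that still begins with "//" is read by browsers as a protocol-relative URL, i.e. an off-site redirect. Collapsing leading slash runs before resolving keeps a request target such as //google.com on-site, which is what the GHSA-v6wp-4m6f-gcjg regression test further below asserts. A minimal illustration of the substitution itself:
import re

def collapse_leading_slashes(path: str) -> str:
    # the exact substitution added by the patch
    return re.sub("^//+", "/", path)

assert collapse_leading_slashes("//google.com") == "/google.com"
assert collapse_leading_slashes("///google.com/") == "/google.com/"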
|
import re
import warnings
from typing import TYPE_CHECKING, Awaitable, Callable, Tuple, Type, TypeVar
from .web_exceptions import HTTPMove, HTTPPermanentRedirect
from .web_request import Request
from .web_response import StreamResponse
from .web_urldispatcher import SystemRoute
__all__ = (
"middleware",
"normalize_path_middleware",
)
if TYPE_CHECKING: # pragma: no cover
from .web_app import Application
_Func = TypeVar("_Func")
async def _check_request_resolves(request: Request, path: str) -> Tuple[bool, Request]:
alt_request = request.clone(rel_url=path)
match_info = await request.app.router.resolve(alt_request)
alt_request._match_info = match_info # type: ignore[assignment]
if match_info.http_exception is None:
return True, alt_request
return False, request
def middleware(f: _Func) -> _Func:
warnings.warn(
"Middleware decorator is deprecated since 4.0 "
"and its behaviour is default, "
"you can simply remove this decorator.",
DeprecationWarning,
stacklevel=2,
)
return f
_Handler = Callable[[Request], Awaitable[StreamResponse]]
_Middleware = Callable[[Request, _Handler], Awaitable[StreamResponse]]
def normalize_path_middleware(
*,
append_slash: bool = True,
remove_slash: bool = False,
merge_slashes: bool = True,
redirect_class: Type[HTTPMove] = HTTPPermanentRedirect,
) -> _Middleware:
"""
Middleware factory which produces a middleware that normalizes
the path of a request. Normalizing means:
- Add or remove a trailing slash to the path.
- Double slashes are replaced by one.
The middleware returns as soon as it finds a path that resolves
correctly. If both merge and append/remove are enabled, the order of checks is
1) merge slashes
2) append/remove slash
3) both merge slashes and append/remove slash.
If the path resolves with at least one of those conditions, it will
redirect to the new path.
Only one of `append_slash` and `remove_slash` can be enabled. If both
are `True`, the factory will raise an assertion error.
If `append_slash` is `True` the middleware will append a slash when
needed. If a resource is defined with trailing slash and the request
comes without it, it will append it automatically.
If `remove_slash` is `True`, `append_slash` must be `False`. When enabled
the middleware will remove trailing slashes and redirect if the resource
is defined without the trailing slash.
If merge_slashes is True, merge multiple consecutive slashes in the
path into one.
"""
correct_configuration = not (append_slash and remove_slash)
assert correct_configuration, "Cannot both remove and append slash"
async def impl(request: Request, handler: _Handler) -> StreamResponse:
if isinstance(request.match_info.route, SystemRoute):
paths_to_check = []
if "?" in request.raw_path:
path, query = request.raw_path.split("?", 1)
query = "?" + query
else:
query = ""
path = request.raw_path
if merge_slashes:
paths_to_check.append(re.sub("//+", "/", path))
if append_slash and not request.path.endswith("/"):
paths_to_check.append(path + "/")
if remove_slash and request.path.endswith("/"):
paths_to_check.append(path[:-1])
if merge_slashes and append_slash:
paths_to_check.append(re.sub("//+", "/", path + "/"))
if merge_slashes and remove_slash and path.endswith("/"):
merged_slashes = re.sub("//+", "/", path)
paths_to_check.append(merged_slashes[:-1])
for path in paths_to_check:
resolves, request = await _check_request_resolves(request, path)
if resolves:
raise redirect_class(request.raw_path + query)
return await handler(request)
return impl
def _fix_request_current_app(app: "Application") -> _Middleware:
async def impl(request: Request, handler: _Handler) -> StreamResponse:
with request.match_info.set_current_app(app):
return await handler(request)
return impl
|
PYSEC-2021-76
|
tests/test_web_middleware.py
|
@@ -361,6 +361,38 @@ async def test_cannot_remove_and_add_slash(self) -> None:
with pytest.raises(AssertionError):
web.normalize_path_middleware(append_slash=True, remove_slash=True)
+ @pytest.mark.parametrize(
+ ["append_slash", "remove_slash"],
+ [
+ (True, False),
+ (False, True),
+ (False, False),
+ ],
+ )
+ async def test_open_redirects(
+ self, append_slash: bool, remove_slash: bool, aiohttp_client: Any
+ ) -> None:
+ async def handle(request: web.Request) -> web.StreamResponse:
+ pytest.fail(
+ msg="Security advisory 'GHSA-v6wp-4m6f-gcjg' test handler "
+ "matched unexpectedly",
+ pytrace=False,
+ )
+
+ app = web.Application(
+ middlewares=[
+ web.normalize_path_middleware(
+ append_slash=append_slash, remove_slash=remove_slash
+ )
+ ]
+ )
+ app.add_routes([web.get("/", handle), web.get("/google.com", handle)])
+ client = await aiohttp_client(app, server_kwargs={"skip_url_asserts": True})
+ resp = await client.get("//google.com", allow_redirects=False)
+ assert resp.status == 308
+ assert resp.headers["Location"] == "/google.com"
+ assert resp.url.query == URL("//google.com").query
+
async def test_bug_3669(aiohttp_client: Any):
async def paymethod(request):
|
# type: ignore
from typing import Any
import pytest
from yarl import URL
from aiohttp import web
async def test_middleware_modifies_response(loop: Any, aiohttp_client: Any) -> None:
async def handler(request):
return web.Response(body=b"OK")
async def middleware(request, handler):
resp = await handler(request)
assert 200 == resp.status
resp.set_status(201)
resp.text = resp.text + "[MIDDLEWARE]"
return resp
app = web.Application()
app.middlewares.append(middleware)
app.router.add_route("GET", "/", handler)
client = await aiohttp_client(app)
resp = await client.get("/")
assert 201 == resp.status
txt = await resp.text()
assert "OK[MIDDLEWARE]" == txt
async def test_middleware_handles_exception(loop: Any, aiohttp_client: Any) -> None:
async def handler(request):
raise RuntimeError("Error text")
async def middleware(request, handler):
with pytest.raises(RuntimeError) as ctx:
await handler(request)
return web.Response(status=501, text=str(ctx.value) + "[MIDDLEWARE]")
app = web.Application()
app.middlewares.append(middleware)
app.router.add_route("GET", "/", handler)
client = await aiohttp_client(app)
resp = await client.get("/")
assert 501 == resp.status
txt = await resp.text()
assert "Error text[MIDDLEWARE]" == txt
async def test_middleware_chain(loop: Any, aiohttp_client: Any) -> None:
async def handler(request):
return web.Response(text="OK")
handler.annotation = "annotation_value"
async def handler2(request):
return web.Response(text="OK")
middleware_annotation_seen_values = []
def make_middleware(num):
async def middleware(request, handler):
middleware_annotation_seen_values.append(
getattr(handler, "annotation", None)
)
resp = await handler(request)
resp.text = resp.text + f"[{num}]"
return resp
return middleware
app = web.Application()
app.middlewares.append(make_middleware(1))
app.middlewares.append(make_middleware(2))
app.router.add_route("GET", "/", handler)
app.router.add_route("GET", "/r2", handler2)
client = await aiohttp_client(app)
resp = await client.get("/")
assert 200 == resp.status
txt = await resp.text()
assert "OK[2][1]" == txt
assert middleware_annotation_seen_values == ["annotation_value", "annotation_value"]
# check that attributes from handler are not applied to handler2
resp = await client.get("/r2")
assert 200 == resp.status
assert middleware_annotation_seen_values == [
"annotation_value",
"annotation_value",
None,
None,
]
async def test_middleware_subapp(loop: Any, aiohttp_client: Any) -> None:
async def sub_handler(request):
return web.Response(text="OK")
sub_handler.annotation = "annotation_value"
async def handler(request):
return web.Response(text="OK")
middleware_annotation_seen_values = []
def make_middleware(num):
async def middleware(request, handler):
annotation = getattr(handler, "annotation", None)
if annotation is not None:
middleware_annotation_seen_values.append(f"{annotation}/{num}")
return await handler(request)
return middleware
app = web.Application()
app.middlewares.append(make_middleware(1))
app.router.add_route("GET", "/r2", handler)
subapp = web.Application()
subapp.middlewares.append(make_middleware(2))
subapp.router.add_route("GET", "/", sub_handler)
app.add_subapp("/sub", subapp)
client = await aiohttp_client(app)
resp = await client.get("/sub/")
assert 200 == resp.status
await resp.text()
assert middleware_annotation_seen_values == [
"annotation_value/1",
"annotation_value/2",
]
# check that attributes from sub_handler are not applied to handler
del middleware_annotation_seen_values[:]
resp = await client.get("/r2")
assert 200 == resp.status
assert middleware_annotation_seen_values == []
@pytest.fixture
def cli(loop: Any, aiohttp_client: Any):
async def handler(request):
return web.Response(text="OK")
def wrapper(extra_middlewares):
app = web.Application()
app.router.add_route("GET", "/resource1", handler)
app.router.add_route("GET", "/resource2/", handler)
app.router.add_route("GET", "/resource1/a/b", handler)
app.router.add_route("GET", "/resource2/a/b/", handler)
app.router.add_route("GET", "/resource2/a/b%2Fc/", handler)
app.middlewares.extend(extra_middlewares)
return aiohttp_client(app, server_kwargs={"skip_url_asserts": True})
return wrapper
class TestNormalizePathMiddleware:
@pytest.mark.parametrize(
"path, status",
[
("/resource1", 200),
("/resource1/", 404),
("/resource2", 200),
("/resource2/", 200),
("/resource1?p1=1&p2=2", 200),
("/resource1/?p1=1&p2=2", 404),
("/resource2?p1=1&p2=2", 200),
("/resource2/?p1=1&p2=2", 200),
("/resource2/a/b%2Fc", 200),
("/resource2/a/b%2Fc/", 200),
],
)
async def test_add_trailing_when_necessary(
self, path: Any, status: Any, cli: Any
) -> None:
extra_middlewares = [web.normalize_path_middleware(merge_slashes=False)]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
@pytest.mark.parametrize(
"path, status",
[
("/resource1", 200),
("/resource1/", 200),
("/resource2", 404),
("/resource2/", 200),
("/resource1?p1=1&p2=2", 200),
("/resource1/?p1=1&p2=2", 200),
("/resource2?p1=1&p2=2", 404),
("/resource2/?p1=1&p2=2", 200),
("/resource2/a/b%2Fc", 404),
("/resource2/a/b%2Fc/", 200),
("/resource12", 404),
("/resource12345", 404),
],
)
async def test_remove_trailing_when_necessary(
self, path: Any, status: Any, cli: Any
) -> None:
extra_middlewares = [
web.normalize_path_middleware(
append_slash=False, remove_slash=True, merge_slashes=False
)
]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
@pytest.mark.parametrize(
"path, status",
[
("/resource1", 200),
("/resource1/", 404),
("/resource2", 404),
("/resource2/", 200),
("/resource1?p1=1&p2=2", 200),
("/resource1/?p1=1&p2=2", 404),
("/resource2?p1=1&p2=2", 404),
("/resource2/?p1=1&p2=2", 200),
("/resource2/a/b%2Fc", 404),
("/resource2/a/b%2Fc/", 200),
],
)
async def test_no_trailing_slash_when_disabled(
self, path: Any, status: Any, cli: Any
) -> None:
extra_middlewares = [
web.normalize_path_middleware(append_slash=False, merge_slashes=False)
]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
@pytest.mark.parametrize(
"path, status",
[
("/resource1/a/b", 200),
("//resource1//a//b", 200),
("//resource1//a//b/", 404),
("///resource1//a//b", 200),
("/////resource1/a///b", 200),
("/////resource1/a//b/", 404),
("/resource1/a/b?p=1", 200),
("//resource1//a//b?p=1", 200),
("//resource1//a//b/?p=1", 404),
("///resource1//a//b?p=1", 200),
("/////resource1/a///b?p=1", 200),
("/////resource1/a//b/?p=1", 404),
],
)
async def test_merge_slash(self, path: Any, status: Any, cli: Any) -> None:
extra_middlewares = [web.normalize_path_middleware(append_slash=False)]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
@pytest.mark.parametrize(
"path, status",
[
("/resource1/a/b", 200),
("/resource1/a/b/", 404),
("//resource2//a//b", 200),
("//resource2//a//b/", 200),
("///resource1//a//b", 200),
("///resource1//a//b/", 404),
("/////resource1/a///b", 200),
("/////resource1/a///b/", 404),
("/resource2/a/b", 200),
("//resource2//a//b", 200),
("//resource2//a//b/", 200),
("///resource2//a//b", 200),
("///resource2//a//b/", 200),
("/////resource2/a///b", 200),
("/////resource2/a///b/", 200),
("/resource1/a/b?p=1", 200),
("/resource1/a/b/?p=1", 404),
("//resource2//a//b?p=1", 200),
("//resource2//a//b/?p=1", 200),
("///resource1//a//b?p=1", 200),
("///resource1//a//b/?p=1", 404),
("/////resource1/a///b?p=1", 200),
("/////resource1/a///b/?p=1", 404),
("/resource2/a/b?p=1", 200),
("//resource2//a//b?p=1", 200),
("//resource2//a//b/?p=1", 200),
("///resource2//a//b?p=1", 200),
("///resource2//a//b/?p=1", 200),
("/////resource2/a///b?p=1", 200),
("/////resource2/a///b/?p=1", 200),
],
)
async def test_append_and_merge_slash(
self, path: Any, status: Any, cli: Any
) -> None:
extra_middlewares = [web.normalize_path_middleware()]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
@pytest.mark.parametrize(
"path, status",
[
("/resource1/a/b", 200),
("/resource1/a/b/", 200),
("//resource2//a//b", 404),
("//resource2//a//b/", 200),
("///resource1//a//b", 200),
("///resource1//a//b/", 200),
("/////resource1/a///b", 200),
("/////resource1/a///b/", 200),
("/////resource1/a///b///", 200),
("/resource2/a/b", 404),
("//resource2//a//b", 404),
("//resource2//a//b/", 200),
("///resource2//a//b", 404),
("///resource2//a//b/", 200),
("/////resource2/a///b", 404),
("/////resource2/a///b/", 200),
("/resource1/a/b?p=1", 200),
("/resource1/a/b/?p=1", 200),
("//resource2//a//b?p=1", 404),
("//resource2//a//b/?p=1", 200),
("///resource1//a//b?p=1", 200),
("///resource1//a//b/?p=1", 200),
("/////resource1/a///b?p=1", 200),
("/////resource1/a///b/?p=1", 200),
("/resource2/a/b?p=1", 404),
("//resource2//a//b?p=1", 404),
("//resource2//a//b/?p=1", 200),
("///resource2//a//b?p=1", 404),
("///resource2//a//b/?p=1", 200),
("/////resource2/a///b?p=1", 404),
("/////resource2/a///b/?p=1", 200),
],
)
async def test_remove_and_merge_slash(
self, path: Any, status: Any, cli: Any
) -> None:
extra_middlewares = [
web.normalize_path_middleware(append_slash=False, remove_slash=True)
]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
async def test_cannot_remove_and_add_slash(self) -> None:
with pytest.raises(AssertionError):
web.normalize_path_middleware(append_slash=True, remove_slash=True)
async def test_bug_3669(aiohttp_client: Any):
async def paymethod(request):
return web.Response(text="OK")
app = web.Application()
app.router.add_route("GET", "/paymethod", paymethod)
app.middlewares.append(
web.normalize_path_middleware(append_slash=False, remove_slash=True)
)
client = await aiohttp_client(app, server_kwargs={"skip_url_asserts": True})
resp = await client.get("/paymethods")
assert resp.status == 404
assert resp.url.path != "/paymethod"
async def test_old_style_middleware(loop: Any, aiohttp_client: Any) -> None:
async def view_handler(request):
return web.Response(body=b"OK")
with pytest.warns(DeprecationWarning, match="Middleware decorator is deprecated"):
@web.middleware
async def middleware(request, handler):
resp = await handler(request)
assert 200 == resp.status
resp.set_status(201)
resp.text = resp.text + "[old style middleware]"
return resp
app = web.Application(middlewares=[middleware])
app.router.add_route("GET", "/", view_handler)
client = await aiohttp_client(app)
resp = await client.get("/")
assert 201 == resp.status
txt = await resp.text()
assert "OK[old style middleware]" == txt
async def test_new_style_middleware_class(loop: Any, aiohttp_client: Any) -> None:
async def handler(request):
return web.Response(body=b"OK")
class Middleware:
async def __call__(self, request, handler):
resp = await handler(request)
assert 200 == resp.status
resp.set_status(201)
resp.text = resp.text + "[new style middleware]"
return resp
with pytest.warns(None) as warning_checker:
app = web.Application()
app.middlewares.append(Middleware())
app.router.add_route("GET", "/", handler)
client = await aiohttp_client(app)
resp = await client.get("/")
assert 201 == resp.status
txt = await resp.text()
assert "OK[new style middleware]" == txt
assert len(warning_checker) == 0
async def test_new_style_middleware_method(loop: Any, aiohttp_client: Any) -> None:
async def handler(request):
return web.Response(body=b"OK")
class Middleware:
async def call(self, request, handler):
resp = await handler(request)
assert 200 == resp.status
resp.set_status(201)
resp.text = resp.text + "[new style middleware]"
return resp
with pytest.warns(None) as warning_checker:
app = web.Application()
app.middlewares.append(Middleware().call)
app.router.add_route("GET", "/", handler)
client = await aiohttp_client(app)
resp = await client.get("/")
assert 201 == resp.status
txt = await resp.text()
assert "OK[new style middleware]" == txt
assert len(warning_checker) == 0
|
PYSEC-2021-76
|
opencanary/__init__.py
|
@@ -1 +1,34 @@
+import os
+import subprocess
+
__version__ = "0.9.3"
+
+from os import PathLike
+from typing import Union
+
+BIN_LOCATIONS = ["/usr/bin", "/bin", "/usr/sbin", "/sbin"]
+
+
+def _check_file_exists_and_executable(path: Union[PathLike, str]) -> bool:
+ if not os.path.isfile(path):
+ return False
+ else:
+ return os.access(path, os.X_OK)
+
+
+def safe_exec(binary_name: str, args: list) -> bytes:
+ """
+ Executes the given binary with the given arguments as a subprocess. What makes this safe is that the binary name
+ is not executed as an alias, and only binaries that live in trusted system locations are executed. This means that
+ only system-wide binaries are executable.
+ """
+ exec_path = None
+ for prefix in BIN_LOCATIONS:
+ bin_path = os.path.join(prefix, binary_name)
+ if _check_file_exists_and_executable(os.path.join(prefix, binary_name)):
+ exec_path = bin_path
+ break
+ if exec_path is None:
+ raise Exception(f"Could not find executable ${binary_name}")
+ else:
+ return subprocess.check_output(args, shell=True, executable=exec_path)
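The docstring's idea, resolving the binary against an allow-list of system directories instead of trusting whatever name (or alias) the caller passes, can be sketched independently of this project's wiring. The helper names below are made up, and the sketch executes the resolved path directly as an argument vector rather than going through a shell:
import os
import subprocess

TRUSTED_DIRS = ("/usr/bin", "/bin", "/usr/sbin", "/sbin")

def find_trusted_binary(name: str) -> str:
    # accept only executables that live in an allow-listed system directory
    for prefix in TRUSTED_DIRS:
        candidate = os.path.join(prefix, name)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    raise FileNotFoundError(f"no trusted executable named {name!r}")

def run_trusted(name: str, args: list) -> bytes:
    # argument-vector execution: no shell, so metacharacters in args stay inert
    return subprocess.check_output([find_trusted_binary(name)] + list(args))

# e.g. run_trusted("iptables", ["--version"])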
|
__version__ = "0.9.3"
|
GHSA-pf5v-pqfv-x8jj
|
opencanary/config.py
|
@@ -3,12 +3,12 @@
import json
import itertools
import string
-import subprocess
import shutil
import re
from os.path import expanduser
from pkg_resources import resource_filename
from pathlib import Path
+from . import safe_exec
SAMPLE_SETTINGS = resource_filename(__name__, "data/settings.json")
SETTINGS = "opencanary.conf"
@@ -54,9 +54,9 @@ def __init__(self, configfile=SETTINGS):
self.__configfile = configfile
files = [
- configfile,
- "%s/.%s" % (expanduser("~"), configfile),
"/etc/opencanaryd/%s" % configfile,
+ "%s/.%s" % (expanduser("~"), configfile),
+ configfile,
]
print(
"** We hope you enjoy using OpenCanary. For more open source Canary goodness, head over to canarytokens.org. **"
@@ -67,14 +67,17 @@ def __init__(self, configfile=SETTINGS):
print("[-] Using config file: %s" % fname)
self.__config = json.load(f)
self.__config = expand_vars(self.__config)
+ if fname is configfile:
+ print(
+ "[-] Warning, making use of the configuration file in the immediate directory is not recommended! Suggested locations: %s"
+ % ", ".join(files[:2])
+ )
return
except IOError as e:
print("[-] Failed to open %s for reading (%s)" % (fname, e))
except ValueError as e:
print("[-] Failed to decode json from %s (%s)" % (fname, e))
- subprocess.call(
- "cp -r %s /var/tmp/config-err-$(date +%%s)" % fname, shell=True
- )
+ safe_exec("cp", ["-r", fname, "/var/tmp/config-err-$(date +%%s)"])
except Exception as e:
print("[-] An error occurred loading %s (%s)" % (fname, e))
if self.__config is None:
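Why the subprocess.call replacement matters (illustration only; the filename below is made up): interpolating fname into a single shell string lets shell metacharacters in the value terminate the cp command and start a new one, whereas an argument vector keeps the value inert:
import shlex

fname = "settings.json; touch /tmp/pwned"  # hypothetical attacker-influenced value

# old pattern: one shell string, so ';' splits it into two commands
shell_string = "cp -r %s /var/tmp/config-err-backup" % fname

# safer pattern: pass an argument vector (shown here via shlex.join for display)
argv = ["cp", "-r", fname, "/var/tmp/config-err-backup"]
print(shell_string)      # cp -r settings.json; touch /tmp/pwned /var/tmp/config-err-backup
print(shlex.join(argv))  # the filename stays one quoted argument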
|
import os
import sys
import json
import itertools
import string
import subprocess
import shutil
import re
from os.path import expanduser
from pkg_resources import resource_filename
from pathlib import Path
SAMPLE_SETTINGS = resource_filename(__name__, "data/settings.json")
SETTINGS = "opencanary.conf"
def expand_vars(var):
"""Recursively replace environment variables in a dictionary, list or string with their respective values."""
if isinstance(var, dict):
for key, value in var.items():
var[key] = expand_vars(value)
return var
if isinstance(var, (list, set, tuple)):
return [expand_vars(v) for v in var]
if isinstance(var, (str, bytes)):
return os.path.expandvars(var)
return var
def is_docker():
cgroup = Path("/proc/self/cgroup")
return (
Path("/.dockerenv").is_file()
or cgroup.is_file()
and "docker" in cgroup.read_text()
)
def detectIPTables():
if shutil.which("iptables"):
return True
else:
return False
SERVICE_REGEXES = {
"ssh.version": r"(SSH-(2.0|1.5|1.99|1.0)-([!-,\-./0-~]+(:?$|\s))(?:[ -~]*)){1,253}$",
}
class Config:
def __init__(self, configfile=SETTINGS):
self.__config = None
self.__configfile = configfile
files = [
configfile,
"%s/.%s" % (expanduser("~"), configfile),
"/etc/opencanaryd/%s" % configfile,
]
print(
"** We hope you enjoy using OpenCanary. For more open source Canary goodness, head over to canarytokens.org. **"
)
for fname in files:
try:
with open(fname, "r") as f:
print("[-] Using config file: %s" % fname)
self.__config = json.load(f)
self.__config = expand_vars(self.__config)
return
except IOError as e:
print("[-] Failed to open %s for reading (%s)" % (fname, e))
except ValueError as e:
print("[-] Failed to decode json from %s (%s)" % (fname, e))
subprocess.call(
"cp -r %s /var/tmp/config-err-$(date +%%s)" % fname, shell=True
)
except Exception as e:
print("[-] An error occurred loading %s (%s)" % (fname, e))
if self.__config is None:
print(
'No config file found. Please create one with "opencanaryd --copyconfig"'
)
sys.exit(1)
def moduleEnabled(self, module_name):
k = "%s.enabled" % module_name.lower()
if k in self.__config:
return bool(self.__config[k])
return False
def getVal(self, key, default=None):
# throw exception to caller
try:
return self.__config[key]
except KeyError as e:
if default is not None:
return default
raise e
def checkValues(self): # noqa: C901
"""Set all the valid values in params and return a list of errors for invalid"""
params = self.__config
# test options independently for validity
errors = []
for key, value in params.items():
try:
self.is_valid(key, value)
except ConfigException as e:
errors.append(e)
# Test that no ports overlap
ports = {k: int(v) for k, v in params.items() if k.endswith(".port")}
ports = [(port, setting) for setting, port in ports.items()]
ports.sort()
for port, settings in itertools.groupby(ports, lambda x: x[0]):
settings = list(settings)
if len(settings) > 1:
services = ", ".join([s[1].split(".")[0] for s in settings])
errmsg = "More than one service uses this port (%s)" % services
for port, setting in settings:
errors.append(ConfigException(setting, errmsg))
return errors
def is_valid(self, key, val): # noqa: C901
"""
Test the validity of an individual setting
Raise config error message on failure.
TODO: delegate module tests to appropriate module
"""
if key.endswith(".enabled"):
if not ((val is True) or (val is False)):
raise ConfigException(
key, "Boolean setting is not True or False (%s)" % val
)
if key.endswith(".port"):
if not isinstance(val, int):
raise ConfigException(
key, "Invalid port number (%s). Must be an integer." % val
)
if val < 1 or val > 65535:
raise ConfigException(
key, "Invalid port number (%s). Must be between 1 and 65535." % val
)
# Max length of SSH version string is 255 chars including trailing CR and LF
# https://tools.ietf.org/html/rfc4253
if key == "ssh.version" and len(val) > 253:
raise ConfigException(key, "SSH version string too long (%s..)" % val[:5])
if key == "device.name":
allowed_chars = string.ascii_letters + string.digits + "+-#_"
if len(val) > 100:
raise ConfigException(key, "Name cannot be longer than 100 characters")
elif len(val) < 1:
raise ConfigException(key, "Name ought to be at least one character")
elif any(map(lambda x: x not in allowed_chars, val)):
raise ConfigException(
key,
"Please use only characters, digits, any of the following: + - # _",
)
if key == "device.desc":
allowed_chars = string.ascii_letters + string.digits + "+-#_ "
if len(val) > 100:
raise ConfigException(key, "Name cannot be longer than 100 characters")
elif len(val) < 1:
raise ConfigException(key, "Name ought to be at least one character")
elif any(map(lambda x: x not in allowed_chars, val)):
raise ConfigException(
key,
"Please use only characters, digits, spaces and any of the following: + - # _",
)
if key in SERVICE_REGEXES.keys():
if not re.match(SERVICE_REGEXES[key], val):
raise ConfigException(key, f"{val} is not valid.")
return True
def __repr__(self):
return self.__config.__repr__()
def __str__(self):
return self.__config.__str__()
def toDict(self):
"""Return all settings as a dict"""
return self.__config
def toJSON(self):
"""
JSON representation of config
"""
return json.dumps(
self.__config, sort_keys=True, indent=4, separators=(",", ": ")
)
class ConfigException(Exception):
"""Exception raised on invalid config value"""
def __init__(self, key, msg):
self.key = key
self.msg = msg
def __str__(self):
return "%s: %s" % (self.key, self.msg)
def __repr__(self):
return "<%s %s (%s)>" % (self.__class__.__name__, self.key, self.msg)
config = Config()
errors = config.checkValues()
if errors:
for error in errors:
print(error)
sys.exit(1)
|
GHSA-pf5v-pqfv-x8jj
|
opencanary/modules/portscan.py
|
@@ -1,8 +1,8 @@
from opencanary.modules import CanaryService
from opencanary.modules import FileSystemWatcher
+from opencanary import safe_exec
import os
import shutil
-import subprocess
class SynLogWatcher(FileSystemWatcher):
@@ -68,7 +68,7 @@ def handleLines(self, lines=None): # noqa: C901
def detectNFTables():
- return b"nf_tables" in subprocess.check_output(["iptables", "--version"])
+ return b"nf_tables" in safe_exec("iptables", ["--version"])
class CanaryPortscan(CanaryService):
|
from opencanary.modules import CanaryService
from opencanary.modules import FileSystemWatcher
import os
import shutil
import subprocess
class SynLogWatcher(FileSystemWatcher):
def __init__(
self, logger=None, logFile=None, ignore_localhost=False, ignore_ports=None
):
if ignore_ports is None:
ignore_ports = []
self.logger = logger
self.ignore_localhost = ignore_localhost
self.ignore_ports = ignore_ports
FileSystemWatcher.__init__(self, fileName=logFile)
def handleLines(self, lines=None): # noqa: C901
for line in lines:
try:
if "canaryfw: " in line:
logtype = self.logger.LOG_PORT_SYN
(rubbish, log) = line.split("canaryfw: ")
elif "canarynmapNULL" in line:
logtype = self.logger.LOG_PORT_NMAPNULL
(rubbish, log) = line.split("canarynmapNULL: ")
elif "canarynmapXMAS" in line:
logtype = self.logger.LOG_PORT_NMAPXMAS
(rubbish, log) = line.split("canarynmapXMAS: ")
elif "canarynmapFIN" in line:
logtype = self.logger.LOG_PORT_NMAPFIN
(rubbish, log) = line.split("canarynmapFIN: ")
elif "canarynmap: " in line:
logtype = self.logger.LOG_PORT_NMAPOS
(rubbish, log) = line.split("canarynmap: ")
else:
continue
except ValueError:
continue
tags = log.split(" ")
kv = {}
for tag in tags:
if tag.find("=") >= 0:
(key, val) = tag.split("=")
else:
key = tag
val = ""
kv[key] = val
# we've seen empty tags creep in. weed them out.
if "" in kv.keys():
kv.pop("")
data = {}
data["src_host"] = kv.pop("SRC")
data["src_port"] = kv.pop("SPT")
data["dst_host"] = kv.pop("DST")
data["dst_port"] = kv.pop("DPT")
data["logtype"] = logtype
data["logdata"] = kv
if self.ignore_localhost and data.get("src_host", False) == "127.0.0.1":
continue
if int(data.get("dst_port", -1)) in self.ignore_ports:
continue
self.logger.log(data)
def detectNFTables():
return b"nf_tables" in subprocess.check_output(["iptables", "--version"])
class CanaryPortscan(CanaryService):
NAME = "portscan"
def __init__(self, config=None, logger=None):
CanaryService.__init__(self, config=config, logger=logger)
self.audit_file = config.getVal("portscan.logfile", default="/var/log/kern.log")
self.synrate = int(config.getVal("portscan.synrate", default=5))
self.nmaposrate = int(config.getVal("portscan.nmaposrate", default="5"))
self.lorate = int(config.getVal("portscan.lorate", default="3"))
self.listen_addr = config.getVal("device.listen_addr", default="")
self.ignore_localhost = config.getVal(
"portscan.ignore_localhost", default=False
)
self.ignore_ports = config.getVal("portscan.ignore_ports", default=[])
self.iptables_path = self.config.getVal("portscan.iptables_path", False)
self.config = config
def getIptablesPath(self):
if self.iptables_path:
return self.iptables_path
if detectNFTables():
return shutil.which("iptables-legacy")
return shutil.which("iptables") or "/sbin/iptables"
def startYourEngines(self, reactor=None):
# Logging rules for loopback interface.
# This is separate from the canaryfw rule as the canary watchdog was
# causing console-side noise in the logs.
self.set_iptables_rules()
fs = SynLogWatcher(
logFile=self.audit_file,
logger=self.logger,
ignore_localhost=self.ignore_localhost,
ignore_ports=self.ignore_ports,
)
fs.start()
def configUpdated(
self,
):
pass
def set_iptables_rules(self):
iptables_path = self.getIptablesPath()
os.system(
'sudo {0} -t mangle -D PREROUTING -p tcp -i lo -j LOG --log-level=warning --log-prefix="canaryfw: " -m limit --limit="{1}/hour"'.format(
iptables_path, self.lorate
)
)
os.system(
'sudo {0} -t mangle -A PREROUTING -p tcp -i lo -j LOG --log-level=warning --log-prefix="canaryfw: " -m limit --limit="{1}/hour"'.format(
iptables_path, self.lorate
)
)
# Logging rules for canaryfw.
# We ignore loopback interface traffic as it is taken care of in above rule
os.system(
'sudo {0} -t mangle -D PREROUTING -p tcp --syn -j LOG --log-level=warning --log-prefix="canaryfw: " -m limit --limit="{1}/second" ! -i lo'.format(
iptables_path, self.synrate
)
)
os.system(
'sudo {0} -t mangle -A PREROUTING -p tcp --syn -j LOG --log-level=warning --log-prefix="canaryfw: " -m limit --limit="{1}/second" ! -i lo'.format(
iptables_path, self.synrate
)
)
# Match the T3 probe of the nmap OS detection based on TCP flags and TCP options string
os.system(
'sudo {0} -t mangle -D PREROUTING -p tcp --tcp-flags ALL URG,PSH,SYN,FIN -m u32 --u32 "40=0x03030A01 && 44=0x02040109 && 48=0x080Affff && 52=0xffff0000 && 56=0x00000402" -j LOG --log-level=warning --log-prefix="canarynmap: " -m limit --limit="{1}/second"'.format(
iptables_path, self.nmaposrate
)
)
os.system(
'sudo {0} -t mangle -A PREROUTING -p tcp --tcp-flags ALL URG,PSH,SYN,FIN -m u32 --u32 "40=0x03030A01 && 44=0x02040109 && 48=0x080Affff && 52=0xffff0000 && 56=0x00000402" -j LOG --log-level=warning --log-prefix="canarynmap: " -m limit --limit="{1}/second"'.format(
iptables_path, self.nmaposrate
)
)
# Nmap Null Scan
os.system(
'sudo {0} -t mangle -D PREROUTING -p tcp -m u32 --u32 "6&0xFF=0x6 && 0>>22&0x3C@12=0x50000400" -j LOG --log-level=warning --log-prefix="canarynmapNULL: " -m limit --limit="{1}/second"'.format(
iptables_path, self.nmaposrate
)
)
os.system(
'sudo {0} -t mangle -A PREROUTING -p tcp -m u32 --u32 "6&0xFF=0x6 && 0>>22&0x3C@12=0x50000400" -j LOG --log-level=warning --log-prefix="canarynmapNULL: " -m limit --limit="{1}/second"'.format(
iptables_path, self.nmaposrate
)
)
# Nmap Xmas Scan
os.system(
'sudo {0} -t mangle -D PREROUTING -p tcp -m u32 --u32 "6&0xFF=0x6 && 0>>22&0x3C@12=0x50290400" -j LOG --log-level=warning --log-prefix="canarynmapXMAS: " -m limit --limit="{1}/second"'.format(
iptables_path, self.nmaposrate
)
)
os.system(
'sudo {0} -t mangle -A PREROUTING -p tcp -m u32 --u32 "6&0xFF=0x6 && 0>>22&0x3C@12=0x50290400" -j LOG --log-level=warning --log-prefix="canarynmapXMAS: " -m limit --limit="{1}/second"'.format(
iptables_path, self.nmaposrate
)
)
# Nmap Fin Scan
os.system(
'sudo {0} -t mangle -D PREROUTING -p tcp -m u32 --u32 "6&0xFF=0x6 && 0>>22&0x3C@12=0x50010400" -j LOG --log-level=warning --log-prefix="canarynmapFIN: " -m limit --limit="{1}/second"'.format(
iptables_path, self.nmaposrate
)
)
os.system(
'sudo {0} -t mangle -A PREROUTING -p tcp -m u32 --u32 "6&0xFF=0x6 && 0>>22&0x3C@12=0x50010400" -j LOG --log-level=warning --log-prefix="canarynmapFIN: " -m limit --limit="{1}/second"'.format(
iptables_path, self.nmaposrate
)
)
|
GHSA-pf5v-pqfv-x8jj
|
tensorflow/python/kernel_tests/quantization_ops/quantization_ops_test.py
|
@@ -206,5 +206,60 @@ def test_invalid_inputs(self):
out_type=dtypes.qint8))
+class QuantizedAddOpTest(test_util.TensorFlowTestCase):
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_invalid_inputs(self):
+ x = constant_op.constant(
+ np.int8(0), shape=[3, 3, 3, 3], dtype=dtypes.quint8)
+ y = constant_op.constant(np.int8(0), shape=[3], dtype=dtypes.quint8)
+
+ with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
+ "must be rank 0"):
+ self.evaluate(
+ math_ops.quantized_add(
+ x=x,
+ y=y,
+ min_x=[],
+ max_x=1.0,
+ min_y=0.0,
+ max_y=1.0,
+ Toutput=dtypes.qint32))
+
+
+class QuantizedReluOpTest(test_util.TensorFlowTestCase):
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_invalid_inputs(self):
+ inputs = constant_op.constant(
+ np.int8(0), shape=[3, 3, 3, 3], dtype=dtypes.quint8)
+
+ with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
+ "must be rank 0"):
+ self.evaluate(
+ nn_ops.quantized_relu(
+ features=inputs,
+ min_features=[],
+ max_features=127.0,
+ out_type=dtypes.quint8))
+
+
+class QuantizedRelu6OpTest(test_util.TensorFlowTestCase):
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_invalid_inputs(self):
+ inputs = constant_op.constant(
+ np.int8(0), shape=[3, 3, 3, 3], dtype=dtypes.quint8)
+
+ with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
+ "must be rank 0"):
+ self.evaluate(
+ nn_ops.quantized_relu6(
+ features=inputs,
+ min_features=[],
+ max_features=127.0,
+ out_type=dtypes.quint8))
+
+
if __name__ == "__main__":
googletest.main()
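For orientation (a sketch using the public TensorFlow API rather than the internal test harness above): the min/max range arguments of these quantized ops must be scalars, i.e. rank-0 tensors, and the empty list the new tests pass is a rank-1 tensor, which is exactly what the added shape checks reject:
import tensorflow as tf

assert int(tf.rank(tf.constant(1.0))) == 0  # scalar range bound: accepted
assert int(tf.rank(tf.constant([]))) == 1   # empty rank-1 tensor: now rejected with "must be rank 0"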
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.quantize ops."""
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
class FakeQuantWithMinMaxVarsOpTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def test_invalid_inputs(self):
inputs = constant_op.constant(
value=[[1.0], [2.0], [4.0]], dtype=dtypes.float32)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 0"):
self.evaluate(
array_ops.fake_quant_with_min_max_vars(
inputs=inputs, min=0.0, max=[[1.0], [2.0], [4.0]]))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 0"):
self.evaluate(
array_ops.fake_quant_with_min_max_vars(
inputs=inputs, min=[[1.0], [2.0], [4.0]], max=1.0))
class FakeQuantWithMinMaxVarsPerChannelOpTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def test_invalid_inputs(self):
inputs = constant_op.constant(
value=[[1.0], [2.0], [4.0]], dtype=dtypes.float32)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 1"):
self.evaluate(
array_ops.fake_quant_with_min_max_vars_per_channel(
inputs=inputs, min=[[0.0]], max=[1.0]))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"Dimensions must be equal|incorrect size"):
self.evaluate(
array_ops.fake_quant_with_min_max_vars_per_channel(
inputs=inputs, min=[0.0, 0.1], max=[1.0]))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 1"):
self.evaluate(
array_ops.fake_quant_with_min_max_vars_per_channel(
inputs=inputs, min=[1.0], max=[[1.0]]))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"Dimensions must be equal|incorrect size"):
self.evaluate(
array_ops.fake_quant_with_min_max_vars_per_channel(
inputs=inputs, min=[0.0], max=[1.0, 1.1]))
class QuantizedBiasedAddTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def test_invalid_inputs(self):
inputs = constant_op.constant(
np.int8(0), shape=[3, 3, 3, 3], dtype=dtypes.qint8)
bias = constant_op.constant(np.int8(0), shape=[3], dtype=dtypes.qint8)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 0"):
self.evaluate(
nn_ops.quantized_bias_add(
input=inputs,
bias=bias,
min_input=[],
max_input=1.0,
min_bias=0.0,
max_bias=1.0,
out_type=dtypes.qint32))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 0"):
self.evaluate(
nn_ops.quantized_bias_add(
input=inputs,
bias=bias,
min_input=0.0,
max_input=[],
min_bias=0.0,
max_bias=1.0,
out_type=dtypes.qint32))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 0"):
self.evaluate(
nn_ops.quantized_bias_add(
input=inputs,
bias=bias,
min_input=0.0,
max_input=1.0,
min_bias=[],
max_bias=1.0,
out_type=dtypes.qint32))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 0"):
self.evaluate(
nn_ops.quantized_bias_add(
input=inputs,
bias=bias,
min_input=0.0,
max_input=1.0,
min_bias=0.0,
max_bias=[],
out_type=dtypes.qint32))
class QuantizedInstanceNormOpTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def test_invalid_inputs(self):
inputs = constant_op.constant(
np.uint8(0), shape=[3, 3, 3, 3], dtype=dtypes.quint8)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 0"):
self.evaluate(
array_ops.quantized_instance_norm(
x=inputs, x_min=0.0, x_max=[[1.0], [2.0], [4.0]]))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 0"):
self.evaluate(
array_ops.quantized_instance_norm(
x=inputs, x_min=[[1.0], [2.0], [4.0]], x_max=1.0))
class RequantizeOpTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def test_invalid_inputs(self):
inputs = constant_op.constant(
np.int32(0), shape=[3, 3, 3, 3], dtype=dtypes.qint32)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 0"):
self.evaluate(
math_ops.requantize(
input=inputs,
input_min=[],
input_max=1.0,
requested_output_min=0.0,
requested_output_max=1.0,
out_type=dtypes.qint8))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 0"):
self.evaluate(
math_ops.requantize(
input=inputs,
input_min=0.0,
input_max=[],
requested_output_min=0.0,
requested_output_max=1.0,
out_type=dtypes.qint8))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 0"):
self.evaluate(
math_ops.requantize(
input=inputs,
input_min=0.0,
input_max=1.0,
requested_output_min=[],
requested_output_max=1.0,
out_type=dtypes.qint8))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 0"):
self.evaluate(
math_ops.requantize(
input=inputs,
input_min=0.0,
input_max=1.0,
requested_output_min=0.0,
requested_output_max=[],
out_type=dtypes.qint8))
if __name__ == "__main__":
googletest.main()
|
GHSA-v7vw-577f-vp8x
|
setup.py
|
@@ -49,6 +49,7 @@
'five.grok',
'collective.documentviewer',
'dexterity.localrolesfield',
+ 'future',
'plone.api',
'plone.app.dexterity',
'plone.directives.form',
@@ -59,7 +60,7 @@
'plone.formwidget.contenttree',
'plone.principalsource',
'collective.z3cform.chosen',
- 'z3c.table',
+ 'z3c.table>=2.2',
],
extras_require={
'test': ['plone.app.testing',
|
#! -*- coding: utf8 -*-
from setuptools import setup, find_packages
version = '1.7.dev0'
long_description = (
open('README.rst').read()
+ '\n' +
'Contributors\n'
'============\n'
+ '\n' +
open('CONTRIBUTORS.rst').read()
+ '\n' +
open('CHANGES.rst').read()
+ '\n')
setup(
name='collective.dms.basecontent',
version=version,
description="Base content types for document management system",
long_description=long_description,
# Get more strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Plone",
"Framework :: Plone :: 4.2",
"Framework :: Plone :: 4.3",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='document management system dms viewer',
author='Ecreall, Entrouvert, IMIO',
author_email='[email protected]',
url='https://github.com/collective/collective.dms.basecontent',
download_url='https://pypi.org/project/collective.dms.basecontent',
license='gpl',
packages=find_packages('src'),
package_dir={'': 'src'},
namespace_packages=['collective', 'collective.dms'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'five.grok',
'collective.documentviewer',
'dexterity.localrolesfield',
'plone.api',
'plone.app.dexterity',
'plone.directives.form',
'plone.namedfile',
'z3c.blobfile',
'plone.app.contenttypes',
'plone.app.relationfield',
'plone.formwidget.contenttree',
'plone.principalsource',
'collective.z3cform.chosen',
'z3c.table',
],
extras_require={
'test': ['plone.app.testing',
'ecreall.helpers.testing',
'plone.app.vocabularies'
],
},
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
)
|
PYSEC-2022-42989
|
src/collective/dms/basecontent/browser/column.py
|
@@ -1,19 +1,22 @@
-import os.path
-import Missing
-from Acquisition import aq_base
from AccessControl import getSecurityManager
-from Products.CMFCore.utils import getToolByName
+from Acquisition import aq_base
+from collective.dms.basecontent import _
from five import grok
+from html import escape
+from Products.CMFCore.utils import getToolByName
+from Products.CMFCore.WorkflowCore import WorkflowException
+from Products.CMFPlone.utils import safe_unicode
from z3c.table import interfaces
from zope.component import getMultiAdapter
-from zope.i18nmessageid import MessageFactory
from zope.i18n import translate
-import z3c.table.table
-import z3c.table.column
-from Products.CMFCore.WorkflowCore import WorkflowException
+from zope.i18nmessageid import MessageFactory
+
+import Missing
+import os.path
import plone.api
+import z3c.table.column
+import z3c.table.table
-from collective.dms.basecontent import _
PMF = MessageFactory('plone')
@@ -74,11 +77,11 @@ def renderCell(self, item):
for principal_id in value:
user = mtool.getMemberById(principal_id)
if user is not None:
- principals.append(user.getProperty('fullname', None) or user.getId())
+ principals.append(escape(user.getProperty('fullname', None)) or user.getId())
else:
group = gtool.getGroupById(principal_id)
if group is not None:
- principals.append(group.getProperty('title', None) or group.getId())
+ principals.append(escape(group.getProperty('title', None)) or group.getId())
return ', '.join(principals).decode('utf-8')
@@ -92,6 +95,16 @@ def getLinkURL(self, item):
return '%s/%s' % (item.getURL(), self.linkName)
return item.getURL()
+ def renderCell(self, item):
+ # setup a tag
+ return '<a href="%s"%s%s%s>%s</a>' % (
+ self.getLinkURL(item), # originally escaped
+ self.getLinkTarget(item),
+ self.getLinkCSS(item),
+ self.getLinkTitle(item),
+ self.getLinkContent(item), # originally escaped
+ )
+
class TitleColumn(LinkColumn):
grok.baseclass()
@@ -100,17 +113,14 @@ class TitleColumn(LinkColumn):
def getLinkContent(self, item):
title = get_value(item, 'Title')
- if isinstance(title, unicode):
- return title
- else:
- return unicode(title, 'utf-8', 'ignore')
+ return escape(safe_unicode(title))
class IconColumn(LinkColumn):
grok.baseclass()
def getLinkContent(self, item):
- content = super(IconColumn, self).getLinkContent(item)
+ content = super(IconColumn, self).getLinkContent(item) # escaped
return u"""<img title="%s" src="%s" />""" % (
content,
'%s/%s' % (self.table.portal_url, self.iconName))
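The substantive change here is escaping user-controlled member and group display values before the columns interpolate them into anchor and img markup; a tiny illustration with a made-up fullname shows what html.escape buys (stored XSS otherwise):
from html import escape

fullname = '<script>alert(1)</script>'  # hypothetical attacker-chosen profile value
print(u'<a href="%s">%s</a>' % ("/member/jdoe", escape(fullname)))
# -> <a href="/member/jdoe">&lt;script&gt;alert(1)&lt;/script&gt;</a>  (rendered as text, not executed)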
|
import os.path
import Missing
from Acquisition import aq_base
from AccessControl import getSecurityManager
from Products.CMFCore.utils import getToolByName
from five import grok
from z3c.table import interfaces
from zope.component import getMultiAdapter
from zope.i18nmessageid import MessageFactory
from zope.i18n import translate
import z3c.table.table
import z3c.table.column
from Products.CMFCore.WorkflowCore import WorkflowException
import plone.api
from collective.dms.basecontent import _
PMF = MessageFactory('plone')
grok.templatedir('templates')
class Column(z3c.table.column.Column, grok.MultiAdapter):
grok.baseclass()
grok.provides(interfaces.IColumn)
def get_value(item, attribute, default=None):
try:
value = getattr(aq_base(item), attribute)
if value is Missing.Value:
return default
except AttributeError:
obj = item.getObject()
value = getattr(obj, attribute, default)
if callable(value):
value = value()
return value
class DateColumn(Column):
grok.baseclass()
attribute = NotImplemented
def renderCell(self, item):
value = get_value(item, self.attribute)
return self.table.format_date(value)
class DateTimeColumn(Column):
grok.baseclass()
attribute = NotImplemented
def renderCell(self, item):
value = get_value(item, self.attribute)
return self.table.format_date(value, long_format=True)
class PrincipalColumn(Column):
grok.baseclass()
attribute = NotImplemented
def renderCell(self, item):
value = get_value(item, self.attribute, default=())
if not isinstance(value, (list, tuple)):
value = (value,)
gtool = getToolByName(plone.api.portal.get(), 'portal_groups')
mtool = getToolByName(plone.api.portal.get(), 'portal_membership')
principals = []
for principal_id in value:
user = mtool.getMemberById(principal_id)
if user is not None:
principals.append(user.getProperty('fullname', None) or user.getId())
else:
group = gtool.getGroupById(principal_id)
if group is not None:
principals.append(group.getProperty('title', None) or group.getId())
return ', '.join(principals).decode('utf-8')
class LinkColumn(z3c.table.column.LinkColumn, Column):
grok.baseclass()
def getLinkURL(self, item):
"""Setup link url."""
if self.linkName is not None:
return '%s/%s' % (item.getURL(), self.linkName)
return item.getURL()
class TitleColumn(LinkColumn):
grok.baseclass()
header = PMF("Title")
weight = 10
def getLinkContent(self, item):
title = get_value(item, 'Title')
if isinstance(title, unicode):
return title
else:
return unicode(title, 'utf-8', 'ignore')
class IconColumn(LinkColumn):
grok.baseclass()
def getLinkContent(self, item):
content = super(IconColumn, self).getLinkContent(item)
return u"""<img title="%s" src="%s" />""" % (
content,
'%s/%s' % (self.table.portal_url, self.iconName))
class DeleteColumn(IconColumn):
grok.baseclass()
header = u""
weight = 9
linkName = "delete_confirmation"
linkContent = PMF('Delete')
linkCSS = 'edm-delete-popup'
iconName = "delete_icon.png"
linkContent = PMF(u"Delete")
def actionAvailable(self, item):
obj = item.getObject()
sm = getSecurityManager()
return sm.checkPermission('Delete objects', obj)
def renderCell(self, item):
if not self.actionAvailable(item):
return u""
return super(DeleteColumn, self).renderCell(item)
class DownloadColumn(IconColumn):
grok.baseclass()
header = u""
weight = 1
linkName = "@@download"
iconName = "download_icon.png"
linkContent = _(u"Download file")
class ExternalEditColumn(IconColumn):
grok.baseclass()
header = u""
weight = 3
linkName = "@@external_edit"
iconName = "extedit_icon.png"
linkContent = PMF(u"Edit with external application")
def actionAvailable(self, item):
obj = item.getObject()
sm = getSecurityManager()
if not sm.checkPermission('Modify portal content', obj):
return False
if obj.file is None:
return False
ext = os.path.splitext(obj.file.filename)[-1].lower()
if ext in (u'.pdf', u'.jpg', '.jpeg'):
return False
view = getMultiAdapter((obj, self.request), name='externalEditorEnabled')
if not view.available():
return False
return True
def renderCell(self, item):
if not self.actionAvailable(item):
return u""
return super(ExternalEditColumn, self).renderCell(item)
class EditColumn(IconColumn):
grok.baseclass()
header = u""
weight = 2
linkName = "edit"
iconName = "++resource++fade_edit.png"
linkContent = PMF(u"Edit")
linkCSS = 'overlay-form-reload'
def actionAvailable(self, item):
obj = item.getObject()
sm = getSecurityManager()
return sm.checkPermission('Modify portal content', obj)
def renderCell(self, item):
if not self.actionAvailable(item):
return u""
return super(EditColumn, self).renderCell(item)
class StateColumn(Column):
grok.baseclass()
header = PMF(u"State")
weight = 50
def renderCell(self, item):
try:
wtool = self.table.wtool
portal_type = get_value(item, 'portal_type')
review_state = get_value(item, 'review_state')
if not review_state:
return u""
state_title = wtool.getTitleForStateOnType(review_state,
portal_type)
return translate(PMF(state_title), context=self.request)
except WorkflowException:
return u""
class LabelColumn(Column):
grok.baseclass()
attribute = NotImplemented
def renderCell(self, item):
value = get_value(item, self.attribute)
if value is None:
value = ''
return value
|
PYSEC-2022-42989
|
django/contrib/auth/tests/tokens.py
|
@@ -50,3 +50,14 @@ def _today(self):
p2 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS + 1))
self.assertFalse(p2.check_token(user, tk1))
+
+ def test_date_length(self):
+ """
+ Make sure we don't allow overly long dates, causing a potential DoS.
+ """
+ user = User.objects.create_user('ima1337h4x0r', '[email protected]', 'p4ssw0rd')
+ p0 = PasswordResetTokenGenerator()
+
+ # This will put a 14-digit base36 timestamp into the token, which is too large.
+ tk1 = p0._make_token_with_timestamp(user, 175455491841851871349)
+ self.assertFalse(p0.check_token(user, tk1))
|
from datetime import date, timedelta
from django.conf import settings
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.test import TestCase
class TokenGeneratorTest(TestCase):
def test_make_token(self):
"""
Ensure that we can make a token and that it is valid
"""
user = User.objects.create_user('tokentestuser', '[email protected]', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
self.assertTrue(p0.check_token(user, tk1))
def test_10265(self):
"""
Ensure that the token generated for a user created in the same request
will work correctly.
"""
# See ticket #10265
user = User.objects.create_user('comebackkid', '[email protected]', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
reload = User.objects.get(username='comebackkid')
tk2 = p0.make_token(reload)
self.assertEqual(tk1, tk2)
def test_timeout(self):
"""
Ensure we can use the token after n days, but no greater.
"""
# Uses a mocked version of PasswordResetTokenGenerator so we can change
# the value of 'today'
class Mocked(PasswordResetTokenGenerator):
def __init__(self, today):
self._today_val = today
def _today(self):
return self._today_val
user = User.objects.create_user('tokentestuser', '[email protected]', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
p1 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS))
self.assertTrue(p1.check_token(user, tk1))
p2 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS + 1))
self.assertFalse(p2.check_token(user, tk1))
|
GHSA-7wph-fc4w-wqp2
|
django/contrib/auth/urls.py
|
@@ -1,4 +1,4 @@
-# These URLs are normally mapped to /admin/urls.py. This URLs file is
+# These URLs are normally mapped to /admin/urls.py. This URLs file is
# provided as a convenience to those who want to deploy these URLs elsewhere.
# This file is also used to provide a reliable view deployment for test purposes.
@@ -11,7 +11,7 @@
(r'^password_change/done/$', 'django.contrib.auth.views.password_change_done'),
(r'^password_reset/$', 'django.contrib.auth.views.password_reset'),
(r'^password_reset/done/$', 'django.contrib.auth.views.password_reset_done'),
- (r'^reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', 'django.contrib.auth.views.password_reset_confirm'),
+ (r'^reset/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', 'django.contrib.auth.views.password_reset_confirm'),
(r'^reset/done/$', 'django.contrib.auth.views.password_reset_complete'),
)
|
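To make the effect of the tightened pattern in the patch above concrete, here is a small sketch using plain re outside Django's URL resolver; the sample values are invented. The old pattern forwards an arbitrarily long token to the view, while the new one rejects it before any view code runs.
import re

old = re.compile(r'^reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$')
new = re.compile(r'^reset/(?P<uidb36>[0-9A-Za-z]{1,13})-'
                 r'(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$')

legit = 'reset/3f-35t-9d06b1ac3970a47d9f2d/'      # made-up but well-formed values
huge = 'reset/' + 'z' * 100000 + '-token/'

assert old.match(legit) and new.match(legit)
assert old.match(huge)                            # old: the huge value reaches the view
assert new.match(huge) is None                    # new: rejected at the URL layer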
# These URLs are normally mapped to /admin/urls.py. This URLs file is
# provided as a convenience to those who want to deploy these URLs elsewhere.
# This file is also used to provide a reliable view deployment for test purposes.
from django.conf.urls.defaults import *
urlpatterns = patterns('',
(r'^login/$', 'django.contrib.auth.views.login'),
(r'^logout/$', 'django.contrib.auth.views.logout'),
(r'^password_change/$', 'django.contrib.auth.views.password_change'),
(r'^password_change/done/$', 'django.contrib.auth.views.password_change_done'),
(r'^password_reset/$', 'django.contrib.auth.views.password_reset'),
(r'^password_reset/done/$', 'django.contrib.auth.views.password_reset_done'),
(r'^reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', 'django.contrib.auth.views.password_reset_confirm'),
(r'^reset/done/$', 'django.contrib.auth.views.password_reset_complete'),
)
|
GHSA-7wph-fc4w-wqp2
|
django/utils/http.py
|
@@ -73,8 +73,13 @@ def http_date(epoch_seconds=None):
def base36_to_int(s):
"""
- Convertd a base 36 string to an integer
+ Converts a base 36 string to an ``int``. To prevent
+ overconsumption of server resources, raises ``ValueError` if the
+ input is longer than 13 base36 digits (13 digits is sufficient to
+ base36-encode any 64-bit integer).
"""
+ if len(s) > 13:
+ raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
|
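The core of the fix is the length guard quoted in the hunk above. A minimal restatement, plus a check of the claim that 13 base36 digits are sufficient for any 64-bit integer (the function mirrors the patched helper; the asserts are illustrative):
def base36_to_int(s):
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    return int(s, 36)

assert int('z' * 13, 36) > 2 ** 64 - 1    # 36**13 - 1 already exceeds any 64-bit value
try:
    base36_to_int('z' * 14)
except ValueError:
    pass                                   # over-long input is refused before conversion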
import re
import urllib
from email.Utils import formatdate
from django.utils.encoding import smart_str, force_unicode
from django.utils.functional import allow_lazy
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_unicode(urllib.quote(smart_str(url), safe))
urlquote = allow_lazy(urlquote, unicode)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_unicode(urllib.quote_plus(smart_str(url), safe))
urlquote_plus = allow_lazy(urlquote_plus, unicode)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if hasattr(query, 'items'):
query = query.items()
return urllib.urlencode(
[(smart_str(k),
isinstance(v, (list,tuple)) and [smart_str(i) for i in v] or smart_str(v))
for k, v in query],
doseq)
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s GMT' % rfcdate[:25]
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convertd a base 36 string to an integer
"""
return int(s, 36)
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
factor = 0
# Find starting factor
while True:
factor += 1
if i < 36 ** factor:
factor -= 1
break
base36 = []
# Construct base36 representation
while factor >= 0:
j = 36 ** factor
base36.append(digits[i / j])
i = i % j
factor -= 1
return ''.join(base36)
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.decode('string_escape') for e in etags]
return etags
def quote_etag(etag):
"""
Wraps a string in double quotes escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
|
GHSA-7wph-fc4w-wqp2
|
lib/ansible/modules/packaging/os/yum_repository.py
|
@@ -241,12 +241,6 @@
- Unique repository ID.
- This parameter is only required if I(state) is set to C(present) or
C(absent).
- params:
- required: false
- default: null
- description:
- - Option used to allow the user to overwrite any of the other options.
- To remove an option, set the value of the option to C(null).
password:
required: false
default: null
@@ -391,6 +385,8 @@
- The repo file will be automatically deleted if it contains no repository.
- When removing a repository, beware that the metadata cache may still remain
on disk until you run C(yum clean all). Use a notification handler for this.
+ - "The C(params) parameter was removed in Ansible 2.5 due to circumventing Ansible's parameter
+ handling"
'''
EXAMPLES = '''
@@ -699,11 +695,11 @@ def main():
supports_check_mode=True,
)
- # Update module parameters by user's parameters if defined
- if 'params' in module.params and isinstance(module.params['params'], dict):
- module.params.update(module.params['params'])
- # Remove the params
- module.params.pop('params', None)
+ # Params was removed
+ # https://meetbot.fedoraproject.org/ansible-meeting/2017-09-28/ansible_dev_meeting.2017-09-28-15.00.log.html
+ if module.params['params']:
+ module.fail_json(msg="The params option to yum_repository was removed in Ansible 2.5"
+ "since it circumvents Ansible's option handling")
name = module.params['name']
state = module.params['state']
|
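Why the merge removed by the patch above was a problem, in a stripped-down sketch (the dict below stands in for AnsibleModule's validated options, and the print stands in for fail_json): anything placed in params overwrote options after Ansible's own validation, including ones declared no_log, which is the behaviour the patch replaces with a hard failure.
validated = {'name': 'epel', 'password': None,
             'params': {'password': 's3cret', 'gpgcheck': False}}

# Pre-patch behaviour: the free-form dict silently overwrote validated options,
# so a value for 'password' (declared no_log in the real module) slipped past
# Ansible's option handling.
merged = dict(validated)
merged.update(merged.pop('params'))
assert merged['password'] == 's3cret'

# Post-patch behaviour: the module refuses to proceed instead of merging.
if validated.get('params'):
    print("fail_json: params was removed in Ansible 2.5 since it circumvents "
          "Ansible's option handling")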
#!/usr/bin/python
# encoding: utf-8
# (c) 2015-2016, Jiri Tyr <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'
}
DOCUMENTATION = '''
---
module: yum_repository
author: Jiri Tyr (@jtyr)
version_added: '2.1'
short_description: Add or remove YUM repositories
description:
- Add or remove YUM repositories in RPM-based Linux distributions.
options:
async:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- If set to C(yes) Yum will download packages and metadata from this
repo in parallel, if possible.
bandwidth:
required: false
default: 0
description:
- Maximum available network bandwidth in bytes/second. Used with the
I(throttle) option.
- If I(throttle) is a percentage and bandwidth is C(0) then bandwidth
throttling will be disabled. If I(throttle) is expressed as a data rate
(bytes/sec) then this option is ignored. Default is C(0) (no bandwidth
throttling).
baseurl:
required: false
default: null
description:
- URL to the directory where the yum repository's 'repodata' directory
lives.
- It can also be a list of multiple URLs.
- This, the I(metalink) or I(mirrorlist) parameters are required if I(state) is set to
C(present).
cost:
required: false
default: 1000
description:
- Relative cost of accessing this repository. Useful for weighing one
repo's packages as greater/less than any other.
deltarpm_metadata_percentage:
required: false
default: 100
description:
- When the relative size of deltarpm metadata vs pkgs is larger than
this, deltarpm metadata is not downloaded from the repo. Note that you
can give values over C(100), so C(200) means that the metadata is
required to be half the size of the packages. Use C(0) to turn off
this check, and always download metadata.
deltarpm_percentage:
required: false
default: 75
description:
- When the relative size of delta vs pkg is larger than this, delta is
not used. Use C(0) to turn off delta rpm processing. Local repositories
(with file:// I(baseurl)) have delta rpms turned off by default.
description:
required: false
default: null
description:
- A human readable string describing the repository.
- This parameter is only required if I(state) is set to C(present).
enabled:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- This tells yum whether or not to use this repository.
enablegroups:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- Determines whether yum will allow the use of package groups for this
repository.
exclude:
required: false
default: null
description:
- List of packages to exclude from updates or installs. This should be a
space separated list. Shell globs using wildcards (eg. C(*) and C(?))
are allowed.
- The list can also be a regular YAML array.
failovermethod:
required: false
choices: [roundrobin, priority]
default: roundrobin
description:
- C(roundrobin) randomly selects a URL out of the list of URLs to start
with and proceeds through each of them as it encounters a failure
contacting the host.
- C(priority) starts from the first I(baseurl) listed and reads through
them sequentially.
file:
required: false
default: null
description:
- File name without the C(.repo) extension to save the repo in. Defaults
to the value of I(name).
gpgcakey:
required: false
default: null
description:
- A URL pointing to the ASCII-armored CA key file for the repository.
gpgcheck:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- Tells yum whether or not it should perform a GPG signature check on
packages.
gpgkey:
required: false
default: null
description:
- A URL pointing to the ASCII-armored GPG key file for the repository.
- It can also be a list of multiple URLs.
http_caching:
required: false
choices: [all, packages, none]
default: all
description:
- Determines how upstream HTTP caches are instructed to handle any HTTP
downloads that Yum does.
- C(all) means that all HTTP downloads should be cached.
- C(packages) means that only RPM package downloads should be cached (but
not repository metadata downloads).
- C(none) means that no HTTP downloads should be cached.
include:
required: false
default: null
description:
- Include external configuration file. Both a local path and a URL are
supported. Configuration file will be inserted at the position of the
I(include=) line. Included files may contain further include lines.
Yum will abort with an error if an inclusion loop is detected.
includepkgs:
required: false
default: null
description:
- List of packages you want to only use from a repository. This should be
a space separated list. Shell globs using wildcards (eg. C(*) and C(?))
are allowed. Substitution variables (e.g. C($releasever)) are honored
here.
- The list can also be a regular YAML array.
ip_resolve:
required: false
choices: [4, 6, IPv4, IPv6, whatever]
default: whatever
description:
- Determines how yum resolves host names.
- C(4) or C(IPv4) - resolve to IPv4 addresses only.
- C(6) or C(IPv6) - resolve to IPv6 addresses only.
keepalive:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- This tells yum whether or not HTTP/1.1 keepalive should be used with
this repository. This can improve transfer speeds by using one
connection when downloading multiple files from a repository.
keepcache:
required: false
choices: ['0', '1']
default: '1'
description:
- Either C(1) or C(0). Determines whether or not yum keeps the cache of
headers and packages after successful installation.
metadata_expire:
required: false
default: 21600
description:
- Time (in seconds) after which the metadata will expire.
- Default value is 6 hours.
metadata_expire_filter:
required: false
choices: [never, 'read-only:past', 'read-only:present', 'read-only:future']
default: 'read-only:present'
description:
- Filter the I(metadata_expire) time, allowing a trade of speed for
accuracy if a command doesn't require it. Each yum command can specify
that it requires a certain level of timeliness quality from the remote
repos. from "I'm about to install/upgrade, so this better be current"
to "Anything that's available is good enough".
- C(never) - Nothing is filtered, always obey I(metadata_expire).
- C(read-only:past) - Commands that only care about past information are
filtered from metadata expiring. Eg. I(yum history) info (if history
needs to lookup anything about a previous transaction, then by
definition the remote package was available in the past).
- C(read-only:present) - Commands that are balanced between past and
future. Eg. I(yum list yum).
- C(read-only:future) - Commands that are likely to result in running
other commands which will require the latest metadata. Eg.
I(yum check-update).
- Note that this option does not override "yum clean expire-cache".
metalink:
required: false
default: null
description:
- Specifies a URL to a metalink file for the repomd.xml, a list of
mirrors for the entire repository are generated by converting the
mirrors for the repomd.xml file to a I(baseurl).
- This, the I(baseurl) or I(mirrorlist) parameters are required if I(state) is set to
C(present).
mirrorlist:
required: false
default: null
description:
- Specifies a URL to a file containing a list of baseurls.
- This, the I(baseurl) or I(metalink) parameters are required if I(state) is set to
C(present).
mirrorlist_expire:
required: false
default: 21600
description:
- Time (in seconds) after which the mirrorlist locally cached will
expire.
- Default value is 6 hours.
name:
required: true
description:
- Unique repository ID.
- This parameter is only required if I(state) is set to C(present) or
C(absent).
params:
required: false
default: null
description:
- Option used to allow the user to overwrite any of the other options.
To remove an option, set the value of the option to C(null).
password:
required: false
default: null
description:
- Password to use with the username for basic authentication.
priority:
required: false
default: 99
description:
- Enforce ordered protection of repositories. The value is an integer
from 1 to 99.
- This option only works if the YUM Priorities plugin is installed.
protect:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- Protect packages from updates from other repositories.
proxy:
required: false
default: null
description:
- URL to the proxy server that yum should use. Set to C(_none_) to
disable the global proxy setting.
proxy_password:
required: false
default: null
description:
- Password to use for the proxy.
proxy_username:
required: false
default: null
description:
- Username to use for the proxy.
repo_gpgcheck:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- This tells yum whether or not it should perform a GPG signature check
on the repodata from this repository.
reposdir:
required: false
default: /etc/yum.repos.d
description:
- Directory where the C(.repo) files will be stored.
retries:
required: false
default: 10
description:
- Set the number of times any attempt to retrieve a file should retry
before returning an error. Setting this to C(0) makes yum try forever.
s3_enabled:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- Enables support for S3 repositories.
- This option only works if the YUM S3 plugin is installed.
skip_if_unavailable:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- If set to C(yes) yum will continue running if this repository cannot be
contacted for any reason. This should be set carefully as all repos are
consulted for any given command.
ssl_check_cert_permissions:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- Whether yum should check the permissions on the paths for the
certificates on the repository (both remote and local).
- If we can't read any of the files then yum will force
I(skip_if_unavailable) to be C(yes). This is most useful for non-root
processes which use yum on repos that have client cert files which are
readable only by root.
sslcacert:
required: false
default: null
description:
- Path to the directory containing the databases of the certificate
authorities yum should use to verify SSL certificates.
sslclientcert:
required: false
default: null
description:
- Path to the SSL client certificate yum should use to connect to
repos/remote sites.
sslclientkey:
required: false
default: null
description:
- Path to the SSL client key yum should use to connect to repos/remote
sites.
sslverify:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- Defines whether yum should verify SSL certificates/hosts at all.
state:
required: false
choices: [absent, present]
default: present
description:
- State of the repo file.
throttle:
required: false
default: null
description:
- Enable bandwidth throttling for downloads.
- This option can be expressed as an absolute data rate in bytes/sec. An
SI prefix (k, M or G) may be appended to the bandwidth value.
timeout:
required: false
default: 30
description:
- Number of seconds to wait for a connection before timing out.
ui_repoid_vars:
required: false
default: releasever basearch
description:
- When a repository id is displayed, append these yum variables to the
string if they are used in the I(baseurl)/etc. Variables are appended
in the order listed (and found).
username:
required: false
default: null
description:
- Username to use for basic authentication to a repo or really any url.
extends_documentation_fragment:
- files
notes:
- All comments will be removed if modifying an existing repo file.
- Section order is preserved in an existing repo file.
- Parameters in a section are ordered alphabetically in an existing repo
file.
- The repo file will be automatically deleted if it contains no repository.
- When removing a repository, beware that the metadata cache may still remain
on disk until you run C(yum clean all). Use a notification handler for this.
'''
EXAMPLES = '''
- name: Add repository
yum_repository:
name: epel
description: EPEL YUM repo
baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
- name: Add multiple repositories into the same file (1/2)
yum_repository:
name: epel
description: EPEL YUM repo
file: external_repos
baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
gpgcheck: no
- name: Add multiple repositories into the same file (2/2)
yum_repository:
name: rpmforge
description: RPMforge YUM repo
file: external_repos
baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge
mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge
enabled: no
# Handler showing how to clean yum metadata cache
- name: yum-clean-metadata
command: yum clean metadata
args:
warn: no
# Example removing a repository and cleaning up metadata cache
- name: Remove repository (and clean up left-over metadata)
yum_repository:
name: epel
state: absent
notify: yum-clean-metadata
- name: Remove repository from a specific repo file
yum_repository:
name: epel
file: external_repos
state: absent
#
# Allow to overwrite the yum_repository parameters by defining the parameters
# as a variable in the defaults or vars file:
#
# my_role_somerepo_params:
# # Disable GPG checking
# gpgcheck: no
# # Remove the gpgkey option
# gpgkey: null
#
- name: Add Some repo
yum_repository:
name: somerepo
description: Some YUM repo
baseurl: http://server.com/path/to/the/repo
gpgkey: http://server.com/keys/somerepo.pub
gpgcheck: yes
params: "{{ my_role_somerepo_params }}"
'''
RETURN = '''
repo:
description: repository name
returned: success
type: string
sample: "epel"
state:
description: state of the target, after execution
returned: success
type: string
sample: "present"
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six.moves import configparser
class YumRepo(object):
# Class global variables
module = None
params = None
section = None
repofile = configparser.RawConfigParser()
# List of parameters which will be allowed in the repo file output
allowed_params = [
'async',
'bandwidth',
'baseurl',
'cost',
'deltarpm_metadata_percentage',
'deltarpm_percentage',
'enabled',
'enablegroups',
'exclude',
'failovermethod',
'gpgcakey',
'gpgcheck',
'gpgkey',
'http_caching',
'include',
'includepkgs',
'ip_resolve',
'keepalive',
'keepcache',
'metadata_expire',
'metadata_expire_filter',
'metalink',
'mirrorlist',
'mirrorlist_expire',
'name',
'password',
'priority',
'protect',
'proxy',
'proxy_password',
'proxy_username',
'repo_gpgcheck',
'retries',
's3_enabled',
'skip_if_unavailable',
'sslcacert',
'ssl_check_cert_permissions',
'sslclientcert',
'sslclientkey',
'sslverify',
'throttle',
'timeout',
'ui_repoid_vars',
'username']
# List of parameters which can be a list
list_params = ['exclude', 'includepkgs']
def __init__(self, module):
# To be able to use fail_json
self.module = module
# Shortcut for the params
self.params = self.module.params
# Section is always the repoid
self.section = self.params['repoid']
# Check if repo directory exists
repos_dir = self.params['reposdir']
if not os.path.isdir(repos_dir):
self.module.fail_json(
msg="Repo directory '%s' does not exist." % repos_dir)
# Set dest; also used to set dest parameter for the FS attributes
self.params['dest'] = os.path.join(
repos_dir, "%s.repo" % self.params['file'])
# Read the repo file if it exists
if os.path.isfile(self.params['dest']):
self.repofile.read(self.params['dest'])
def add(self):
# Remove already existing repo and create a new one
if self.repofile.has_section(self.section):
self.repofile.remove_section(self.section)
# Add section
self.repofile.add_section(self.section)
# Baseurl/mirrorlist is not required because for removal we need only
# the repo name. This is why we check if the baseurl/mirrorlist is
# defined.
req_params = (self.params['baseurl'], self.params['metalink'], self.params['mirrorlist'])
if req_params == (None, None, None):
self.module.fail_json(
msg="Parameter 'baseurl', 'metalink' or 'mirrorlist' is required for "
"adding a new repo.")
# Set options
for key, value in sorted(self.params.items()):
if key in self.list_params and isinstance(value, list):
# Join items into one string for specific parameters
value = ' '.join(value)
elif isinstance(value, bool):
# Convert boolean value to integer
value = int(value)
# Set the value only if it was defined (default is None)
if value is not None and key in self.allowed_params:
self.repofile.set(self.section, key, value)
def save(self):
if len(self.repofile.sections()):
# Write data into the file
try:
fd = open(self.params['dest'], 'w')
except IOError:
e = get_exception()
self.module.fail_json(
msg="Cannot open repo file %s." % self.params['dest'],
details=str(e))
self.repofile.write(fd)
try:
fd.close()
except IOError:
e = get_exception()
self.module.fail_json(
msg="Cannot write repo file %s." % self.params['dest'],
details=str(e))
else:
# Remove the file if there are no repos
try:
os.remove(self.params['dest'])
except OSError:
e = get_exception()
self.module.fail_json(
msg=(
"Cannot remove empty repo file %s." %
self.params['dest']),
details=str(e))
def remove(self):
# Remove section if exists
if self.repofile.has_section(self.section):
self.repofile.remove_section(self.section)
def dump(self):
repo_string = ""
# Compose the repo file
for section in sorted(self.repofile.sections()):
repo_string += "[%s]\n" % section
for key, value in sorted(self.repofile.items(section)):
repo_string += "%s = %s\n" % (key, value)
repo_string += "\n"
return repo_string
def main():
# Module settings
module = AnsibleModule(
argument_spec=dict(
async=dict(type='bool'),
bandwidth=dict(),
baseurl=dict(type='list'),
cost=dict(),
deltarpm_metadata_percentage=dict(),
deltarpm_percentage=dict(),
description=dict(),
enabled=dict(type='bool'),
enablegroups=dict(type='bool'),
exclude=dict(),
failovermethod=dict(choices=['roundrobin', 'priority']),
file=dict(),
gpgcakey=dict(),
gpgcheck=dict(type='bool'),
gpgkey=dict(type='list'),
http_caching=dict(choices=['all', 'packages', 'none']),
include=dict(),
includepkgs=dict(),
ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']),
keepalive=dict(type='bool'),
keepcache=dict(choices=['0', '1']),
metadata_expire=dict(),
metadata_expire_filter=dict(
choices=[
'never',
'read-only:past',
'read-only:present',
'read-only:future']),
metalink=dict(),
mirrorlist=dict(),
mirrorlist_expire=dict(),
name=dict(required=True),
params=dict(type='dict'),
password=dict(no_log=True),
priority=dict(),
protect=dict(type='bool'),
proxy=dict(),
proxy_password=dict(no_log=True),
proxy_username=dict(),
repo_gpgcheck=dict(type='bool'),
reposdir=dict(default='/etc/yum.repos.d', type='path'),
retries=dict(),
s3_enabled=dict(type='bool'),
skip_if_unavailable=dict(type='bool'),
sslcacert=dict(),
ssl_check_cert_permissions=dict(type='bool'),
sslclientcert=dict(),
sslclientkey=dict(),
sslverify=dict(type='bool'),
state=dict(choices=['present', 'absent'], default='present'),
throttle=dict(),
timeout=dict(),
ui_repoid_vars=dict(),
username=dict(),
),
add_file_common_args=True,
supports_check_mode=True,
)
# Update module parameters by user's parameters if defined
if 'params' in module.params and isinstance(module.params['params'], dict):
module.params.update(module.params['params'])
# Remove the params
module.params.pop('params', None)
name = module.params['name']
state = module.params['state']
# Check if required parameters are present
if state == 'present':
if (
module.params['baseurl'] is None and
module.params['metalink'] is None and
module.params['mirrorlist'] is None):
module.fail_json(
msg="Parameter 'baseurl', 'metalink' or 'mirrorlist' is required.")
if module.params['description'] is None:
module.fail_json(
msg="Parameter 'description' is required.")
# Rename "name" and "description" to ensure correct key sorting
module.params['repoid'] = module.params['name']
module.params['name'] = module.params['description']
del module.params['description']
# Change list type to string for baseurl and gpgkey
for list_param in ['baseurl', 'gpgkey']:
if (
list_param in module.params and
module.params[list_param] is not None):
module.params[list_param] = "\n".join(module.params[list_param])
# Define repo file name if it doesn't exist
if module.params['file'] is None:
module.params['file'] = module.params['repoid']
# Instantiate the YumRepo object
yumrepo = YumRepo(module)
# Get repo status before change
diff = {
'before_header': yumrepo.params['dest'],
'before': yumrepo.dump(),
'after_header': yumrepo.params['dest'],
'after': ''
}
# Perform action depending on the state
if state == 'present':
yumrepo.add()
elif state == 'absent':
yumrepo.remove()
# Get repo status after change
diff['after'] = yumrepo.dump()
# Compare repo states
changed = diff['before'] != diff['after']
# Save the file only if not in check mode and if there was a change
if not module.check_mode and changed:
yumrepo.save()
# Change file attributes if needed
if os.path.isfile(module.params['dest']):
file_args = module.load_file_common_arguments(module.params)
changed = module.set_fs_attributes_if_different(file_args, changed)
# Print status of the change
module.exit_json(changed=changed, repo=name, state=state, diff=diff)
if __name__ == '__main__':
main()
|
GHSA-588w-w6mv-3cw5
|
lib/ansible/modules/web_infrastructure/jenkins_plugin.py
|
@@ -46,14 +46,6 @@
default: jenkins
description:
- Name of the Jenkins user on the OS.
- params:
- required: false
- default: null
- description:
- - Option used to allow the user to overwrite any of the other options. To
- remove an option, set the value of the option to C(null).
- - Changed in 2.5.0, 2.4.1, 2.3.3 to raise an error if C(url_password) is specified in params.
- Use the actual C(url_password) argument instead.
state:
required: false
choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
@@ -120,6 +112,8 @@
- It is not possible to run the module remotely by changing the I(url)
parameter to point to the Jenkins server. The module must be used on the
host where Jenkins runs as it needs direct access to the plugin files.
+ - "The C(params) option was removed in Ansible 2.5 due to circumventing Ansible's
+ option handling"
'''
EXAMPLES = '''
@@ -762,16 +756,11 @@ def main():
supports_check_mode=True,
)
- # Update module parameters by user's parameters if defined
- if 'params' in module.params and isinstance(module.params['params'], dict):
- if 'url_password' in module.params['params']:
- # The params argument should be removed eventually. Until then, raise an error if
- # url_password is specified there as it can lead to the password being logged
- module.fail_json(msg='Do not specify url_password in params as it may get logged')
-
- module.params.update(module.params['params'])
- # Remove the params
- module.params.pop('params', None)
+ # Params was removed
+ # https://meetbot.fedoraproject.org/ansible-meeting/2017-09-28/ansible_dev_meeting.2017-09-28-15.00.log.html
+ if module.params['params']:
+ module.fail_json(msg="The params option to jenkins_plugin was removed in Ansible 2.5"
+ "since it circumvents Ansible's option handling")
# Force basic authentication
module.params['force_basic_auth'] = True
|
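The jenkins_plugin change follows the same pattern, but the interim fix removed in the patch above hints at the sharper risk: url_password is declared no_log, yet a copy smuggled inside params is just ordinary dict data, so a naive log sanitiser misses it. A rough illustration (the masking function and placeholder are invented for this sketch, not Ansible's):
NO_LOG = {'url_password'}
invocation = {'name': 'build-pipeline-plugin',
              'url_password': 'p4ssw0rd',               # declared no_log
              'params': {'url_password': 'p4ssw0rd'}}   # same secret, nested

def sanitise(opts):
    # Masks only top-level options declared no_log.
    return {k: ('***' if k in NO_LOG else v) for k, v in opts.items()}

logged = sanitise(invocation)
assert logged['url_password'] == '***'
assert logged['params']['url_password'] == 'p4ssw0rd'   # the value that could be logged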
#!/usr/bin/python
# encoding: utf-8
# (c) 2016, Jiri Tyr <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: jenkins_plugin
author: Jiri Tyr (@jtyr)
version_added: '2.2'
short_description: Add or remove Jenkins plugin
description:
- Ansible module which helps to manage Jenkins plugins.
options:
group:
required: false
default: jenkins
description:
- Name of the Jenkins group on the OS.
jenkins_home:
required: false
default: /var/lib/jenkins
description:
- Home directory of the Jenkins user.
mode:
required: false
default: '0664'
description:
- File mode applied on versioned plugins.
name:
required: true
description:
- Plugin name.
owner:
required: false
default: jenkins
description:
- Name of the Jenkins user on the OS.
params:
required: false
default: null
description:
- Option used to allow the user to overwrite any of the other options. To
remove an option, set the value of the option to C(null).
- Changed in 2.5.0, 2.4.1, 2.3.3 to raise an error if C(url_password) is specified in params.
Use the actual C(url_password) argument instead.
state:
required: false
choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
default: present
description:
- Desired plugin state.
- If the C(latest) is set, the check for new version will be performed
every time. This is suitable to keep the plugin up-to-date.
timeout:
required: false
default: 30
description:
- Server connection timeout in secs.
updates_expiration:
required: false
default: 86400
description:
- Number of seconds after which a new copy of the I(update-center.json)
file is downloaded. This is used to avoid the need to download the
plugin to calculate its checksum when C(latest) is specified.
- Set it to C(0) if no cache file should be used. In that case, the
plugin file will always be downloaded to calculate its checksum when
C(latest) is specified.
updates_url:
required: false
default: https://updates.jenkins-ci.org
description:
- URL of the Update Centre.
- Used as the base URL to download the plugins and the
I(update-center.json) JSON file.
url:
required: false
default: http://localhost:8080
description:
- URL of the Jenkins server.
version:
required: false
default: null
description:
- Plugin version number.
- If this option is specified, all plugin dependencies must be installed
manually.
- It might take longer to verify that the correct version is installed.
This is especially true if a specific version number is specified.
- Quote the version to prevent the value from being interpreted as a float. For
example if C(1.20) would be unquoted, it would become C(1.2).
with_dependencies:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- Defines whether to install plugin dependencies.
- This option takes effect only if the I(version) is not defined.
notes:
- Plugin installation should be run under root or the same user which owns
the plugin files on the disk. Only if the plugin is not installed yet and
no version is specified, the API installation is performed which requires
only the Web UI credentials.
- It's necessary to notify the handler or call the I(service) module to
restart the Jenkins service after a new plugin was installed.
- Pinning works only if the plugin is installed and the Jenkins service was
successfully restarted after the plugin installation.
- It is not possible to run the module remotely by changing the I(url)
parameter to point to the Jenkins server. The module must be used on the
host where Jenkins runs as it needs direct access to the plugin files.
'''
EXAMPLES = '''
- name: Install plugin
jenkins_plugin:
name: build-pipeline-plugin
- name: Install plugin without its dependencies
jenkins_plugin:
name: build-pipeline-plugin
with_dependencies: no
- name: Make sure the plugin is always up-to-date
jenkins_plugin:
name: token-macro
state: latest
- name: Install specific version of the plugin
jenkins_plugin:
name: token-macro
version: "1.15"
- name: Pin the plugin
jenkins_plugin:
name: token-macro
state: pinned
- name: Unpin the plugin
jenkins_plugin:
name: token-macro
state: unpinned
- name: Enable the plugin
jenkins_plugin:
name: token-macro
state: enabled
- name: Disable the plugin
jenkins_plugin:
name: token-macro
state: disabled
- name: Uninstall plugin
jenkins_plugin:
name: build-pipeline-plugin
state: absent
#
# Example of how to authenticate
#
# my_jenkins_params:
# url_username: admin
#
- name: Install plugin
jenkins_plugin:
name: build-pipeline-plugin
params: "{{ my_jenkins_params }}"
url_password: p4ssw0rd
url: http://localhost:8888
# Note that url_password **can not** be placed in params as params could end up in a log file
#
# Example of a Play which handles Jenkins restarts during the state changes
#
- name: Jenkins Master play
hosts: jenkins-master
vars:
my_jenkins_plugins:
token-macro:
enabled: yes
build-pipeline-plugin:
version: "1.4.9"
pinned: no
enabled: yes
tasks:
- name: Install plugins without a specific version
jenkins_plugin:
name: "{{ item.key }}"
register: my_jenkins_plugin_unversioned
when: >
'version' not in item.value
with_dict: "{{ my_jenkins_plugins }}"
- name: Install plugins with a specific version
jenkins_plugin:
name: "{{ item.key }}"
version: "{{ item.value['version'] }}"
register: my_jenkins_plugin_versioned
when: >
'version' in item.value
with_dict: "{{ my_jenkins_plugins }}"
- name: Initiate the fact
set_fact:
jenkins_restart_required: no
- name: Check if restart is required by any of the versioned plugins
set_fact:
jenkins_restart_required: yes
when: item.changed
with_items: "{{ my_jenkins_plugin_versioned.results }}"
- name: Check if restart is required by any of the unversioned plugins
set_fact:
jenkins_restart_required: yes
when: item.changed
with_items: "{{ my_jenkins_plugin_unversioned.results }}"
- name: Restart Jenkins if required
service:
name: jenkins
state: restarted
when: jenkins_restart_required
- name: Wait for Jenkins to start up
uri:
url: http://localhost:8080
status_code: 200
timeout: 5
register: jenkins_service_status
# Keep trying for 5 mins in 5 sec intervals
retries: 60
delay: 5
until: >
'status' in jenkins_service_status and
jenkins_service_status['status'] == 200
when: jenkins_restart_required
- name: Reset the fact
set_fact:
jenkins_restart_required: no
when: jenkins_restart_required
- name: Plugin pinning
jenkins_plugin:
name: "{{ item.key }}"
state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}"
when: >
'pinned' in item.value
with_dict: "{{ my_jenkins_plugins }}"
- name: Plugin enabling
jenkins_plugin:
name: "{{ item.key }}"
state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}"
when: >
'enabled' in item.value
with_dict: "{{ my_jenkins_plugins }}"
'''
RETURN = '''
plugin:
description: plugin name
returned: success
type: string
sample: build-pipeline-plugin
state:
description: state of the target, after execution
returned: success
type: string
sample: "present"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import fetch_url, url_argument_spec
from ansible.module_utils._text import to_native
import base64
import hashlib
import json
import os
import tempfile
import time
class JenkinsPlugin(object):
def __init__(self, module):
# To be able to call fail_json
self.module = module
# Shortcuts for the params
self.params = self.module.params
self.url = self.params['url']
self.timeout = self.params['timeout']
# Crumb
self.crumb = {}
if self._csrf_enabled():
self.crumb = self._get_crumb()
# Get list of installed plugins
self._get_installed_plugins()
def _csrf_enabled(self):
csrf_data = self._get_json_data(
"%s/%s" % (self.url, "api/json"), 'CSRF')
if 'useCrumbs' not in csrf_data:
self.module.fail_json(
msg="Required fields not found in the Crumbs response.",
details=csrf_data)
return csrf_data['useCrumbs']
def _get_json_data(self, url, what, **kwargs):
# Get the JSON data
r = self._get_url_data(url, what, **kwargs)
# Parse the JSON data
try:
json_data = json.loads(to_native(r.read()))
except Exception:
e = get_exception()
self.module.fail_json(
msg="Cannot parse %s JSON data." % what,
details=to_native(e))
return json_data
def _get_url_data(
self, url, what=None, msg_status=None, msg_exception=None,
**kwargs):
# Compose default messages
if msg_status is None:
msg_status = "Cannot get %s" % what
if msg_exception is None:
msg_exception = "Retrieval of %s failed." % what
# Get the URL data
try:
response, info = fetch_url(
self.module, url, timeout=self.timeout, **kwargs)
if info['status'] != 200:
self.module.fail_json(msg=msg_status, details=info['msg'])
except Exception:
e = get_exception()
self.module.fail_json(msg=msg_exception, details=to_native(e))
return response
def _get_crumb(self):
crumb_data = self._get_json_data(
"%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb')
if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data:
ret = {
crumb_data['crumbRequestField']: crumb_data['crumb']
}
else:
self.module.fail_json(
msg="Required fields not found in the Crum response.",
details=crumb_data)
return ret
def _get_installed_plugins(self):
plugins_data = self._get_json_data(
"%s/%s" % (self.url, "pluginManager/api/json?depth=1"),
'list of plugins')
# Check if we got valid data
if 'plugins' not in plugins_data:
self.module.fail_json(msg="No valid plugin data found.")
# Create final list of installed/pinned plugins
self.is_installed = False
self.is_pinned = False
self.is_enabled = False
for p in plugins_data['plugins']:
if p['shortName'] == self.params['name']:
self.is_installed = True
if p['pinned']:
self.is_pinned = True
if p['enabled']:
self.is_enabled = True
break
def install(self):
changed = False
plugin_file = (
'%s/plugins/%s.jpi' % (
self.params['jenkins_home'],
self.params['name']))
if not self.is_installed and self.params['version'] is None:
if not self.module.check_mode:
# Install the plugin (with dependencies)
install_script = (
'd = Jenkins.instance.updateCenter.getPlugin("%s")'
'.deploy(); d.get();' % self.params['name'])
if self.params['with_dependencies']:
install_script = (
'Jenkins.instance.updateCenter.getPlugin("%s")'
'.getNeededDependencies().each{it.deploy()}; %s' % (
self.params['name'], install_script))
script_data = {
'script': install_script
}
script_data.update(self.crumb)
data = urlencode(script_data)
# Send the installation request
r = self._get_url_data(
"%s/scriptText" % self.url,
msg_status="Cannot install plugin.",
msg_exception="Plugin installation has failed.",
data=data)
hpi_file = '%s/plugins/%s.hpi' % (
self.params['jenkins_home'],
self.params['name'])
if os.path.isfile(hpi_file):
os.remove(hpi_file)
changed = True
else:
# Check if the plugin directory exists
if not os.path.isdir(self.params['jenkins_home']):
self.module.fail_json(
msg="Jenkins home directory doesn't exist.")
md5sum_old = None
if os.path.isfile(plugin_file):
# Make the checksum of the currently installed plugin
md5sum_old = hashlib.md5(
open(plugin_file, 'rb').read()).hexdigest()
if self.params['version'] in [None, 'latest']:
# Take latest version
plugin_url = (
"%s/latest/%s.hpi" % (
self.params['updates_url'],
self.params['name']))
else:
# Take specific version
plugin_url = (
"{0}/download/plugins/"
"{1}/{2}/{1}.hpi".format(
self.params['updates_url'],
self.params['name'],
self.params['version']))
if (
self.params['updates_expiration'] == 0 or
self.params['version'] not in [None, 'latest'] or
md5sum_old is None):
# Download the plugin file directly
r = self._download_plugin(plugin_url)
# Write downloaded plugin into file if checksums don't match
if md5sum_old is None:
# No previously installed plugin
if not self.module.check_mode:
self._write_file(plugin_file, r)
changed = True
else:
# Get data for the MD5
data = r.read()
# Make new checksum
md5sum_new = hashlib.md5(data).hexdigest()
# If the checksum is different from the currently installed
# plugin, store the new plugin
if md5sum_old != md5sum_new:
if not self.module.check_mode:
self._write_file(plugin_file, data)
changed = True
else:
# Check for update from the updates JSON file
plugin_data = self._download_updates()
try:
sha1_old = hashlib.sha1(open(plugin_file, 'rb').read())
except Exception:
e = get_exception()
self.module.fail_json(
msg="Cannot calculate SHA1 of the old plugin.",
details=e.message)
sha1sum_old = base64.b64encode(sha1_old.digest())
# If the latest version changed, download it
if sha1sum_old != plugin_data['sha1']:
if not self.module.check_mode:
r = self._download_plugin(plugin_url)
self._write_file(plugin_file, r)
changed = True
# Change file attributes if needed
if os.path.isfile(plugin_file):
params = {
'dest': plugin_file
}
params.update(self.params)
file_args = self.module.load_file_common_arguments(params)
if not self.module.check_mode:
# Not sure how to run this in the check mode
changed = self.module.set_fs_attributes_if_different(
file_args, changed)
else:
# See the comment above
changed = True
return changed
def _download_updates(self):
updates_filename = 'jenkins-plugin-cache.json'
updates_dir = os.path.expanduser('~/.ansible/tmp')
updates_file = "%s/%s" % (updates_dir, updates_filename)
download_updates = True
# Check if we need to download new updates file
if os.path.isfile(updates_file):
# Get timestamp when the file was changed last time
ts_file = os.stat(updates_file).st_mtime
ts_now = time.time()
if ts_now - ts_file < self.params['updates_expiration']:
download_updates = False
updates_file_orig = updates_file
# Download the updates file if needed
if download_updates:
url = "%s/update-center.json" % self.params['updates_url']
# Get the data
r = self._get_url_data(
url,
msg_status="Remote updates not found.",
msg_exception="Updates download failed.")
# Write the updates file
update_fd, updates_file = tempfile.mkstemp()
os.write(update_fd, r.read())
try:
os.close(update_fd)
except IOError:
e = get_exception()
self.module.fail_json(
msg="Cannot close the tmp updates file %s." % updates_file,
details=to_native(e))
# Open the updates file
try:
f = open(updates_file)
except IOError:
e = get_exception()
self.module.fail_json(
msg="Cannot open temporal updates file.",
details=to_native(e))
i = 0
for line in f:
# Read only the second line
if i == 1:
try:
data = json.loads(line)
except Exception:
e = get_exception()
self.module.fail_json(
msg="Cannot load JSON data from the tmp updates file.",
details=e.message)
break
i += 1
# Move the updates file to the right place if we could read it
if download_updates:
# Make sure the destination directory exists
if not os.path.isdir(updates_dir):
try:
os.makedirs(updates_dir, int('0700', 8))
except OSError:
e = get_exception()
self.module.fail_json(
msg="Cannot create temporal directory.",
details=e.message)
self.module.atomic_move(updates_file, updates_file_orig)
# Check if we have the plugin data available
if 'plugins' not in data or self.params['name'] not in data['plugins']:
self.module.fail_json(
msg="Cannot find plugin data in the updates file.")
return data['plugins'][self.params['name']]
def _download_plugin(self, plugin_url):
# Download the plugin
r = self._get_url_data(
plugin_url,
msg_status="Plugin not found.",
msg_exception="Plugin download failed.")
return r
def _write_file(self, f, data):
# Store the plugin into a temp file and then move it
tmp_f_fd, tmp_f = tempfile.mkstemp()
if isinstance(data, str):
os.write(tmp_f_fd, data)
else:
os.write(tmp_f_fd, data.read())
try:
os.close(tmp_f_fd)
except IOError:
e = get_exception()
self.module.fail_json(
msg='Cannot close the temporal plugin file %s.' % tmp_f,
details=to_native(e))
# Move the file onto the right place
self.module.atomic_move(tmp_f, f)
def uninstall(self):
changed = False
# Perform the action
if self.is_installed:
if not self.module.check_mode:
self._pm_query('doUninstall', 'Uninstallation')
changed = True
return changed
def pin(self):
return self._pinning('pin')
def unpin(self):
return self._pinning('unpin')
def _pinning(self, action):
changed = False
# Check if the plugin is pinned/unpinned
if (
action == 'pin' and not self.is_pinned or
action == 'unpin' and self.is_pinned):
# Perform the action
if not self.module.check_mode:
self._pm_query(action, "%sning" % action.capitalize())
changed = True
return changed
def enable(self):
return self._enabling('enable')
def disable(self):
return self._enabling('disable')
def _enabling(self, action):
changed = False
# Check if the plugin is pinned/unpinned
if (
action == 'enable' and not self.is_enabled or
action == 'disable' and self.is_enabled):
# Perform the action
if not self.module.check_mode:
self._pm_query(
"make%sd" % action.capitalize(),
"%sing" % action[:-1].capitalize())
changed = True
return changed
def _pm_query(self, action, msg):
url = "%s/pluginManager/plugin/%s/%s" % (
self.params['url'], self.params['name'], action)
data = urlencode(self.crumb)
# Send the request
self._get_url_data(
url,
msg_status="Plugin not found. %s" % url,
msg_exception="%s has failed." % msg,
data=data)
def main():
# Module arguments
argument_spec = url_argument_spec()
argument_spec.update(
group=dict(default='jenkins'),
jenkins_home=dict(default='/var/lib/jenkins'),
mode=dict(default='0644', type='raw'),
name=dict(required=True),
owner=dict(default='jenkins'),
params=dict(type='dict'),
state=dict(
choices=[
'present',
'absent',
'pinned',
'unpinned',
'enabled',
'disabled',
'latest'],
default='present'),
timeout=dict(default=30, type="int"),
updates_expiration=dict(default=86400, type="int"),
updates_url=dict(default='https://updates.jenkins-ci.org'),
url=dict(default='http://localhost:8080'),
url_password=dict(no_log=True),
version=dict(),
with_dependencies=dict(default=True, type='bool'),
)
# Module settings
module = AnsibleModule(
argument_spec=argument_spec,
add_file_common_args=True,
supports_check_mode=True,
)
# Update module parameters by user's parameters if defined
if 'params' in module.params and isinstance(module.params['params'], dict):
if 'url_password' in module.params['params']:
# The params argument should be removed eventually. Until then, raise an error if
# url_password is specified there as it can lead to the password being logged
module.fail_json(msg='Do not specify url_password in params as it may get logged')
module.params.update(module.params['params'])
# Remove the params
module.params.pop('params', None)
# Force basic authentication
module.params['force_basic_auth'] = True
# Convert timeout to float
try:
module.params['timeout'] = float(module.params['timeout'])
except ValueError:
e = get_exception()
module.fail_json(
msg='Cannot convert %s to float.' % module.params['timeout'],
details=to_native(e))
# Set version to latest if state is latest
if module.params['state'] == 'latest':
module.params['state'] = 'present'
module.params['version'] = 'latest'
# Create some shortcuts
name = module.params['name']
state = module.params['state']
# Initial change state of the task
changed = False
# Instantiate the JenkinsPlugin object
jp = JenkinsPlugin(module)
# Perform action depending on the requested state
if state == 'present':
changed = jp.install()
elif state == 'absent':
changed = jp.uninstall()
elif state == 'pinned':
changed = jp.pin()
elif state == 'unpinned':
changed = jp.unpin()
elif state == 'enabled':
changed = jp.enable()
elif state == 'disabled':
changed = jp.disable()
# Print status of the change
module.exit_json(changed=changed, plugin=name, state=state)
if __name__ == '__main__':
main()
|
GHSA-588w-w6mv-3cw5
|
requests_kerberos/kerberos_.py
|
@@ -251,7 +251,7 @@ def handle_response(self, response, **kwargs):
if response.status_code == 401:
_r = self.handle_401(response, **kwargs)
log.debug("handle_response(): returning {0}".format(_r))
- return _r
+ return self.handle_response(_r, **kwargs)
else:
_r = self.handle_other(response)
log.debug("handle_response(): returning {0}".format(_r))
|
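The one-line change above matters because handle_401 re-sends the request with the Negotiate header and returns the second response; returning that directly means handle_other, and with it mutual authentication of the server, never runs on the response the caller actually receives. A control-flow sketch with throwaway stubs (not the library's classes):
class FakeResponse:
    def __init__(self, status_code):
        self.status_code = status_code

def handle_response(resp, handle_401, handle_other, fixed=True):
    if resp.status_code == 401:
        retried = handle_401(resp)                   # re-send with Negotiate header attached
        if fixed:
            return handle_response(retried, handle_401, handle_other)
        return retried                               # pre-fix: server token never verified
    return handle_other(resp)

checked = []
ok = handle_response(FakeResponse(401),
                     handle_401=lambda r: FakeResponse(200),
                     handle_other=lambda r: checked.append(r) or r)
assert checked and ok.status_code == 200             # mutual-auth path ran on the retry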
import kerberos
import re
import logging
from requests.auth import AuthBase
from requests.models import Response
from requests.compat import urlparse, StringIO
from requests.structures import CaseInsensitiveDict
from requests.cookies import cookiejar_from_dict
from .exceptions import MutualAuthenticationError
log = logging.getLogger(__name__)
# Different types of mutual authentication:
# with mutual_authentication set to REQUIRED, all responses will be
# authenticated with the exception of errors. Errors will have their contents
# and headers stripped. If a non-error response cannot be authenticated, a
# MutualAuthenticationError exception will be raised.
# with mutual_authentication set to OPTIONAL, mutual authentication will be
# attempted if supported, and if supported and failed, a
# MutualAuthenticationError exception will be raised. Responses which do not
# support mutual authentication will be returned directly to the user.
# with mutual_authentication set to DISABLED, mutual authentication will not be
# attempted, even if supported.
REQUIRED = 1
OPTIONAL = 2
DISABLED = 3
class SanitizedResponse(Response):
"""The :class:`Response <Response>` object, which contains a server's
response to an HTTP request.
This differs from `requests.models.Response` in that its headers and
content have been sanitized. This is only used for HTTP Error messages
which do not support mutual authentication when mutual authentication is
required."""
def __init__(self, response):
super(SanitizedResponse, self).__init__()
self.status_code = response.status_code
self.encoding = response.encoding
self.raw = response.raw
self.reason = response.reason
self.url = response.url
self.request = response.request
self.connection = response.connection
self._content_consumed = True
self._content = ""
self.cookies = cookiejar_from_dict({})
self.headers = CaseInsensitiveDict()
self.headers['content-length'] = '0'
for header in ('date', 'server'):
if header in response.headers:
self.headers[header] = response.headers[header]
def _negotiate_value(response):
"""Extracts the gssapi authentication token from the appropriate header"""
if hasattr(_negotiate_value, 'regex'):
regex = _negotiate_value.regex
else:
# There's no need to re-compile this EVERY time it is called. Compile
# it once and you won't have the performance hit of the compilation.
regex = re.compile('(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I)
_negotiate_value.regex = regex
authreq = response.headers.get('www-authenticate', None)
if authreq:
match_obj = regex.search(authreq)
if match_obj:
return match_obj.group(1)
return None
class HTTPKerberosAuth(AuthBase):
"""Attaches HTTP GSSAPI/Kerberos Authentication to the given Request
object."""
def __init__(self, mutual_authentication=REQUIRED, service="HTTP"):
self.context = {}
self.mutual_authentication = mutual_authentication
self.pos = None
self.service = service
def generate_request_header(self, response):
"""
Generates the GSSAPI authentication token with kerberos.
If any GSSAPI step fails, return None.
"""
host = urlparse(response.url).hostname
try:
result, self.context[host] = kerberos.authGSSClientInit(
"{0}@{1}".format(self.service, host))
except kerberos.GSSError as e:
log.error("generate_request_header(): authGSSClientInit() failed:")
log.exception(e)
return None
if result < 1:
log.error("generate_request_header(): authGSSClientInit() failed: "
"{0}".format(result))
return None
try:
result = kerberos.authGSSClientStep(self.context[host],
_negotiate_value(response))
except kerberos.GSSError as e:
log.error("generate_request_header(): authGSSClientStep() failed:")
log.exception(e)
return None
if result < 0:
log.error("generate_request_header(): authGSSClientStep() failed: "
"{0}".format(result))
return None
try:
gss_response = kerberos.authGSSClientResponse(self.context[host])
except kerberos.GSSError as e:
log.error("generate_request_header(): authGSSClientResponse() "
"failed:")
log.exception(e)
return None
return "Negotiate {0}".format(gss_response)
def authenticate_user(self, response, **kwargs):
"""Handles user authentication with gssapi/kerberos"""
auth_header = self.generate_request_header(response)
if auth_header is None:
# GSS Failure, return existing response
return response
log.debug("authenticate_user(): Authorization header: {0}".format(
auth_header))
response.request.headers['Authorization'] = auth_header
# Consume the content so we can reuse the connection for the next
# request.
response.content
response.raw.release_conn()
_r = response.connection.send(response.request, **kwargs)
_r.history.append(response)
log.debug("authenticate_user(): returning {0}".format(_r))
return _r
def handle_401(self, response, **kwargs):
"""Handles 401's, attempts to use gssapi/kerberos authentication"""
log.debug("handle_401(): Handling: 401")
if _negotiate_value(response) is not None:
_r = self.authenticate_user(response, **kwargs)
log.debug("handle_401(): returning {0}".format(_r))
return _r
else:
log.debug("handle_401(): Kerberos is not supported")
log.debug("handle_401(): returning {0}".format(response))
return response
def handle_other(self, response):
"""Handles all responses with the exception of 401s.
This is necessary so that we can authenticate responses if requested"""
log.debug("handle_other(): Handling: %d" % response.status_code)
if self.mutual_authentication in (REQUIRED, OPTIONAL):
is_http_error = response.status_code >= 400
if _negotiate_value(response) is not None:
log.debug("handle_other(): Authenticating the server")
if not self.authenticate_server(response):
# Mutual authentication failure when mutual auth is wanted,
# raise an exception so the user doesn't use an untrusted
# response.
log.error("handle_other(): Mutual authentication failed")
raise MutualAuthenticationError("Unable to authenticate "
"{0}".format(response))
# Authentication successful
log.debug("handle_other(): returning {0}".format(response))
return response
elif is_http_error or self.mutual_authentication == OPTIONAL:
log.error("handle_other(): Mutual authentication unavailable "
"on {0} response".format(response.status_code))
if self.mutual_authentication == REQUIRED:
return SanitizedResponse(response)
else:
return response
else:
# Unable to attempt mutual authentication when mutual auth is
# required, raise an exception so the user doesn't use an
# untrusted response.
log.error("handle_other(): Mutual authentication failed")
raise MutualAuthenticationError("Unable to authenticate "
"{0}".format(response))
else:
log.debug("handle_other(): returning {0}".format(response))
return response
def authenticate_server(self, response):
"""
Uses GSSAPI to authenticate the server.
Returns True on success, False on failure.
"""
log.debug("authenticate_server(): Authenticate header: {0}".format(
_negotiate_value(response)))
host = urlparse(response.url).hostname
try:
result = kerberos.authGSSClientStep(self.context[host],
_negotiate_value(response))
except kerberos.GSSError as e:
log.error("authenticate_server(): authGSSClientStep() failed:")
log.exception(e)
return False
if result < 1:
log.error("auhenticate_server(): authGSSClientStep() failed: "
"{0}".format(result))
return False
log.debug("authenticate_server(): returning {0}".format(response))
return True
def handle_response(self, response, **kwargs):
"""Takes the given response and tries kerberos-auth, as needed."""
if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
response.request.body.seek(self.pos)
if response.status_code == 401:
_r = self.handle_401(response, **kwargs)
log.debug("handle_response(): returning {0}".format(_r))
return _r
else:
_r = self.handle_other(response)
log.debug("handle_response(): returning {0}".format(_r))
return _r
def deregister(self, response):
"""Deregisters the response handler"""
response.request.deregister_hook('response', self.handle_response)
def __call__(self, request):
request.register_hook('response', self.handle_response)
try:
self.pos = request.body.tell()
except AttributeError:
pass
return request
|
GHSA-wh37-37xw-54hr
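The _negotiate_value helper above caches its compiled pattern as an attribute on the function object, so the regex is only compiled the first time the function runs. A minimal stand-alone sketch of the same token extraction, compiling once at module scope instead; the function name and module-level constant are invented for illustration and are not part of requests-kerberos:
import re
# compile once at import time instead of caching on the function object
_NEGOTIATE_RE = re.compile(r'(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I)
def extract_negotiate_token(www_authenticate_header):
    """Return the Negotiate token from a WWW-Authenticate header value, or None."""
    if not www_authenticate_header:
        return None
    match = _NEGOTIATE_RE.search(www_authenticate_header)
    return match.group(1) if match else None
# extract_negotiate_token('Negotiate YIICzg...') -> 'YIICzg...'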
|
test_requests_kerberos.py
|
@@ -402,10 +402,14 @@ def test_handle_response_401(self):
response.connection = connection
response._content = ""
response.raw = raw
+
auth = requests_kerberos.HTTPKerberosAuth()
+ auth.handle_other = Mock(return_value=response_ok)
+
r = auth.handle_response(response)
self.assertTrue(response in r.history)
+ auth.handle_other.assert_called_with(response_ok)
self.assertEqual(r, response_ok)
self.assertEqual(request.headers['Authorization'], 'Negotiate GSSRESPONSE')
connection.send.assert_called_with(request)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for requests_kerberos."""
from mock import Mock, patch
import requests
import kerberos
import requests_kerberos
import unittest
# kerberos.authGSSClientInit() is called with the service name (HTTP@FQDN) and
# returns 1 and a kerberos context object on success. Returns -1 on failure.
clientInit_complete = Mock(return_value=(1, "CTX"))
clientInit_error = Mock(return_value=(-1, "CTX"))
# kerberos.authGSSClientStep() is called with the kerberos context object
# returned by authGSSClientInit and the negotiate auth token provided in the
# http response's www-authenticate header. It returns 0 or 1 on success. 0
# Indicates that authentication is progressing but not complete.
clientStep_complete = Mock(return_value=1)
clientStep_continue = Mock(return_value=0)
clientStep_error = Mock(return_value=-1)
clientStep_exception = Mock(side_effect=kerberos.GSSError)
# kerberos.authGSSClientResponse() is called with the kerberos context which
# was initially returned by authGSSClientInit and had been mutated by a call by
# authGSSClientStep. It returns a string.
clientResponse = Mock(return_value="GSSRESPONSE")
# Note: we're not using the @mock.patch decorator:
# > My only word of warning is that in the past, the patch decorator hides
# > tests when using the standard unittest library.
# > -- sigmavirus24 in https://github.com/requests/requests-kerberos/issues/1
class KerberosTestCase(unittest.TestCase):
def setUp(self):
"""Setup."""
clientInit_complete.reset_mock()
clientInit_error.reset_mock()
clientStep_complete.reset_mock()
clientStep_continue.reset_mock()
clientStep_error.reset_mock()
clientStep_exception.reset_mock()
clientResponse.reset_mock()
def tearDown(self):
"""Teardown."""
pass
def test_negotate_value_extraction(self):
response = requests.Response()
response.headers = {'www-authenticate': 'negotiate token'}
self.assertEqual(
requests_kerberos.kerberos_._negotiate_value(response),
'token'
)
def test_negotate_value_extraction_none(self):
response = requests.Response()
response.headers = {}
self.assertTrue(
requests_kerberos.kerberos_._negotiate_value(response) is None
)
def test_generate_request_header(self):
with patch.multiple('kerberos',
authGSSClientInit=clientInit_complete,
authGSSClientResponse=clientResponse,
authGSSClientStep=clientStep_continue):
response = requests.Response()
response.url = "http://www.example.org/"
response.headers = {'www-authenticate': 'negotiate token'}
auth = requests_kerberos.HTTPKerberosAuth()
self.assertEqual(
auth.generate_request_header(response),
"Negotiate GSSRESPONSE"
)
clientInit_complete.assert_called_with("[email protected]")
clientStep_continue.assert_called_with("CTX", "token")
clientResponse.assert_called_with("CTX")
def test_generate_request_header_init_error(self):
with patch.multiple('kerberos',
authGSSClientInit=clientInit_error,
authGSSClientResponse=clientResponse,
authGSSClientStep=clientStep_continue):
response = requests.Response()
response.url = "http://www.example.org/"
response.headers = {'www-authenticate': 'negotiate token'}
auth = requests_kerberos.HTTPKerberosAuth()
self.assertEqual(
auth.generate_request_header(response),
None
)
clientInit_error.assert_called_with("[email protected]")
self.assertFalse(clientStep_continue.called)
self.assertFalse(clientResponse.called)
def test_generate_request_header_step_error(self):
with patch.multiple('kerberos',
authGSSClientInit=clientInit_complete,
authGSSClientResponse=clientResponse,
authGSSClientStep=clientStep_error):
response = requests.Response()
response.url = "http://www.example.org/"
response.headers = {'www-authenticate': 'negotiate token'}
auth = requests_kerberos.HTTPKerberosAuth()
self.assertEqual(
auth.generate_request_header(response),
None
)
clientInit_complete.assert_called_with("[email protected]")
clientStep_error.assert_called_with("CTX", "token")
self.assertFalse(clientResponse.called)
def test_authenticate_user(self):
with patch.multiple('kerberos',
authGSSClientInit=clientInit_complete,
authGSSClientResponse=clientResponse,
authGSSClientStep=clientStep_continue):
response_ok = requests.Response()
response_ok.url = "http://www.example.org/"
response_ok.status_code = 200
response_ok.headers = {'www-authenticate': 'negotiate servertoken'}
connection = Mock()
connection.send = Mock(return_value=response_ok)
raw = Mock()
raw.release_conn = Mock(return_value=None)
request = requests.Request()
response = requests.Response()
response.request = request
response.url = "http://www.example.org/"
response.headers = {'www-authenticate': 'negotiate token'}
response.status_code = 401
response.connection = connection
response._content = ""
response.raw = raw
auth = requests_kerberos.HTTPKerberosAuth()
r = auth.authenticate_user(response)
self.assertTrue(response in r.history)
self.assertEqual(r, response_ok)
self.assertEqual(request.headers['Authorization'], 'Negotiate GSSRESPONSE')
connection.send.assert_called_with(request)
raw.release_conn.assert_called_with()
clientInit_complete.assert_called_with("[email protected]")
clientStep_continue.assert_called_with("CTX", "token")
clientResponse.assert_called_with("CTX")
def test_handle_401(self):
with patch.multiple('kerberos',
authGSSClientInit=clientInit_complete,
authGSSClientResponse=clientResponse,
authGSSClientStep=clientStep_continue):
response_ok = requests.Response()
response_ok.url = "http://www.example.org/"
response_ok.status_code = 200
response_ok.headers = {'www-authenticate': 'negotiate servertoken'}
connection = Mock()
connection.send = Mock(return_value=response_ok)
raw = Mock()
raw.release_conn = Mock(return_value=None)
request = requests.Request()
response = requests.Response()
response.request = request
response.url = "http://www.example.org/"
response.headers = {'www-authenticate': 'negotiate token'}
response.status_code = 401
response.connection = connection
response._content = ""
response.raw = raw
auth = requests_kerberos.HTTPKerberosAuth()
r = auth.handle_401(response)
self.assertTrue(response in r.history)
self.assertEqual(r, response_ok)
self.assertEqual(request.headers['Authorization'], 'Negotiate GSSRESPONSE')
connection.send.assert_called_with(request)
raw.release_conn.assert_called_with()
clientInit_complete.assert_called_with("[email protected]")
clientStep_continue.assert_called_with("CTX", "token")
clientResponse.assert_called_with("CTX")
def test_authenticate_server(self):
with patch.multiple('kerberos', authGSSClientStep=clientStep_complete):
response_ok = requests.Response()
response_ok.url = "http://www.example.org/"
response_ok.status_code = 200
response_ok.headers = {'www-authenticate': 'negotiate servertoken',
'authorization': 'Negotiate GSSRESPONSE'
}
auth = requests_kerberos.HTTPKerberosAuth()
auth.context = {"www.example.org": "CTX"}
result = auth.authenticate_server(response_ok)
self.assertTrue(result)
clientStep_complete.assert_called_with("CTX", "servertoken")
def test_handle_other(self):
with patch('kerberos.authGSSClientStep', clientStep_complete):
response_ok = requests.Response()
response_ok.url = "http://www.example.org/"
response_ok.status_code = 200
response_ok.headers = {'www-authenticate': 'negotiate servertoken',
'authorization': 'Negotiate GSSRESPONSE'
}
auth = requests_kerberos.HTTPKerberosAuth()
auth.context = {"www.example.org": "CTX"}
r = auth.handle_other(response_ok)
self.assertEqual(r, response_ok)
clientStep_complete.assert_called_with("CTX", "servertoken")
def test_handle_response_200(self):
with patch('kerberos.authGSSClientStep', clientStep_complete):
response_ok = requests.Response()
response_ok.url = "http://www.example.org/"
response_ok.status_code = 200
response_ok.headers = {'www-authenticate': 'negotiate servertoken',
'authorization': 'Negotiate GSSRESPONSE'
}
auth = requests_kerberos.HTTPKerberosAuth()
auth.context = {"www.example.org": "CTX"}
r = auth.handle_response(response_ok)
self.assertEqual(r, response_ok)
clientStep_complete.assert_called_with("CTX", "servertoken")
def test_handle_response_200_mutual_auth_required_failure(self):
with patch('kerberos.authGSSClientStep', clientStep_error):
response_ok = requests.Response()
response_ok.url = "http://www.example.org/"
response_ok.status_code = 200
response_ok.headers = {}
auth = requests_kerberos.HTTPKerberosAuth()
auth.context = {"www.example.org": "CTX"}
self.assertRaises(requests_kerberos.MutualAuthenticationError,
auth.handle_response,
response_ok)
self.assertFalse(clientStep_error.called)
def test_handle_response_200_mutual_auth_required_failure_2(self):
with patch('kerberos.authGSSClientStep', clientStep_exception):
response_ok = requests.Response()
response_ok.url = "http://www.example.org/"
response_ok.status_code = 200
response_ok.headers = {'www-authenticate': 'negotiate servertoken',
'authorization': 'Negotiate GSSRESPONSE'
}
auth = requests_kerberos.HTTPKerberosAuth()
auth.context = {"www.example.org": "CTX"}
self.assertRaises(requests_kerberos.MutualAuthenticationError,
auth.handle_response,
response_ok)
clientStep_exception.assert_called_with("CTX", "servertoken")
def test_handle_response_200_mutual_auth_optional_hard_failure(self):
with patch('kerberos.authGSSClientStep', clientStep_error):
response_ok = requests.Response()
response_ok.url = "http://www.example.org/"
response_ok.status_code = 200
response_ok.headers = {'www-authenticate': 'negotiate servertoken',
'authorization': 'Negotiate GSSRESPONSE'
}
auth = requests_kerberos.HTTPKerberosAuth(requests_kerberos.OPTIONAL)
auth.context = {"www.example.org": "CTX"}
self.assertRaises(requests_kerberos.MutualAuthenticationError,
auth.handle_response,
response_ok)
clientStep_error.assert_called_with("CTX", "servertoken")
def test_handle_response_200_mutual_auth_optional_soft_failure(self):
with patch('kerberos.authGSSClientStep', clientStep_error):
response_ok = requests.Response()
response_ok.url = "http://www.example.org/"
response_ok.status_code = 200
auth = requests_kerberos.HTTPKerberosAuth(requests_kerberos.OPTIONAL)
auth.context = {"www.example.org": "CTX"}
r = auth.handle_response(response_ok)
self.assertEqual(r, response_ok)
self.assertFalse(clientStep_error.called)
def test_handle_response_500_mutual_auth_required_failure(self):
with patch('kerberos.authGSSClientStep', clientStep_error):
response_500 = requests.Response()
response_500.url = "http://www.example.org/"
response_500.status_code = 500
response_500.headers = {}
response_500.request = "REQUEST"
response_500.connection = "CONNECTION"
response_500._content = "CONTENT"
response_500.encoding = "ENCODING"
response_500.raw = "RAW"
response_500.cookies = "COOKIES"
auth = requests_kerberos.HTTPKerberosAuth()
auth.context = {"www.example.org": "CTX"}
r = auth.handle_response(response_500)
self.assertNotEqual(r, response_500)
self.assertNotEqual(r.headers, response_500.headers)
self.assertEqual(r.status_code, response_500.status_code)
self.assertEqual(r.encoding, response_500.encoding)
self.assertEqual(r.raw, response_500.raw)
self.assertEqual(r.url, response_500.url)
self.assertEqual(r.reason, response_500.reason)
self.assertEqual(r.connection, response_500.connection)
self.assertEqual(r.content, b'')
self.assertNotEqual(r.cookies, response_500.cookies)
self.assertFalse(clientStep_error.called)
def test_handle_response_500_mutual_auth_optional_failure(self):
with patch('kerberos.authGSSClientStep', clientStep_error):
response_500 = requests.Response()
response_500.url = "http://www.example.org/"
response_500.status_code = 500
response_500.headers = {}
response_500.request = "REQUEST"
response_500.connection = "CONNECTION"
response_500._content = "CONTENT"
response_500.encoding = "ENCODING"
response_500.raw = "RAW"
response_500.cookies = "COOKIES"
auth = requests_kerberos.HTTPKerberosAuth(requests_kerberos.OPTIONAL)
auth.context = {"www.example.org": "CTX"}
r = auth.handle_response(response_500)
self.assertEqual(r, response_500)
self.assertFalse(clientStep_error.called)
def test_handle_response_401(self):
with patch.multiple('kerberos',
authGSSClientInit=clientInit_complete,
authGSSClientResponse=clientResponse,
authGSSClientStep=clientStep_continue):
response_ok = requests.Response()
response_ok.url = "http://www.example.org/"
response_ok.status_code = 200
response_ok.headers = {'www-authenticate': 'negotiate servertoken'}
connection = Mock()
connection.send = Mock(return_value=response_ok)
raw = Mock()
raw.release_conn = Mock(return_value=None)
request = requests.Request()
response = requests.Response()
response.request = request
response.url = "http://www.example.org/"
response.headers = {'www-authenticate': 'negotiate token'}
response.status_code = 401
response.connection = connection
response._content = ""
response.raw = raw
auth = requests_kerberos.HTTPKerberosAuth()
r = auth.handle_response(response)
self.assertTrue(response in r.history)
self.assertEqual(r, response_ok)
self.assertEqual(request.headers['Authorization'], 'Negotiate GSSRESPONSE')
connection.send.assert_called_with(request)
raw.release_conn.assert_called_with()
clientInit_complete.assert_called_with("[email protected]")
clientStep_continue.assert_called_with("CTX", "token")
clientResponse.assert_called_with("CTX")
def test_generate_request_header_custom_service(self):
with patch.multiple('kerberos',
authGSSClientInit=clientInit_error,
authGSSClientResponse=clientResponse,
authGSSClientStep=clientStep_continue):
response = requests.Response()
response.url = "http://www.example.org/"
response.headers = {'www-authenticate': 'negotiate token'}
auth = requests_kerberos.HTTPKerberosAuth(service="barfoo")
auth.generate_request_header(response),
clientInit_error.assert_called_with("[email protected]")
if __name__ == '__main__':
unittest.main()
|
GHSA-wh37-37xw-54hr
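The test change above stubs handle_other with a Mock and asserts it receives the follow-up 200 response, i.e. the 401 path must now route the final response through the mutual-authentication check instead of returning it unverified. A minimal, self-contained illustration of that stub-and-assert pattern; the class and names here are hypothetical, not the requests-kerberos API:
from unittest.mock import Mock
class Handler:
    def handle_other(self, response):
        return response
    def handle_401(self, response):
        follow_up = {"status": 200}          # stand-in for the re-sent request's response
        return self.handle_other(follow_up)  # always re-check the final response
handler = Handler()
handler.handle_other = Mock(return_value={"status": 200})  # stub the collaborator
result = handler.handle_401({"status": 401})
handler.handle_other.assert_called_with({"status": 200})   # verify the hand-off happened
assert result == {"status": 200}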
|
plugins/Math/plugin.py
|
@@ -44,6 +44,7 @@
_ = PluginInternationalization('Math')
from .local import convertcore
+from .evaluator import safe_eval, InvalidNode, SAFE_ENV
baseArg = ('int', 'base', lambda i: i <= 36)
@@ -97,36 +98,6 @@ def _convertBaseToBase(self, number, toBase, fromBase):
return str(number)
return self._convertDecimalToBase(number, toBase)
- _mathEnv = {'__builtins__': types.ModuleType('__builtins__'), 'i': 1j}
- _mathEnv.update(math.__dict__)
- _mathEnv.update(cmath.__dict__)
- def _sqrt(x):
- if isinstance(x, complex) or x < 0:
- return cmath.sqrt(x)
- else:
- return math.sqrt(x)
- def _cbrt(x):
- return math.pow(x, 1.0/3)
- def _factorial(x):
- if x<=10000:
- return float(math.factorial(x))
- else:
- raise Exception('factorial argument too large')
- _mathEnv['sqrt'] = _sqrt
- _mathEnv['cbrt'] = _cbrt
- _mathEnv['abs'] = abs
- _mathEnv['max'] = max
- _mathEnv['min'] = min
- _mathEnv['round'] = lambda x, y=0: round(x, int(y))
- _mathSafeEnv = dict([(x,y) for x,y in _mathEnv.items()])
- _mathSafeEnv['factorial'] = _factorial
- _mathRe = re.compile(r'((?:(?<![A-Fa-f\d)])-)?'
- r'(?:0x[A-Fa-f\d]+|'
- r'0[0-7]+|'
- r'\d+\.\d+|'
- r'\.\d+|'
- r'\d+\.|'
- r'\d+))')
def _floatToString(self, x):
if -1e-10 < x < 1e-10:
return '0'
@@ -157,17 +128,6 @@ def _complexToString(self, x):
else:
return '%s%s' % (realS, imagS)
- _calc_match_forbidden_chars = re.compile('[_\[\]]')
- _calc_remover = utils.str.MultipleRemover('_[] \t')
- ###
- # So this is how the 'calc' command works:
- # First, we make a nice little safe environment for evaluation; basically,
- # the names in the 'math' and 'cmath' modules. Then, we remove the ability
- # of a random user to get ints evaluated: this means we have to turn all
- # int literals (even octal numbers and hexadecimal numbers) into floats.
- # Then we delete all square brackets, underscores, and whitespace, so no
- # one can do list comprehensions or call __...__ functions.
- ###
@internationalizeDocstring
def calc(self, irc, msg, args, text):
"""<math expression>
@@ -178,57 +138,17 @@ def calc(self, irc, msg, args, text):
crash to the bot with something like '10**10**10**10'. One consequence
is that large values such as '10**24' might not be exact.
"""
- try:
- text = str(text)
- except UnicodeEncodeError:
- irc.error(_("There's no reason you should have fancy non-ASCII "
- "characters in your mathematical expression. "
- "Please remove them."))
- return
- if self._calc_match_forbidden_chars.match(text):
- # Note: this is important to keep this to forbid usage of
- # __builtins__
- irc.error(_('There\'s really no reason why you should have '
- 'underscores or brackets in your mathematical '
- 'expression. Please remove them.'))
- return
- text = self._calc_remover(text)
- if 'lambda' in text:
- irc.error(_('You can\'t use lambda in this command.'))
- return
- text = text.lower()
- def handleMatch(m):
- s = m.group(1)
- if s.startswith('0x'):
- i = int(s, 16)
- elif s.startswith('0') and '.' not in s:
- try:
- i = int(s, 8)
- except ValueError:
- i = int(s)
- else:
- i = float(s)
- x = complex(i)
- if x.imag == 0:
- x = x.real
- # Need to use string-formatting here instead of str() because
- # use of str() on large numbers loses information:
- # str(float(33333333333333)) => '3.33333333333e+13'
- # float('3.33333333333e+13') => 33333333333300.0
- return '%.16f' % x
- return str(x)
- text = self._mathRe.sub(handleMatch, text)
try:
self.log.info('evaluating %q from %s', text, msg.prefix)
- x = complex(eval(text, self._mathSafeEnv, self._mathSafeEnv))
+ x = complex(safe_eval(text, allow_ints=False))
irc.reply(self._complexToString(x))
except OverflowError:
maxFloat = math.ldexp(0.9999999999999999, 1024)
irc.error(_('The answer exceeded %s or so.') % maxFloat)
- except TypeError:
- irc.error(_('Something in there wasn\'t a valid number.'))
+ except InvalidNode as e:
+ irc.error(_('Invalid syntax: %s') % e.args[0])
except NameError as e:
- irc.error(_('%s is not a defined function.') % str(e).split()[1])
+ irc.error(_('%s is not a defined function.') % e.args[0])
except Exception as e:
irc.error(str(e))
calc = wrap(calc, ['text'])
@@ -241,28 +161,15 @@ def icalc(self, irc, msg, args, text):
math, and can thus cause the bot to suck up CPU. Hence it requires
the 'trusted' capability to use.
"""
- if self._calc_match_forbidden_chars.match(text):
- # Note: this is important to keep this to forbid usage of
- # __builtins__
- irc.error(_('There\'s really no reason why you should have '
- 'underscores or brackets in your mathematical '
- 'expression. Please remove them.'))
- return
- # This removes spaces, too, but we'll leave the removal of _[] for
- # safety's sake.
- text = self._calc_remover(text)
- if 'lambda' in text:
- irc.error(_('You can\'t use lambda in this command.'))
- return
- text = text.replace('lambda', '')
try:
self.log.info('evaluating %q from %s', text, msg.prefix)
- irc.reply(str(eval(text, self._mathEnv, self._mathEnv)))
+ x = safe_eval(text, allow_ints=True)
+ irc.reply(str(x))
except OverflowError:
maxFloat = math.ldexp(0.9999999999999999, 1024)
irc.error(_('The answer exceeded %s or so.') % maxFloat)
- except TypeError:
- irc.error(_('Something in there wasn\'t a valid number.'))
+ except InvalidNode as e:
+ irc.error(_('Invalid syntax: %s') % e.args[0])
except NameError as e:
irc.error(_('%s is not a defined function.') % str(e).split()[1])
except Exception as e:
@@ -286,8 +193,8 @@ def rpn(self, irc, msg, args):
x = abs(x)
stack.append(x)
except ValueError: # Not a float.
- if arg in self._mathSafeEnv:
- f = self._mathSafeEnv[arg]
+ if arg in SAFE_ENV:
+ f = SAFE_ENV[arg]
if callable(f):
called = False
arguments = []
@@ -310,7 +217,7 @@ def rpn(self, irc, msg, args):
arg1 = stack.pop()
s = '%s%s%s' % (arg1, arg, arg2)
try:
- stack.append(eval(s, self._mathSafeEnv, self._mathSafeEnv))
+ stack.append(safe_eval(s, allow_ints=False))
except SyntaxError:
irc.error(format(_('%q is not a defined function.'),
arg))
|
###
# Copyright (c) 2002-2004, Jeremiah Fincher
# Copyright (c) 2008-2009, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from __future__ import division
import re
import math
import cmath
import types
import string
import supybot.utils as utils
from supybot.commands import *
import supybot.utils.minisix as minisix
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Math')
from .local import convertcore
baseArg = ('int', 'base', lambda i: i <= 36)
class Math(callbacks.Plugin):
"""Provides commands to work with math, such as a calculator and
a unit converter."""
@internationalizeDocstring
def base(self, irc, msg, args, frm, to, number):
"""<fromBase> [<toBase>] <number>
Converts <number> from base <fromBase> to base <toBase>.
If <toBase> is left out, it converts to decimal.
"""
if not number:
number = str(to)
to = 10
try:
irc.reply(self._convertBaseToBase(number, to, frm))
except ValueError:
irc.error(_('Invalid <number> for base %s: %s') % (frm, number))
base = wrap(base, [('int', 'base', lambda i: 2 <= i <= 36),
optional(('int', 'base', lambda i: 2 <= i <= 36), 10),
additional('something')])
def _convertDecimalToBase(self, number, base):
"""Convert a decimal number to another base; returns a string."""
if number == 0:
return '0'
elif number < 0:
negative = True
number = -number
else:
negative = False
digits = []
while number != 0:
digit = number % base
if digit >= 10:
digit = string.ascii_uppercase[digit - 10]
else:
digit = str(digit)
digits.append(digit)
number = number // base
digits.reverse()
return '-'*negative + ''.join(digits)
def _convertBaseToBase(self, number, toBase, fromBase):
"""Convert a number from any base, 2 through 36, to any other
base, 2 through 36. Returns a string."""
number = minisix.long(str(number), fromBase)
if toBase == 10:
return str(number)
return self._convertDecimalToBase(number, toBase)
_mathEnv = {'__builtins__': types.ModuleType('__builtins__'), 'i': 1j}
_mathEnv.update(math.__dict__)
_mathEnv.update(cmath.__dict__)
def _sqrt(x):
if isinstance(x, complex) or x < 0:
return cmath.sqrt(x)
else:
return math.sqrt(x)
def _cbrt(x):
return math.pow(x, 1.0/3)
def _factorial(x):
if x<=10000:
return float(math.factorial(x))
else:
raise Exception('factorial argument too large')
_mathEnv['sqrt'] = _sqrt
_mathEnv['cbrt'] = _cbrt
_mathEnv['abs'] = abs
_mathEnv['max'] = max
_mathEnv['min'] = min
_mathEnv['round'] = lambda x, y=0: round(x, int(y))
_mathSafeEnv = dict([(x,y) for x,y in _mathEnv.items()])
_mathSafeEnv['factorial'] = _factorial
_mathRe = re.compile(r'((?:(?<![A-Fa-f\d)])-)?'
r'(?:0x[A-Fa-f\d]+|'
r'0[0-7]+|'
r'\d+\.\d+|'
r'\.\d+|'
r'\d+\.|'
r'\d+))')
def _floatToString(self, x):
if -1e-10 < x < 1e-10:
return '0'
elif -1e-10 < int(x) - x < 1e-10:
return str(int(x))
else:
return str(x)
def _complexToString(self, x):
realS = self._floatToString(x.real)
imagS = self._floatToString(x.imag)
if imagS == '0':
return realS
elif imagS == '1':
imagS = '+i'
elif imagS == '-1':
imagS = '-i'
elif x.imag < 0:
imagS = '%si' % imagS
else:
imagS = '+%si' % imagS
if realS == '0' and imagS == '0':
return '0'
elif realS == '0':
return imagS.lstrip('+')
elif imagS == '0':
return realS
else:
return '%s%s' % (realS, imagS)
_calc_match_forbidden_chars = re.compile('[_\[\]]')
_calc_remover = utils.str.MultipleRemover('_[] \t')
###
# So this is how the 'calc' command works:
# First, we make a nice little safe environment for evaluation; basically,
# the names in the 'math' and 'cmath' modules. Then, we remove the ability
# of a random user to get ints evaluated: this means we have to turn all
# int literals (even octal numbers and hexadecimal numbers) into floats.
# Then we delete all square brackets, underscores, and whitespace, so no
# one can do list comprehensions or call __...__ functions.
###
@internationalizeDocstring
def calc(self, irc, msg, args, text):
"""<math expression>
Returns the value of the evaluated <math expression>. The syntax is
Python syntax; the type of arithmetic is floating point. Floating
point arithmetic is used in order to prevent a user from being able to
crash to the bot with something like '10**10**10**10'. One consequence
is that large values such as '10**24' might not be exact.
"""
try:
text = str(text)
except UnicodeEncodeError:
irc.error(_("There's no reason you should have fancy non-ASCII "
"characters in your mathematical expression. "
"Please remove them."))
return
if self._calc_match_forbidden_chars.match(text):
# Note: this is important to keep this to forbid usage of
# __builtins__
irc.error(_('There\'s really no reason why you should have '
'underscores or brackets in your mathematical '
'expression. Please remove them.'))
return
text = self._calc_remover(text)
if 'lambda' in text:
irc.error(_('You can\'t use lambda in this command.'))
return
text = text.lower()
def handleMatch(m):
s = m.group(1)
if s.startswith('0x'):
i = int(s, 16)
elif s.startswith('0') and '.' not in s:
try:
i = int(s, 8)
except ValueError:
i = int(s)
else:
i = float(s)
x = complex(i)
if x.imag == 0:
x = x.real
# Need to use string-formatting here instead of str() because
# use of str() on large numbers loses information:
# str(float(33333333333333)) => '3.33333333333e+13'
# float('3.33333333333e+13') => 33333333333300.0
return '%.16f' % x
return str(x)
text = self._mathRe.sub(handleMatch, text)
try:
self.log.info('evaluating %q from %s', text, msg.prefix)
x = complex(eval(text, self._mathSafeEnv, self._mathSafeEnv))
irc.reply(self._complexToString(x))
except OverflowError:
maxFloat = math.ldexp(0.9999999999999999, 1024)
irc.error(_('The answer exceeded %s or so.') % maxFloat)
except TypeError:
irc.error(_('Something in there wasn\'t a valid number.'))
except NameError as e:
irc.error(_('%s is not a defined function.') % str(e).split()[1])
except Exception as e:
irc.error(str(e))
calc = wrap(calc, ['text'])
@internationalizeDocstring
def icalc(self, irc, msg, args, text):
"""<math expression>
This is the same as the calc command except that it allows integer
math, and can thus cause the bot to suck up CPU. Hence it requires
the 'trusted' capability to use.
"""
if self._calc_match_forbidden_chars.match(text):
# Note: this is important to keep this to forbid usage of
# __builtins__
irc.error(_('There\'s really no reason why you should have '
'underscores or brackets in your mathematical '
'expression. Please remove them.'))
return
# This removes spaces, too, but we'll leave the removal of _[] for
# safety's sake.
text = self._calc_remover(text)
if 'lambda' in text:
irc.error(_('You can\'t use lambda in this command.'))
return
text = text.replace('lambda', '')
try:
self.log.info('evaluating %q from %s', text, msg.prefix)
irc.reply(str(eval(text, self._mathEnv, self._mathEnv)))
except OverflowError:
maxFloat = math.ldexp(0.9999999999999999, 1024)
irc.error(_('The answer exceeded %s or so.') % maxFloat)
except TypeError:
irc.error(_('Something in there wasn\'t a valid number.'))
except NameError as e:
irc.error(_('%s is not a defined function.') % str(e).split()[1])
except Exception as e:
irc.error(utils.exnToString(e))
icalc = wrap(icalc, [('checkCapability', 'trusted'), 'text'])
_rpnEnv = {
'dup': lambda s: s.extend([s.pop()]*2),
'swap': lambda s: s.extend([s.pop(), s.pop()])
}
def rpn(self, irc, msg, args):
"""<rpn math expression>
Returns the value of an RPN expression.
"""
stack = []
for arg in args:
try:
x = complex(arg)
if x == abs(x):
x = abs(x)
stack.append(x)
except ValueError: # Not a float.
if arg in self._mathSafeEnv:
f = self._mathSafeEnv[arg]
if callable(f):
called = False
arguments = []
while not called and stack:
arguments.append(stack.pop())
try:
stack.append(f(*arguments))
called = True
except TypeError:
pass
if not called:
irc.error(_('Not enough arguments for %s') % arg)
return
else:
stack.append(f)
elif arg in self._rpnEnv:
self._rpnEnv[arg](stack)
else:
arg2 = stack.pop()
arg1 = stack.pop()
s = '%s%s%s' % (arg1, arg, arg2)
try:
stack.append(eval(s, self._mathSafeEnv, self._mathSafeEnv))
except SyntaxError:
irc.error(format(_('%q is not a defined function.'),
arg))
return
if len(stack) == 1:
irc.reply(str(self._complexToString(complex(stack[0]))))
else:
s = ', '.join(map(self._complexToString, list(map(complex, stack))))
irc.reply(_('Stack: [%s]') % s)
@internationalizeDocstring
def convert(self, irc, msg, args, number, unit1, unit2):
"""[<number>] <unit> to <other unit>
Converts from <unit> to <other unit>. If number isn't given, it
defaults to 1. For unit information, see 'units' command.
"""
try:
digits = len(str(number).split('.')[1])
except IndexError:
digits = 0
try:
newNum = convertcore.convert(number, unit1, unit2)
if isinstance(newNum, float):
zeros = 0
for char in str(newNum).split('.')[1]:
if char != '0':
break
zeros += 1
# Let's add one significant digit. Physicists would not like
# that, but common people usually do not give extra zeros...
# (for example, with '32 C to F', an extra digit would be
# expected).
newNum = round(newNum, digits + 1 + zeros)
newNum = self._floatToString(newNum)
irc.reply(str(newNum))
except convertcore.UnitDataError as ude:
irc.error(str(ude))
convert = wrap(convert, [optional('float', 1.0),'something','to','text'])
@internationalizeDocstring
def units(self, irc, msg, args, type):
""" [<type>]
With no arguments, returns a list of measurement types, which can be
passed as arguments. When called with a type as an argument, returns
the units of that type.
"""
irc.reply(convertcore.units(type))
units = wrap(units, [additional('text')])
Class = Math
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
PYSEC-2019-32
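The patch replaces the regex-and-eval pipeline in calc/icalc/rpn with an AST-walking evaluator imported from a new .evaluator module (safe_eval, InvalidNode, SAFE_ENV), which is not included in this record. A rough sketch of that general technique, assuming a whitelist of operators and names; tiny_safe_eval and its tables are invented for illustration and are not Limnoria's actual code:
import ast
import math
import operator
class InvalidNode(Exception):
    pass
_BIN_OPS = {ast.Add: operator.add, ast.Sub: operator.sub, ast.Mult: operator.mul,
            ast.Div: operator.truediv, ast.Pow: operator.pow}
_UNARY_OPS = {ast.USub: operator.neg, ast.UAdd: operator.pos}
_NAMES = {'pi': math.pi, 'e': math.e, 'sqrt': math.sqrt, 'abs': abs, 'round': round}
def tiny_safe_eval(text, allow_ints=False):
    """Evaluate a numeric expression by walking its AST; anything else is rejected."""
    def ev(node):
        if isinstance(node, ast.Expression):
            return ev(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            # like calc (allow_ints=False), coerce ints to floats to avoid huge exponents
            return node.value if allow_ints else float(node.value)
        if isinstance(node, ast.BinOp) and type(node.op) in _BIN_OPS:
            return _BIN_OPS[type(node.op)](ev(node.left), ev(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _UNARY_OPS:
            return _UNARY_OPS[type(node.op)](ev(node.operand))
        if isinstance(node, ast.Name):
            if node.id not in _NAMES:
                raise NameError(node.id)
            return _NAMES[node.id]
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
            return ev(node.func)(*[ev(arg) for arg in node.args])
        raise InvalidNode('Illegal construct %s' % type(node).__name__)
    return ev(ast.parse(text, mode='eval'))
# tiny_safe_eval('2 + sqrt(2)') -> 3.414...; tiny_safe_eval('__import__("os")') raises NameError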
|
plugins/Math/test.py
|
@@ -91,9 +91,6 @@ def testBase(self):
self.assertError('base 4 4')
self.assertError('base 10 12 A')
- print()
- print("If we have not fixed a bug with Math.base, the following ")
- print("tests will hang the test-suite.")
self.assertRegexp('base 2 10 [base 10 2 -12]', '-12')
self.assertRegexp('base 16 2 [base 2 16 -110101]', '-110101')
@@ -117,7 +114,10 @@ def testCalc(self):
self.assertError('calc factorial(20000)')
def testCalcNoNameError(self):
- self.assertNotRegexp('calc foobar(x)', 'NameError')
+ self.assertRegexp('calc foobar(x)', 'foobar is not a defined function')
+
+ def testCalcInvalidNode(self):
+ self.assertRegexp('calc {"foo": "bar"}', 'Illegal construct Dict')
def testCalcImaginary(self):
self.assertResponse('calc 3 + sqrt(-1)', '3+i')
|
###
# Copyright (c) 2002-2004, Jeremiah Fincher
# Copyright (c) 2008, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from __future__ import print_function
from supybot.test import *
class MathTestCase(PluginTestCase):
plugins = ('Math',)
def testBase(self):
self.assertNotRegexp('base 56 asdflkj', 'ValueError')
self.assertResponse('base 16 2 F', '1111')
self.assertResponse('base 2 16 1111', 'F')
self.assertResponse('base 20 BBBB', '92631')
self.assertResponse('base 10 20 92631', 'BBBB')
self.assertResponse('base 2 36 10', '2')
self.assertResponse('base 36 2 10', '100100')
self.assertResponse('base 2 1010101', '85')
self.assertResponse('base 2 2 11', '11')
self.assertResponse('base 12 0', '0')
self.assertResponse('base 36 2 0', '0')
self.assertNotError("base 36 " +\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ")
self.assertResponse("base 10 36 [base 36 " +\
"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"\
"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"\
"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"\
"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"\
"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"\
"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"\
"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz]",
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ")
self.assertResponse('base 2 10 [base 10 2 12]', '12')
self.assertResponse('base 16 2 [base 2 16 110101]', '110101')
self.assertResponse('base 10 8 [base 8 76532]', '76532')
self.assertResponse('base 10 36 [base 36 csalnwea]', 'CSALNWEA')
self.assertResponse('base 5 4 [base 4 5 212231]', '212231')
self.assertError('base 37 1')
self.assertError('base 1 1')
self.assertError('base 12 1 1')
self.assertError('base 1 12 1')
self.assertError('base 1.0 12 1')
self.assertError('base A 1')
self.assertError('base 4 4')
self.assertError('base 10 12 A')
print()
print("If we have not fixed a bug with Math.base, the following ")
print("tests will hang the test-suite.")
self.assertRegexp('base 2 10 [base 10 2 -12]', '-12')
self.assertRegexp('base 16 2 [base 2 16 -110101]', '-110101')
def testCalc(self):
self.assertResponse('calc 5*0.06', str(5*0.06))
self.assertResponse('calc 2.0-7.0', str(2-7))
self.assertResponse('calc e**(i*pi)+1', '0')
if minisix.PY3:
# Python 2 has bad handling of exponentiation of negative numbers
self.assertResponse('calc (-1)**.5', 'i')
self.assertRegexp('calc (-5)**.5', '2.236067977[0-9]+i')
self.assertRegexp('calc -((-5)**.5)', '-2.236067977[0-9]+i')
self.assertNotRegexp('calc [9, 5] + [9, 10]', 'TypeError')
self.assertError('calc [9, 5] + [9, 10]')
self.assertNotError('calc degrees(2)')
self.assertNotError('calc (2 * 3) - 2*(3*4)')
self.assertNotError('calc (3) - 2*(3*4)')
self.assertNotError('calc (1600 * 1200) - 2*(1024*1280)')
self.assertNotError('calc 3-2*4')
self.assertNotError('calc (1600 * 1200)-2*(1024*1280)')
self.assertError('calc factorial(20000)')
def testCalcNoNameError(self):
self.assertNotRegexp('calc foobar(x)', 'NameError')
def testCalcImaginary(self):
self.assertResponse('calc 3 + sqrt(-1)', '3+i')
def testCalcFloorWorksWithSqrt(self):
self.assertNotError('calc floor(sqrt(5))')
def testCaseInsensitive(self):
self.assertNotError('calc PI**PI')
def testCalcMaxMin(self):
self.assertResponse('calc max(1,2)', '2')
self.assertResponse('calc min(1,2)', '1')
def testCalcStrFloat(self):
self.assertResponse('calc 3+33333333333333', '33333333333336')
def testICalc(self):
self.assertResponse('icalc 1^1', '0')
self.assertResponse('icalc 10**24', '1' + '0'*24)
self.assertRegexp('icalc 49/6', '8.16')
self.assertNotError('icalc factorial(20000)')
def testRpn(self):
self.assertResponse('rpn 5 2 +', '7')
self.assertResponse('rpn 1 2 3 +', 'Stack: [1, 5]')
self.assertResponse('rpn 1 dup', 'Stack: [1, 1]')
self.assertResponse('rpn 2 3 4 + -', str(2-7))
self.assertNotError('rpn 2 degrees')
def testRpnSwap(self):
self.assertResponse('rpn 1 2 swap', 'Stack: [2, 1]')
def testRpmNoSyntaxError(self):
self.assertNotRegexp('rpn 2 3 foobar', 'SyntaxError')
def testConvert(self):
self.assertResponse('convert 1 m to cm', '100')
self.assertResponse('convert m to cm', '100')
self.assertResponse('convert 3 metres to km', '0.003')
self.assertResponse('convert 32 F to C', '0')
self.assertResponse('convert 32 C to F', '89.6')
self.assertResponse('convert [calc 2*pi] rad to degree', '360')
self.assertResponse('convert amu to atomic mass unit',
'1')
self.assertResponse('convert [calc 2*pi] rad to circle', '1')
self.assertError('convert 1 meatball to bananas')
self.assertError('convert 1 gram to meatballs')
self.assertError('convert 1 mol to grams')
self.assertError('convert 1 m to kpa')
def testConvertSingularPlural(self):
self.assertResponse('convert [calc 2*pi] rads to degrees', '360')
self.assertResponse('convert 1 carat to grams', '0.2')
self.assertResponse('convert 10 lbs to oz', '160')
self.assertResponse('convert mA to amps', '0.001')
def testConvertCaseSensitivity(self):
self.assertError('convert MA to amps')
self.assertError('convert M to amps')
self.assertError('convert Radians to rev')
def testUnits(self):
self.assertNotError('units')
self.assertNotError('units mass')
self.assertNotError('units flux density')
def testAbs(self):
self.assertResponse('calc abs(2)', '2')
self.assertResponse('calc abs(-2)', '2')
self.assertResponse('calc abs(2.0)', '2')
self.assertResponse('calc abs(-2.0)', '2')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
PYSEC-2019-32
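The new testCalcInvalidNode above expects the reply 'Illegal construct Dict' for 'calc {"foo": "bar"}'. The 'Dict' part matches the class name of the AST node produced for a dict literal, as a quick check shows; this snippet is illustrative only and not part of the test suite:
import ast
tree = ast.parse('{"foo": "bar"}', mode='eval')
print(type(tree.body).__name__)  # -> 'Dict', hence an error like "Illegal construct Dict"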
|
PyPDF2/_cmap.py
|
@@ -5,7 +5,7 @@
from ._codecs import adobe_glyphs, charset_encoding
from ._utils import logger_warning
from .errors import PdfReadWarning
-from .generic import DecodedStreamObject, DictionaryObject
+from .generic import DecodedStreamObject, DictionaryObject, NameObject
# code freely inspired from @twiggy ; see #711
@@ -124,6 +124,7 @@ def parse_encoding(
enc: Union(str, DictionaryObject) = ft["/Encoding"].get_object() # type: ignore
if isinstance(enc, str):
try:
+ enc = NameObject.unnumber(enc) # for #xx decoding
if enc in charset_encoding:
encoding = charset_encoding[enc].copy()
elif enc in _predefined_cmap:
|
import warnings
from binascii import unhexlify
from typing import Any, Dict, List, Tuple, Union, cast
from ._codecs import adobe_glyphs, charset_encoding
from ._utils import logger_warning
from .errors import PdfReadWarning
from .generic import DecodedStreamObject, DictionaryObject
# code freely inspired from @twiggy ; see #711
def build_char_map(
font_name: str, space_width: float, obj: DictionaryObject
) -> Tuple[
str, float, Union[str, Dict[int, str]], Dict
]: # font_type,space_width /2, encoding, cmap
ft: DictionaryObject = obj["/Resources"]["/Font"][font_name] # type: ignore
font_type: str = cast(str, ft["/Subtype"])
space_code = 32
encoding, space_code = parse_encoding(ft, space_code)
map_dict, space_code, int_entry = parse_to_unicode(ft, space_code)
# encoding can be either a string used to decode (on 1, 2 or a variable number of bytes) or a char table (1 byte only here)
# if it is an empty string, the encoding field is not present and we have to select a suitable encoding from the cmap input data
if encoding == "":
if -1 not in map_dict or map_dict[-1] == 1:
# I have not been able to find any rule for no /Encoding nor /ToUnicode
# One example shows /Symbol,bold I consider 8 bits encoding default
encoding = "charmap"
else:
encoding = "utf-16-be"
# apply rule from PDF ref 1.7 §5.9.1, 1st bullet: if the cmap is not empty, the encoding should be discarded (here transformed into identity for those characters)
# if encoding is a str it is expected to be an identity translation
elif isinstance(encoding, dict):
for x in int_entry:
if x <= 255:
encoding[x] = chr(x)
try:
# override space_width with new params
space_width = _default_fonts_space_width[cast(str, ft["/BaseFont"])]
except Exception:
pass
# I consider the space_code is available on one byte
if isinstance(space_code, str):
try: # one byte
sp = space_code.encode("charmap")[0]
except Exception:
sp = space_code.encode("utf-16-be")
sp = sp[0] + 256 * sp[1]
else:
sp = space_code
sp_width = compute_space_width(ft, sp, space_width)
return (
font_type,
float(sp_width / 2),
encoding,
# https://github.com/python/mypy/issues/4374
map_dict,
)
# used when missing data, e.g. font def missing
unknown_char_map: Tuple[str, float, Union[str, Dict[int, str]], Dict[Any, Any]] = (
"Unknown",
9999,
dict(zip(range(256), ["�"] * 256)),
{},
)
_predefined_cmap: Dict[str, str] = {
"/Identity-H": "utf-16-be",
"/Identity-V": "utf-16-be",
"/GB-EUC-H": "gbk", # TBC
"/GB-EUC-V": "gbk", # TBC
"/GBpc-EUC-H": "gb2312", # TBC
"/GBpc-EUC-V": "gb2312", # TBC
}
# manually extracted from http://mirrors.ctan.org/fonts/adobe/afm/Adobe-Core35_AFMs-229.tar.gz
_default_fonts_space_width: Dict[str, int] = {
"/Courrier": 600,
"/Courier-Bold": 600,
"/Courier-BoldOblique": 600,
"/Courier-Oblique": 600,
"/Helvetica": 278,
"/Helvetica-Bold": 278,
"/Helvetica-BoldOblique": 278,
"/Helvetica-Oblique": 278,
"/Helvetica-Narrow": 228,
"/Helvetica-NarrowBold": 228,
"/Helvetica-NarrowBoldOblique": 228,
"/Helvetica-NarrowOblique": 228,
"/Times-Roman": 250,
"/Times-Bold": 250,
"/Times-BoldItalic": 250,
"/Times-Italic": 250,
"/Symbol": 250,
"/ZapfDingbats": 278,
}
def parse_encoding(
ft: DictionaryObject, space_code: int
) -> Tuple[Union[str, Dict[int, str]], int]:
encoding: Union[str, List[str], Dict[int, str]] = []
if "/Encoding" not in ft:
try:
if "/BaseFont" in ft and cast(str, ft["/BaseFont"]) in charset_encoding:
encoding = dict(
zip(range(256), charset_encoding[cast(str, ft["/BaseFont"])])
)
else:
encoding = "charmap"
return encoding, _default_fonts_space_width[cast(str, ft["/BaseFont"])]
except Exception:
if cast(str, ft["/Subtype"]) == "/Type1":
return "charmap", space_code
else:
return "", space_code
enc: Union(str, DictionaryObject) = ft["/Encoding"].get_object() # type: ignore
if isinstance(enc, str):
try:
if enc in charset_encoding:
encoding = charset_encoding[enc].copy()
elif enc in _predefined_cmap:
encoding = _predefined_cmap[enc]
else:
raise Exception("not found")
except Exception:
warnings.warn(
f"Advanced encoding {enc} not implemented yet",
PdfReadWarning,
)
encoding = enc
elif isinstance(enc, DictionaryObject) and "/BaseEncoding" in enc:
try:
encoding = charset_encoding[cast(str, enc["/BaseEncoding"])].copy()
except Exception:
warnings.warn(
f"Advanced encoding {encoding} not implemented yet",
PdfReadWarning,
)
encoding = charset_encoding["/StandardCoding"].copy()
else:
encoding = charset_encoding["/StandardCoding"].copy()
if "/Differences" in enc:
x: int = 0
o: Union[int, str]
for o in cast(DictionaryObject, cast(DictionaryObject, enc)["/Differences"]):
if isinstance(o, int):
x = o
else: # isinstance(o,str):
try:
encoding[x] = adobe_glyphs[o] # type: ignore
except Exception:
encoding[x] = o # type: ignore
if o == " ":
space_code = x
x += 1
if isinstance(encoding, list):
encoding = dict(zip(range(256), encoding))
return encoding, space_code
def parse_to_unicode(
ft: DictionaryObject, space_code: int
) -> Tuple[Dict[Any, Any], int, List[int]]:
# will store all translation code
# and map_dict[-1] we will have the number of bytes to convert
map_dict: Dict[Any, Any] = {}
# will provide the list of cmap keys as int to correct encoding
int_entry: List[int] = []
if "/ToUnicode" not in ft:
return {}, space_code, []
process_rg: bool = False
process_char: bool = False
multiline_rg: Union[
None, Tuple[int, int]
] = None # tuple = (current_char, remaining size) ; cf #1285 for example of file
cm = prepare_cm(ft)
for l in cm.split(b"\n"):
process_rg, process_char, multiline_rg = process_cm_line(
l.strip(b" "), process_rg, process_char, multiline_rg, map_dict, int_entry
)
for a, value in map_dict.items():
if value == " ":
space_code = a
return map_dict, space_code, int_entry
def prepare_cm(ft: DictionaryObject) -> bytes:
cm: bytes = cast(DecodedStreamObject, ft["/ToUnicode"]).get_data()
# we need to prepare cm beforehand because of missing line returns in PDFs printed to PDF from Word
cm = (
cm.strip()
.replace(b"beginbfchar", b"\nbeginbfchar\n")
.replace(b"endbfchar", b"\nendbfchar\n")
.replace(b"beginbfrange", b"\nbeginbfrange\n")
.replace(b"endbfrange", b"\nendbfrange\n")
.replace(b"<<", b"\n{\n") # text between << and >> not used but
.replace(b">>", b"\n}\n") # some solution to find it back
)
ll = cm.split(b"<")
for i in range(len(ll)):
j = ll[i].find(b">")
if j >= 0:
if j == 0:
# string is empty: stash a placeholder here (see below)
# see https://github.com/py-pdf/PyPDF2/issues/1111
content = b"."
else:
content = ll[i][:j].replace(b" ", b"")
ll[i] = content + b" " + ll[i][j + 1 :]
cm = (
(b" ".join(ll))
.replace(b"[", b" [ ")
.replace(b"]", b" ]\n ")
.replace(b"\r", b"\n")
)
return cm
def process_cm_line(
l: bytes,
process_rg: bool,
process_char: bool,
multiline_rg: Union[None, Tuple[int, int]],
map_dict: Dict[Any, Any],
int_entry: List[int],
) -> Tuple[bool, bool, Union[None, Tuple[int, int]]]:
if l in (b"", b" ") or l[0] == 37: # 37 = %
return process_rg, process_char, multiline_rg
if b"beginbfrange" in l:
process_rg = True
elif b"endbfrange" in l:
process_rg = False
elif b"beginbfchar" in l:
process_char = True
elif b"endbfchar" in l:
process_char = False
elif process_rg:
multiline_rg = parse_bfrange(l, map_dict, int_entry, multiline_rg)
elif process_char:
parse_bfchar(l, map_dict, int_entry)
return process_rg, process_char, multiline_rg
def parse_bfrange(
l: bytes,
map_dict: Dict[Any, Any],
int_entry: List[int],
multiline_rg: Union[None, Tuple[int, int]],
) -> Union[None, Tuple[int, int]]:
lst = [x for x in l.split(b" ") if x]
closure_found = False
nbi = len(lst[0])
map_dict[-1] = nbi // 2
fmt = b"%%0%dX" % nbi
if multiline_rg is not None:
a = multiline_rg[0] # a, b not in the current line
b = multiline_rg[1]
for sq in lst[1:]:
if sq == b"]":
closure_found = True
break
map_dict[
unhexlify(fmt % a).decode(
"charmap" if map_dict[-1] == 1 else "utf-16-be",
"surrogatepass",
)
] = unhexlify(sq).decode("utf-16-be", "surrogatepass")
int_entry.append(a)
a += 1
else:
a = int(lst[0], 16)
b = int(lst[1], 16)
if lst[2] == b"[":
for sq in lst[3:]:
if sq == b"]":
closure_found = True
break
map_dict[
unhexlify(fmt % a).decode(
"charmap" if map_dict[-1] == 1 else "utf-16-be",
"surrogatepass",
)
] = unhexlify(sq).decode("utf-16-be", "surrogatepass")
int_entry.append(a)
a += 1
else: # case without list
c = int(lst[2], 16)
fmt2 = b"%%0%dX" % max(4, len(lst[2]))
closure_found = True
while a <= b:
map_dict[
unhexlify(fmt % a).decode(
"charmap" if map_dict[-1] == 1 else "utf-16-be",
"surrogatepass",
)
] = unhexlify(fmt2 % c).decode("utf-16-be", "surrogatepass")
int_entry.append(a)
a += 1
c += 1
return None if closure_found else (a, b)
def parse_bfchar(l: bytes, map_dict: Dict[Any, Any], int_entry: List[int]) -> None:
lst = [x for x in l.split(b" ") if x]
map_dict[-1] = len(lst[0]) // 2
while len(lst) > 1:
map_to = ""
# placeholder (see above) means empty string
if lst[1] != b".":
map_to = unhexlify(lst[1]).decode(
"charmap" if len(lst[1]) < 4 else "utf-16-be", "surrogatepass"
) # join is here as some cases where the code was split
map_dict[
unhexlify(lst[0]).decode(
"charmap" if map_dict[-1] == 1 else "utf-16-be", "surrogatepass"
)
] = map_to
int_entry.append(int(lst[0], 16))
lst = lst[2:]
def compute_space_width(
ft: DictionaryObject, space_code: int, space_width: float
) -> float:
sp_width: float = space_width * 2 # default value
w = []
w1 = {}
st: int = 0
if "/DescendantFonts" in ft: # ft["/Subtype"].startswith("/CIDFontType"):
ft1 = ft["/DescendantFonts"][0].get_object() # type: ignore
try:
w1[-1] = cast(float, ft1["/DW"])
except Exception:
w1[-1] = 1000.0
if "/W" in ft1:
w = list(ft1["/W"])
else:
w = []
while len(w) > 0:
st = w[0]
second = w[1]
if isinstance(second, int):
for x in range(st, second):
w1[x] = w[2]
w = w[3:]
elif isinstance(second, list):
for y in second:
w1[st] = y
st += 1
w = w[2:]
else:
logger_warning(
"unknown widths : \n" + (ft1["/W"]).__repr__(),
__name__,
)
break
try:
sp_width = w1[space_code]
except Exception:
sp_width = (
w1[-1] / 2.0
) # if using default we consider space will be only half size
elif "/Widths" in ft:
w = list(ft["/Widths"]) # type: ignore
try:
st = cast(int, ft["/FirstChar"])
en: int = cast(int, ft["/LastChar"])
if st > space_code or en < space_code:
raise Exception("Not in range")
if w[space_code - st] == 0:
raise Exception("null width")
sp_width = w[space_code - st]
except Exception:
if "/FontDescriptor" in ft and "/MissingWidth" in cast(
DictionaryObject, ft["/FontDescriptor"]
):
sp_width = ft["/FontDescriptor"]["/MissingWidth"] # type: ignore
else:
# will consider width of char as avg(width)/2
m = 0
cpt = 0
for x in w:
if x > 0:
m += x
cpt += 1
sp_width = m / max(1, cpt) / 2
return sp_width
|
GHSA-hm9v-vj3r-r55m
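The one-line fix runs the /Encoding name through NameObject.unnumber before the charset lookup, so names written with the #xx hex escapes allowed in PDF name objects (for example /WinAnsi#45ncoding) still match their decoded form (/WinAnsiEncoding). A stand-alone sketch of that #xx decoding; the helper name is invented here, PyPDF2's own implementation lives on NameObject:
import re
def decode_pdf_name(name: str) -> str:
    """Replace each #xx hex escape in a PDF name with the character it encodes."""
    return re.sub(r'#([0-9A-Fa-f]{2})',
                  lambda m: chr(int(m.group(1), 16)),
                  name)
# decode_pdf_name('/WinAnsi#45ncoding') -> '/WinAnsiEncoding'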
|
PyPDF2/_reader.py
|
@@ -1139,6 +1139,7 @@ def get_object(self, indirect_reference: IndirectObject) -> Optional[PdfObject]:
buf = bytes(self.stream.getbuffer()) # type: ignore
else:
p = self.stream.tell()
+ self.stream.seek(0, 0)
buf = self.stream.read(-1)
self.stream.seek(p, 0)
m = re.search(
@@ -1192,6 +1193,7 @@ def get_object(self, indirect_reference: IndirectObject) -> Optional[PdfObject]:
buf = bytes(self.stream.getbuffer()) # type: ignore
else:
p = self.stream.tell()
+ self.stream.seek(0, 0)
buf = self.stream.read(-1)
self.stream.seek(p, 0)
m = re.search(
@@ -1883,6 +1885,13 @@ def xfa(self) -> Optional[Dict[str, Any]]:
retval[tag] = es
return retval
+ def _get_indirect_object(self, num: int, gen: int) -> Optional[PdfObject]:
+ """
+ used to ease development
+ equivalent to generic.IndirectObject(num,gen,self).get_object()
+ """
+ return IndirectObject(num, gen, self).get_object()
+
class PdfFileReader(PdfReader): # pragma: no cover
def __init__(self, *args: Any, **kwargs: Any) -> None:
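Both hunks in this diff insert self.stream.seek(0, 0) before self.stream.read(-1). For a file-like object, read(-1) only returns the bytes after the current position, so without the rewind the fallback object search would scan a truncated buffer. A quick BytesIO illustration of the tell/seek/read dance used here; the sample bytes are made up:
from io import BytesIO
stream = BytesIO(b"%PDF-1.7 ...objects... startxref 1234 %%EOF")
stream.read(10)            # the reader has already consumed part of the stream
p = stream.tell()          # remember the current position
stream.seek(0, 0)          # rewind; otherwise read(-1) would miss the first 10 bytes
buf = stream.read(-1)      # read the whole stream
stream.seek(p, 0)          # restore the position for the caller
assert buf.startswith(b"%PDF-1.7")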
|
# Copyright (c) 2006, Mathieu Fenniak
# Copyright (c) 2007, Ashish Kulkarni <[email protected]>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import re
import struct
import zlib
from io import BytesIO
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Union,
cast,
)
from ._encryption import Encryption, PasswordType
from ._page import PageObject, _VirtualList
from ._utils import (
StrByteType,
StreamType,
b_,
deprecate_no_replacement,
deprecate_with_replacement,
logger_warning,
read_non_whitespace,
read_previous_line,
read_until_whitespace,
skip_over_comment,
skip_over_whitespace,
)
from .constants import CatalogAttributes as CA
from .constants import CatalogDictionary as CD
from .constants import CheckboxRadioButtonAttributes
from .constants import Core as CO
from .constants import DocumentInformationAttributes as DI
from .constants import FieldDictionaryAttributes, GoToActionArguments
from .constants import PageAttributes as PG
from .constants import PagesAttributes as PA
from .constants import TrailerKeys as TK
from .errors import (
EmptyFileError,
FileNotDecryptedError,
PdfReadError,
PdfStreamError,
WrongPasswordError,
)
from .generic import (
ArrayObject,
ContentStream,
DecodedStreamObject,
Destination,
DictionaryObject,
EncodedStreamObject,
Field,
FloatObject,
IndirectObject,
NameObject,
NullObject,
NumberObject,
PdfObject,
TextStringObject,
TreeObject,
read_object,
)
from .types import OutlineType, PagemodeType
from .xmp import XmpInformation
def convert_to_int(d: bytes, size: int) -> Union[int, Tuple[Any, ...]]:
if size > 8:
raise PdfReadError("invalid size in convert_to_int")
d = b"\x00\x00\x00\x00\x00\x00\x00\x00" + d
d = d[-8:]
return struct.unpack(">q", d)[0]
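# A minimal usage sketch (values are illustrative, not part of the original module):
# convert_to_int(b"\x00\x01\x00", 3) left-pads the three bytes to eight and unpacks
# them big-endian, giving 256. This is how the variable-width fields of a PDF 1.5
# cross-reference stream (see the /W handling further below) are decoded into integers.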
def convertToInt(
d: bytes, size: int
) -> Union[int, Tuple[Any, ...]]: # pragma: no cover
deprecate_with_replacement("convertToInt", "convert_to_int")
return convert_to_int(d, size)
class DocumentInformation(DictionaryObject):
"""
A class representing the basic document metadata provided in a PDF File.
This class is accessible through :py:class:`PdfReader.metadata<PyPDF2.PdfReader.metadata>`.
All text properties of the document metadata have
    *two* properties, e.g. author and author_raw. The non-raw property will
always return a ``TextStringObject``, making it ideal for a case where
the metadata is being displayed. The raw property can sometimes return
a ``ByteStringObject``, if PyPDF2 was unable to decode the string's
text encoding; this requires additional safety in the caller and
therefore is not as commonly accessed.
"""
def __init__(self) -> None:
DictionaryObject.__init__(self)
def _get_text(self, key: str) -> Optional[str]:
retval = self.get(key, None)
if isinstance(retval, TextStringObject):
return retval
return None
def getText(self, key: str) -> Optional[str]: # pragma: no cover
"""
The text value of the specified key or None.
.. deprecated:: 1.28.0
Use the attributes (e.g. :py:attr:`title` / :py:attr:`author`).
"""
deprecate_no_replacement("getText")
return self._get_text(key)
@property
def title(self) -> Optional[str]:
"""
Read-only property accessing the document's **title**.
Returns a unicode string (``TextStringObject``) or ``None``
if the title is not specified.
"""
return (
self._get_text(DI.TITLE) or self.get(DI.TITLE).get_object() # type: ignore
if self.get(DI.TITLE)
else None
)
@property
def title_raw(self) -> Optional[str]:
"""The "raw" version of title; can return a ``ByteStringObject``."""
return self.get(DI.TITLE)
@property
def author(self) -> Optional[str]:
"""
Read-only property accessing the document's **author**.
Returns a unicode string (``TextStringObject``) or ``None``
if the author is not specified.
"""
return self._get_text(DI.AUTHOR)
@property
def author_raw(self) -> Optional[str]:
"""The "raw" version of author; can return a ``ByteStringObject``."""
return self.get(DI.AUTHOR)
@property
def subject(self) -> Optional[str]:
"""
Read-only property accessing the document's **subject**.
Returns a unicode string (``TextStringObject``) or ``None``
if the subject is not specified.
"""
return self._get_text(DI.SUBJECT)
@property
def subject_raw(self) -> Optional[str]:
"""The "raw" version of subject; can return a ``ByteStringObject``."""
return self.get(DI.SUBJECT)
@property
def creator(self) -> Optional[str]:
"""
Read-only property accessing the document's **creator**.
If the document was converted to PDF from another format, this is the
name of the application (e.g. OpenOffice) that created the original
document from which it was converted. Returns a unicode string
(``TextStringObject``) or ``None`` if the creator is not specified.
"""
return self._get_text(DI.CREATOR)
@property
def creator_raw(self) -> Optional[str]:
"""The "raw" version of creator; can return a ``ByteStringObject``."""
return self.get(DI.CREATOR)
@property
def producer(self) -> Optional[str]:
"""
Read-only property accessing the document's **producer**.
If the document was converted to PDF from another format, this is
the name of the application (for example, OSX Quartz) that converted
it to PDF. Returns a unicode string (``TextStringObject``)
or ``None`` if the producer is not specified.
"""
return self._get_text(DI.PRODUCER)
@property
def producer_raw(self) -> Optional[str]:
"""The "raw" version of producer; can return a ``ByteStringObject``."""
return self.get(DI.PRODUCER)
class PdfReader:
"""
Initialize a PdfReader object.
This operation can take some time, as the PDF stream's cross-reference
tables are read into memory.
:param stream: A File object or an object that supports the standard read
and seek methods similar to a File object. Could also be a
string representing a path to a PDF file.
:param bool strict: Determines whether user should be warned of all
problems and also causes some correctable problems to be fatal.
Defaults to ``False``.
:param None/str/bytes password: Decrypt PDF file at initialization. If the
password is None, the file will not be decrypted.
        Defaults to ``None``.
"""
def __init__(
self,
stream: Union[StrByteType, Path],
strict: bool = False,
password: Union[None, str, bytes] = None,
) -> None:
self.strict = strict
self.flattened_pages: Optional[List[PageObject]] = None
self.resolved_objects: Dict[Tuple[Any, Any], Optional[PdfObject]] = {}
self.xref_index = 0
self._page_id2num: Optional[
Dict[Any, Any]
] = None # map page indirect_ref number to Page Number
if hasattr(stream, "mode") and "b" not in stream.mode: # type: ignore
logger_warning(
"PdfReader stream/file object is not in binary mode. "
"It may not be read correctly.",
__name__,
)
if isinstance(stream, (str, Path)):
with open(stream, "rb") as fh:
stream = BytesIO(fh.read())
self.read(stream)
self.stream = stream
self._override_encryption = False
self._encryption: Optional[Encryption] = None
if self.is_encrypted:
self._override_encryption = True
# Some documents may not have a /ID, use two empty
# byte strings instead. Solves
# https://github.com/mstamy2/PyPDF2/issues/608
id_entry = self.trailer.get(TK.ID)
id1_entry = id_entry[0].get_object().original_bytes if id_entry else b""
encrypt_entry = cast(
DictionaryObject, self.trailer[TK.ENCRYPT].get_object()
)
self._encryption = Encryption.read(encrypt_entry, id1_entry)
# try empty password if no password provided
pwd = password if password is not None else b""
if (
self._encryption.verify(pwd) == PasswordType.NOT_DECRYPTED
and password is not None
):
# raise if password provided
raise WrongPasswordError("Wrong password")
self._override_encryption = False
else:
if password is not None:
raise PdfReadError("Not encrypted file")
@property
def pdf_header(self) -> str:
# TODO: Make this return a bytes object for consistency
# but that needs a deprecation
loc = self.stream.tell()
self.stream.seek(0, 0)
pdf_file_version = self.stream.read(8).decode("utf-8")
self.stream.seek(loc, 0) # return to where it was
return pdf_file_version
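    # Illustrative example (assumed input): for a file beginning with b"%PDF-1.4",
    # pdf_header returns the 8-character string "%PDF-1.4" while leaving the
    # stream position untouched.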
@property
def metadata(self) -> Optional[DocumentInformation]:
"""
Retrieve the PDF file's document information dictionary, if it exists.
Note that some PDF files use metadata streams instead of docinfo
dictionaries, and these metadata streams will not be accessed by this
function.
:return: the document information of this PDF file
"""
if TK.INFO not in self.trailer:
return None
obj = self.trailer[TK.INFO]
retval = DocumentInformation()
if isinstance(obj, type(None)):
raise PdfReadError(
"trailer not found or does not point to document information directory"
)
retval.update(obj) # type: ignore
return retval
def getDocumentInfo(self) -> Optional[DocumentInformation]: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use the attribute :py:attr:`metadata` instead.
"""
deprecate_with_replacement("getDocumentInfo", "metadata")
return self.metadata
@property
def documentInfo(self) -> Optional[DocumentInformation]: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use the attribute :py:attr:`metadata` instead.
"""
deprecate_with_replacement("documentInfo", "metadata")
return self.metadata
@property
def xmp_metadata(self) -> Optional[XmpInformation]:
"""
XMP (Extensible Metadata Platform) data
:return: a :class:`XmpInformation<xmp.XmpInformation>`
        instance that can be used to access XMP metadata from the document,
        or ``None`` if no metadata was found on the document root.
"""
try:
self._override_encryption = True
return self.trailer[TK.ROOT].xmp_metadata # type: ignore
finally:
self._override_encryption = False
def getXmpMetadata(self) -> Optional[XmpInformation]: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use the attribute :py:attr:`xmp_metadata` instead.
"""
deprecate_with_replacement("getXmpMetadata", "xmp_metadata")
return self.xmp_metadata
@property
def xmpMetadata(self) -> Optional[XmpInformation]: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use the attribute :py:attr:`xmp_metadata` instead.
"""
deprecate_with_replacement("xmpMetadata", "xmp_metadata")
return self.xmp_metadata
def _get_num_pages(self) -> int:
"""
Calculate the number of pages in this PDF file.
:return: number of pages
:raises PdfReadError: if file is encrypted and restrictions prevent
this action.
"""
# Flattened pages will not work on an Encrypted PDF;
# the PDF file's page count is used in this case. Otherwise,
# the original method (flattened page count) is used.
if self.is_encrypted:
return self.trailer[TK.ROOT]["/Pages"]["/Count"] # type: ignore
else:
if self.flattened_pages is None:
self._flatten()
return len(self.flattened_pages) # type: ignore
def getNumPages(self) -> int: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :code:`len(reader.pages)` instead.
"""
deprecate_with_replacement("reader.getNumPages", "len(reader.pages)")
return self._get_num_pages()
@property
def numPages(self) -> int: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :code:`len(reader.pages)` instead.
"""
deprecate_with_replacement("reader.numPages", "len(reader.pages)")
return self._get_num_pages()
def getPage(self, pageNumber: int) -> PageObject: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :code:`reader.pages[pageNumber]` instead.
"""
deprecate_with_replacement(
"reader.getPage(pageNumber)", "reader.pages[pageNumber]"
)
return self._get_page(pageNumber)
def _get_page(self, page_number: int) -> PageObject:
"""
Retrieve a page by number from this PDF file.
:param int page_number: The page number to retrieve
(pages begin at zero)
:return: a :class:`PageObject<PyPDF2._page.PageObject>` instance.
"""
# ensure that we're not trying to access an encrypted PDF
# assert not self.trailer.has_key(TK.ENCRYPT)
if self.flattened_pages is None:
self._flatten()
assert self.flattened_pages is not None, "hint for mypy"
return self.flattened_pages[page_number]
@property
def namedDestinations(self) -> Dict[str, Any]: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :py:attr:`named_destinations` instead.
"""
deprecate_with_replacement("namedDestinations", "named_destinations")
return self.named_destinations
@property
def named_destinations(self) -> Dict[str, Any]:
"""
A read-only dictionary which maps names to
:class:`Destinations<PyPDF2.generic.Destination>`
"""
return self._get_named_destinations()
# A select group of relevant field attributes. For the complete list,
# see section 8.6.2 of the PDF 1.7 reference.
def get_fields(
self,
tree: Optional[TreeObject] = None,
retval: Optional[Dict[Any, Any]] = None,
fileobj: Optional[Any] = None,
) -> Optional[Dict[str, Any]]:
"""
Extract field data if this PDF contains interactive form fields.
The *tree* and *retval* parameters are for recursive use.
:param fileobj: A file object (usually a text file) to write
a report to on all interactive form fields found.
:return: A dictionary where each key is a field name, and each
value is a :class:`Field<PyPDF2.generic.Field>` object. By
default, the mapping name is used for keys.
``None`` if form data could not be located.
"""
field_attributes = FieldDictionaryAttributes.attributes_dict()
field_attributes.update(CheckboxRadioButtonAttributes.attributes_dict())
if retval is None:
retval = {}
catalog = cast(DictionaryObject, self.trailer[TK.ROOT])
# get the AcroForm tree
if CD.ACRO_FORM in catalog:
tree = cast(Optional[TreeObject], catalog[CD.ACRO_FORM])
else:
return None
if tree is None:
return retval
self._check_kids(tree, retval, fileobj)
for attr in field_attributes:
if attr in tree:
# Tree is a field
self._build_field(tree, retval, fileobj, field_attributes)
break
if "/Fields" in tree:
fields = cast(ArrayObject, tree["/Fields"])
for f in fields:
field = f.get_object()
self._build_field(field, retval, fileobj, field_attributes)
return retval
def getFields(
self,
tree: Optional[TreeObject] = None,
retval: Optional[Dict[Any, Any]] = None,
fileobj: Optional[Any] = None,
) -> Optional[Dict[str, Any]]: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :meth:`get_fields` instead.
"""
deprecate_with_replacement("getFields", "get_fields")
return self.get_fields(tree, retval, fileobj)
def _build_field(
self,
field: Union[TreeObject, DictionaryObject],
retval: Dict[Any, Any],
fileobj: Any,
field_attributes: Any,
) -> None:
self._check_kids(field, retval, fileobj)
try:
key = field["/TM"]
except KeyError:
try:
key = field["/T"]
except KeyError:
# Ignore no-name field for now
return
if fileobj:
self._write_field(fileobj, field, field_attributes)
fileobj.write("\n")
retval[key] = Field(field)
def _check_kids(
self, tree: Union[TreeObject, DictionaryObject], retval: Any, fileobj: Any
) -> None:
if PA.KIDS in tree:
# recurse down the tree
for kid in tree[PA.KIDS]: # type: ignore
self.get_fields(kid.get_object(), retval, fileobj)
def _write_field(self, fileobj: Any, field: Any, field_attributes: Any) -> None:
field_attributes_tuple = FieldDictionaryAttributes.attributes()
field_attributes_tuple = (
field_attributes_tuple + CheckboxRadioButtonAttributes.attributes()
)
for attr in field_attributes_tuple:
if attr in (
FieldDictionaryAttributes.Kids,
FieldDictionaryAttributes.AA,
):
continue
attr_name = field_attributes[attr]
try:
if attr == FieldDictionaryAttributes.FT:
# Make the field type value more clear
types = {
"/Btn": "Button",
"/Tx": "Text",
"/Ch": "Choice",
"/Sig": "Signature",
}
if field[attr] in types:
fileobj.write(attr_name + ": " + types[field[attr]] + "\n")
elif attr == FieldDictionaryAttributes.Parent:
# Let's just write the name of the parent
try:
name = field[attr][FieldDictionaryAttributes.TM]
except KeyError:
name = field[attr][FieldDictionaryAttributes.T]
fileobj.write(attr_name + ": " + name + "\n")
else:
fileobj.write(attr_name + ": " + str(field[attr]) + "\n")
except KeyError:
# Field attribute is N/A or unknown, so don't write anything
pass
def get_form_text_fields(self) -> Dict[str, Any]:
"""
Retrieve form fields from the document with textual data.
The key is the name of the form field, the value is the content of the
field.
If the document contains multiple form fields with the same name, the
second and following will get the suffix _2, _3, ...
"""
# Retrieve document form fields
formfields = self.get_fields()
if formfields is None:
return {}
return {
formfields[field]["/T"]: formfields[field].get("/V")
for field in formfields
if formfields[field].get("/FT") == "/Tx"
}
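    # Illustrative usage (field names and values are assumed, not from any real document):
    #   reader.get_form_text_fields()
    #   -> {"firstname": "John", "comment": None}
    # Only fields whose /FT entry is /Tx (text fields) are included; an unfilled
    # field maps to None because its /V entry is absent.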
def getFormTextFields(self) -> Dict[str, Any]: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :meth:`get_form_text_fields` instead.
"""
deprecate_with_replacement("getFormTextFields", "get_form_text_fields")
return self.get_form_text_fields()
def _get_named_destinations(
self,
tree: Union[TreeObject, None] = None,
retval: Optional[Any] = None,
) -> Dict[str, Any]:
"""
Retrieve the named destinations present in the document.
:return: a dictionary which maps names to
:class:`Destinations<PyPDF2.generic.Destination>`.
"""
if retval is None:
retval = {}
catalog = cast(DictionaryObject, self.trailer[TK.ROOT])
# get the name tree
if CA.DESTS in catalog:
tree = cast(TreeObject, catalog[CA.DESTS])
elif CA.NAMES in catalog:
names = cast(DictionaryObject, catalog[CA.NAMES])
if CA.DESTS in names:
tree = cast(TreeObject, names[CA.DESTS])
if tree is None:
return retval
if PA.KIDS in tree:
# recurse down the tree
for kid in cast(ArrayObject, tree[PA.KIDS]):
self._get_named_destinations(kid.get_object(), retval)
# TABLE 3.33 Entries in a name tree node dictionary (PDF 1.7 specs)
        elif CA.NAMES in tree:  # KIDS and NAMES are mutually exclusive (PDF 1.7 specs, p. 162)
names = cast(DictionaryObject, tree[CA.NAMES])
for i in range(0, len(names), 2):
key = cast(str, names[i].get_object())
value = names[i + 1].get_object()
if isinstance(value, DictionaryObject) and "/D" in value:
value = value["/D"]
dest = self._build_destination(key, value) # type: ignore
if dest is not None:
retval[key] = dest
        else:  # case where Dests is in the root catalog (PDF 1.7 specs, §2 about PDF 1.1)
for k__, v__ in tree.items():
val = v__.get_object()
dest = self._build_destination(k__, val)
if dest is not None:
retval[k__] = dest
return retval
def getNamedDestinations(
self,
tree: Union[TreeObject, None] = None,
retval: Optional[Any] = None,
) -> Dict[str, Any]: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :py:attr:`named_destinations` instead.
"""
deprecate_with_replacement("getNamedDestinations", "named_destinations")
return self._get_named_destinations(tree, retval)
@property
def outline(self) -> OutlineType:
"""
Read-only property for the outline (i.e., a collection of 'outline items'
which are also known as 'bookmarks') present in the document.
:return: a nested list of :class:`Destinations<PyPDF2.generic.Destination>`.
"""
return self._get_outline()
@property
def outlines(self) -> OutlineType: # pragma: no cover
"""
.. deprecated:: 2.9.0
Use :py:attr:`outline` instead.
"""
deprecate_with_replacement("outlines", "outline")
return self.outline
def _get_outline(
self, node: Optional[DictionaryObject] = None, outline: Optional[Any] = None
) -> OutlineType:
if outline is None:
outline = []
catalog = cast(DictionaryObject, self.trailer[TK.ROOT])
# get the outline dictionary and named destinations
if CO.OUTLINES in catalog:
lines = cast(DictionaryObject, catalog[CO.OUTLINES])
if isinstance(lines, NullObject):
return outline
# TABLE 8.3 Entries in the outline dictionary
if lines is not None and "/First" in lines:
node = cast(DictionaryObject, lines["/First"])
self._namedDests = self._get_named_destinations()
if node is None:
return outline
# see if there are any more outline items
while True:
outline_obj = self._build_outline_item(node)
if outline_obj:
outline.append(outline_obj)
# check for sub-outline
if "/First" in node:
sub_outline: List[Any] = []
self._get_outline(cast(DictionaryObject, node["/First"]), sub_outline)
if sub_outline:
outline.append(sub_outline)
if "/Next" not in node:
break
node = cast(DictionaryObject, node["/Next"])
return outline
def getOutlines(
self, node: Optional[DictionaryObject] = None, outline: Optional[Any] = None
) -> OutlineType: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :py:attr:`outline` instead.
"""
deprecate_with_replacement("getOutlines", "outline")
return self._get_outline(node, outline)
def _get_page_number_by_indirect(
self, indirect_ref: Union[None, int, NullObject, IndirectObject]
) -> int:
"""Generate _page_id2num"""
if self._page_id2num is None:
self._page_id2num = {
x.indirect_ref.idnum: i for i, x in enumerate(self.pages) # type: ignore
}
if indirect_ref is None or isinstance(indirect_ref, NullObject):
return -1
if isinstance(indirect_ref, int):
idnum = indirect_ref
else:
idnum = indirect_ref.idnum
assert self._page_id2num is not None, "hint for mypy"
ret = self._page_id2num.get(idnum, -1)
return ret
def get_page_number(self, page: PageObject) -> int:
"""
Retrieve page number of a given PageObject
        :param PageObject page: The page to get the page number of. Should be
            an instance of :class:`PageObject<PyPDF2._page.PageObject>`
:return: the page number or -1 if page not found
"""
return self._get_page_number_by_indirect(page.indirect_ref)
def getPageNumber(self, page: PageObject) -> int: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :meth:`get_page_number` instead.
"""
deprecate_with_replacement("getPageNumber", "get_page_number")
return self.get_page_number(page)
def get_destination_page_number(self, destination: Destination) -> int:
"""
Retrieve page number of a given Destination object.
        :param Destination destination: The destination to get the page number of.
:return: the page number or -1 if page not found
"""
return self._get_page_number_by_indirect(destination.page)
def getDestinationPageNumber(
self, destination: Destination
) -> int: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :meth:`get_destination_page_number` instead.
"""
deprecate_with_replacement(
"getDestinationPageNumber", "get_destination_page_number"
)
return self.get_destination_page_number(destination)
def _build_destination(
self,
title: str,
array: List[Union[NumberObject, IndirectObject, NullObject, DictionaryObject]],
) -> Destination:
page, typ = None, None
# handle outline items with missing or invalid destination
if (
isinstance(array, (type(None), NullObject))
or (isinstance(array, ArrayObject) and len(array) == 0)
or (isinstance(array, str))
):
page = NullObject()
typ = TextStringObject("/Fit")
return Destination(title, page, typ)
else:
page, typ = array[0:2] # type: ignore
array = array[2:]
try:
return Destination(title, page, typ, *array) # type: ignore
except PdfReadError:
logger_warning(f"Unknown destination: {title} {array}", __name__)
if self.strict:
raise
# create a link to first Page
tmp = self.pages[0].indirect_ref
indirect_ref = NullObject() if tmp is None else tmp
return Destination(
title, indirect_ref, TextStringObject("/Fit") # type: ignore
)
def _build_outline_item(self, node: DictionaryObject) -> Optional[Destination]:
dest, title, outline_item = None, None, None
# title required for valid outline
# PDF Reference 1.7: TABLE 8.4 Entries in an outline item dictionary
try:
title = node["/Title"]
except KeyError:
if self.strict:
raise PdfReadError(f"Outline Entry Missing /Title attribute: {node!r}")
title = "" # type: ignore
if "/A" in node:
# Action, PDFv1.7 Section 12.6 (only type GoTo supported)
action = cast(DictionaryObject, node["/A"])
action_type = cast(NameObject, action[GoToActionArguments.S])
if action_type == "/GoTo":
dest = action[GoToActionArguments.D]
elif "/Dest" in node:
# Destination, PDFv1.7 Section 12.3.2
dest = node["/Dest"]
# if array was referenced in another object, will be a dict w/ key "/D"
if isinstance(dest, DictionaryObject) and "/D" in dest:
dest = dest["/D"]
if isinstance(dest, ArrayObject):
outline_item = self._build_destination(title, dest) # type: ignore
elif isinstance(dest, str):
# named destination, addresses NameObject Issue #193
try:
outline_item = self._build_destination(
title, self._namedDests[dest].dest_array
)
except KeyError:
# named destination not found in Name Dict
outline_item = self._build_destination(title, None)
elif isinstance(dest, type(None)):
# outline item not required to have destination or action
# PDFv1.7 Table 153
outline_item = self._build_destination(title, dest) # type: ignore
else:
if self.strict:
raise PdfReadError(f"Unexpected destination {dest!r}")
outline_item = self._build_destination(title, None) # type: ignore
# if outline item created, add color, format, and child count if present
if outline_item:
if "/C" in node:
# Color of outline item font in (R, G, B) with values ranging 0.0-1.0
outline_item[NameObject("/C")] = ArrayObject(FloatObject(c) for c in node["/C"]) # type: ignore
if "/F" in node:
# specifies style characteristics bold and/or italic
# 1=italic, 2=bold, 3=both
outline_item[NameObject("/F")] = node["/F"]
if "/Count" in node:
# absolute value = num. visible children
# positive = open/unfolded, negative = closed/folded
outline_item[NameObject("/Count")] = node["/Count"]
return outline_item
@property
def pages(self) -> List[PageObject]:
"""Read-only property that emulates a list of :py:class:`Page<PyPDF2._page.Page>` objects."""
return _VirtualList(self._get_num_pages, self._get_page) # type: ignore
@property
def page_layout(self) -> Optional[str]:
"""
Get the page layout.
:return: Page layout currently being used.
.. list-table:: Valid ``layout`` values
:widths: 50 200
* - /NoLayout
- Layout explicitly not specified
* - /SinglePage
- Show one page at a time
* - /OneColumn
- Show one column at a time
* - /TwoColumnLeft
- Show pages in two columns, odd-numbered pages on the left
* - /TwoColumnRight
- Show pages in two columns, odd-numbered pages on the right
* - /TwoPageLeft
- Show two pages at a time, odd-numbered pages on the left
* - /TwoPageRight
- Show two pages at a time, odd-numbered pages on the right
"""
trailer = cast(DictionaryObject, self.trailer[TK.ROOT])
if CD.PAGE_LAYOUT in trailer:
return cast(NameObject, trailer[CD.PAGE_LAYOUT])
return None
def getPageLayout(self) -> Optional[str]: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :py:attr:`page_layout` instead.
"""
deprecate_with_replacement("getPageLayout", "page_layout")
return self.page_layout
@property
def pageLayout(self) -> Optional[str]: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :py:attr:`page_layout` instead.
"""
deprecate_with_replacement("pageLayout", "page_layout")
return self.page_layout
@property
def page_mode(self) -> Optional[PagemodeType]:
"""
Get the page mode.
:return: Page mode currently being used.
.. list-table:: Valid ``mode`` values
:widths: 50 200
* - /UseNone
- Do not show outline or thumbnails panels
* - /UseOutlines
- Show outline (aka bookmarks) panel
* - /UseThumbs
- Show page thumbnails panel
* - /FullScreen
- Fullscreen view
* - /UseOC
- Show Optional Content Group (OCG) panel
* - /UseAttachments
- Show attachments panel
"""
try:
return self.trailer[TK.ROOT]["/PageMode"] # type: ignore
except KeyError:
return None
def getPageMode(self) -> Optional[PagemodeType]: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :py:attr:`page_mode` instead.
"""
deprecate_with_replacement("getPageMode", "page_mode")
return self.page_mode
@property
def pageMode(self) -> Optional[PagemodeType]: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :py:attr:`page_mode` instead.
"""
deprecate_with_replacement("pageMode", "page_mode")
return self.page_mode
def _flatten(
self,
pages: Union[None, DictionaryObject, PageObject] = None,
inherit: Optional[Dict[str, Any]] = None,
indirect_ref: Optional[IndirectObject] = None,
) -> None:
inheritable_page_attributes = (
NameObject(PG.RESOURCES),
NameObject(PG.MEDIABOX),
NameObject(PG.CROPBOX),
NameObject(PG.ROTATE),
)
if inherit is None:
inherit = {}
if pages is None:
# Fix issue 327: set flattened_pages attribute only for
# decrypted file
catalog = self.trailer[TK.ROOT].get_object()
pages = catalog["/Pages"].get_object() # type: ignore
self.flattened_pages = []
t = "/Pages"
if PA.TYPE in pages:
t = pages[PA.TYPE] # type: ignore
if t == "/Pages":
for attr in inheritable_page_attributes:
if attr in pages:
inherit[attr] = pages[attr]
for page in pages[PA.KIDS]: # type: ignore
addt = {}
if isinstance(page, IndirectObject):
addt["indirect_ref"] = page
self._flatten(page.get_object(), inherit, **addt)
elif t == "/Page":
for attr_in, value in list(inherit.items()):
                # if the page has its own value, it does not inherit the
# parent's value:
if attr_in not in pages:
pages[attr_in] = value
page_obj = PageObject(self, indirect_ref)
page_obj.update(pages)
# TODO: Could flattened_pages be None at this point?
self.flattened_pages.append(page_obj) # type: ignore
def _get_object_from_stream(
self, indirect_reference: IndirectObject
) -> Union[int, PdfObject, str]:
# indirect reference to object in object stream
# read the entire object stream into memory
stmnum, idx = self.xref_objStm[indirect_reference.idnum]
obj_stm: EncodedStreamObject = IndirectObject(stmnum, 0, self).get_object() # type: ignore
# This is an xref to a stream, so its type better be a stream
assert cast(str, obj_stm["/Type"]) == "/ObjStm"
# /N is the number of indirect objects in the stream
assert idx < obj_stm["/N"]
stream_data = BytesIO(b_(obj_stm.get_data())) # type: ignore
for i in range(obj_stm["/N"]): # type: ignore
read_non_whitespace(stream_data)
stream_data.seek(-1, 1)
objnum = NumberObject.read_from_stream(stream_data)
read_non_whitespace(stream_data)
stream_data.seek(-1, 1)
offset = NumberObject.read_from_stream(stream_data)
read_non_whitespace(stream_data)
stream_data.seek(-1, 1)
if objnum != indirect_reference.idnum:
# We're only interested in one object
continue
if self.strict and idx != i:
raise PdfReadError("Object is in wrong index.")
stream_data.seek(int(obj_stm["/First"] + offset), 0) # type: ignore
            # to cope with cases where the 'pointer' lands on whitespace
read_non_whitespace(stream_data)
stream_data.seek(-1, 1)
try:
obj = read_object(stream_data, self)
except PdfStreamError as exc:
                # Stream object cannot be read. Normally this is a critical error,
                # but Adobe Reader doesn't complain, so continue (raise only in strict mode)
logger_warning(
f"Invalid stream (index {i}) within object "
f"{indirect_reference.idnum} {indirect_reference.generation}: "
f"{exc}",
__name__,
)
if self.strict:
raise PdfReadError(f"Can't read object stream: {exc}")
# Replace with null. Hopefully it's nothing important.
obj = NullObject()
return obj
if self.strict:
raise PdfReadError("This is a fatal error in strict mode.")
return NullObject()
def get_object(self, indirect_reference: IndirectObject) -> Optional[PdfObject]:
retval = self.cache_get_indirect_object(
indirect_reference.generation, indirect_reference.idnum
)
if retval is not None:
return retval
if (
indirect_reference.generation == 0
and indirect_reference.idnum in self.xref_objStm
):
retval = self._get_object_from_stream(indirect_reference) # type: ignore
elif (
indirect_reference.generation in self.xref
and indirect_reference.idnum in self.xref[indirect_reference.generation]
):
if self.xref_free_entry.get(indirect_reference.generation, {}).get(
indirect_reference.idnum, False
):
return NullObject()
start = self.xref[indirect_reference.generation][indirect_reference.idnum]
self.stream.seek(start, 0)
try:
idnum, generation = self.read_object_header(self.stream)
except Exception:
if hasattr(self.stream, "getbuffer"):
buf = bytes(self.stream.getbuffer()) # type: ignore
else:
p = self.stream.tell()
buf = self.stream.read(-1)
self.stream.seek(p, 0)
m = re.search(
rf"\s{indirect_reference.idnum}\s+{indirect_reference.generation}\s+obj".encode(),
buf,
)
if m is not None:
logger_warning(
f"Object ID {indirect_reference.idnum},{indirect_reference.generation} ref repaired",
__name__,
)
self.xref[indirect_reference.generation][
indirect_reference.idnum
] = (m.start(0) + 1)
self.stream.seek(m.start(0) + 1)
idnum, generation = self.read_object_header(self.stream)
else:
idnum = -1 # exception will be raised below
if idnum != indirect_reference.idnum and self.xref_index:
# Xref table probably had bad indexes due to not being zero-indexed
if self.strict:
raise PdfReadError(
f"Expected object ID ({indirect_reference.idnum} {indirect_reference.generation}) "
f"does not match actual ({idnum} {generation}); "
"xref table not zero-indexed."
)
# xref table is corrected in non-strict mode
elif idnum != indirect_reference.idnum and self.strict:
# some other problem
raise PdfReadError(
f"Expected object ID ({indirect_reference.idnum} "
f"{indirect_reference.generation}) does not match actual "
f"({idnum} {generation})."
)
if self.strict:
assert generation == indirect_reference.generation
retval = read_object(self.stream, self) # type: ignore
# override encryption is used for the /Encrypt dictionary
if not self._override_encryption and self._encryption is not None:
# if we don't have the encryption key:
if not self._encryption.is_decrypted():
raise FileNotDecryptedError("File has not been decrypted")
# otherwise, decrypt here...
retval = cast(PdfObject, retval)
retval = self._encryption.decrypt_object(
retval, indirect_reference.idnum, indirect_reference.generation
)
else:
if hasattr(self.stream, "getbuffer"):
buf = bytes(self.stream.getbuffer()) # type: ignore
else:
p = self.stream.tell()
buf = self.stream.read(-1)
self.stream.seek(p, 0)
m = re.search(
rf"\s{indirect_reference.idnum}\s+{indirect_reference.generation}\s+obj".encode(),
buf,
)
if m is not None:
logger_warning(
f"Object {indirect_reference.idnum} {indirect_reference.generation} found",
__name__,
)
if indirect_reference.generation not in self.xref:
self.xref[indirect_reference.generation] = {}
self.xref[indirect_reference.generation][indirect_reference.idnum] = (
m.start(0) + 1
)
self.stream.seek(m.end(0) + 1)
skip_over_whitespace(self.stream)
self.stream.seek(-1, 1)
retval = read_object(self.stream, self) # type: ignore
# override encryption is used for the /Encrypt dictionary
if not self._override_encryption and self._encryption is not None:
# if we don't have the encryption key:
if not self._encryption.is_decrypted():
raise FileNotDecryptedError("File has not been decrypted")
# otherwise, decrypt here...
retval = cast(PdfObject, retval)
retval = self._encryption.decrypt_object(
retval, indirect_reference.idnum, indirect_reference.generation
)
else:
logger_warning(
f"Object {indirect_reference.idnum} {indirect_reference.generation} not defined.",
__name__,
)
if self.strict:
raise PdfReadError("Could not find object.")
self.cache_indirect_object(
indirect_reference.generation, indirect_reference.idnum, retval
)
return retval
def getObject(
self, indirectReference: IndirectObject
) -> Optional[PdfObject]: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :meth:`get_object` instead.
"""
deprecate_with_replacement("getObject", "get_object")
return self.get_object(indirectReference)
def read_object_header(self, stream: StreamType) -> Tuple[int, int]:
# Should never be necessary to read out whitespace, since the
# cross-reference table should put us in the right spot to read the
# object header. In reality... some files have stupid cross reference
# tables that are off by whitespace bytes.
extra = False
skip_over_comment(stream)
extra |= skip_over_whitespace(stream)
stream.seek(-1, 1)
idnum = read_until_whitespace(stream)
extra |= skip_over_whitespace(stream)
stream.seek(-1, 1)
generation = read_until_whitespace(stream)
extra |= skip_over_whitespace(stream)
stream.seek(-1, 1)
# although it's not used, it might still be necessary to read
_obj = stream.read(3) # noqa: F841
read_non_whitespace(stream)
stream.seek(-1, 1)
if extra and self.strict:
logger_warning(
f"Superfluous whitespace found in object header {idnum} {generation}", # type: ignore
__name__,
)
return int(idnum), int(generation)
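    # Illustrative example (assumed stream contents): with the stream positioned
    # at b"12 0 obj ...", read_object_header consumes the two numbers and the
    # "obj" keyword and returns (12, 0).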
def readObjectHeader(
self, stream: StreamType
) -> Tuple[int, int]: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :meth:`read_object_header` instead.
"""
deprecate_with_replacement("readObjectHeader", "read_object_header")
return self.read_object_header(stream)
def cache_get_indirect_object(
self, generation: int, idnum: int
) -> Optional[PdfObject]:
return self.resolved_objects.get((generation, idnum))
def cacheGetIndirectObject(
self, generation: int, idnum: int
) -> Optional[PdfObject]: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :meth:`cache_get_indirect_object` instead.
"""
deprecate_with_replacement(
"cacheGetIndirectObject", "cache_get_indirect_object"
)
return self.cache_get_indirect_object(generation, idnum)
def cache_indirect_object(
self, generation: int, idnum: int, obj: Optional[PdfObject]
) -> Optional[PdfObject]:
if (generation, idnum) in self.resolved_objects:
msg = f"Overwriting cache for {generation} {idnum}"
if self.strict:
raise PdfReadError(msg)
logger_warning(msg, __name__)
self.resolved_objects[(generation, idnum)] = obj
return obj
def cacheIndirectObject(
self, generation: int, idnum: int, obj: Optional[PdfObject]
) -> Optional[PdfObject]: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :meth:`cache_indirect_object` instead.
"""
deprecate_with_replacement("cacheIndirectObject", "cache_indirect_object")
return self.cache_indirect_object(generation, idnum, obj)
def read(self, stream: StreamType) -> None:
self._basic_validation(stream)
self._find_eof_marker(stream)
startxref = self._find_startxref_pos(stream)
        # check and, if necessary, correct the startxref (only when not strict)
xref_issue_nr = self._get_xref_issues(stream, startxref)
if xref_issue_nr != 0:
if self.strict and xref_issue_nr:
raise PdfReadError("Broken xref table")
logger_warning(f"incorrect startxref pointer({xref_issue_nr})", __name__)
# read all cross reference tables and their trailers
self._read_xref_tables_and_trailers(stream, startxref, xref_issue_nr)
# if not zero-indexed, verify that the table is correct; change it if necessary
if self.xref_index and not self.strict:
loc = stream.tell()
for gen, xref_entry in self.xref.items():
if gen == 65535:
continue
xref_k = sorted(
xref_entry.keys()
                )  # must be in ascending order to prevent damage
for id in xref_k:
stream.seek(xref_entry[id], 0)
try:
pid, _pgen = self.read_object_header(stream)
except ValueError:
break
if pid == id - self.xref_index:
                        # fixing the index item by item is required for revised PDFs.
self.xref[gen][pid] = self.xref[gen][id]
del self.xref[gen][id]
# if not, then either it's just plain wrong, or the
# non-zero-index is actually correct
stream.seek(loc, 0) # return to where it was
def _basic_validation(self, stream: StreamType) -> None:
# start at the end:
stream.seek(0, os.SEEK_END)
if not stream.tell():
raise EmptyFileError("Cannot read an empty file")
if self.strict:
stream.seek(0, os.SEEK_SET)
header_byte = stream.read(5)
if header_byte != b"%PDF-":
raise PdfReadError(
f"PDF starts with '{header_byte.decode('utf8')}', "
"but '%PDF-' expected"
)
stream.seek(0, os.SEEK_END)
def _find_eof_marker(self, stream: StreamType) -> None:
last_mb = 8 # to parse whole file
line = b""
while line[:5] != b"%%EOF":
if stream.tell() < last_mb:
raise PdfReadError("EOF marker not found")
line = read_previous_line(stream)
def _find_startxref_pos(self, stream: StreamType) -> int:
"""Find startxref entry - the location of the xref table"""
line = read_previous_line(stream)
try:
startxref = int(line)
except ValueError:
# 'startxref' may be on the same line as the location
if not line.startswith(b"startxref"):
raise PdfReadError("startxref not found")
startxref = int(line[9:].strip())
logger_warning("startxref on same line as offset", __name__)
else:
line = read_previous_line(stream)
if line[:9] != b"startxref":
raise PdfReadError("startxref not found")
return startxref
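    # Illustrative example (assumed file tail): for a trailer ending in
    #   startxref
    #   18799
    #   %%EOF
    # this returns 18799, the byte offset at which the xref table (or
    # cross-reference stream) is expected to start.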
def _read_standard_xref_table(self, stream: StreamType) -> None:
# standard cross-reference table
ref = stream.read(4)
if ref[:3] != b"ref":
raise PdfReadError("xref table read error")
read_non_whitespace(stream)
stream.seek(-1, 1)
firsttime = True # check if the first time looking at the xref table
while True:
num = cast(int, read_object(stream, self))
if firsttime and num != 0:
self.xref_index = num
if self.strict:
logger_warning(
"Xref table not zero-indexed. ID numbers for objects will be corrected.",
__name__,
)
                # if the table is not zero-indexed, it could be due to an error from when the PDF was created,
                # which will lead to mismatched indices later on; this is only warned about and corrected when self.strict == True
firsttime = False
read_non_whitespace(stream)
stream.seek(-1, 1)
size = cast(int, read_object(stream, self))
read_non_whitespace(stream)
stream.seek(-1, 1)
cnt = 0
while cnt < size:
line = stream.read(20)
# It's very clear in section 3.4.3 of the PDF spec
# that all cross-reference table lines are a fixed
# 20 bytes (as of PDF 1.7). However, some files have
# 21-byte entries (or more) due to the use of \r\n
# (CRLF) EOL's. Detect that case, and adjust the line
# until it does not begin with a \r (CR) or \n (LF).
while line[0] in b"\x0D\x0A":
stream.seek(-20 + 1, 1)
line = stream.read(20)
# On the other hand, some malformed PDF files
# use a single character EOL without a preceding
# space. Detect that case, and seek the stream
# back one character. (0-9 means we've bled into
# the next xref entry, t means we've bled into the
# text "trailer"):
if line[-1] in b"0123456789t":
stream.seek(-1, 1)
try:
offset_b, generation_b = line[:16].split(b" ")
entry_type_b = line[17:18]
offset, generation = int(offset_b), int(generation_b)
except Exception:
                    # if something went wrong
if hasattr(stream, "getbuffer"):
buf = bytes(stream.getbuffer()) # type: ignore
else:
p = stream.tell()
stream.seek(0, 0)
buf = stream.read(-1)
stream.seek(p)
f = re.search(f"{num}\\s+(\\d+)\\s+obj".encode(), buf)
if f is None:
logger_warning(
f"entry {num} in Xref table invalid; object not found",
__name__,
)
generation = 65535
offset = -1
else:
logger_warning(
f"entry {num} in Xref table invalid but object found",
__name__,
)
generation = int(f.group(1))
offset = f.start()
if generation not in self.xref:
self.xref[generation] = {}
self.xref_free_entry[generation] = {}
if num in self.xref[generation]:
# It really seems like we should allow the last
# xref table in the file to override previous
# ones. Since we read the file backwards, assume
# any existing key is already set correctly.
pass
else:
self.xref[generation][num] = offset
try:
self.xref_free_entry[generation][num] = entry_type_b == b"f"
except Exception:
pass
try:
self.xref_free_entry[65535][num] = entry_type_b == b"f"
except Exception:
pass
cnt += 1
num += 1
read_non_whitespace(stream)
stream.seek(-1, 1)
trailertag = stream.read(7)
if trailertag != b"trailer":
# more xrefs!
stream.seek(-7, 1)
else:
break
def _read_xref_tables_and_trailers(
self, stream: StreamType, startxref: Optional[int], xref_issue_nr: int
) -> None:
self.xref: Dict[int, Dict[Any, Any]] = {}
self.xref_free_entry: Dict[int, Dict[Any, Any]] = {}
self.xref_objStm: Dict[int, Tuple[Any, Any]] = {}
self.trailer = DictionaryObject()
while startxref is not None:
# load the xref table
stream.seek(startxref, 0)
x = stream.read(1)
if x in b"\r\n":
x = stream.read(1)
if x == b"x":
startxref = self._read_xref(stream)
elif xref_issue_nr:
try:
self._rebuild_xref_table(stream)
break
except Exception:
xref_issue_nr = 0
elif x.isdigit():
try:
xrefstream = self._read_pdf15_xref_stream(stream)
except Exception as e:
if TK.ROOT in self.trailer:
logger_warning(
f"Previous trailer can not be read {e.args}",
__name__,
)
break
else:
raise PdfReadError(f"trailer can not be read {e.args}")
trailer_keys = TK.ROOT, TK.ENCRYPT, TK.INFO, TK.ID
for key in trailer_keys:
if key in xrefstream and key not in self.trailer:
self.trailer[NameObject(key)] = xrefstream.raw_get(key)
if "/XRefStm" in xrefstream:
p = stream.tell()
stream.seek(cast(int, xrefstream["/XRefStm"]) + 1, 0)
self._read_pdf15_xref_stream(stream)
stream.seek(p, 0)
if "/Prev" in xrefstream:
startxref = cast(int, xrefstream["/Prev"])
else:
break
else:
startxref = self._read_xref_other_error(stream, startxref)
def _read_xref(self, stream: StreamType) -> Optional[int]:
self._read_standard_xref_table(stream)
read_non_whitespace(stream)
stream.seek(-1, 1)
new_trailer = cast(Dict[str, Any], read_object(stream, self))
for key, value in new_trailer.items():
if key not in self.trailer:
self.trailer[key] = value
if "/XRefStm" in new_trailer:
p = stream.tell()
stream.seek(cast(int, new_trailer["/XRefStm"]) + 1, 0)
try:
self._read_pdf15_xref_stream(stream)
except Exception:
logger_warning(
f"XRef object at {new_trailer['/XRefStm']} can not be read, some object may be missing",
__name__,
)
stream.seek(p, 0)
if "/Prev" in new_trailer:
startxref = new_trailer["/Prev"]
return startxref
else:
return None
def _read_xref_other_error(
self, stream: StreamType, startxref: int
) -> Optional[int]:
# some PDFs have /Prev=0 in the trailer, instead of no /Prev
if startxref == 0:
if self.strict:
raise PdfReadError(
"/Prev=0 in the trailer (try opening with strict=False)"
)
logger_warning(
"/Prev=0 in the trailer - assuming there is no previous xref table",
__name__,
)
return None
# bad xref character at startxref. Let's see if we can find
# the xref table nearby, as we've observed this error with an
# off-by-one before.
stream.seek(-11, 1)
tmp = stream.read(20)
xref_loc = tmp.find(b"xref")
if xref_loc != -1:
startxref -= 10 - xref_loc
return startxref
# No explicit xref table, try finding a cross-reference stream.
stream.seek(startxref, 0)
for look in range(5):
if stream.read(1).isdigit():
# This is not a standard PDF, consider adding a warning
startxref += look
return startxref
# no xref table found at specified location
if "/Root" in self.trailer and not self.strict:
# if Root has been already found, just raise warning
logger_warning("Invalid parent xref., rebuild xref", __name__)
try:
self._rebuild_xref_table(stream)
return None
except Exception:
raise PdfReadError("can not rebuild xref")
raise PdfReadError("Could not find xref table at specified location")
def _read_pdf15_xref_stream(
self, stream: StreamType
) -> Union[ContentStream, EncodedStreamObject, DecodedStreamObject]:
# PDF 1.5+ Cross-Reference Stream
stream.seek(-1, 1)
idnum, generation = self.read_object_header(stream)
xrefstream = cast(ContentStream, read_object(stream, self))
assert cast(str, xrefstream["/Type"]) == "/XRef"
self.cache_indirect_object(generation, idnum, xrefstream)
stream_data = BytesIO(b_(xrefstream.get_data()))
# Index pairs specify the subsections in the dictionary. If
        # none, create one subsection that spans everything.
idx_pairs = xrefstream.get("/Index", [0, xrefstream.get("/Size")])
entry_sizes = cast(Dict[Any, Any], xrefstream.get("/W"))
assert len(entry_sizes) >= 3
if self.strict and len(entry_sizes) > 3:
raise PdfReadError(f"Too many entry sizes: {entry_sizes}")
def get_entry(i: int) -> Union[int, Tuple[int, ...]]:
# Reads the correct number of bytes for each entry. See the
# discussion of the W parameter in PDF spec table 17.
if entry_sizes[i] > 0:
d = stream_data.read(entry_sizes[i])
return convert_to_int(d, entry_sizes[i])
# PDF Spec Table 17: A value of zero for an element in the
# W array indicates...the default value shall be used
if i == 0:
return 1 # First value defaults to 1
else:
return 0
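        # Illustrative example (assumed /W value): with /W [1 2 1] each xref entry
        # is read as a 1-byte type, a 2-byte offset/number and a 1-byte
        # generation/index field. A width of 0 in the first position makes
        # get_entry(0) return the default type 1 (an in-use, uncompressed object),
        # per PDF spec table 17.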
def used_before(num: int, generation: Union[int, Tuple[int, ...]]) -> bool:
# We move backwards through the xrefs, don't replace any.
return num in self.xref.get(generation, []) or num in self.xref_objStm # type: ignore
# Iterate through each subsection
self._read_xref_subsections(idx_pairs, get_entry, used_before)
return xrefstream
@staticmethod
def _get_xref_issues(stream: StreamType, startxref: int) -> int:
"""Return an int which indicates an issue. 0 means there is no issue."""
stream.seek(startxref - 1, 0) # -1 to check character before
line = stream.read(1)
if line not in b"\r\n \t":
return 1
line = stream.read(4)
if line != b"xref":
# not an xref so check if it is an XREF object
line = b""
while line in b"0123456789 \t":
line = stream.read(1)
if line == b"":
return 2
line += stream.read(2) # 1 char already read, +2 to check "obj"
if line.lower() != b"obj":
return 3
# while stream.read(1) in b" \t\r\n":
# pass
# line = stream.read(256) # check that it is xref obj
# if b"/xref" not in line.lower():
# return 4
return 0
def _rebuild_xref_table(self, stream: StreamType) -> None:
self.xref = {}
stream.seek(0, 0)
f_ = stream.read(-1)
for m in re.finditer(rb"[\r\n \t][ \t]*(\d+)[ \t]+(\d+)[ \t]+obj", f_):
idnum = int(m.group(1))
generation = int(m.group(2))
if generation not in self.xref:
self.xref[generation] = {}
self.xref[generation][idnum] = m.start(1)
stream.seek(0, 0)
for m in re.finditer(rb"[\r\n \t][ \t]*trailer[\r\n \t]*(<<)", f_):
stream.seek(m.start(1), 0)
new_trailer = cast(Dict[Any, Any], read_object(stream, self))
            # Here we parse the file from start to end, so newer data has to overwrite the existing entries.
for key, value in list(new_trailer.items()):
self.trailer[key] = value
def _read_xref_subsections(
self,
idx_pairs: List[int],
get_entry: Callable[[int], Union[int, Tuple[int, ...]]],
used_before: Callable[[int, Union[int, Tuple[int, ...]]], bool],
) -> None:
last_end = 0
for start, size in self._pairs(idx_pairs):
# The subsections must increase
assert start >= last_end
last_end = start + size
for num in range(start, start + size):
# The first entry is the type
xref_type = get_entry(0)
# The rest of the elements depend on the xref_type
if xref_type == 0:
# linked list of free objects
next_free_object = get_entry(1) # noqa: F841
next_generation = get_entry(2) # noqa: F841
elif xref_type == 1:
# objects that are in use but are not compressed
byte_offset = get_entry(1)
generation = get_entry(2)
if generation not in self.xref:
self.xref[generation] = {} # type: ignore
if not used_before(num, generation):
self.xref[generation][num] = byte_offset # type: ignore
elif xref_type == 2:
# compressed objects
objstr_num = get_entry(1)
obstr_idx = get_entry(2)
generation = 0 # PDF spec table 18, generation is 0
if not used_before(num, generation):
self.xref_objStm[num] = (objstr_num, obstr_idx)
elif self.strict:
raise PdfReadError(f"Unknown xref type: {xref_type}")
def _pairs(self, array: List[int]) -> Iterable[Tuple[int, int]]:
i = 0
while True:
yield array[i], array[i + 1]
i += 2
if (i + 1) >= len(array):
break
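    # Illustrative example (assumed /Index value): _pairs([0, 10, 50, 3]) yields
    # (0, 10) and (50, 3), i.e. one subsection covering objects 0-9 and another
    # covering objects 50-52.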
def read_next_end_line(
self, stream: StreamType, limit_offset: int = 0
) -> bytes: # pragma: no cover
""".. deprecated:: 2.1.0"""
deprecate_no_replacement("read_next_end_line", removed_in="4.0.0")
line_parts = []
while True:
# Prevent infinite loops in malformed PDFs
if stream.tell() == 0 or stream.tell() == limit_offset:
raise PdfReadError("Could not read malformed PDF file")
x = stream.read(1)
if stream.tell() < 2:
raise PdfReadError("EOL marker not found")
stream.seek(-2, 1)
if x in (b"\n", b"\r"): # \n = LF; \r = CR
crlf = False
while x in (b"\n", b"\r"):
x = stream.read(1)
if x in (b"\n", b"\r"): # account for CR+LF
stream.seek(-1, 1)
crlf = True
if stream.tell() < 2:
raise PdfReadError("EOL marker not found")
stream.seek(-2, 1)
stream.seek(
2 if crlf else 1, 1
) # if using CR+LF, go back 2 bytes, else 1
break
else:
line_parts.append(x)
line_parts.reverse()
return b"".join(line_parts)
def readNextEndLine(
self, stream: StreamType, limit_offset: int = 0
) -> bytes: # pragma: no cover
""".. deprecated:: 1.28.0"""
deprecate_no_replacement("readNextEndLine")
return self.read_next_end_line(stream, limit_offset)
def decrypt(self, password: Union[str, bytes]) -> PasswordType:
"""
When using an encrypted / secured PDF file with the PDF Standard
encryption handler, this function will allow the file to be decrypted.
It checks the given password against the document's user password and
owner password, and then stores the resulting decryption key if either
password is correct.
It does not matter which password was matched. Both passwords provide
the correct decryption key that will allow the document to be used with
this library.
:param str password: The password to match.
:return: `PasswordType`.
"""
if not self._encryption:
raise PdfReadError("Not encrypted file")
# TODO: raise Exception for wrong password
return self._encryption.verify(password)
def decode_permissions(self, permissions_code: int) -> Dict[str, bool]:
# Takes the permissions as an integer, returns the allowed access
permissions = {}
permissions["print"] = permissions_code & (1 << 3 - 1) != 0 # bit 3
permissions["modify"] = permissions_code & (1 << 4 - 1) != 0 # bit 4
permissions["copy"] = permissions_code & (1 << 5 - 1) != 0 # bit 5
permissions["annotations"] = permissions_code & (1 << 6 - 1) != 0 # bit 6
permissions["forms"] = permissions_code & (1 << 9 - 1) != 0 # bit 9
permissions["accessability"] = permissions_code & (1 << 10 - 1) != 0 # bit 10
permissions["assemble"] = permissions_code & (1 << 11 - 1) != 0 # bit 11
permissions["print_high_quality"] = (
permissions_code & (1 << 12 - 1) != 0
) # bit 12
return permissions
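    # Illustrative example (assumed permission flags): decode_permissions(4) sets
    # only bit 3, so only "print" is True; decode_permissions(4 | 16) additionally
    # allows "copy" (bit 5). Bit n is tested with permissions_code & (1 << (n - 1)).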
@property
def is_encrypted(self) -> bool:
"""
Read-only boolean property showing whether this PDF file is encrypted.
Note that this property, if true, will remain true even after the
:meth:`decrypt()<PyPDF2.PdfReader.decrypt>` method is called.
"""
return TK.ENCRYPT in self.trailer
def getIsEncrypted(self) -> bool: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :py:attr:`is_encrypted` instead.
"""
deprecate_with_replacement("getIsEncrypted", "is_encrypted")
return self.is_encrypted
@property
def isEncrypted(self) -> bool: # pragma: no cover
"""
.. deprecated:: 1.28.0
Use :py:attr:`is_encrypted` instead.
"""
deprecate_with_replacement("isEncrypted", "is_encrypted")
return self.is_encrypted
@property
def xfa(self) -> Optional[Dict[str, Any]]:
tree: Optional[TreeObject] = None
retval: Dict[str, Any] = {}
catalog = cast(DictionaryObject, self.trailer[TK.ROOT])
if "/AcroForm" not in catalog or not catalog["/AcroForm"]:
return None
tree = cast(TreeObject, catalog["/AcroForm"])
if "/XFA" in tree:
fields = cast(ArrayObject, tree["/XFA"])
i = iter(fields)
for f in i:
tag = f
f = next(i)
if isinstance(f, IndirectObject):
field = cast(Optional[EncodedStreamObject], f.get_object())
if field:
es = zlib.decompress(field._data)
retval[tag] = es
return retval
class PdfFileReader(PdfReader): # pragma: no cover
def __init__(self, *args: Any, **kwargs: Any) -> None:
deprecate_with_replacement("PdfFileReader", "PdfReader")
if "strict" not in kwargs and len(args) < 2:
kwargs["strict"] = True # maintain the default
super().__init__(*args, **kwargs)
|
GHSA-hm9v-vj3r-r55m
|
PyPDF2/generic/_base.py
|
@@ -420,6 +420,14 @@ def writeToStream(
deprecate_with_replacement("writeToStream", "write_to_stream")
self.write_to_stream(stream, encryption_key)
+ @staticmethod
+ def unnumber(sin: str) -> str:
+ i = sin.find("#")
+ while i >= 0:
+ sin = sin[:i] + chr(int(sin[i + 1 : i + 3], 16)) + sin[i + 3 :]
+ i = sin.find("#")
+ return sin
+
@staticmethod
def read_from_stream(stream: StreamType, pdf: Any) -> "NameObject": # PdfReader
name = stream.read(1)
@@ -431,10 +439,11 @@ def read_from_stream(stream: StreamType, pdf: Any) -> "NameObject": # PdfReader
ret = name.decode("utf-8")
except (UnicodeEncodeError, UnicodeDecodeError):
ret = name.decode("gbk")
- return NameObject(ret)
- except (UnicodeEncodeError, UnicodeDecodeError) as e:
# Name objects should represent irregular characters
# with a '#' followed by the symbol's hex number
+ ret = NameObject.unnumber(ret)
+ return NameObject(ret)
+ except (UnicodeEncodeError, UnicodeDecodeError) as e:
if not pdf.strict:
logger_warning("Illegal character in Name Object", __name__)
return NameObject(name)
|
# Copyright (c) 2006, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import codecs
import decimal
import hashlib
import re
from typing import Any, Callable, Optional, Union
from .._codecs import _pdfdoc_encoding_rev
from .._utils import (
StreamType,
b_,
deprecate_with_replacement,
hex_str,
hexencode,
logger_warning,
read_non_whitespace,
read_until_regex,
str_,
)
from ..errors import STREAM_TRUNCATED_PREMATURELY, PdfReadError, PdfStreamError
__author__ = "Mathieu Fenniak"
__author_email__ = "[email protected]"
class PdfObject:
# function for calculating a hash value
hash_func: Callable[..., "hashlib._Hash"] = hashlib.sha1
def hash_value_data(self) -> bytes:
return ("%s" % self).encode()
def hash_value(self) -> bytes:
return (
"%s:%s"
% (
self.__class__.__name__,
self.hash_func(self.hash_value_data()).hexdigest(),
)
).encode()
def get_object(self) -> Optional["PdfObject"]:
"""Resolve indirect references."""
return self
def getObject(self) -> Optional["PdfObject"]: # pragma: no cover
deprecate_with_replacement("getObject", "get_object")
return self.get_object()
def write_to_stream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None:
raise NotImplementedError
class NullObject(PdfObject):
def write_to_stream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None:
stream.write(b"null")
@staticmethod
def read_from_stream(stream: StreamType) -> "NullObject":
nulltxt = stream.read(4)
if nulltxt != b"null":
raise PdfReadError("Could not read Null object")
return NullObject()
def writeToStream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None: # pragma: no cover
deprecate_with_replacement("writeToStream", "write_to_stream")
self.write_to_stream(stream, encryption_key)
def __repr__(self) -> str:
return "NullObject"
@staticmethod
def readFromStream(stream: StreamType) -> "NullObject": # pragma: no cover
deprecate_with_replacement("readFromStream", "read_from_stream")
return NullObject.read_from_stream(stream)
class BooleanObject(PdfObject):
def __init__(self, value: Any) -> None:
self.value = value
def __eq__(self, __o: object) -> bool:
if isinstance(__o, BooleanObject):
return self.value == __o.value
elif isinstance(__o, bool):
return self.value == __o
else:
return False
def __repr__(self) -> str:
return "True" if self.value else "False"
def write_to_stream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None:
if self.value:
stream.write(b"true")
else:
stream.write(b"false")
def writeToStream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None: # pragma: no cover
deprecate_with_replacement("writeToStream", "write_to_stream")
self.write_to_stream(stream, encryption_key)
@staticmethod
def read_from_stream(stream: StreamType) -> "BooleanObject":
word = stream.read(4)
if word == b"true":
return BooleanObject(True)
elif word == b"fals":
stream.read(1)
return BooleanObject(False)
else:
raise PdfReadError("Could not read Boolean object")
@staticmethod
def readFromStream(stream: StreamType) -> "BooleanObject": # pragma: no cover
deprecate_with_replacement("readFromStream", "read_from_stream")
return BooleanObject.read_from_stream(stream)
class IndirectObject(PdfObject):
def __init__(self, idnum: int, generation: int, pdf: Any) -> None: # PdfReader
self.idnum = idnum
self.generation = generation
self.pdf = pdf
def get_object(self) -> Optional[PdfObject]:
obj = self.pdf.get_object(self)
if obj is None:
return None
return obj.get_object()
def __repr__(self) -> str:
return f"IndirectObject({self.idnum!r}, {self.generation!r}, {id(self.pdf)})"
def __eq__(self, other: Any) -> bool:
return (
other is not None
and isinstance(other, IndirectObject)
and self.idnum == other.idnum
and self.generation == other.generation
and self.pdf is other.pdf
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def write_to_stream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None:
stream.write(b_(f"{self.idnum} {self.generation} R"))
def writeToStream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None: # pragma: no cover
deprecate_with_replacement("writeToStream", "write_to_stream")
self.write_to_stream(stream, encryption_key)
@staticmethod
def read_from_stream(stream: StreamType, pdf: Any) -> "IndirectObject": # PdfReader
idnum = b""
while True:
tok = stream.read(1)
if not tok:
raise PdfStreamError(STREAM_TRUNCATED_PREMATURELY)
if tok.isspace():
break
idnum += tok
generation = b""
while True:
tok = stream.read(1)
if not tok:
raise PdfStreamError(STREAM_TRUNCATED_PREMATURELY)
if tok.isspace():
if not generation:
continue
break
generation += tok
r = read_non_whitespace(stream)
if r != b"R":
raise PdfReadError(
f"Error reading indirect object reference at byte {hex_str(stream.tell())}"
)
return IndirectObject(int(idnum), int(generation), pdf)
@staticmethod
def readFromStream(
stream: StreamType, pdf: Any # PdfReader
) -> "IndirectObject": # pragma: no cover
deprecate_with_replacement("readFromStream", "read_from_stream")
return IndirectObject.read_from_stream(stream, pdf)
class FloatObject(decimal.Decimal, PdfObject):
def __new__(
cls, value: Union[str, Any] = "0", context: Optional[Any] = None
) -> "FloatObject":
try:
return decimal.Decimal.__new__(cls, str_(value), context)
except Exception:
# If this isn't a valid decimal (happens in malformed PDFs)
# fallback to 0
logger_warning(f"FloatObject ({value}) invalid; use 0.0 instead", __name__)
return decimal.Decimal.__new__(cls, "0.0")
def __repr__(self) -> str:
if self == self.to_integral():
return str(self.quantize(decimal.Decimal(1)))
else:
# Standard formatting adds useless extraneous zeros.
o = f"{self:.5f}"
# Remove the zeros.
while o and o[-1] == "0":
o = o[:-1]
return o
def as_numeric(self) -> float:
return float(repr(self).encode("utf8"))
def write_to_stream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None:
stream.write(repr(self).encode("utf8"))
def writeToStream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None: # pragma: no cover
deprecate_with_replacement("writeToStream", "write_to_stream")
self.write_to_stream(stream, encryption_key)
class NumberObject(int, PdfObject):
NumberPattern = re.compile(b"[^+-.0-9]")
def __new__(cls, value: Any) -> "NumberObject":
try:
return int.__new__(cls, int(value))
except ValueError:
logger_warning(f"NumberObject({value}) invalid; use 0 instead", __name__)
return int.__new__(cls, 0)
def as_numeric(self) -> int:
return int(repr(self).encode("utf8"))
def write_to_stream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None:
stream.write(repr(self).encode("utf8"))
def writeToStream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None: # pragma: no cover
deprecate_with_replacement("writeToStream", "write_to_stream")
self.write_to_stream(stream, encryption_key)
@staticmethod
def read_from_stream(stream: StreamType) -> Union["NumberObject", FloatObject]:
num = read_until_regex(stream, NumberObject.NumberPattern)
if num.find(b".") != -1:
return FloatObject(num)
return NumberObject(num)
@staticmethod
def readFromStream(
stream: StreamType,
) -> Union["NumberObject", FloatObject]: # pragma: no cover
deprecate_with_replacement("readFromStream", "read_from_stream")
return NumberObject.read_from_stream(stream)
class ByteStringObject(bytes, PdfObject):
"""
Represents a string object where the text encoding could not be determined.
This occurs quite often, as the PDF spec doesn't provide an alternate way to
represent strings -- for example, the encryption data stored in files (like
/O) is clearly not text, but is still stored in a "String" object.
"""
@property
def original_bytes(self) -> bytes:
"""For compatibility with TextStringObject.original_bytes."""
return self
def write_to_stream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None:
bytearr = self
if encryption_key:
from .._security import RC4_encrypt
bytearr = RC4_encrypt(encryption_key, bytearr) # type: ignore
stream.write(b"<")
stream.write(hexencode(bytearr))
stream.write(b">")
def writeToStream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None: # pragma: no cover
deprecate_with_replacement("writeToStream", "write_to_stream")
self.write_to_stream(stream, encryption_key)
class TextStringObject(str, PdfObject):
"""
Represents a string object that has been decoded into a real unicode string.
If read from a PDF document, this string appeared to match the
PDFDocEncoding, or contained a UTF-16BE BOM mark to cause UTF-16 decoding to
occur.
"""
autodetect_pdfdocencoding = False
autodetect_utf16 = False
@property
def original_bytes(self) -> bytes:
"""
It is occasionally possible that a text string object gets created where
a byte string object was expected due to the autodetection mechanism --
if that occurs, this "original_bytes" property can be used to
back-calculate what the original encoded bytes were.
"""
return self.get_original_bytes()
def get_original_bytes(self) -> bytes:
# We're a text string object, but the library is trying to get our raw
# bytes. This can happen if we auto-detected this string as text, but
# we were wrong. It's pretty common. Return the original bytes that
# would have been used to create this object, based upon the autodetect
# method.
if self.autodetect_utf16:
return codecs.BOM_UTF16_BE + self.encode("utf-16be")
elif self.autodetect_pdfdocencoding:
return encode_pdfdocencoding(self)
else:
raise Exception("no information about original bytes")
def write_to_stream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None:
# Try to write the string out as a PDFDocEncoding encoded string. It's
# nicer to look at in the PDF file. Sadly, we take a performance hit
# here for trying...
try:
bytearr = encode_pdfdocencoding(self)
except UnicodeEncodeError:
bytearr = codecs.BOM_UTF16_BE + self.encode("utf-16be")
if encryption_key:
from .._security import RC4_encrypt
bytearr = RC4_encrypt(encryption_key, bytearr)
obj = ByteStringObject(bytearr)
obj.write_to_stream(stream, None)
else:
stream.write(b"(")
for c in bytearr:
if not chr(c).isalnum() and c != b" ":
# This:
# stream.write(b_(rf"\{c:0>3o}"))
# gives
# https://github.com/davidhalter/parso/issues/207
stream.write(b_("\\%03o" % c))
else:
stream.write(b_(chr(c)))
stream.write(b")")
def writeToStream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None: # pragma: no cover
deprecate_with_replacement("writeToStream", "write_to_stream")
self.write_to_stream(stream, encryption_key)
class NameObject(str, PdfObject):
delimiter_pattern = re.compile(rb"\s+|[\(\)<>\[\]{}/%]")
surfix = b"/"
def write_to_stream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None:
stream.write(b_(self))
def writeToStream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None: # pragma: no cover
deprecate_with_replacement("writeToStream", "write_to_stream")
self.write_to_stream(stream, encryption_key)
@staticmethod
def read_from_stream(stream: StreamType, pdf: Any) -> "NameObject": # PdfReader
name = stream.read(1)
if name != NameObject.surfix:
raise PdfReadError("name read error")
name += read_until_regex(stream, NameObject.delimiter_pattern, ignore_eof=True)
try:
try:
ret = name.decode("utf-8")
except (UnicodeEncodeError, UnicodeDecodeError):
ret = name.decode("gbk")
return NameObject(ret)
except (UnicodeEncodeError, UnicodeDecodeError) as e:
# Name objects should represent irregular characters
# with a '#' followed by the symbol's hex number
if not pdf.strict:
logger_warning("Illegal character in Name Object", __name__)
return NameObject(name)
else:
raise PdfReadError("Illegal character in Name Object") from e
@staticmethod
def readFromStream(
stream: StreamType, pdf: Any # PdfReader
) -> "NameObject": # pragma: no cover
deprecate_with_replacement("readFromStream", "read_from_stream")
return NameObject.read_from_stream(stream, pdf)
def encode_pdfdocencoding(unicode_string: str) -> bytes:
retval = b""
for c in unicode_string:
try:
retval += b_(chr(_pdfdoc_encoding_rev[c]))
except KeyError:
raise UnicodeEncodeError(
"pdfdocencoding", c, -1, -1, "does not exist in translation table"
)
return retval
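# Illustrative only, not part of the module: the fallback behaviour of the
# numeric objects defined above (the test-suite further below exercises the
# same cases).
assert FloatObject("abc") == 0   # invalid decimal falls back to 0.0 and logs a warning
assert NumberObject("0,0") == 0  # invalid integer falls back to 0 and logs a warning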
|
GHSA-hm9v-vj3r-r55m
|
PyPDF2/generic/_data_structures.py
|
@@ -67,7 +67,6 @@
from ._utils import read_hex_string_from_stream, read_string_from_stream
logger = logging.getLogger(__name__)
-ObjectPrefix = b"/<[tf(n%"
NumberSigns = b"+-"
IndirectPattern = re.compile(rb"[+-]?(\d+)\s+(\d+)\s+R[^a-zA-Z]")
@@ -263,10 +262,19 @@ def read_unsized_from_steam(stream: StreamType, pdf: Any) -> bytes: # PdfReader
stream.read(1)
break
stream.seek(-1, 1)
- key = read_object(stream, pdf)
- tok = read_non_whitespace(stream)
- stream.seek(-1, 1)
- value = read_object(stream, pdf, forced_encoding)
+ try:
+ key = read_object(stream, pdf)
+ tok = read_non_whitespace(stream)
+ stream.seek(-1, 1)
+ value = read_object(stream, pdf, forced_encoding)
+ except Exception as exc:
+ if pdf is not None and pdf.strict:
+ raise PdfReadError(exc.__repr__())
+ logger_warning(exc.__repr__(), __name__)
+ retval = DictionaryObject()
+ retval.update(data)
+ return retval # return partial data
+
if not data.get(key):
data[key] = value
else:
@@ -812,10 +820,9 @@ def read_object(
) -> Union[PdfObject, int, str, ContentStream]:
tok = stream.read(1)
stream.seek(-1, 1) # reset to start
- idx = ObjectPrefix.find(tok)
- if idx == 0:
+ if tok == b"/":
return NameObject.read_from_stream(stream, pdf)
- elif idx == 1:
+ elif tok == b"<":
# hexadecimal string OR dictionary
peek = stream.read(2)
stream.seek(-2, 1) # reset to start
@@ -824,15 +831,15 @@ def read_object(
return DictionaryObject.read_from_stream(stream, pdf, forced_encoding)
else:
return read_hex_string_from_stream(stream, forced_encoding)
- elif idx == 2:
+ elif tok == b"[":
return ArrayObject.read_from_stream(stream, pdf, forced_encoding)
- elif idx == 3 or idx == 4:
+ elif tok == b"t" or tok == b"f":
return BooleanObject.read_from_stream(stream)
- elif idx == 5:
+ elif tok == b"(":
return read_string_from_stream(stream, forced_encoding)
- elif idx == 6:
+ elif tok == b"n":
return NullObject.read_from_stream(stream)
- elif idx == 7:
+ elif tok == b"%":
# comment
while tok not in (b"\r", b"\n"):
tok = stream.read(1)
@@ -843,14 +850,18 @@ def read_object(
tok = read_non_whitespace(stream)
stream.seek(-1, 1)
return read_object(stream, pdf, forced_encoding)
- else:
+ elif tok in b"0123456789+-.":
# number object OR indirect reference
peek = stream.read(20)
stream.seek(-len(peek), 1) # reset to start
if IndirectPattern.match(peek) is not None:
return IndirectObject.read_from_stream(stream, pdf)
else:
return NumberObject.read_from_stream(stream)
+ else:
+ raise PdfReadError(
+ f"Invalid Elementary Object starting with {tok} @{stream.tell()}" # type: ignore
+ )
class Field(TreeObject):
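A minimal sketch of the new failure mode introduced by this hunk (assumes a PyPDF2 build with the patch applied): a leading byte that matches none of the recognised object prefixes now raises PdfReadError instead of being misread as a number object.
from io import BytesIO
from PyPDF2.errors import PdfReadError
from PyPDF2.generic import read_object
try:
    read_object(BytesIO(b"@garbage"), None)
except PdfReadError as err:
    print(err)  # e.g. "Invalid Elementary Object starting with b'@' @0"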
|
# Copyright (c) 2006, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "Mathieu Fenniak"
__author_email__ = "[email protected]"
import logging
import re
from io import BytesIO
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, cast
from .._utils import (
WHITESPACES,
StreamType,
b_,
deprecate_with_replacement,
hex_str,
logger_warning,
read_non_whitespace,
read_until_regex,
skip_over_comment,
)
from ..constants import (
CheckboxRadioButtonAttributes,
FieldDictionaryAttributes,
)
from ..constants import FilterTypes as FT
from ..constants import OutlineFontFlag
from ..constants import StreamAttributes as SA
from ..constants import TypArguments as TA
from ..constants import TypFitArguments as TF
from ..errors import STREAM_TRUNCATED_PREMATURELY, PdfReadError, PdfStreamError
from ._base import (
BooleanObject,
FloatObject,
IndirectObject,
NameObject,
NullObject,
NumberObject,
PdfObject,
)
from ._utils import read_hex_string_from_stream, read_string_from_stream
logger = logging.getLogger(__name__)
ObjectPrefix = b"/<[tf(n%"
NumberSigns = b"+-"
IndirectPattern = re.compile(rb"[+-]?(\d+)\s+(\d+)\s+R[^a-zA-Z]")
class ArrayObject(list, PdfObject):
def items(self) -> Iterable[Any]:
"""
Emulate DictionaryObject.items for a list
(index, object)
"""
return enumerate(self)
def write_to_stream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None:
stream.write(b"[")
for data in self:
stream.write(b" ")
data.write_to_stream(stream, encryption_key)
stream.write(b" ]")
def writeToStream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None: # pragma: no cover
deprecate_with_replacement("writeToStream", "write_to_stream")
self.write_to_stream(stream, encryption_key)
@staticmethod
def read_from_stream(
stream: StreamType,
pdf: Any,
forced_encoding: Union[None, str, List[str], Dict[int, str]] = None,
) -> "ArrayObject": # PdfReader
arr = ArrayObject()
tmp = stream.read(1)
if tmp != b"[":
raise PdfReadError("Could not read array")
while True:
# skip leading whitespace
tok = stream.read(1)
while tok.isspace():
tok = stream.read(1)
stream.seek(-1, 1)
# check for array ending
peekahead = stream.read(1)
if peekahead == b"]":
break
stream.seek(-1, 1)
# read and append obj
arr.append(read_object(stream, pdf, forced_encoding))
return arr
@staticmethod
def readFromStream(
stream: StreamType, pdf: Any # PdfReader
) -> "ArrayObject": # pragma: no cover
deprecate_with_replacement("readFromStream", "read_from_stream")
return ArrayObject.read_from_stream(stream, pdf)
class DictionaryObject(dict, PdfObject):
def raw_get(self, key: Any) -> Any:
return dict.__getitem__(self, key)
def __setitem__(self, key: Any, value: Any) -> Any:
if not isinstance(key, PdfObject):
raise ValueError("key must be PdfObject")
if not isinstance(value, PdfObject):
raise ValueError("value must be PdfObject")
return dict.__setitem__(self, key, value)
def setdefault(self, key: Any, value: Optional[Any] = None) -> Any:
if not isinstance(key, PdfObject):
raise ValueError("key must be PdfObject")
if not isinstance(value, PdfObject):
raise ValueError("value must be PdfObject")
return dict.setdefault(self, key, value) # type: ignore
def __getitem__(self, key: Any) -> PdfObject:
return dict.__getitem__(self, key).get_object()
@property
def xmp_metadata(self) -> Optional[PdfObject]:
"""
Retrieve XMP (Extensible Metadata Platform) data relevant to
this object, if available.
Stability: Added in v1.12, will exist for all future v1.x releases.
@return Returns a {@link #xmp.XmpInformation XmpInformation} instance
that can be used to access XMP metadata from the document. Can also
return None if no metadata was found on the document root.
"""
from ..xmp import XmpInformation
metadata = self.get("/Metadata", None)
if metadata is None:
return None
metadata = metadata.get_object()
if not isinstance(metadata, XmpInformation):
metadata = XmpInformation(metadata)
self[NameObject("/Metadata")] = metadata
return metadata
def getXmpMetadata(
self,
) -> Optional[PdfObject]: # pragma: no cover
"""
.. deprecated:: 1.28.3
Use :meth:`xmp_metadata` instead.
"""
deprecate_with_replacement("getXmpMetadata", "xmp_metadata")
return self.xmp_metadata
@property
def xmpMetadata(self) -> Optional[PdfObject]: # pragma: no cover
"""
.. deprecated:: 1.28.3
Use :meth:`xmp_metadata` instead.
"""
deprecate_with_replacement("xmpMetadata", "xmp_metadata")
return self.xmp_metadata
def write_to_stream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None:
stream.write(b"<<\n")
for key, value in list(self.items()):
key.write_to_stream(stream, encryption_key)
stream.write(b" ")
value.write_to_stream(stream, encryption_key)
stream.write(b"\n")
stream.write(b">>")
def writeToStream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None: # pragma: no cover
deprecate_with_replacement("writeToStream", "write_to_stream")
self.write_to_stream(stream, encryption_key)
@staticmethod
def read_from_stream(
stream: StreamType,
pdf: Any, # PdfReader
forced_encoding: Union[None, str, List[str], Dict[int, str]] = None,
) -> "DictionaryObject":
def get_next_obj_pos(
p: int, p1: int, rem_gens: List[int], pdf: Any
) -> int: # PdfReader
l = pdf.xref[rem_gens[0]]
for o in l:
if p1 > l[o] and p < l[o]:
p1 = l[o]
if len(rem_gens) == 1:
return p1
else:
return get_next_obj_pos(p, p1, rem_gens[1:], pdf)
def read_unsized_from_steam(stream: StreamType, pdf: Any) -> bytes: # PdfReader
# we are just pointing at beginning of the stream
eon = get_next_obj_pos(stream.tell(), 2**32, list(pdf.xref), pdf) - 1
curr = stream.tell()
rw = stream.read(eon - stream.tell())
p = rw.find(b"endstream")
if p < 0:
raise PdfReadError(
f"Unable to find 'endstream' marker for obj starting at {curr}."
)
stream.seek(curr + p + 9)
return rw[: p - 1]
tmp = stream.read(2)
if tmp != b"<<":
raise PdfReadError(
f"Dictionary read error at byte {hex_str(stream.tell())}: "
"stream must begin with '<<'"
)
data: Dict[Any, Any] = {}
while True:
tok = read_non_whitespace(stream)
if tok == b"\x00":
continue
elif tok == b"%":
stream.seek(-1, 1)
skip_over_comment(stream)
continue
if not tok:
raise PdfStreamError(STREAM_TRUNCATED_PREMATURELY)
if tok == b">":
stream.read(1)
break
stream.seek(-1, 1)
key = read_object(stream, pdf)
tok = read_non_whitespace(stream)
stream.seek(-1, 1)
value = read_object(stream, pdf, forced_encoding)
if not data.get(key):
data[key] = value
else:
# multiple definitions of key not permitted
msg = (
f"Multiple definitions in dictionary at byte "
f"{hex_str(stream.tell())} for key {key}"
)
if pdf is not None and pdf.strict:
raise PdfReadError(msg)
logger_warning(msg, __name__)
pos = stream.tell()
s = read_non_whitespace(stream)
if s == b"s" and stream.read(5) == b"tream":
eol = stream.read(1)
# odd PDF file output has spaces after 'stream' keyword but before EOL.
# patch provided by Danial Sandler
while eol == b" ":
eol = stream.read(1)
if eol not in (b"\n", b"\r"):
raise PdfStreamError("Stream data must be followed by a newline")
if eol == b"\r":
# read \n after
if stream.read(1) != b"\n":
stream.seek(-1, 1)
# this is a stream object, not a dictionary
if SA.LENGTH not in data:
raise PdfStreamError("Stream length not defined")
length = data[SA.LENGTH]
if isinstance(length, IndirectObject):
t = stream.tell()
length = pdf.get_object(length)
stream.seek(t, 0)
pstart = stream.tell()
data["__streamdata__"] = stream.read(length)
e = read_non_whitespace(stream)
ndstream = stream.read(8)
if (e + ndstream) != b"endstream":
# (sigh) - the odd PDF file has a length that is too long, so
# we need to read backwards to find the "endstream" ending.
# ReportLab (unknown version) generates files with this bug,
# and Python users working with PDF files tend to be our audience.
# We need to do this to correct the streamdata and chop off
# an extra character.
pos = stream.tell()
stream.seek(-10, 1)
end = stream.read(9)
if end == b"endstream":
# we found it by looking back one character further.
data["__streamdata__"] = data["__streamdata__"][:-1]
elif not pdf.strict:
stream.seek(pstart, 0)
data["__streamdata__"] = read_unsized_from_steam(stream, pdf)
pos = stream.tell()
else:
stream.seek(pos, 0)
raise PdfReadError(
"Unable to find 'endstream' marker after stream at byte "
f"{hex_str(stream.tell())} (nd='{ndstream!r}', end='{end!r}')."
)
else:
stream.seek(pos, 0)
if "__streamdata__" in data:
return StreamObject.initialize_from_dictionary(data)
else:
retval = DictionaryObject()
retval.update(data)
return retval
@staticmethod
def readFromStream(
stream: StreamType, pdf: Any # PdfReader
) -> "DictionaryObject": # pragma: no cover
deprecate_with_replacement("readFromStream", "read_from_stream")
return DictionaryObject.read_from_stream(stream, pdf)
class TreeObject(DictionaryObject):
def __init__(self) -> None:
DictionaryObject.__init__(self)
def hasChildren(self) -> bool: # pragma: no cover
deprecate_with_replacement("hasChildren", "has_children", "4.0.0")
return self.has_children()
def has_children(self) -> bool:
return "/First" in self
def __iter__(self) -> Any:
return self.children()
def children(self) -> Iterable[Any]:
if not self.has_children():
return
child_ref = self[NameObject("/First")]
child = child_ref.get_object()
while True:
yield child
if child == self[NameObject("/Last")]:
return
child_ref = child.get(NameObject("/Next")) # type: ignore
if child_ref is None:
return
child = child_ref.get_object()
def addChild(self, child: Any, pdf: Any) -> None: # pragma: no cover
deprecate_with_replacement("addChild", "add_child")
self.add_child(child, pdf)
def add_child(self, child: Any, pdf: Any) -> None: # PdfWriter
child_obj = child.get_object()
child = pdf.get_reference(child_obj)
assert isinstance(child, IndirectObject)
prev: Optional[DictionaryObject]
if "/First" not in self:
self[NameObject("/First")] = child
self[NameObject("/Count")] = NumberObject(0)
prev = None
else:
prev = cast(
DictionaryObject, self["/Last"]
) # TABLE 8.3 Entries in the outline dictionary
self[NameObject("/Last")] = child
self[NameObject("/Count")] = NumberObject(self[NameObject("/Count")] + 1) # type: ignore
if prev:
prev_ref = pdf.get_reference(prev)
assert isinstance(prev_ref, IndirectObject)
child_obj[NameObject("/Prev")] = prev_ref
prev[NameObject("/Next")] = child
parent_ref = pdf.get_reference(self)
assert isinstance(parent_ref, IndirectObject)
child_obj[NameObject("/Parent")] = parent_ref
def removeChild(self, child: Any) -> None: # pragma: no cover
deprecate_with_replacement("removeChild", "remove_child")
self.remove_child(child)
def _remove_node_from_tree(
self, prev: Any, prev_ref: Any, cur: Any, last: Any
) -> None:
"""Adjust the pointers of the linked list and tree node count."""
next_ref = cur.get(NameObject("/Next"), None)
if prev is None:
if next_ref:
# Removing first tree node
next_obj = next_ref.get_object()
del next_obj[NameObject("/Prev")]
self[NameObject("/First")] = next_ref
self[NameObject("/Count")] = NumberObject(
self[NameObject("/Count")] - 1 # type: ignore
)
else:
# Removing only tree node
assert self[NameObject("/Count")] == 1
del self[NameObject("/Count")]
del self[NameObject("/First")]
if NameObject("/Last") in self:
del self[NameObject("/Last")]
else:
if next_ref:
# Removing middle tree node
next_obj = next_ref.get_object()
next_obj[NameObject("/Prev")] = prev_ref
prev[NameObject("/Next")] = next_ref
else:
# Removing last tree node
assert cur == last
del prev[NameObject("/Next")]
self[NameObject("/Last")] = prev_ref
self[NameObject("/Count")] = NumberObject(self[NameObject("/Count")] - 1) # type: ignore
def remove_child(self, child: Any) -> None:
child_obj = child.get_object()
if NameObject("/Parent") not in child_obj:
raise ValueError("Removed child does not appear to be a tree item")
elif child_obj[NameObject("/Parent")] != self:
raise ValueError("Removed child is not a member of this tree")
found = False
prev_ref = None
prev = None
cur_ref: Optional[Any] = self[NameObject("/First")]
cur: Optional[Dict[str, Any]] = cur_ref.get_object() # type: ignore
last_ref = self[NameObject("/Last")]
last = last_ref.get_object()
while cur is not None:
if cur == child_obj:
self._remove_node_from_tree(prev, prev_ref, cur, last)
found = True
break
# Go to the next node
prev_ref = cur_ref
prev = cur
if NameObject("/Next") in cur:
cur_ref = cur[NameObject("/Next")]
cur = cur_ref.get_object()
else:
cur_ref = None
cur = None
if not found:
raise ValueError("Removal couldn't find item in tree")
_reset_node_tree_relationship(child_obj)
def emptyTree(self) -> None: # pragma: no cover
deprecate_with_replacement("emptyTree", "empty_tree", "4.0.0")
self.empty_tree()
def empty_tree(self) -> None:
for child in self:
child_obj = child.get_object()
_reset_node_tree_relationship(child_obj)
if NameObject("/Count") in self:
del self[NameObject("/Count")]
if NameObject("/First") in self:
del self[NameObject("/First")]
if NameObject("/Last") in self:
del self[NameObject("/Last")]
def _reset_node_tree_relationship(child_obj: Any) -> None:
"""
Call this after a node has been removed from a tree.
This resets the node's attributes with respect to that tree.
"""
del child_obj[NameObject("/Parent")]
if NameObject("/Next") in child_obj:
del child_obj[NameObject("/Next")]
if NameObject("/Prev") in child_obj:
del child_obj[NameObject("/Prev")]
class StreamObject(DictionaryObject):
def __init__(self) -> None:
self.__data: Optional[str] = None
self.decoded_self: Optional[DecodedStreamObject] = None
def hash_value_data(self) -> bytes:
data = super().hash_value_data()
data += b_(self._data)
return data
@property
def decodedSelf(self) -> Optional["DecodedStreamObject"]: # pragma: no cover
deprecate_with_replacement("decodedSelf", "decoded_self")
return self.decoded_self
@decodedSelf.setter
def decodedSelf(self, value: "DecodedStreamObject") -> None: # pragma: no cover
deprecate_with_replacement("decodedSelf", "decoded_self")
self.decoded_self = value
@property
def _data(self) -> Any:
return self.__data
@_data.setter
def _data(self, value: Any) -> None:
self.__data = value
def write_to_stream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None:
self[NameObject(SA.LENGTH)] = NumberObject(len(self._data))
DictionaryObject.write_to_stream(self, stream, encryption_key)
del self[SA.LENGTH]
stream.write(b"\nstream\n")
data = self._data
if encryption_key:
from .._security import RC4_encrypt
data = RC4_encrypt(encryption_key, data)
stream.write(data)
stream.write(b"\nendstream")
@staticmethod
def initializeFromDictionary(
data: Dict[str, Any]
) -> Union["EncodedStreamObject", "DecodedStreamObject"]: # pragma: no cover
return StreamObject.initialize_from_dictionary(data)
@staticmethod
def initialize_from_dictionary(
data: Dict[str, Any]
) -> Union["EncodedStreamObject", "DecodedStreamObject"]:
retval: Union["EncodedStreamObject", "DecodedStreamObject"]
if SA.FILTER in data:
retval = EncodedStreamObject()
else:
retval = DecodedStreamObject()
retval._data = data["__streamdata__"]
del data["__streamdata__"]
del data[SA.LENGTH]
retval.update(data)
return retval
def flateEncode(self) -> "EncodedStreamObject": # pragma: no cover
deprecate_with_replacement("flateEncode", "flate_encode")
return self.flate_encode()
def flate_encode(self) -> "EncodedStreamObject":
from ..filters import FlateDecode
if SA.FILTER in self:
f = self[SA.FILTER]
if isinstance(f, ArrayObject):
f.insert(0, NameObject(FT.FLATE_DECODE))
else:
newf = ArrayObject()
newf.append(NameObject("/FlateDecode"))
newf.append(f)
f = newf
else:
f = NameObject("/FlateDecode")
retval = EncodedStreamObject()
retval[NameObject(SA.FILTER)] = f
retval._data = FlateDecode.encode(self._data)
return retval
class DecodedStreamObject(StreamObject):
def get_data(self) -> Any:
return self._data
def set_data(self, data: Any) -> Any:
self._data = data
def getData(self) -> Any: # pragma: no cover
deprecate_with_replacement("getData", "get_data")
return self._data
def setData(self, data: Any) -> None: # pragma: no cover
deprecate_with_replacement("setData", "set_data")
self.set_data(data)
class EncodedStreamObject(StreamObject):
def __init__(self) -> None:
self.decoded_self: Optional[DecodedStreamObject] = None
@property
def decodedSelf(self) -> Optional["DecodedStreamObject"]: # pragma: no cover
deprecate_with_replacement("decodedSelf", "decoded_self")
return self.decoded_self
@decodedSelf.setter
def decodedSelf(self, value: DecodedStreamObject) -> None: # pragma: no cover
deprecate_with_replacement("decodedSelf", "decoded_self")
self.decoded_self = value
def get_data(self) -> Union[None, str, bytes]:
from ..filters import decode_stream_data
if self.decoded_self is not None:
# cached version of decoded object
return self.decoded_self.get_data()
else:
# create decoded object
decoded = DecodedStreamObject()
decoded._data = decode_stream_data(self)
for key, value in list(self.items()):
if key not in (SA.LENGTH, SA.FILTER, SA.DECODE_PARMS):
decoded[key] = value
self.decoded_self = decoded
return decoded._data
def getData(self) -> Union[None, str, bytes]: # pragma: no cover
deprecate_with_replacement("getData", "get_data")
return self.get_data()
def set_data(self, data: Any) -> None: # pragma: no cover
raise PdfReadError("Creating EncodedStreamObject is not currently supported")
def setData(self, data: Any) -> None: # pragma: no cover
deprecate_with_replacement("setData", "set_data")
return self.set_data(data)
class ContentStream(DecodedStreamObject):
def __init__(
self,
stream: Any,
pdf: Any,
forced_encoding: Union[None, str, List[str], Dict[int, str]] = None,
) -> None:
self.pdf = pdf
# The inner list has two elements:
# [0] : List
# [1] : str
self.operations: List[Tuple[Any, Any]] = []
# stream may be a StreamObject or an ArrayObject containing
# multiple StreamObjects to be cat'd together.
stream = stream.get_object()
if isinstance(stream, ArrayObject):
data = b""
for s in stream:
data += b_(s.get_object().get_data())
if data[-1:] != b"\n":
data += b"\n"
stream_bytes = BytesIO(data)
else:
stream_data = stream.get_data()
assert stream_data is not None
stream_data_bytes = b_(stream_data)
stream_bytes = BytesIO(stream_data_bytes)
self.forced_encoding = forced_encoding
self.__parse_content_stream(stream_bytes)
def __parse_content_stream(self, stream: StreamType) -> None:
stream.seek(0, 0)
operands: List[Union[int, str, PdfObject]] = []
while True:
peek = read_non_whitespace(stream)
if peek == b"" or peek == 0:
break
stream.seek(-1, 1)
if peek.isalpha() or peek in (b"'", b'"'):
operator = read_until_regex(stream, NameObject.delimiter_pattern, True)
if operator == b"BI":
# begin inline image - a completely different parsing
# mechanism is required, of course... thanks buddy...
assert operands == []
ii = self._read_inline_image(stream)
self.operations.append((ii, b"INLINE IMAGE"))
else:
self.operations.append((operands, operator))
operands = []
elif peek == b"%":
# If we encounter a comment in the content stream, we have to
# handle it here. Typically, read_object will handle
# encountering a comment -- but read_object assumes that
# following the comment must be the object we're trying to
# read. In this case, it could be an operator instead.
while peek not in (b"\r", b"\n"):
peek = stream.read(1)
else:
operands.append(read_object(stream, None, self.forced_encoding))
def _read_inline_image(self, stream: StreamType) -> Dict[str, Any]:
# begin reading just after the "BI" - begin image
# first read the dictionary of settings.
settings = DictionaryObject()
while True:
tok = read_non_whitespace(stream)
stream.seek(-1, 1)
if tok == b"I":
# "ID" - begin of image data
break
key = read_object(stream, self.pdf)
tok = read_non_whitespace(stream)
stream.seek(-1, 1)
value = read_object(stream, self.pdf)
settings[key] = value
# left at beginning of ID
tmp = stream.read(3)
assert tmp[:2] == b"ID"
data = BytesIO()
# Read the inline image, while checking for EI (End Image) operator.
while True:
# Read 8 kB at a time and check if the chunk contains the E operator.
buf = stream.read(8192)
# We have reached the end of the stream, but haven't found the EI operator.
if not buf:
raise PdfReadError("Unexpected end of stream")
loc = buf.find(b"E")
if loc == -1:
data.write(buf)
else:
# Write out everything before the E.
data.write(buf[0:loc])
# Seek back in the stream to read the E next.
stream.seek(loc - len(buf), 1)
tok = stream.read(1)
# Check for End Image
tok2 = stream.read(1)
if tok2 == b"I" and buf[loc - 1 : loc] in WHITESPACES:
# Data can contain [\s]EI, so check for the whitespace separator; four chars suffice and a trailing Q operator is not required.
tok3 = stream.read(1)
info = tok + tok2
# We need to find at least one whitespace after.
has_q_whitespace = False
while tok3 in WHITESPACES:
has_q_whitespace = True
info += tok3
tok3 = stream.read(1)
if has_q_whitespace:
stream.seek(-1, 1)
break
else:
stream.seek(-1, 1)
data.write(info)
else:
stream.seek(-1, 1)
data.write(tok)
return {"settings": settings, "data": data.getvalue()}
@property
def _data(self) -> bytes:
newdata = BytesIO()
for operands, operator in self.operations:
if operator == b"INLINE IMAGE":
newdata.write(b"BI")
dicttext = BytesIO()
operands["settings"].write_to_stream(dicttext, None)
newdata.write(dicttext.getvalue()[2:-2])
newdata.write(b"ID ")
newdata.write(operands["data"])
newdata.write(b"EI")
else:
for op in operands:
op.write_to_stream(newdata, None)
newdata.write(b" ")
newdata.write(b_(operator))
newdata.write(b"\n")
return newdata.getvalue()
@_data.setter
def _data(self, value: Union[str, bytes]) -> None:
self.__parse_content_stream(BytesIO(b_(value)))
def read_object(
stream: StreamType,
pdf: Any, # PdfReader
forced_encoding: Union[None, str, List[str], Dict[int, str]] = None,
) -> Union[PdfObject, int, str, ContentStream]:
tok = stream.read(1)
stream.seek(-1, 1) # reset to start
idx = ObjectPrefix.find(tok)
if idx == 0:
return NameObject.read_from_stream(stream, pdf)
elif idx == 1:
# hexadecimal string OR dictionary
peek = stream.read(2)
stream.seek(-2, 1) # reset to start
if peek == b"<<":
return DictionaryObject.read_from_stream(stream, pdf, forced_encoding)
else:
return read_hex_string_from_stream(stream, forced_encoding)
elif idx == 2:
return ArrayObject.read_from_stream(stream, pdf, forced_encoding)
elif idx == 3 or idx == 4:
return BooleanObject.read_from_stream(stream)
elif idx == 5:
return read_string_from_stream(stream, forced_encoding)
elif idx == 6:
return NullObject.read_from_stream(stream)
elif idx == 7:
# comment
while tok not in (b"\r", b"\n"):
tok = stream.read(1)
# Prevents an infinite loop by raising an error if the stream is at
# the EOF
if len(tok) <= 0:
raise PdfStreamError("File ended unexpectedly.")
tok = read_non_whitespace(stream)
stream.seek(-1, 1)
return read_object(stream, pdf, forced_encoding)
else:
# number object OR indirect reference
peek = stream.read(20)
stream.seek(-len(peek), 1) # reset to start
if IndirectPattern.match(peek) is not None:
return IndirectObject.read_from_stream(stream, pdf)
else:
return NumberObject.read_from_stream(stream)
class Field(TreeObject):
"""
A class representing a field dictionary.
This class is accessed through
:meth:`get_fields()<PyPDF2.PdfReader.get_fields>`
"""
def __init__(self, data: Dict[str, Any]) -> None:
DictionaryObject.__init__(self)
field_attributes = (
FieldDictionaryAttributes.attributes()
+ CheckboxRadioButtonAttributes.attributes()
)
for attr in field_attributes:
try:
self[NameObject(attr)] = data[attr]
except KeyError:
pass
# TABLE 8.69 Entries common to all field dictionaries
@property
def field_type(self) -> Optional[NameObject]:
"""Read-only property accessing the type of this field."""
return self.get(FieldDictionaryAttributes.FT)
@property
def fieldType(self) -> Optional[NameObject]: # pragma: no cover
"""
.. deprecated:: 1.28.3
Use :py:attr:`field_type` instead.
"""
deprecate_with_replacement("fieldType", "field_type")
return self.field_type
@property
def parent(self) -> Optional[DictionaryObject]:
"""Read-only property accessing the parent of this field."""
return self.get(FieldDictionaryAttributes.Parent)
@property
def kids(self) -> Optional[ArrayObject]:
"""Read-only property accessing the kids of this field."""
return self.get(FieldDictionaryAttributes.Kids)
@property
def name(self) -> Optional[str]:
"""Read-only property accessing the name of this field."""
return self.get(FieldDictionaryAttributes.T)
@property
def alternate_name(self) -> Optional[str]:
"""Read-only property accessing the alternate name of this field."""
return self.get(FieldDictionaryAttributes.TU)
@property
def altName(self) -> Optional[str]: # pragma: no cover
"""
.. deprecated:: 1.28.3
Use :py:attr:`alternate_name` instead.
"""
deprecate_with_replacement("altName", "alternate_name")
return self.alternate_name
@property
def mapping_name(self) -> Optional[str]:
"""
Read-only property accessing the mapping name of this field. This
name is used by PyPDF2 as a key in the dictionary returned by
:meth:`get_fields()<PyPDF2.PdfReader.get_fields>`
"""
return self.get(FieldDictionaryAttributes.TM)
@property
def mappingName(self) -> Optional[str]: # pragma: no cover
"""
.. deprecated:: 1.28.3
Use :py:attr:`mapping_name` instead.
"""
deprecate_with_replacement("mappingName", "mapping_name")
return self.mapping_name
@property
def flags(self) -> Optional[int]:
"""
Read-only property accessing the field flags, specifying various
characteristics of the field (see Table 8.70 of the PDF 1.7 reference).
"""
return self.get(FieldDictionaryAttributes.Ff)
@property
def value(self) -> Optional[Any]:
"""
Read-only property accessing the value of this field. Format
varies based on field type.
"""
return self.get(FieldDictionaryAttributes.V)
@property
def default_value(self) -> Optional[Any]:
"""Read-only property accessing the default value of this field."""
return self.get(FieldDictionaryAttributes.DV)
@property
def defaultValue(self) -> Optional[Any]: # pragma: no cover
"""
.. deprecated:: 1.28.3
Use :py:attr:`default_value` instead.
"""
deprecate_with_replacement("defaultValue", "default_value")
return self.default_value
@property
def additional_actions(self) -> Optional[DictionaryObject]:
"""
Read-only property accessing the additional actions dictionary.
This dictionary defines the field's behavior in response to trigger events.
See Section 8.5.2 of the PDF 1.7 reference.
"""
return self.get(FieldDictionaryAttributes.AA)
@property
def additionalActions(self) -> Optional[DictionaryObject]: # pragma: no cover
"""
.. deprecated:: 1.28.3
Use :py:attr:`additional_actions` instead.
"""
deprecate_with_replacement("additionalActions", "additional_actions")
return self.additional_actions
class Destination(TreeObject):
"""
A class representing a destination within a PDF file.
See section 8.2.1 of the PDF 1.6 reference.
:param str title: Title of this destination.
:param IndirectObject page: Reference to the page of this destination. Should
be an instance of :class:`IndirectObject<PyPDF2.generic.IndirectObject>`.
:param str typ: How the destination is displayed.
:param args: Additional arguments may be necessary depending on the type.
:raises PdfReadError: If destination type is invalid.
.. list-table:: Valid ``typ`` arguments (see PDF spec for details)
:widths: 50 50
* - /Fit
- No additional arguments
* - /XYZ
- [left] [top] [zoomFactor]
* - /FitH
- [top]
* - /FitV
- [left]
* - /FitR
- [left] [bottom] [right] [top]
* - /FitB
- No additional arguments
* - /FitBH
- [top]
* - /FitBV
- [left]
"""
def __init__(
self,
title: str,
page: Union[NumberObject, IndirectObject, NullObject, DictionaryObject],
typ: Union[str, NumberObject],
*args: Any, # ZoomArgType
) -> None:
DictionaryObject.__init__(self)
self[NameObject("/Title")] = title
self[NameObject("/Page")] = page
self[NameObject("/Type")] = typ
# from table 8.2 of the PDF 1.7 reference.
if typ == "/XYZ":
(
self[NameObject(TA.LEFT)],
self[NameObject(TA.TOP)],
self[NameObject("/Zoom")],
) = args
elif typ == TF.FIT_R:
(
self[NameObject(TA.LEFT)],
self[NameObject(TA.BOTTOM)],
self[NameObject(TA.RIGHT)],
self[NameObject(TA.TOP)],
) = args
elif typ in [TF.FIT_H, TF.FIT_BH]:
try: # Preferred, to be more robust not only to null parameters
(self[NameObject(TA.TOP)],) = args
except Exception:
(self[NameObject(TA.TOP)],) = (NullObject(),)
elif typ in [TF.FIT_V, TF.FIT_BV]:
try: # Preferred, to be more robust not only to null parameters
(self[NameObject(TA.LEFT)],) = args
except Exception:
(self[NameObject(TA.LEFT)],) = (NullObject(),)
elif typ in [TF.FIT, TF.FIT_B]:
pass
else:
raise PdfReadError(f"Unknown Destination Type: {typ!r}")
@property
def dest_array(self) -> ArrayObject:
return ArrayObject(
[self.raw_get("/Page"), self["/Type"]]
+ [
self[x]
for x in ["/Left", "/Bottom", "/Right", "/Top", "/Zoom"]
if x in self
]
)
def getDestArray(self) -> ArrayObject: # pragma: no cover
"""
.. deprecated:: 1.28.3
Use :py:attr:`dest_array` instead.
"""
deprecate_with_replacement("getDestArray", "dest_array")
return self.dest_array
def write_to_stream(
self, stream: StreamType, encryption_key: Union[None, str, bytes]
) -> None:
stream.write(b"<<\n")
key = NameObject("/D")
key.write_to_stream(stream, encryption_key)
stream.write(b" ")
value = self.dest_array
value.write_to_stream(stream, encryption_key)
key = NameObject("/S")
key.write_to_stream(stream, encryption_key)
stream.write(b" ")
value_s = NameObject("/GoTo")
value_s.write_to_stream(stream, encryption_key)
stream.write(b"\n")
stream.write(b">>")
@property
def title(self) -> Optional[str]:
"""Read-only property accessing the destination title."""
return self.get("/Title")
@property
def page(self) -> Optional[int]:
"""Read-only property accessing the destination page number."""
return self.get("/Page")
@property
def typ(self) -> Optional[str]:
"""Read-only property accessing the destination type."""
return self.get("/Type")
@property
def zoom(self) -> Optional[int]:
"""Read-only property accessing the zoom factor."""
return self.get("/Zoom", None)
@property
def left(self) -> Optional[FloatObject]:
"""Read-only property accessing the left horizontal coordinate."""
return self.get("/Left", None)
@property
def right(self) -> Optional[FloatObject]:
"""Read-only property accessing the right horizontal coordinate."""
return self.get("/Right", None)
@property
def top(self) -> Optional[FloatObject]:
"""Read-only property accessing the top vertical coordinate."""
return self.get("/Top", None)
@property
def bottom(self) -> Optional[FloatObject]:
"""Read-only property accessing the bottom vertical coordinate."""
return self.get("/Bottom", None)
@property
def color(self) -> Optional[ArrayObject]:
"""Read-only property accessing the color in (R, G, B) with values 0.0-1.0"""
return self.get(
"/C", ArrayObject([FloatObject(0), FloatObject(0), FloatObject(0)])
)
@property
def font_format(self) -> Optional[OutlineFontFlag]:
"""Read-only property accessing the font type. 1=italic, 2=bold, 3=both"""
return self.get("/F", 0)
@property
def outline_count(self) -> Optional[int]:
"""
Read-only property accessing the outline count.
positive = expanded
negative = collapsed
absolute value = number of visible descendants at all levels
"""
return self.get("/Count", None)
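# Illustrative only, not part of the module: constructing a /FitR destination
# as described in the Destination docstring above (the test-suite further
# below builds the same object).
_dest = Destination(
    NameObject("title"), NullObject(), NameObject(TF.FIT_R),
    FloatObject(0), FloatObject(0), FloatObject(0), FloatObject(0),
)
assert _dest.typ == "/FitR" and _dest.zoom is None and _dest.left == FloatObject(0)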
|
GHSA-hm9v-vj3r-r55m
|
tests/test_generic.py
|
@@ -175,6 +175,17 @@ def test_NameObject():
with pytest.raises(PdfReadError) as exc:
NameObject.read_from_stream(stream, None)
assert exc.value.args[0] == "name read error"
+ assert (
+ NameObject.read_from_stream(
+ BytesIO(b"/A;Name_With-Various***Characters?"), None
+ )
+ == "/A;Name_With-Various***Characters?"
+ )
+ assert (
+ NameObject.read_from_stream(BytesIO(b"/paired#28#29parentheses"), None)
+ == "/paired()parentheses"
+ )
+ assert NameObject.read_from_stream(BytesIO(b"/A#42"), None) == "/AB"
def test_destination_fit_r():
|
import os
from io import BytesIO
from pathlib import Path
from unittest.mock import patch
import pytest
from PyPDF2 import PdfMerger, PdfReader, PdfWriter
from PyPDF2.constants import CheckboxRadioButtonAttributes
from PyPDF2.constants import TypFitArguments as TF
from PyPDF2.errors import PdfReadError, PdfStreamError
from PyPDF2.generic import (
AnnotationBuilder,
ArrayObject,
BooleanObject,
ByteStringObject,
Destination,
DictionaryObject,
FloatObject,
IndirectObject,
NameObject,
NullObject,
NumberObject,
OutlineItem,
RectangleObject,
TextStringObject,
TreeObject,
create_string_object,
encode_pdfdocencoding,
read_hex_string_from_stream,
read_object,
read_string_from_stream,
)
from . import ReaderDummy, get_pdf_from_url
TESTS_ROOT = Path(__file__).parent.resolve()
PROJECT_ROOT = TESTS_ROOT.parent
RESOURCE_ROOT = PROJECT_ROOT / "resources"
def test_float_object_exception(caplog):
assert FloatObject("abc") == 0
assert caplog.text != ""
def test_number_object_exception(caplog):
assert NumberObject("0,0") == 0
assert caplog.text != ""
def test_number_object_no_exception():
NumberObject(2**100000000)
def test_create_string_object_exception():
with pytest.raises(TypeError) as exc:
create_string_object(123)
assert ( # typeguard is not running
exc.value.args[0] == "create_string_object should have str or unicode arg"
) or ( # typeguard is enabled
'type of argument "string" must be one of (str, bytes); got int instead'
in exc.value.args[0]
)
@pytest.mark.parametrize(
("value", "expected", "tell"), [(b"true", b"true", 4), (b"false", b"false", 5)]
)
def test_boolean_object(value, expected, tell):
stream = BytesIO(value)
assert BooleanObject.read_from_stream(stream).value == (expected == b"true")
stream.seek(0, 0)
assert stream.read() == expected
assert stream.tell() == tell
def test_boolean_object_write():
stream = BytesIO()
boolobj = BooleanObject(None)
boolobj.write_to_stream(stream, encryption_key=None)
stream.seek(0, 0)
assert stream.read() == b"false"
def test_boolean_eq():
boolobj = BooleanObject(True)
assert (boolobj == True) is True # noqa: E712
assert (boolobj == False) is False # noqa: E712
assert (boolobj == "True") is False
boolobj = BooleanObject(False)
assert (boolobj == True) is False # noqa: E712
assert (boolobj == False) is True # noqa: E712
assert (boolobj == "True") is False
def test_boolean_object_exception():
stream = BytesIO(b"False")
with pytest.raises(PdfReadError) as exc:
BooleanObject.read_from_stream(stream)
assert exc.value.args[0] == "Could not read Boolean object"
def test_array_object_exception():
stream = BytesIO(b"False")
with pytest.raises(PdfReadError) as exc:
ArrayObject.read_from_stream(stream, None)
assert exc.value.args[0] == "Could not read array"
def test_null_object_exception():
stream = BytesIO(b"notnull")
with pytest.raises(PdfReadError) as exc:
NullObject.read_from_stream(stream)
assert exc.value.args[0] == "Could not read Null object"
@pytest.mark.parametrize("value", [b"", b"False", b"foo ", b"foo ", b"foo bar"])
def test_indirect_object_premature(value):
stream = BytesIO(value)
with pytest.raises(PdfStreamError) as exc:
IndirectObject.read_from_stream(stream, None)
assert exc.value.args[0] == "Stream has ended unexpectedly"
def test_readHexStringFromStream():
stream = BytesIO(b"a1>")
assert read_hex_string_from_stream(stream) == "\x10"
def test_readHexStringFromStream_exception():
stream = BytesIO(b"")
with pytest.raises(PdfStreamError) as exc:
read_hex_string_from_stream(stream)
assert exc.value.args[0] == "Stream has ended unexpectedly"
def test_readStringFromStream_exception():
stream = BytesIO(b"x")
with pytest.raises(PdfStreamError) as exc:
read_string_from_stream(stream)
assert exc.value.args[0] == "Stream has ended unexpectedly"
def test_readStringFromStream_not_in_escapedict_no_digit():
stream = BytesIO(b"x\\y")
with pytest.raises(PdfReadError) as exc:
read_string_from_stream(stream)
assert exc.value.args[0] == "Stream has ended unexpectedly"
def test_readStringFromStream_multichar_eol():
stream = BytesIO(b"x\\\n )")
assert read_string_from_stream(stream) == " "
def test_readStringFromStream_multichar_eol2():
stream = BytesIO(b"x\\\n\n)")
assert read_string_from_stream(stream) == ""
def test_readStringFromStream_excape_digit():
stream = BytesIO(b"x\\1a )")
assert read_string_from_stream(stream) == "\x01a "
def test_readStringFromStream_excape_digit2():
stream = BytesIO(b"(hello \\1\\2\\3\\4)")
assert read_string_from_stream(stream) == "hello \x01\x02\x03\x04"
def test_NameObject():
stream = BytesIO(b"x")
with pytest.raises(PdfReadError) as exc:
NameObject.read_from_stream(stream, None)
assert exc.value.args[0] == "name read error"
def test_destination_fit_r():
d = Destination(
NameObject("title"),
NullObject(),
NameObject(TF.FIT_R),
FloatObject(0),
FloatObject(0),
FloatObject(0),
FloatObject(0),
)
assert d.title == NameObject("title")
assert d.typ == "/FitR"
assert d.zoom is None
assert d.left == FloatObject(0)
assert d.right == FloatObject(0)
assert d.top == FloatObject(0)
assert d.bottom == FloatObject(0)
assert list(d) == []
d.empty_tree()
def test_destination_fit_v():
Destination(NameObject("title"), NullObject(), NameObject(TF.FIT_V), FloatObject(0))
# Trigger Exception
Destination(NameObject("title"), NullObject(), NameObject(TF.FIT_V), None)
def test_destination_exception():
with pytest.raises(PdfReadError) as exc:
Destination(
NameObject("title"), NullObject(), NameObject("foo"), FloatObject(0)
)
assert exc.value.args[0] == "Unknown Destination Type: 'foo'"
def test_outline_item_write_to_stream():
stream = BytesIO()
oi = OutlineItem(
NameObject("title"), NullObject(), NameObject(TF.FIT_V), FloatObject(0)
)
oi.write_to_stream(stream, None)
stream.seek(0, 0)
assert stream.read() == b"<<\n/Title title\n/Dest [ null /FitV 0 ]\n>>"
def test_encode_pdfdocencoding_keyerror():
with pytest.raises(UnicodeEncodeError) as exc:
encode_pdfdocencoding("😀")
assert exc.value.args[0] == "pdfdocencoding"
def test_read_object_comment_exception():
stream = BytesIO(b"% foobar")
pdf = None
with pytest.raises(PdfStreamError) as exc:
read_object(stream, pdf)
assert exc.value.args[0] == "File ended unexpectedly."
def test_read_object_comment():
stream = BytesIO(b"% foobar\n1 ")
pdf = None
out = read_object(stream, pdf)
assert out == 1
def test_ByteStringObject():
bo = ByteStringObject("stream", encoding="utf-8")
stream = BytesIO(b"")
bo.write_to_stream(stream, encryption_key="foobar")
stream.seek(0, 0)
assert stream.read() == b"<1cdd628b972e>" # TODO: how can we verify this?
def test_DictionaryObject_key_is_no_pdfobject():
do = DictionaryObject({NameObject("/S"): NameObject("/GoTo")})
with pytest.raises(ValueError) as exc:
do["foo"] = NameObject("/GoTo")
assert exc.value.args[0] == "key must be PdfObject"
def test_DictionaryObject_xmp_meta():
do = DictionaryObject({NameObject("/S"): NameObject("/GoTo")})
assert do.xmp_metadata is None
def test_DictionaryObject_value_is_no_pdfobject():
do = DictionaryObject({NameObject("/S"): NameObject("/GoTo")})
with pytest.raises(ValueError) as exc:
do[NameObject("/S")] = "/GoTo"
assert exc.value.args[0] == "value must be PdfObject"
def test_DictionaryObject_setdefault_key_is_no_pdfobject():
do = DictionaryObject({NameObject("/S"): NameObject("/GoTo")})
with pytest.raises(ValueError) as exc:
do.setdefault("foo", NameObject("/GoTo"))
assert exc.value.args[0] == "key must be PdfObject"
def test_DictionaryObject_setdefault_value_is_no_pdfobject():
do = DictionaryObject({NameObject("/S"): NameObject("/GoTo")})
with pytest.raises(ValueError) as exc:
do.setdefault(NameObject("/S"), "/GoTo")
assert exc.value.args[0] == "value must be PdfObject"
def test_DictionaryObject_setdefault_value():
do = DictionaryObject({NameObject("/S"): NameObject("/GoTo")})
do.setdefault(NameObject("/S"), NameObject("/GoTo"))
def test_DictionaryObject_read_from_stream():
stream = BytesIO(b"<< /S /GoTo >>")
pdf = None
out = DictionaryObject.read_from_stream(stream, pdf)
assert out.get_object() == {NameObject("/S"): NameObject("/GoTo")}
def test_DictionaryObject_read_from_stream_broken():
stream = BytesIO(b"< /S /GoTo >>")
pdf = None
with pytest.raises(PdfReadError) as exc:
DictionaryObject.read_from_stream(stream, pdf)
assert (
exc.value.args[0]
== "Dictionary read error at byte 0x2: stream must begin with '<<'"
)
def test_DictionaryObject_read_from_stream_unexpected_end():
stream = BytesIO(b"<< \x00/S /GoTo")
pdf = None
with pytest.raises(PdfStreamError) as exc:
DictionaryObject.read_from_stream(stream, pdf)
assert exc.value.args[0] == "Stream has ended unexpectedly"
def test_DictionaryObject_read_from_stream_stream_no_newline():
stream = BytesIO(b"<< /S /GoTo >>stream")
pdf = None
with pytest.raises(PdfReadError) as exc:
DictionaryObject.read_from_stream(stream, pdf)
assert exc.value.args[0] == "Stream data must be followed by a newline"
@pytest.mark.parametrize(("strict"), [(True), (False)])
def test_DictionaryObject_read_from_stream_stream_no_stream_length(strict):
stream = BytesIO(b"<< /S /GoTo >>stream\n")
class Tst: # to replace pdf
strict = False
pdf = Tst()
pdf.strict = strict
with pytest.raises(PdfReadError) as exc:
DictionaryObject.read_from_stream(stream, pdf)
assert exc.value.args[0] == "Stream length not defined"
@pytest.mark.parametrize(
("strict", "length", "should_fail"),
[
(True, 6, False),
(True, 10, False),
(True, 4, True),
(False, 6, False),
(False, 10, False),
],
)
def test_DictionaryObject_read_from_stream_stream_stream_valid(
strict, length, should_fail
):
stream = BytesIO(b"<< /S /GoTo /Length %d >>stream\nBT /F1\nendstream\n" % length)
class Tst: # to replace pdf
strict = True
pdf = Tst()
pdf.strict = strict
with pytest.raises(PdfReadError) as exc:
do = DictionaryObject.read_from_stream(stream, pdf)
# TODO: What should happen with the stream?
assert do == {"/S": "/GoTo"}
if length in (6, 10):
assert b"BT /F1" in do._StreamObject__data
raise PdfReadError("__ALLGOOD__")
assert should_fail ^ (exc.value.args[0] == "__ALLGOOD__")
def test_RectangleObject():
ro = RectangleObject((1, 2, 3, 4))
assert ro.lower_left == (1, 2)
assert ro.lower_right == (3, 2)
assert ro.upper_left == (1, 4)
assert ro.upper_right == (3, 4)
ro.lower_left = (5, 6)
assert ro.lower_left == (5, 6)
ro.lower_right = (7, 8)
assert ro.lower_right == (7, 8)
ro.upper_left = (9, 11)
assert ro.upper_left == (9, 11)
ro.upper_right = (13, 17)
assert ro.upper_right == (13, 17)
def test_TextStringObject_exc():
tso = TextStringObject("foo")
with pytest.raises(Exception) as exc:
tso.get_original_bytes()
assert exc.value.args[0] == "no information about original bytes"
def test_TextStringObject_autodetect_utf16():
tso = TextStringObject("foo")
tso.autodetect_utf16 = True
assert tso.get_original_bytes() == b"\xfe\xff\x00f\x00o\x00o"
def test_remove_child_not_in_tree():
tree = TreeObject()
with pytest.raises(ValueError) as exc:
tree.remove_child(NameObject("foo"))
assert exc.value.args[0] == "Removed child does not appear to be a tree item"
def test_remove_child_not_in_that_tree():
class ChildDummy:
def __init__(self, parent):
self.parent = parent
def get_object(self):
tree = DictionaryObject()
tree[NameObject("/Parent")] = self.parent
return tree
tree = TreeObject()
child = ChildDummy(TreeObject())
tree.add_child(child, ReaderDummy())
with pytest.raises(ValueError) as exc:
tree.remove_child(child)
assert exc.value.args[0] == "Removed child is not a member of this tree"
def test_remove_child_not_found_in_tree():
class ChildDummy:
def __init__(self, parent):
self.parent = parent
def get_object(self):
tree = DictionaryObject()
tree[NameObject("/Parent")] = self.parent
return tree
tree = TreeObject()
child = ChildDummy(tree)
tree.add_child(child, ReaderDummy())
with pytest.raises(ValueError) as exc:
tree.remove_child(child)
assert exc.value.args[0] == "Removal couldn't find item in tree"
def test_remove_child_found_in_tree():
writer = PdfWriter()
# Add Tree
tree = TreeObject()
writer._add_object(tree)
# Add first child
    # It's important to set a value; otherwise writer.get_reference would
    # return the same reference for the second child when it is added
    # (see the sketch after this test).
child1 = TreeObject()
child1[NameObject("/Foo")] = TextStringObject("bar")
child1_ref = writer._add_object(child1)
tree.add_child(child1_ref, writer)
assert tree[NameObject("/Count")] == 1
assert len([el for el in tree.children()]) == 1
# Add second child
child2 = TreeObject()
child2[NameObject("/Foo")] = TextStringObject("baz")
child2_ref = writer._add_object(child2)
tree.add_child(child2_ref, writer)
assert tree[NameObject("/Count")] == 2
assert len([el for el in tree.children()]) == 2
# Remove last child
tree.remove_child(child2)
assert tree[NameObject("/Count")] == 1
assert len([el for el in tree.children()]) == 1
# Add new child
child3 = TreeObject()
child3[NameObject("/Foo")] = TextStringObject("3")
child3_ref = writer._add_object(child3)
tree.add_child(child3_ref, writer)
assert tree[NameObject("/Count")] == 2
assert len([el for el in tree.children()]) == 2
# Remove first child
child1 = tree[NameObject("/First")]
tree.remove_child(child1)
assert tree[NameObject("/Count")] == 1
assert len([el for el in tree.children()]) == 1
child4 = TreeObject()
child4[NameObject("/Foo")] = TextStringObject("4")
child4_ref = writer._add_object(child4)
tree.add_child(child4_ref, writer)
assert tree[NameObject("/Count")] == 2
assert len([el for el in tree.children()]) == 2
child5 = TreeObject()
child5[NameObject("/Foo")] = TextStringObject("5")
child5_ref = writer._add_object(child5)
tree.add_child(child5_ref, writer)
assert tree[NameObject("/Count")] == 3
assert len([el for el in tree.children()]) == 3
# Remove middle child
tree.remove_child(child4)
assert tree[NameObject("/Count")] == 2
assert len([el for el in tree.children()]) == 2
tree.empty_tree()
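# Hedged sketch (not part of the original suite; the test name below is
# hypothetical): the caveat noted in test_remove_child_found_in_tree is that
# value-less TreeObjects compare equal, so an equality-based lookup such as
# writer.get_reference could resolve two empty children to the same reference.
# Giving each child a distinguishing value keeps the references distinct.
def test_remove_child_distinct_values_sketch():
    writer = PdfWriter()
    tree = TreeObject()
    writer._add_object(tree)
    first = TreeObject()
    first[NameObject("/Foo")] = TextStringObject("first")
    second = TreeObject()
    second[NameObject("/Foo")] = TextStringObject("second")
    tree.add_child(writer._add_object(first), writer)
    tree.add_child(writer._add_object(second), writer)
    # With distinct values the tree tracks two separate children.
    assert tree[NameObject("/Count")] == 2
    assert len(list(tree.children())) == 2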
def test_remove_child_in_tree():
pdf = RESOURCE_ROOT / "form.pdf"
tree = TreeObject()
reader = PdfReader(pdf)
writer = PdfWriter()
writer.add_page(reader.pages[0])
writer.add_outline_item("foo", pagenum=0)
obj = writer._objects[-1]
tree.add_child(obj, writer)
tree.remove_child(obj)
tree.add_child(obj, writer)
tree.empty_tree()
def test_dict_read_from_stream(caplog):
url = "https://corpora.tika.apache.org/base/docs/govdocs1/984/984877.pdf"
name = "tika-984877.pdf"
reader = PdfReader(BytesIO(get_pdf_from_url(url, name=name)))
for page in reader.pages:
page.extract_text()
assert (
"Multiple definitions in dictionary at byte 0x1084 for key /Length"
in caplog.text
)
def test_parse_content_stream_peek_percentage():
url = "https://corpora.tika.apache.org/base/docs/govdocs1/985/985770.pdf"
name = "tika-985770.pdf"
reader = PdfReader(BytesIO(get_pdf_from_url(url, name=name)))
for page in reader.pages:
page.extract_text()
def test_read_inline_image_no_has_q():
# pdf/df7e1add3156af17a372bc165e47a244.pdf
url = "https://corpora.tika.apache.org/base/docs/govdocs1/998/998719.pdf"
name = "tika-998719.pdf"
reader = PdfReader(BytesIO(get_pdf_from_url(url, name=name)))
for page in reader.pages:
page.extract_text()
def test_read_inline_image_loc_neg_1():
url = "https://corpora.tika.apache.org/base/docs/govdocs1/935/935066.pdf"
name = "tika-935066.pdf"
reader = PdfReader(BytesIO(get_pdf_from_url(url, name=name)))
for page in reader.pages:
page.extract_text()
def test_text_string_write_to_stream():
url = "https://corpora.tika.apache.org/base/docs/govdocs1/924/924562.pdf"
name = "tika-924562.pdf"
reader = PdfReader(BytesIO(get_pdf_from_url(url, name=name)))
for page in reader.pages:
page.compress_content_streams()
def test_name_object_read_from_stream_unicode_error(): # L588
url = "https://corpora.tika.apache.org/base/docs/govdocs1/974/974966.pdf"
name = "tika-974966.pdf"
reader = PdfReader(BytesIO(get_pdf_from_url(url, name=name)))
for page in reader.pages:
page.extract_text()
def test_bool_repr(tmp_path):
url = "https://corpora.tika.apache.org/base/docs/govdocs1/932/932449.pdf"
name = "tika-932449.pdf"
reader = PdfReader(BytesIO(get_pdf_from_url(url, name=name)))
write_path = tmp_path / "tmp-fields-report.txt"
with open(write_path, "w") as fp:
fields = reader.get_fields(fileobj=fp)
assert fields
assert list(fields.keys()) == ["USGPOSignature"]
with open(write_path) as fp:
data = fp.read()
assert data.startswith(
"Field Name: USGPOSignature\nField Type: Signature\nField Flags: 1\n"
"Value: {'/Type': '/Sig', '/Filter': '/Adobe.PPKLite', "
"'/SubFilter':"
)
@patch("PyPDF2._reader.logger_warning")
def test_issue_997(mock_logger_warning):
url = "https://github.com/py-pdf/PyPDF2/files/8908874/Exhibit_A-2_930_Enterprise_Zone_Tax_Credits_final.pdf"
name = "gh-issue-997.pdf"
merger = PdfMerger()
merged_filename = "tmp-out.pdf"
    merger.append(BytesIO(get_pdf_from_url(url, name=name)))  # this is where the error is raised
with open(merged_filename, "wb") as f:
merger.write(f)
merger.close()
mock_logger_warning.assert_called_with(
"Overwriting cache for 0 4", "PyPDF2._reader"
)
# Strict
merger = PdfMerger(strict=True)
merged_filename = "tmp-out.pdf"
with pytest.raises(PdfReadError) as exc:
merger.append(
BytesIO(get_pdf_from_url(url, name=name))
        )  # this is where the error is raised
assert exc.value.args[0] == "Could not find object."
with open(merged_filename, "wb") as f:
merger.write(f)
merger.close()
# cleanup
os.remove(merged_filename)
def test_annotation_builder_free_text():
# Arrange
pdf_path = RESOURCE_ROOT / "crazyones.pdf"
reader = PdfReader(pdf_path)
page = reader.pages[0]
writer = PdfWriter()
writer.add_page(page)
# Act
free_text_annotation = AnnotationBuilder.free_text(
"Hello World - bold and italic\nThis is the second line!",
rect=(50, 550, 200, 650),
font="Arial",
bold=True,
italic=True,
font_size="20pt",
font_color="00ff00",
border_color="0000ff",
background_color="cdcdcd",
)
writer.add_annotation(0, free_text_annotation)
free_text_annotation = AnnotationBuilder.free_text(
"Another free text annotation (not bold, not italic)",
rect=(500, 550, 200, 650),
font="Arial",
bold=False,
italic=False,
font_size="20pt",
font_color="00ff00",
border_color="0000ff",
background_color="cdcdcd",
)
writer.add_annotation(0, free_text_annotation)
# Assert: You need to inspect the file manually
target = "annotated-pdf.pdf"
with open(target, "wb") as fp:
writer.write(fp)
os.remove(target) # comment this out for manual inspection
def test_annotation_builder_line():
# Arrange
pdf_path = RESOURCE_ROOT / "crazyones.pdf"
reader = PdfReader(pdf_path)
page = reader.pages[0]
writer = PdfWriter()
writer.add_page(page)
# Act
line_annotation = AnnotationBuilder.line(
text="Hello World\nLine2",
rect=(50, 550, 200, 650),
p1=(50, 550),
p2=(200, 650),
)
writer.add_annotation(0, line_annotation)
# Assert: You need to inspect the file manually
target = "annotated-pdf.pd"
with open(target, "wb") as fp:
writer.write(fp)
os.remove(target) # comment this out for manual inspection
def test_annotation_builder_link():
# Arrange
pdf_path = RESOURCE_ROOT / "outline-without-title.pdf"
reader = PdfReader(pdf_path)
page = reader.pages[0]
writer = PdfWriter()
writer.add_page(page)
# Act
# Part 1: Too many args
with pytest.raises(ValueError) as exc:
AnnotationBuilder.link(
rect=(50, 550, 200, 650),
url="https://martin-thoma.com/",
target_page_index=3,
)
assert (
exc.value.args[0]
== "Either 'url' or 'target_page_index' have to be provided. url=https://martin-thoma.com/, target_page_index=3"
)
# Part 2: Too few args
with pytest.raises(ValueError) as exc:
AnnotationBuilder.link(
rect=(50, 550, 200, 650),
)
assert (
exc.value.args[0]
== "Either 'url' or 'target_page_index' have to be provided. Both were None."
)
# Part 3: External Link
link_annotation = AnnotationBuilder.link(
rect=(50, 50, 100, 100),
url="https://martin-thoma.com/",
border=[1, 0, 6, [3, 2]],
)
writer.add_annotation(0, link_annotation)
# Part 4: Internal Link
link_annotation = AnnotationBuilder.link(
rect=(100, 100, 300, 200),
target_page_index=1,
border=[50, 10, 4],
)
writer.add_annotation(0, link_annotation)
for page in reader.pages[1:]:
writer.add_page(page)
# Assert: You need to inspect the file manually
target = "annotated-pdf-link.pdf"
with open(target, "wb") as fp:
writer.write(fp)
os.remove(target) # comment this out for manual inspection
def test_annotation_builder_text():
# Arrange
pdf_path = RESOURCE_ROOT / "outline-without-title.pdf"
reader = PdfReader(pdf_path)
page = reader.pages[0]
writer = PdfWriter()
writer.add_page(page)
# Act
text_annotation = AnnotationBuilder.text(
text="Hello World\nThis is the second line!",
rect=(50, 550, 500, 650),
open=True,
)
writer.add_annotation(0, text_annotation)
# Assert: You need to inspect the file manually
target = "annotated-pdf-popup.pdf"
with open(target, "wb") as fp:
writer.write(fp)
os.remove(target) # comment this out for manual inspection
def test_CheckboxRadioButtonAttributes_opt():
assert "/Opt" in CheckboxRadioButtonAttributes.attributes_dict()
def test_name_object_invalid_decode():
stream = BytesIO(b"/\x80\x02\x03")
# strict:
with pytest.raises(PdfReadError) as exc:
NameObject.read_from_stream(stream, ReaderDummy(strict=True))
assert exc.value.args[0] == "Illegal character in Name Object"
# non-strict:
stream.seek(0)
NameObject.read_from_stream(stream, ReaderDummy(strict=False))
def test_indirect_object_invalid_read():
stream = BytesIO(b"0 1 s")
with pytest.raises(PdfReadError) as exc:
IndirectObject.read_from_stream(stream, ReaderDummy())
assert exc.value.args[0] == "Error reading indirect object reference at byte 0x5"
def test_create_string_object_force():
assert create_string_object(b"Hello World", []) == "Hello World"
assert create_string_object(b"Hello World", {72: "A"}) == "Aello World"
assert create_string_object(b"Hello World", "utf8") == "Hello World"
|
GHSA-hm9v-vj3r-r55m
|
tensorflow/python/kernel_tests/array_ops/stack_op_test.py
|
@@ -16,12 +16,16 @@
import numpy as np
+from tensorflow.python import tf2
from tensorflow.python.eager import context
+from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.platform import test
@@ -69,6 +73,19 @@ def testSimpleParallelCPU(self):
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c, data)
+ def testParallelConcatShapeZero(self):
+ if not tf2.enabled():
+ self.skipTest("only fails in TF2")
+
+ @def_function.function
+ def f():
+ y = gen_array_ops.parallel_concat(values=[["tf"]], shape=0)
+ return y
+
+ with self.assertRaisesRegex(errors.InvalidArgumentError,
+ r"0th dimension of value .* is less than"):
+ f()
+
def testSimpleParallelGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Stack and ParallelStack Ops."""
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.platform import test
def np_split_squeeze(array, axis):
axis_len = array.shape[axis]
return [
np.squeeze(
arr, axis=(axis,)) for arr in np.split(
array, axis_len, axis=axis)
]
class StackOpTest(test.TestCase):
def randn(self, shape, dtype):
data = np.random.randn(*shape)
if dtype == np.bool_:
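      # Note: np.random.randn yields nonzero floats essentially always, so a
      # plain .astype(np.bool_) would map (almost) every sample to True;
      # comparing against 0 instead gives a roughly balanced boolean array,
      # e.g. np.array([-0.3, 1.2]) < 0 evaluates to array([ True, False]).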
return data < 0 # Naive casting yields True with P(1)!
else:
return data.astype(dtype)
def testSimple(self):
np.random.seed(7)
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
rank = len(shape)
for axis in range(-rank, rank):
for dtype in [np.bool_, np.float32, np.int32, np.int64]:
data = self.randn(shape, dtype)
xs = np_split_squeeze(data, axis)
# Stack back into a single tensorflow tensor
with self.subTest(shape=shape, axis=axis, dtype=dtype):
c = array_ops.stack(xs, axis=axis)
self.assertAllEqual(c, data)
def testSimpleParallelCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c, data)
def testSimpleParallelGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c, data)
def testConst(self):
np.random.seed(7)
with test_util.use_gpu():
# Verify that shape induction works with shapes produced via const stack
a = constant_op.constant([1, 2, 3, 4, 5, 6])
b = array_ops.reshape(a, array_ops.stack([2, 3]))
self.assertAllEqual(b.get_shape(), [2, 3])
# Check on a variety of shapes and types
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
for dtype in [np.bool_, np.float32, np.int16, np.int32, np.int64]:
with self.subTest(shape=shape, dtype=dtype):
data = self.randn(shape, dtype)
# Stack back into a single tensorflow tensor directly using np array
c = array_ops.stack(data)
if not context.executing_eagerly():
# This is implemented via a Const:
self.assertEqual(c.op.type, "Const")
self.assertAllEqual(c, data)
# Python lists also work for 1-D case:
if len(shape) == 1:
data_list = list(data)
cl = array_ops.stack(data_list)
if not context.executing_eagerly():
self.assertEqual(cl.op.type, "Const")
self.assertAllEqual(cl, data)
def testConstParallelCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl, data)
data = self.randn(shape, np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c, data)
def testConstParallelGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl, data)
data = self.randn(shape, np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c, data)
def testGradientsAxis0(self):
np.random.seed(7)
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
data = np.random.randn(*shape)
with self.subTest(shape=shape):
with self.cached_session():
def func(*xs):
return array_ops.stack(xs)
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
theoretical, numerical = gradient_checker_v2.compute_gradient(
func, xs)
self.assertAllClose(theoretical, numerical)
def testGradientsAxis1(self):
np.random.seed(7)
for shape in (2, 3), (3, 2), (8, 2, 10):
data = np.random.randn(*shape)
out_shape = list(shape[1:])
out_shape.insert(1, shape[0])
with self.subTest(shape=shape):
with self.cached_session():
def func(*inp):
return array_ops.stack(inp, axis=1)
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
theoretical, numerical = gradient_checker_v2.compute_gradient(
func, xs)
self.assertAllClose(theoretical, numerical)
def testZeroSizeCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
# Verify that stack doesn't crash for zero size inputs
with test_util.device(use_gpu=False):
for shape in (0,), (3, 0), (0, 3):
with self.subTest(shape=shape):
x = np.zeros((2,) + shape).astype(np.int32)
p = self.evaluate(array_ops.stack(list(x)))
self.assertAllEqual(p, x)
p = self.evaluate(array_ops.parallel_stack(list(x)))
self.assertAllEqual(p, x)
def testZeroSizeGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
# Verify that stack doesn't crash for zero size inputs
with test_util.device(use_gpu=True):
for shape in (0,), (3, 0), (0, 3):
with self.subTest(shape=shape):
x = np.zeros((2,) + shape).astype(np.int32)
p = self.evaluate(array_ops.stack(list(x)))
self.assertAllEqual(p, x)
p = self.evaluate(array_ops.parallel_stack(list(x)))
self.assertAllEqual(p, x)
def testAxis0DefaultCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=False):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = self.evaluate(array_ops.stack(t))
parallel_stacked = self.evaluate(array_ops.parallel_stack(t))
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
def testAxis0DefaultGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=True):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = self.evaluate(array_ops.stack(t))
parallel_stacked = self.evaluate(array_ops.parallel_stack(t))
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
def testAgainstNumpy(self):
# For 1 to 5 dimensions.
for shape in (3,), (2, 2, 3), (4, 1, 2, 2), (8, 2, 10):
rank = len(shape)
expected = self.randn(shape, np.float32)
for dtype in [np.bool_, np.float32, np.int32, np.int64]:
# For all the possible axis to split it, including negative indices.
for axis in range(-rank, rank):
test_arrays = np_split_squeeze(expected, axis)
with self.cached_session():
with self.subTest(shape=shape, dtype=dtype, axis=axis):
actual_pack = array_ops.stack(test_arrays, axis=axis)
self.assertEqual(expected.shape, actual_pack.get_shape())
actual_pack = self.evaluate(actual_pack)
actual_stack = array_ops.stack(test_arrays, axis=axis)
self.assertEqual(expected.shape, actual_stack.get_shape())
actual_stack = self.evaluate(actual_stack)
self.assertNDArrayNear(expected, actual_stack, 1e-6)
def testDimOutOfRange(self):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegex(ValueError,
r"Argument `axis` = 2 not in range \[-2, 2\)"):
array_ops.stack(t, axis=2)
def testDimOutOfNegativeRange(self):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegex(ValueError,
r"Argument `axis` = -3 not in range \[-2, 2\)"):
array_ops.stack(t, axis=-3)
def testComplex(self):
np.random.seed(7)
with self.session():
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
for dtype in [np.complex64, np.complex128]:
with self.subTest(shape=shape, dtype=dtype):
data = self.randn(shape, dtype)
xs = list(map(constant_op.constant, data))
c = array_ops.stack(xs)
self.assertAllEqual(self.evaluate(c), data)
class AutomaticStackingTest(test.TestCase):
def testSimple(self):
self.assertAllEqual([1, 0, 2],
ops.convert_to_tensor([1, constant_op.constant(0), 2]))
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([[0, 0, 0],
[0,
constant_op.constant(1), 0],
[0, 0, 0]]))
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([[0, 0, 0],
constant_op.constant([0, 1, 0]),
[0, 0, 0]]))
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([
constant_op.constant([0, 0, 0]),
constant_op.constant([0, 1, 0]),
constant_op.constant([0, 0, 0])
]))
def testWithNDArray(self):
with self.session():
result = ops.convert_to_tensor([[[0., 0.],
constant_op.constant([1., 1.])],
np.array(
[[2., 2.], [3., 3.]],
dtype=np.float32)])
self.assertAllEqual([[[0., 0.], [1., 1.]], [[2., 2.], [3., 3.]]],
self.evaluate(result))
def testDtype(self):
t_0 = ops.convert_to_tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])
self.assertEqual(dtypes.float32, t_0.dtype)
t_1 = ops.convert_to_tensor([[0., 0., 0.], constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]])
self.assertEqual(dtypes.float64, t_1.dtype)
t_2 = ops.convert_to_tensor(
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t_2.dtype)
t_3 = ops.convert_to_tensor(
[[0., 0., 0.],
constant_op.constant([0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
],
dtype=dtypes.float32)
self.assertEqual(dtypes.float32, t_3.dtype)
t_4 = ops.convert_to_tensor(
[constant_op.constant([0., 0., 0.], dtype=dtypes.float64)],
dtype=dtypes.float32)
self.assertEqual(dtypes.float32, t_4.dtype)
with self.assertRaises(TypeError):
ops.convert_to_tensor([
constant_op.constant(
[0., 0., 0.], dtype=dtypes.float32), constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
])
def testDtypeConversionWhenTensorDtypeMismatch(self):
t_0 = ops.convert_to_tensor([0., 0., 0.])
self.assertEqual(dtypes.float32, t_0.dtype)
t_1 = ops.convert_to_tensor([0, 0, 0])
self.assertEqual(dtypes.int32, t_1.dtype)
t_2 = ops.convert_to_tensor([t_0, t_0, t_1], dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t_2.dtype)
if __name__ == "__main__":
test.main()
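# Hedged, standalone sketch (not part of the original test file): the patch for
# GHSA-hm9v-vj3r-r55m shown above adds testParallelConcatShapeZero, which calls
# the raw parallel_concat op with shape=0 inside a tf.function and expects an
# InvalidArgumentError matching "0th dimension of value .* is less than".
# Assuming a TF2 build that contains the fix, a minimal reproduction could be:
#
#   import tensorflow as tf
#   from tensorflow.python.ops import gen_array_ops
#
#   @tf.function
#   def zero_shape_parallel_concat():
#     # shape=0 cannot hold the single provided value [["tf"]].
#     return gen_array_ops.parallel_concat(values=[["tf"]], shape=0)
#
#   try:
#     zero_shape_parallel_concat()
#   except tf.errors.InvalidArgumentError as exc:
#     print("rejected as expected:", exc)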
|
PYSEC-2021-618
|