| id (int64, 0–300k) | label (string, 1–74 chars) | text (string, 4k–8k chars) |
|---|---|---|
| 100 | 
	get num thermals | 
	#!/usr/bin/env python
#
# Copyright (C) 2017 Accton Technology Corporation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------------
# HISTORY:
#    mm/dd/yyyy (A.D.)
#    11/13/2017: Polly Hsu, Create
#    1/10/2018:Jostar modify for as7716_32x
#    5/02/2019: Roy Lee modify for as7816_64x
# ------------------------------------------------------------------
try:
    import time
    import logging
    import glob
    from collections import namedtuple
except ImportError as e:
    raise ImportError('%s - required module not found' % str(e))
class ThermalUtil(object):
    """Platform-specific ThermalUtil class"""
    THERMAL_NUM_ON_MAIN_BROAD = 3
    THERMAL_NUM_1_IDX = 1
    THERMAL_NUM_2_IDX = 2
    BASE_VAL_PATH = '/sys/bus/i2c/devices/{0}-00{1}/hwmon/hwmon*/temp1_input'
    """ Dictionary where
        key1 = thermal id index (integer) starting from 1
        value = path to fan device file (string) """
    _thermal_to_device_path_mapping = {}
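    # Each entry below pairs an I2C bus number with a device address suffix;
    # substituted into BASE_VAL_PATH they give the hwmon temp1_input sysfs
    # path of one sensor (e.g. '51', '49' -> /sys/bus/i2c/devices/51-0049/...).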
    _thermal_to_device_node_mapping = [
            ['51', '49'],
            ['52', '4a'],
            ['53', '4c'],
           ]
    logger = logging.getLogger(__name__)
    def __init__(self, log_level=logging.DEBUG):
        ch = logging.StreamHandler()
        ch.setLevel(log_level)
        self.logger.addHandler(ch)
        thermal_path = self.BASE_VAL_PATH
        for x in range(self.THERMAL_NUM_ON_MAIN_BROAD):
            self._thermal_to_device_path_mapping[x+1] = thermal_path.format(
                self._thermal_to_device_node_mapping[x][0],
                self._thermal_to_device_node_mapping[x][1])
            
    def _get_thermal_node_val(self, thermal_num):
        if thermal_num < self.THERMAL_NUM_1_IDX or thermal_num > self.THERMAL_NUM_ON_MAIN_BROAD:
            self.logger.debug('GET. Parameter error. thermal_num, %d', thermal_num)
            return None
        device_path = self.get_thermal_to_device_path(thermal_num)
        val_file = None
        for filename in glob.glob(device_path):
            try:
                val_file = open(filename, 'r')
            except IOError as e:
                self.logger.error('GET. unable to open file: %s', str(e))
                return None
        if val_file is None:
            self.logger.debug('GET. no file matches device_path:%s', device_path)
            return None
        content = val_file.readline().rstrip()
        if content == '':
            self.logger.debug('GET. content is NULL. device_path:%s', device_path)
            return None
        try:
            val_file.close()
        except IOError:
            self.logger.debug('GET. unable to close file. device_path:%s', device_path)
            return None
        return int(content)
    def METHOD_NAME(self):
        return self.THERMAL_NUM_ON_MAIN_BROAD
    def get_idx_thermal_start(self):
        return self.THERMAL_NUM_1_IDX
    def get_size_node_map(self):
        return len(self._thermal_to_device_node_mapping)
    def get_size_path_map(self):
        return len(self._thermal_to_device_path_mapping)
    def get_thermal_to_device_path(self, thermal_num):
        return self._thermal_to_device_path_mapping[thermal_num]
    def get_thermal_2_val(self):
        return self._get_thermal_node_val(self.THERMAL_NUM_2_IDX)
    def get_thermal_temp(self):
        total = 0
        for x in range(self.THERMAL_NUM_ON_MAIN_BROAD):
            total += self._get_thermal_node_val(x+1)
        avg = total / self.THERMAL_NUM_ON_MAIN_BROAD
        avg = (avg // 1000) * 1000    # round down for hysteresis.
        return avg
#def main():
#    thermal = ThermalUtil()
#
#    print('get_size_node_map : %d' % thermal.get_size_node_map())
#    print('get_size_path_map : %d' % thermal.get_size_path_map())
#    for x in range(thermal.get_idx_thermal_start(), thermal.get_num_thermals()+1):
#        print(thermal.get_thermal_to_device_path(x))
#
#if __name__ == '__main__':
#    main() | 
| 101 | 
	parse record | 
	# -*- coding: utf-8 -*-
"""Text parser plugin for vsftpd log files."""
import pyparsing
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.lib import errors
from plaso.parsers import text_parser
from plaso.parsers.text_plugins import interface
class VsftpdLogEventData(events.EventData):
  """vsftpd log event data.
  Attributes:
    added_time (dfdatetime.DateTimeValues): date and time the log entry
        was added.
    text (str): vsftpd log message.
  """
  DATA_TYPE = 'vsftpd:log'
  def __init__(self):
    """Initializes event data."""
    super(VsftpdLogEventData, self).__init__(data_type=self.DATA_TYPE)
    self.added_time = None
    self.text = None
class VsftpdLogTextPlugin(interface.TextPlugin):
  """Text parser plugin for vsftpd log files."""
  NAME = 'vsftpd'
  DATA_FORMAT = 'vsftpd log file'
  _MONTH_DICT = {
      'jan': 1,
      'feb': 2,
      'mar': 3,
      'apr': 4,
      'may': 5,
      'jun': 6,
      'jul': 7,
      'aug': 8,
      'sep': 9,
      'oct': 10,
      'nov': 11,
      'dec': 12}
  _ONE_OR_TWO_DIGITS = pyparsing.Word(pyparsing.nums, max=2).setParseAction(
      lambda tokens: int(tokens[0], 10))
  _TWO_DIGITS = pyparsing.Word(pyparsing.nums, exact=2).setParseAction(
      lambda tokens: int(tokens[0], 10))
  _FOUR_DIGITS = pyparsing.Word(pyparsing.nums, exact=4).setParseAction(
      lambda tokens: int(tokens[0], 10))
  _THREE_LETTERS = pyparsing.Word(pyparsing.alphas, exact=3)
  # Date and time values are formatted as: Mon Jun  6 18:43:28 2016
  _DATE_TIME = pyparsing.Group(
      _THREE_LETTERS + _THREE_LETTERS + _ONE_OR_TWO_DIGITS +
      _TWO_DIGITS + pyparsing.Suppress(':') +
      _TWO_DIGITS + pyparsing.Suppress(':') + _TWO_DIGITS +
      _FOUR_DIGITS)
  _END_OF_LINE = pyparsing.Suppress(pyparsing.LineEnd())
  _LOG_LINE = (
      _DATE_TIME.setResultsName('date_time') +
      pyparsing.restOfLine().setResultsName('text') +
      _END_OF_LINE)
  _LINE_STRUCTURES = [('log_line', _LOG_LINE)]
  VERIFICATION_GRAMMAR = _LOG_LINE
  def METHOD_NAME(self, parser_mediator, key, structure):
    """Parses a pyparsing structure.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfVFS.
      key (str): name of the parsed structure.
      structure (pyparsing.ParseResults): tokens from a parsed log line.
    Raises:
      ParseError: if the structure cannot be parsed.
    """
    time_elements_structure = self._GetValueFromStructure(
        structure, 'date_time')
    event_data = VsftpdLogEventData()
    event_data.added_time = self._ParseTimeElements(time_elements_structure)
    # TODO: extract pid and username.
    event_data.text = self._GetStringValueFromStructure(structure, 'text')
    parser_mediator.ProduceEventData(event_data)
  def _ParseTimeElements(self, time_elements_structure):
    """Parses date and time elements of a log line.
    Args:
      time_elements_structure (pyparsing.ParseResults): date and time elements
          of a log line.
    Returns:
      dfdatetime.TimeElements: date and time value.
    Raises:
      ParseError: if a valid date and time value cannot be derived from
          the time elements.
    """
    try:
      _, month_string, day_of_month, hours, minutes, seconds, year = (
          time_elements_structure)
      month = self._MONTH_DICT.get(month_string.lower(), 0)
      time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)
      date_time = dfdatetime_time_elements.TimeElements(
          time_elements_tuple=time_elements_tuple)
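      # vsftpd log lines carry no time zone information, so the timestamp is
      # flagged as local time.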
      date_time.is_local_time = True
      return date_time
    except (TypeError, ValueError) as exception:
      raise errors.ParseError(
          'Unable to parse time elements with error: {0!s}'.format(exception))
  def CheckRequiredFormat(self, parser_mediator, text_reader):
    """Check if the log record has the minimal structure required by the plugin.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfVFS.
      text_reader (EncodedTextReader): text reader.
    Returns:
      bool: True if this is the correct parser, False otherwise.
    """
    try:
      structure = self._VerifyString(text_reader.lines)
    except errors.ParseError:
      return False
    time_elements_structure = self._GetValueFromStructure(
        structure, 'date_time')
    try:
      self._ParseTimeElements(time_elements_structure)
    except errors.ParseError:
      return False
    return True
text_parser.TextLogParser.RegisterPlugin(VsftpdLogTextPlugin) | 
| 102 | 
	test copy file fails if not authenticated | 
	from ipaddress import IPv4Address
from pathlib import PureWindowsPath
from typing import List
from unittest.mock import MagicMock
import pytest
from agent_plugins.exploiters.wmi.src.smb_client import ShareInfo, SMBClient
from agent_plugins.exploiters.wmi.src.smb_options import SMBOptions
from agent_plugins.exploiters.wmi.src.smb_remote_access_client import (
    COPY_FILE_TAGS,
    EXECUTION_TAGS,
    LOGIN_TAGS,
    SHARE_DISCOVERY_TAGS,
    SMBRemoteAccessClient,
)
from tests.data_for_tests.propagation_credentials import FULL_CREDENTIALS
from common import OperatingSystem
from common.credentials import Credentials
from infection_monkey.exploit import IAgentBinaryRepository
from infection_monkey.exploit.tools import (
    RemoteAuthenticationError,
    RemoteCommandExecutionError,
    RemoteFileCopyError,
)
from infection_monkey.i_puppet import TargetHost
EXPLOITER_TAGS = {"smb-exploiter", "unit-test"}
CREDENTIALS: List[Credentials] = []
DESTINATION_PATH = PureWindowsPath("C:\\destination_path")
FILE = b"file content"
SHARED_RESOURECES = (
    ShareInfo("share1", PureWindowsPath("C:\\path1"), current_uses=10, max_uses=1000),
    ShareInfo("share2", PureWindowsPath("C:\\path2"), current_uses=100, max_uses=100),
    ShareInfo("share3", PureWindowsPath("C:\\"), current_uses=0, max_uses=10),
    ShareInfo("share4", PureWindowsPath("invalid_path"), current_uses=50, max_uses=100),
)
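# share2 appears full (current_uses == max_uses) and share4 has a non-absolute
# path, so only share1 and share3 are expected to be writable
# (see test_get_writable_paths below).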
TARGET_HOST = TargetHost(ip=IPv4Address("1.1.1.1"), operating_system=OperatingSystem.WINDOWS)
def stub_command_builder(*args, **kwargs):
    return "command"
@pytest.fixture
def mock_smb_client():
    client = MagicMock(spec=SMBClient)
    client.connected.return_value = False
    def set_connected(value: bool):
        client.connected.return_value = value
    client.connect_with_user.side_effect = lambda *_, **__: set_connected(True)
    return client
@pytest.fixture
def mock_agent_binary_repository() -> IAgentBinaryRepository:
    return MagicMock(spec=IAgentBinaryRepository)
@pytest.fixture
def smb_remote_access_client(mock_smb_client) -> SMBRemoteAccessClient:
    return SMBRemoteAccessClient(TARGET_HOST, SMBOptions(), stub_command_builder, mock_smb_client)
def test_login__succeeds(
    smb_remote_access_client: SMBRemoteAccessClient,
):
    tags = EXPLOITER_TAGS.copy()
    smb_remote_access_client.login(FULL_CREDENTIALS[0], tags)
    assert tags == EXPLOITER_TAGS.union(LOGIN_TAGS)
def test_login__fails(
    mock_smb_client: SMBClient,
    smb_remote_access_client: SMBRemoteAccessClient,
):
    tags = EXPLOITER_TAGS.copy()
    mock_smb_client.connect_with_user.side_effect = Exception()
    with pytest.raises(RemoteAuthenticationError):
        smb_remote_access_client.login(FULL_CREDENTIALS[0], tags)
    assert tags == EXPLOITER_TAGS.union(LOGIN_TAGS)
def test_execute__fails_if_not_authenticated(
    smb_remote_access_client: SMBRemoteAccessClient,
):
    tags = EXPLOITER_TAGS.copy()
    with pytest.raises(RemoteCommandExecutionError):
        smb_remote_access_client.execute_agent(DESTINATION_PATH, tags)
    assert tags == EXPLOITER_TAGS
def test_execute__fails_if_command_not_executed(
    mock_smb_client: SMBClient,
    smb_remote_access_client: SMBRemoteAccessClient,
):
    tags = EXPLOITER_TAGS.copy()
    mock_smb_client.run_service.side_effect = Exception("file")
    smb_remote_access_client.login(FULL_CREDENTIALS[0], set())
    with pytest.raises(RemoteCommandExecutionError):
        smb_remote_access_client.execute_agent(DESTINATION_PATH, tags)
    assert tags == EXPLOITER_TAGS.union(EXECUTION_TAGS)
def test_execute__succeeds(
    mock_smb_client: SMBClient,
    smb_remote_access_client: SMBRemoteAccessClient,
):
    tags = EXPLOITER_TAGS.copy()
    smb_remote_access_client.login(FULL_CREDENTIALS[0], set())
    smb_remote_access_client.execute_agent(DESTINATION_PATH, tags)
    assert tags == EXPLOITER_TAGS.union(EXECUTION_TAGS)
def METHOD_NAME(
    mock_smb_client: SMBClient,
    smb_remote_access_client: SMBRemoteAccessClient,
):
    tags = EXPLOITER_TAGS.copy()
    mock_smb_client.connected.return_value = False
    with pytest.raises(RemoteFileCopyError):
        smb_remote_access_client.copy_file(FILE, DESTINATION_PATH, tags)
    assert tags == EXPLOITER_TAGS
def test_copy_file__fails_if_no_shares_found(
    mock_smb_client: SMBClient,
    smb_remote_access_client: SMBRemoteAccessClient,
):
    tags = EXPLOITER_TAGS.copy()
    mock_smb_client.query_shared_resources.return_value = ()
    smb_remote_access_client.login(FULL_CREDENTIALS[0], set())
    with pytest.raises(RemoteFileCopyError):
        smb_remote_access_client.copy_file(FILE, DESTINATION_PATH, tags)
    assert tags == EXPLOITER_TAGS.union(SHARE_DISCOVERY_TAGS)
def test_copy_file__fails_if_unable_to_connect_to_share(
    mock_smb_client: SMBClient,
    smb_remote_access_client: SMBRemoteAccessClient,
):
    tags = EXPLOITER_TAGS.copy()
    mock_smb_client.query_shared_resources.return_value = SHARED_RESOURECES
    mock_smb_client.connect_to_share.side_effect = Exception("failed")
    smb_remote_access_client.login(FULL_CREDENTIALS[0], set())
    with pytest.raises(RemoteFileCopyError):
        smb_remote_access_client.copy_file(FILE, DESTINATION_PATH, tags)
    assert tags == EXPLOITER_TAGS.union(SHARE_DISCOVERY_TAGS)
def test_copy_file__fails_if_unable_to_send_file(
    mock_smb_client: SMBClient,
    smb_remote_access_client: SMBRemoteAccessClient,
):
    tags = EXPLOITER_TAGS.copy()
    mock_smb_client.query_shared_resources.return_value = SHARED_RESOURECES
    mock_smb_client.send_file.side_effect = Exception("file")
    smb_remote_access_client.login(FULL_CREDENTIALS[0], set())
    with pytest.raises(RemoteFileCopyError):
        smb_remote_access_client.copy_file(FILE, DESTINATION_PATH, tags)
    assert tags == EXPLOITER_TAGS.union(SHARE_DISCOVERY_TAGS, COPY_FILE_TAGS)
def test_copy_file__success(
    mock_smb_client: SMBClient,
    smb_remote_access_client: SMBRemoteAccessClient,
):
    tags = EXPLOITER_TAGS.copy()
    mock_smb_client.query_shared_resources.return_value = SHARED_RESOURECES
    smb_remote_access_client.login(FULL_CREDENTIALS[0], set())
    smb_remote_access_client.copy_file(FILE, DESTINATION_PATH, tags)
    assert tags == EXPLOITER_TAGS.union(SHARE_DISCOVERY_TAGS, COPY_FILE_TAGS)
def test_get_writable_paths(
    mock_smb_client: SMBClient, smb_remote_access_client: SMBRemoteAccessClient
):
    mock_smb_client.query_shared_resources.return_value = SHARED_RESOURECES
    writable_paths = smb_remote_access_client.get_writable_paths()
    assert len(writable_paths) == 2
    assert SHARED_RESOURECES[0].path in writable_paths
    assert SHARED_RESOURECES[2].path in writable_paths | 
| 103 | 
	keys | 
	# Authors: 
#   Trevor Perrin
#   Martin von Loewis - python 3 port
#
# See the LICENSE file for legal information regarding use of this file.
"""Base class for SharedKeyDB and VerifierDB."""
try:
    import anydbm
except ImportError:
    # Python 3
    import dbm as anydbm
import threading
import time
import logging
class BaseDB(object):
    def __init__(self, filename, type):
        self.type = type
        self.filename = filename
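        # With a filename, the on-disk database is opened later via create()
        # or open(); without one, a plain dict serves as an in-memory store.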
        if self.filename:
            self.db = None
        else:
            self.db = {}
        self.lock = threading.Lock()
    def create(self):
        """
        Create a new on-disk database.
        :raises anydbm.error: If there's a problem creating the database.
        """
        logger = logging.getLogger(__name__)
        if self.filename:
            logger.debug('server %s - create - will open db', time.time())
            self.db = anydbm.open(self.filename, "n") #raises anydbm.error
            logger.debug('server %s - create - setting type', time.time())
            self.db["--Reserved--type"] = self.type
            logger.debug('server %s - create - syncing', time.time())
            self.db.sync()
            logger.debug('server %s - create - fun exit', time.time())
        else:
            logger.debug('server %s - create - using dict() as DB',
                         time.time())
            self.db = {}
    def open(self):
        """
        Open a pre-existing on-disk database.
        :raises anydbm.error: If there's a problem opening the database.
        :raises ValueError: If the database is not of the right type.
        """
        if not self.filename:
            raise ValueError("Can only open on-disk databases")
        self.db = anydbm.open(self.filename, "w") #raises anydbm.error
        try:
            if self.db["--Reserved--type"] != self.type:
                raise ValueError("Not a %s database" % self.type)
        except KeyError:
            raise ValueError("Not a recognized database")
    def __getitem__(self, username):
        if self.db is None:
            raise AssertionError("DB not open")
        self.lock.acquire()
        try:
            valueStr = self.db[username]
        finally:
            self.lock.release()
        return self._getItem(username, valueStr)
    def __setitem__(self, username, value):
        if self.db is None:
            raise AssertionError("DB not open")
        valueStr = self._setItem(username, value)
        self.lock.acquire()
        try:
            self.db[username] = valueStr
            if self.filename:
                self.db.sync()
        finally:
            self.lock.release()
    def __delitem__(self, username):
        if self.db is None:
            raise AssertionError("DB not open")
        self.lock.acquire()
        try:
            del self.db[username]
            if self.filename:
                self.db.sync()
        finally:
            self.lock.release()
    def __contains__(self, username):
        """
        Check if the database contains the specified username.
        :param str username: The username to check for.
        :rtype: bool
        :returns: True if the database contains the username, False
            otherwise.
        """
        if self.db is None:
            raise AssertionError("DB not open")
        self.lock.acquire()
        try:
            return username in self.db
        finally:
            self.lock.release()
    def check(self, username, param):
        value = self.__getitem__(username)
        return self._checkItem(value, username, param)
    def METHOD_NAME(self):
        """
        Return a list of usernames in the database.
        :rtype: list
        :returns: The usernames in the database.
        """
        if self.db is None:
            raise AssertionError("DB not open")
        self.lock.acquire()
        try:
            usernames = self.db.METHOD_NAME()
        finally:
            self.lock.release()
        usernames = [u for u in usernames if not u.startswith("--Reserved--")]
        return usernames | 
| 104 | 
	has gdrcopy | 
	import subprocess
import functools
from common import SshConnectionError, is_ssh_connection_error, has_ssh_connection_err_msg, ClientServerTest
from retrying import retry
def efa_run_client_server_test(cmdline_args, executable, iteration_type,
                               completion_semantic, memory_type, message_size,
                               warmup_iteration_type=None, timeout=None,
                               completion_type="queue"):
    if timeout is None:
        timeout = cmdline_args.timeout
    # It is observed that cuda tests require a larger time-out limit to test all
    # message sizes (especially when running with multiple workers).
    if "cuda" in memory_type:
        timeout = max(1000, timeout)
    test = ClientServerTest(cmdline_args, executable, iteration_type,
                            completion_semantic=completion_semantic,
                            datacheck_type="with_datacheck",
                            message_size=message_size,
                            memory_type=memory_type,
                            timeout=timeout,
                            warmup_iteration_type=warmup_iteration_type,
                            completion_type=completion_type)
    test.run()
@retry(retry_on_exception=is_ssh_connection_error, stop_max_attempt_number=3, wait_fixed=5000)
def efa_retrieve_hw_counter_value(hostname, hw_counter_name, efa_device_name=None):
    """
    retrieve the value of EFA's hardware counter
    hostname: a host that has efa
    hw_counter_name: EFA hardware counter name. Options are: lifespan, rdma_read_resp_bytes, rdma_read_wrs,recv_wrs,
                     rx_drops, send_bytes, tx_bytes, rdma_read_bytes,  rdma_read_wr_err, recv_bytes, rx_bytes, rx_pkts, send_wrs, tx_pkts
    efa_device_name: Name of the EFA device. Corresponds to the name of the EFA device's directory
    return: an integer that is the sum of the counter values across all matching EFA devices
    """
    if efa_device_name:
        efa_device_dir = efa_device_name
    else:
        efa_device_dir = '*'
    command = 'ssh {} cat "/sys/class/infiniband/{}/ports/*/hw_counters/{}"'.format(hostname, efa_device_dir, hw_counter_name)
    process = subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
    if process.returncode != 0:
        if process.stderr and has_ssh_connection_err_msg(process.stderr):
            print("encountered ssh connection issue")
            raise SshConnectionError()
        # this can happen when OS is using older version of EFA kernel module
        return None
    linelist = process.stdout.split()
    sumvalue = 0
    for strvalue in linelist:
        sumvalue += int(strvalue)
    return sumvalue
def METHOD_NAME(hostname):
    """
    determine whether a host has gdrcopy installed
    hostname: a host
    return: a boolean
    """
    command = "ssh {} /bin/bash --login -c lsmod | grep gdrdrv".format(hostname)
    process = subprocess.run(command, shell=True, check=False, stdout=subprocess.PIPE)
    return process.returncode == 0
def efa_retrieve_gid(hostname):
    """
    return the GID of efa device on a host
    hostname: a host
    return: a string if the host has efa device,
            None otherwise
    """
    command = "ssh {} ibv_devinfo  -v | grep GID | awk '{{print $NF}}' | head -n 1".format(hostname)
    try:
        process = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE)
    except subprocess.CalledProcessError:
        # this can happen on instance without EFA device
        return None
    return process.stdout.decode("utf-8").strip()
@retry(retry_on_exception=is_ssh_connection_error, stop_max_attempt_number=3, wait_fixed=5000)
def get_efa_domain_names(server_id):
    timeout = 60
    process_timed_out = False
    # This command returns a list of EFA domain names and its related info
    command = "ssh {} fi_info -p efa".format(server_id)
    p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
 
    try:
        p.wait(timeout=timeout)
    except subprocess.TimeoutExpired:
        p.terminate()
        process_timed_out = True
    assert not process_timed_out, "Process timed out"
    
    errors = p.stderr.readlines()
    for error in errors:
        error = error.strip()
        if "fi_getinfo: -61" in error:
            raise Exception("No EFA devices/domain names found")
        if has_ssh_connection_err_msg(error):
            raise SshConnectionError()
    efa_domain_names = []
    for line in p.stdout:
        line = line.strip()
        if 'domain' in line:
            domain_name = line.split(': ')[1]
            efa_domain_names.append(domain_name)
    return efa_domain_names
@functools.lru_cache(10)
@retry(retry_on_exception=is_ssh_connection_error, stop_max_attempt_number=3, wait_fixed=5000)
def get_efa_device_names(server_id):
    timeout = 60
    process_timed_out = False
    # This command returns a list of EFA devices names
    command = "ssh {} ibv_devices".format(server_id)
    proc = subprocess.run(command, shell=True,
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                          encoding="utf-8", timeout=timeout)
    if has_ssh_connection_err_msg(proc.stderr):
        raise SshConnectionError()
    devices = []
    stdouts = proc.stdout.strip().split("\n")
    #
    # Example out of ibv_devices are like the following:
    #     device                 node GUID
    #     ------              ----------------
    #     rdmap16s27          0000000000000000
    #     ...
    #
    # The first 2 lines are headers, and is ignored.
    for line in stdouts[2:]:
        devices.append(line.split()[0])
    return devices
def get_efa_device_name_for_cuda_device(ip, cuda_device_id, num_cuda_devices):
    # this function implements a simple way to find the closest EFA device for a given
    # cuda device. It assumes EFA device names are in order (which is usually true but not always)
    #
    # For example, on a system with 8 CUDA devices and 4 EFA devices, this function would
    # for GPU 0 and 1, return EFA device 0
    # for GPU 2 and 3, return EFA device 1
    # for GPU 4 and 5, return EFA device 2
    # for GPU 6 and 7, return EFA device 3
    efa_devices = get_efa_device_names(ip)
    num_efa = len(efa_devices)
    return efa_devices[(cuda_device_id * num_efa) // num_cuda_devices] | 
| 105 | 
	build | 
	
"""scons.Node.Alias
Alias nodes.
This creates a hash of global Aliases (dummy targets).
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Node/Alias.py  2014/07/05 09:42:21 garyo"
import collections
import SCons.Errors
import SCons.Node
import SCons.Util
class AliasNameSpace(collections.UserDict):
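    # Global mapping of alias name -> Alias node; Alias() returns the node
    # already registered under a name, creating and caching it on first use.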
    def Alias(self, name, **kw):
        if isinstance(name, SCons.Node.Alias.Alias):
            return name
        try:
            a = self[name]
        except KeyError:
            a = SCons.Node.Alias.Alias(name, **kw)
            self[name] = a
        return a
    def lookup(self, name, **kw):
        try:
            return self[name]
        except KeyError:
            return None
class AliasNodeInfo(SCons.Node.NodeInfoBase):
    current_version_id = 1
    field_list = ['csig']
    def str_to_node(self, s):
        return default_ans.Alias(s)
class AliasBuildInfo(SCons.Node.BuildInfoBase):
    current_version_id = 1
class Alias(SCons.Node.Node):
    NodeInfo = AliasNodeInfo
    BuildInfo = AliasBuildInfo
    def __init__(self, name):
        SCons.Node.Node.__init__(self)
        self.name = name
    def str_for_display(self):
        return '"' + self.__str__() + '"'
    def __str__(self):
        return self.name
    def make_ready(self):
        self.get_csig()
    really_build = SCons.Node.Node.METHOD_NAME
    is_up_to_date = SCons.Node.Node.children_are_up_to_date
    def is_under(self, dir):
        # Make Alias nodes get built regardless of
        # what directory scons was run from. Alias nodes
        # are outside the filesystem:
        return 1
    def get_contents(self):
        """The contents of an alias is the concatenation
        of the content signatures of all its sources."""
        childsigs = [n.get_csig() for n in self.children()]
        return ''.join(childsigs)
    def sconsign(self):
        """An Alias is not recorded in .sconsign files"""
        pass
    #
    #
    #
    def changed_since_last_build(self, target, prev_ni):
        cur_csig = self.get_csig()
        try:
            return cur_csig != prev_ni.csig
        except AttributeError:
            return 1
    def METHOD_NAME(self):
        """A "builder" for aliases."""
        pass
    def convert(self):
        try: del self.builder
        except AttributeError: pass
        self.reset_executor()
        self.METHOD_NAME = self.really_build
    def get_csig(self):
        """
        Generate a node's content signature, the digested signature
        of its content.
        node - the node
        cache - alternate node to use for the signature cache
        returns - the content signature
        """
        try:
            return self.ninfo.csig
        except AttributeError:
            pass
        contents = self.get_contents()
        csig = SCons.Util.MD5signature(contents)
        self.get_ninfo().csig = csig
        return csig
default_ans = AliasNameSpace()
SCons.Node.arg2nodes_lookups.append(default_ans.lookup)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | 
| 106 | 
	a3 plus | 
	# Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from spinn_utilities.overrides import overrides
from spinn_front_end_common.interface.ds import DataType
from spinn_front_end_common.utilities.constants import BYTES_PER_WORD
from .abstract_has_a_plus_a_minus import AbstractHasAPlusAMinus
from .abstract_weight_dependence import AbstractWeightDependence
# Six words per synapse type
_SPACE_PER_SYNAPSE_TYPE = 6 * BYTES_PER_WORD
class WeightDependenceAdditiveTriplet(
        AbstractHasAPlusAMinus, AbstractWeightDependence):
    """
    A triplet-based additive weight dependence STDP rule.
    """
    __slots__ = [
        "__a3_minus",
        "__a3_plus",
        "__w_max",
        "__w_min"]
    __PARAM_NAMES = ('w_min', 'w_max', 'A3_plus', 'A3_minus')
    default_parameters = {'w_min': 0.0, 'w_max': 1.0, 'A3_plus': 0.01,
                          'A3_minus': 0.01}
    # noinspection PyPep8Naming
    def __init__(
            self, w_min=default_parameters['w_min'],
            w_max=default_parameters['w_max'],
            METHOD_NAME=default_parameters['A3_plus'],
            A3_minus=default_parameters['A3_minus']):
        """
        :param float w_min: :math:`w^{min}`
        :param float w_max: :math:`w^{max}`
        :param float A3_plus: :math:`A_3^+`
        :param float A3_minus: :math:`A_3^-`
        """
        super().__init__()
        self.__w_min = w_min
        self.__w_max = w_max
        self.__a3_plus = METHOD_NAME
        self.__a3_minus = A3_minus
    @property
    def w_min(self):
        """
        :math:`w^{min}`
        :rtype: float
        """
        return self.__w_min
    @property
    def w_max(self):
        """
        :math:`w^{max}`
        :rtype: float
        """
        return self.__w_max
    @property
    def METHOD_NAME(self):
        """
        :math:`A_3^+`
        :rtype: float
        """
        return self.__a3_plus
    @property
    def A3_minus(self):
        """
        :math:`A_3^-`
        :rtype: float
        """
        return self.__a3_minus
    @overrides(AbstractWeightDependence.is_same_as)
    def is_same_as(self, weight_dependence):
        if not isinstance(weight_dependence, WeightDependenceAdditiveTriplet):
            return False
        return (
            (self.__w_min == weight_dependence.w_min) and
            (self.__w_max == weight_dependence.w_max) and
            (self.A_plus == weight_dependence.A_plus) and
            (self.A_minus == weight_dependence.A_minus) and
            (self.__a3_plus == weight_dependence.METHOD_NAME) and
            (self.__a3_minus == weight_dependence.A3_minus))
    @property
    def vertex_executable_suffix(self):
        """
        The suffix to be appended to the vertex executable for this rule.
        :rtype: str
        """
        return "additive"
    @overrides(AbstractWeightDependence.get_parameters_sdram_usage_in_bytes)
    def get_parameters_sdram_usage_in_bytes(
            self, n_synapse_types, n_weight_terms):
        if n_weight_terms != 2:
            raise NotImplementedError(
                "Additive triplet weight dependence only supports two terms")
        return _SPACE_PER_SYNAPSE_TYPE * n_synapse_types
    @overrides(AbstractWeightDependence.write_parameters)
    def write_parameters(
            self, spec, global_weight_scale, synapse_weight_scales,
            n_weight_terms):
        # Loop through each synapse type
        for _ in synapse_weight_scales:
            # Scale the weights
            spec.write_value(data=self.__w_min * global_weight_scale,
                             data_type=DataType.S1615)
            spec.write_value(data=self.__w_max * global_weight_scale,
                             data_type=DataType.S1615)
            # Based on http://data.andrewdavison.info/docs/PyNN/_modules/pyNN
            #                /standardmodels/synapses.html
            # Pre-multiply A+ and A- by Wmax
            spec.write_value(
                data=self.A_plus * self.__w_max * global_weight_scale,
                data_type=DataType.S1615)
            spec.write_value(
                data=self.A_minus * self.__w_max * global_weight_scale,
                data_type=DataType.S1615)
            spec.write_value(
                data=self.__a3_plus * self.__w_max * global_weight_scale,
                data_type=DataType.S1615)
            spec.write_value(
                data=self.__a3_minus * self.__w_max * global_weight_scale,
                data_type=DataType.S1615)
    @property
    def weight_maximum(self):
        """
        The maximum weight that will ever be set in a synapse as a result
        of this rule.
        :rtype: float
        """
        return self.__w_max
    @overrides(AbstractWeightDependence.get_parameter_names)
    def get_parameter_names(self):
        return self.__PARAM_NAMES | 
| 107 | 
	run | 
	#!/usr/bin/python
# -----------------------------------------------------------------------------
# Copyright Siemens AG, 2021. Part of the SW360 Portal Project.
#
# This program and the accompanying materials are made
# available under the terms of the Eclipse Public License 2.0
# which is available at https://www.eclipse.org/legal/epl-2.0/
#
# SPDX-License-Identifier: EPL-2.0
#
# This is a manual database migration script. It is assumed that a
# dedicated framework for automatic migration will be written in the
# future. When that happens, this script should be refactored to conform
# to the framework's prerequisites to be run by the framework. For
# example, server address and db name should be parameterized, the code
# reorganized into a single class or function, etc.
#
# This script gets all release iterates over the attachment and recomputes clearing state of release.
# -----------------------------------------------------------------------------
import time
import couchdb
import json
# ---------------------------------------
# constants
# ---------------------------------------
DRY_RUN = True
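# When DRY_RUN is True, recomputed clearing states are only logged and
# nothing is written back to CouchDB.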
COUCHUSERNAME = "*****"
COUCHPWD = "*****"
COUCHSERVER = "http://" + COUCHUSERNAME + ":" + COUCHPWD + "@localhost:5984/"
DBNAME = 'sw360db'
couch = couchdb.Server(COUCHSERVER)
db = couch[DBNAME]
# ----------------------------------------
# queries
# ----------------------------------------
# get all releases
all_releases_query = {"selector": {"type": {"$eq": "release"}},"limit": 200000}
# ---------------------------------------
# functions
# ---------------------------------------
def METHOD_NAME():
    log = {}
    log['updatedReleases'] = []
    print 'Getting all releases'
    all_releases = db.find(all_releases_query)
    print 'Received ' + str(len(all_releases)) + ' releases'
    for release in all_releases:
        attachmentTypeMatched = False
        attachmentAccepted = False
        if release.get("attachments") is not None:
            for attachmentInfo in release.get("attachments"):
                if attachmentInfo.get("attachmentType") == "COMPONENT_LICENSE_INFO_XML" or attachmentInfo.get("attachmentType") == "CLEARING_REPORT":
                    attachmentTypeMatched = True
                    if attachmentInfo.get("checkStatus") == "ACCEPTED":
                        attachmentAccepted = True
                        break
        updatedReleaseLog = {}
        isClearingStateUpdated = False
        if attachmentTypeMatched and attachmentAccepted:
            isClearingStateUpdated = setClearingStateIfDifferent(release, "APPROVED", updatedReleaseLog)
        elif attachmentTypeMatched:
            isClearingStateUpdated = setClearingStateIfDifferent(release, "REPORT_AVAILABLE", updatedReleaseLog)
        elif release.get('clearingState') != "SENT_TO_CLEARING_TOOL" and release.get('clearingState') != "UNDER_CLEARING":
            isClearingStateUpdated = setClearingStateIfDifferent(release, "NEW_CLEARING", updatedReleaseLog)
        if isClearingStateUpdated: 
            print '\tUpdating release ID -> ' + release.get('_id') + ', Release Name -> ' + release.get('name') + ', Release Version -> ' + release.get('version') + ', Old Clearing State -> ' + str(updatedReleaseLog['oldClearingState']) + ', New Clearing State -> ' + str(updatedReleaseLog['newClearingState'])
            updatedReleaseLog['id'] = release.get('_id')
            updatedReleaseLog['name'] = release.get('name')
            updatedReleaseLog['version'] = release.get('version')
            log['updatedReleases'].append(updatedReleaseLog)
            if not DRY_RUN:
                db.save(release)
    resultFile = open('004_recompute_clearing_state_of_release.log', 'w')
    json.dump(log, resultFile, indent = 4, sort_keys = True)
    resultFile.close()
    print '\n'
    print '------------------------------------------'
    print 'Total Releases updated : ' + str(len(log['updatedReleases']))
    print '------------------------------------------'
    print 'Please check log file "004_recompute_clearing_state_of_release.log" in this directory for details'
    print '------------------------------------------'
# --------------------------------
def setClearingStateIfDifferent(release, correctClearingState, updatedReleaseLog):
    if release.get('clearingState') != correctClearingState:
        updatedReleaseLog['oldClearingState'] = release.get('clearingState')
        release["clearingState"] = correctClearingState
        updatedReleaseLog['newClearingState'] = release.get('clearingState')
        return True
    return False
startTime = time.time()
METHOD_NAME()
print '\nTime of migration: ' + "{0:.2f}".format(time.time() - startTime) + 's' | 
| 108 | 
	docker container inspect | 
	# pylint:disable=redefined-outer-name
# pylint:disable=unused-argument
import asyncio
import logging
from collections.abc import Iterable
from typing import Final
import pytest
from fastapi import FastAPI
from pydantic import PositiveFloat, PositiveInt
from pytest_mock import MockerFixture
from pytest_simcore.helpers.typing_env import EnvVarsDict
from pytest_simcore.helpers.utils_envs import setenvs_from_dict
from servicelib.exception_utils import _SKIPS_MESSAGE
from simcore_service_director_v2.models.dynamic_services_scheduler import (
    ContainerState,
    DockerContainerInspect,
    DockerStatus,
    SchedulerData,
)
from simcore_service_director_v2.modules.dynamic_sidecar.api_client import (
    BaseClientHTTPError,
)
from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core import _events
NETWORK_TOLERANCE_S: Final[PositiveFloat] = 0.1
STEPS: Final[PositiveFloat] = 10
SLEEP_BETWEEN_CALLS: Final[PositiveFloat] = NETWORK_TOLERANCE_S / STEPS
REPEAT_COUNT: Final[PositiveInt] = STEPS + 1
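# REPEAT_COUNT calls separated by SLEEP_BETWEEN_CALLS span slightly more than
# NETWORK_TOLERANCE_S, so the DYNAMIC_SIDECAR_CLIENT_REQUEST_TIMEOUT_S window
# (set to NETWORK_TOLERANCE_S below) elapses during each test.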
@pytest.fixture
def mock_env(
    disable_postgres: None,
    mock_env: EnvVarsDict,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    setenvs_from_dict(
        monkeypatch,
        {
            "S3_ENDPOINT": "",
            "S3_ACCESS_KEY": "",
            "S3_SECRET_KEY": "",
            "S3_BUCKET_NAME": "",
            "S3_SECURE": "false",
            "POSTGRES_HOST": "",
            "POSTGRES_USER": "",
            "POSTGRES_PASSWORD": "",
            "POSTGRES_DB": "",
            "DYNAMIC_SIDECAR_CLIENT_REQUEST_TIMEOUT_S": f"{NETWORK_TOLERANCE_S}",
        },
    )
@pytest.fixture
def mock_sidecars_client_always_fail(mocker: MockerFixture) -> None:
    class MockedObj:
        @classmethod
        async def containers_inspect(cls, *args, **kwargs) -> None:
            msg = "will always fail"
            raise BaseClientHTTPError(msg)
    mocker.patch.object(_events, "get_sidecars_client", return_value=MockedObj())
@pytest.fixture
def mock_sidecars_client_stops_failing(mocker: MockerFixture) -> None:
    class MockedObj:
        def __init__(self) -> None:
            self.counter = 0
        async def containers_inspect(self, *args, **kwargs) -> None:
            self.counter += 1
            if self.counter < STEPS / 2:
                msg = "will always fail"
                raise BaseClientHTTPError(msg)
    mocker.patch.object(_events, "get_sidecars_client", return_value=MockedObj())
@pytest.fixture
def METHOD_NAME() -> DockerContainerInspect:
    return DockerContainerInspect(
        status=DockerStatus.dead, container_state=ContainerState(**{}), name="", id=""
    )
@pytest.fixture
def scheduler_data(
    scheduler_data: SchedulerData, METHOD_NAME: DockerContainerInspect
) -> SchedulerData:
    scheduler_data.dynamic_sidecar.containers_inspect = [METHOD_NAME]
    return scheduler_data
@pytest.fixture()
def caplog_debug(
    caplog: pytest.LogCaptureFixture,
) -> Iterable[pytest.LogCaptureFixture]:
    with caplog.at_level(
        logging.DEBUG,
    ):
        yield caplog
async def test_event_get_status_network_connectivity(
    mock_sidecars_client_always_fail: None,
    minimal_app: FastAPI,
    scheduler_data: SchedulerData,
    caplog_debug: pytest.LogCaptureFixture,
):
    caplog_debug.clear()
    with pytest.raises(BaseClientHTTPError):
        for _ in range(REPEAT_COUNT):
            await _events.GetStatus.action(minimal_app, scheduler_data)
            await asyncio.sleep(SLEEP_BETWEEN_CALLS)
    assert caplog_debug.text.count(_SKIPS_MESSAGE) > 1
async def test_event_get_status_recovers_after_error(
    mock_sidecars_client_stops_failing: None,
    minimal_app: FastAPI,
    scheduler_data: SchedulerData,
    caplog_debug: pytest.LogCaptureFixture,
):
    caplog_debug.clear()
    for _ in range(REPEAT_COUNT):
        await _events.GetStatus.action(minimal_app, scheduler_data)
        await asyncio.sleep(SLEEP_BETWEEN_CALLS)
    assert caplog_debug.text.count(_SKIPS_MESSAGE) >= 1 | 
| 109 | 
	compute d p4deps rho | 
	#-------------------------------------------------------------------------------
# TillotsonEquationOfState
#-------------------------------------------------------------------------------
from PYB11Generator import *
from SolidEquationOfState import *
from EOSAbstractMethods import *
@PYB11template("Dimension")
@PYB11module("SpheralSolidMaterial")
class TillotsonEquationOfState(SolidEquationOfState):
    """TillotsonEquationOfState -- Tillotson  equation of state.
This equation of state is designed to represent metallic materials
over a range of pressure and density -- spanning solid, liquid, and
vapor states.
Reference: Tillotson 1962
    """
    PYB11typedefs = """
    typedef typename %(Dimension)s::Scalar Scalar;
    typedef Field<%(Dimension)s, Scalar> ScalarField;
"""
    #...........................................................................
    # Constructors
    def pyinit(self,
               referenceDensity = "const double",
               etamin = "const double",
               etamax = "const double",
               etamin_solid = "const double",
               etamax_solid = "const double",
               a = "const double",
               b = "const double",
               A = "const double",
               B = "const double",
               alpha = "const double",
               beta = "const double",
               eps0 = "const double",
               epsLiquid = "const double",
               epsVapor = "const double",
               atomicWeight = "const double",
               constants = "const PhysicalConstants&",
               externalPressure = ("const double", "0.0"),
               minimumPressure = ("const double", "std::numeric_limits<double>::lowest()"),
               maximumPressure = ("const double", "std::numeric_limits<double>::max()"),
               minimumPressureDamage = ("const double", "0.0"),
               minPressureType = ("const MaterialPressureMinType", "MaterialPressureMinType::PressureFloor")):
        "Tillotson EOS"
    #...........................................................................
    # Methods
    @PYB11const
    def pressure(self,
                 massDensity = "const Scalar",
                 specificThermalEnergy = "const Scalar"):
        return "Scalar"
    @PYB11const
    def temperature(self,
                    massDensity = "const Scalar",
                    specificThermalEnergy = "const Scalar"):
        return "Scalar"
    @PYB11const
    def specificThermalEnergy(self,
                              massDensity = "const Scalar",
                              temperature = "const Scalar"):
        return "Scalar"
    @PYB11const
    def specificHeat(self,
                     massDensity = "const Scalar",
                     temperature = "const Scalar"):
        return "Scalar"
    @PYB11const
    def soundSpeed(self,
                   massDensity = "const Scalar",
                   specificThermalEnergy = "const Scalar"):
        return "Scalar"
    @PYB11const
    def gamma(self,
              massDensity = "const Scalar",
              specificThermalEnergy = "const Scalar"):
        return "Scalar"
    @PYB11const
    def bulkModulus(self,
                    massDensity = "const Scalar",
                    specificThermalEnergy = "const Scalar"):
        return "Scalar"
    @PYB11const
    def entropy(self,
                massDensity = "const Scalar",
                specificThermalEnergy = "const Scalar"):
        return "Scalar"
    @PYB11const
    def computeDPDrho(self,
                      massDensity = "const Scalar",
                      specificThermalEnergy = "const Scalar"):
        "Compute the derivative of the pressure with respect to the density."
        return "double"
    @PYB11const
    def computePhi(self,
                   eta = "const double&",
                   eps = "const double&"):
        return "double"
    @PYB11const
    def computeP1(self,
                  mu = "const double&",
                  P2 = "const double&"):
        return "double"
    @PYB11const
    def computeP2(self,
                  phi = "const double&",
                  mu = "const double&",
                  rho = "const double&",
                  eps = "const double&"):
        return "double"
    @PYB11const
    def computeP4(self,
                  phi = "const double&",
                  mu = "const double&",
                  eta = "const double&",
                  rho = "const double&",
                  eps = "const double&"):
        return "double"
    @PYB11const
    def compute_dphidrho_eps(self,
                             rho0 = "const double&",
                             eta = "const double&",
                             eps = "const double&"):
        return "double"
    @PYB11const
    def compute_dP1drho_eps(self,
                            rho0 = "const double&",
                            mu = "const double&",
                            eps = "const double& dP2drho"):
        return "double"
    @PYB11const
    def compute_dP2drho_eps(self,
                            phi = "const double&",
                            dphidrho_eps = "const double&",
                            rho0 = "const double&",
                            rho = "const double&",
                            eps = "const double&"):
        return "double"
    @PYB11const
    def compute_dP4drho_eps(self,
                            phi = "const double&",
                            dphidrho_eps = "const double&",
                            rho0 = "const double&",
                            eta = "const double&",
                            mu = "const double&",
                            rho = "const double&",
                            eps = "const double&"):
        return "double"
    @PYB11const
    def compute_dphideps_rho(self,
                             eta = "const double&",
                             eps = "const double&"):
        return "double"
    @PYB11const
    def compute_dP2deps_rho(self,
                            phi = "const double&",
                            dphideps_rho = "const double&",
                            rho = "const double&",
                            eps = "const double&"):
        return "double"
    @PYB11const
    def METHOD_NAME(self,
                            phi = "const double&",
                            dphideps_rho = "const double&",
                            eta = "const double&",
                            rho = "const double&",
                            eps = "const double&"):
        return "double"
    #...........................................................................
    # Properties
    etamin_solid = PYB11property("double", "etamin_solid", "etamin_solid")
    etamax_solid = PYB11property("double", "etamax_solid", "etamax_solid")
    a = PYB11property("double", "a", "a")
    b = PYB11property("double", "b", "b")
    A = PYB11property("double", "A", "A")
    B = PYB11property("double", "B", "B")
    alpha = PYB11property("double", "alpha", "alpha")
    beta = PYB11property("double", "beta", "beta")
    eps0 = PYB11property("double", "eps0", "eps0")
    epsLiquid = PYB11property("double", "epsLiquid", "epsLiquid")
    epsVapor = PYB11property("double", "epsVapor", "epsVapor")
    atomicWeight = PYB11property("double", "atomicWeight", "atomicWeight")
#-------------------------------------------------------------------------------
# Inject EOS interface
#-------------------------------------------------------------------------------
PYB11inject(EOSAbstractMethods, TillotsonEquationOfState, virtual=True, pure_virtual=False) | 
| 110 | 
	make layer | 
	"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch.nn as nn
import torch
import torch.nn.functional as F
import math
__all__ = ["Res2Net", "res2net50"]
class Bottle2neck(nn.Module):
    expansion = 4
    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        baseWidth=26,
        scale=4,
        stype="normal",
    ):
        """ Constructor
        Args:
            inplanes: input channel dimensionality
            planes: output channel dimensionality
            stride: conv stride. Replaces pooling layer.
            downsample: None when stride = 1
            baseWidth: basic width of conv3x3
            scale: number of scale.
            stype: 'normal': normal set. 'stage': first block of a new stage.
        """
        super(Bottle2neck, self).__init__()
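        # The bottleneck's 3x3 stage is split into `scale` groups of `width`
        # channels each; width is derived from baseWidth relative to a
        # 64-channel baseline.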
        width = int(math.floor(planes * (baseWidth / 64.0)))
        self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width * scale)
        if scale == 1:
            self.nums = 1
        else:
            self.nums = scale - 1
        if stype == "stage":
            self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)
        convs = []
        bns = []
        for i in range(self.nums):
            convs.append(
                nn.Conv2d(
                    width, width, kernel_size=3, stride=stride, padding=1, bias=False
                )
            )
            bns.append(nn.BatchNorm2d(width))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        self.conv3 = nn.Conv2d(
            width * scale, planes * self.expansion, kernel_size=1, bias=False
        )
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stype = stype
        self.scale = scale
        self.width = width
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        spx = torch.split(out, self.width, 1)
        for i in range(self.nums):
            if i == 0 or self.stype == "stage":
                sp = spx[i]
            else:
                sp = sp + spx[i]
            sp = self.convs[i](sp)
            sp = self.relu(self.bns[i](sp))
            if i == 0:
                out = sp
            else:
                out = torch.cat((out, sp), 1)
        if self.scale != 1 and self.stype == "normal":
            out = torch.cat((out, spx[self.nums]), 1)
        elif self.scale != 1 and self.stype == "stage":
            out = torch.cat((out, self.pool(spx[self.nums])), 1)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Res2Net(nn.Module):
    def __init__(self, block, layers, baseWidth=26, scale=4, num_classes=1000):
        self.inplanes = 64
        super(Res2Net, self).__init__()
        self.baseWidth = baseWidth
        self.scale = scale
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self.METHOD_NAME(block, 64, layers[0])
        self.layer2 = self.METHOD_NAME(block, 128, layers[1], stride=2)
        self.layer3 = self.METHOD_NAME(block, 256, layers[2], stride=2)
        self.layer4 = self.METHOD_NAME(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def METHOD_NAME(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                ),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(
            block(
                self.inplanes,
                planes,
                stride,
                downsample=downsample,
                stype="stage",
                baseWidth=self.baseWidth,
                scale=self.scale,
            )
        )
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(self.inplanes, planes, baseWidth=self.baseWidth, scale=self.scale)
            )
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
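# Illustrative usage of the factory defined below (added example; input size assumed,
# the adaptive average pool makes the network size-agnostic):
#   model = res2net50(num_classes=1000)
#   logits = model(torch.randn(1, 3, 224, 224))   # -> shape (1, 1000)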
def res2net50(pretrained=False, **kwargs):
    """Constructs a Res2Net-50 model.
    Res2Net-50 refers to the Res2Net-50_26w_4s.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth=26, scale=4, **kwargs)
    return model | 
| 111 | 
	derive flops | 
	from matplotlib import rcParams
import os
rcParams.update({'figure.autolayout': True})
KINDS = ["bar", "hist", "line"]
METRICS = {"bandwidth": "bandwidth(GiB/s)",
           "FLOPS": "GFLOPS",
           "speedup": "time(s)/time(s)",
           "throughput": "throughput(GProblemsize/s)",
           "time/rep": "time/rep(s)",
}
def _derive_bandwidth(thicket, header):
    return thicket.dataframe[header, "Bytes/Rep"] / _derive_time(thicket, header) / 10**9
def METHOD_NAME(thicket, header):
    return thicket.dataframe[header, "Flops/Rep"] / _derive_time(thicket, header) / 10**9
def _derive_speedup(thicket, header_list):
    return thicket.dataframe[header_list[0], "Total time"] / thicket.dataframe[header_list[1], "Total time"]
def _derive_throughput(thicket, header):
    return thicket.dataframe[header, "ProblemSize"] / _derive_time(thicket, header) / 10**9
def _derive_time(thicket, header):
    return thicket.dataframe[header, "Total time"] / thicket.dataframe[header, "Reps"]
def _graph_bar(df, metric, prefix):
    num_xticks = len(df)
    plt = df.plot(kind="bar", 
                    title=f"{METRICS[metric]}", 
                    ylabel=METRICS[metric],
                    grid=True,
                    figsize=(max(num_xticks*0.5, 4), 6,),
        )
    plt.figure.savefig(f"{prefix}/bar_{metric}.png")
def _graph_hist(df, metric, prefix):
    num_xticks = len(df)
    plt = df.plot(kind="hist", 
                    title=f"{METRICS[metric]}",
                    xlabel=METRICS[metric],
                    grid=True,
                    figsize=(max(num_xticks*0.5, 4), 6,),
                    subplots=True,
        )
    plt[0].figure.savefig(f"{prefix}/hist_{metric}.png")
def _graph_line(df, metric, prefix, name):
    plt = df.plot(kind="line", 
                    marker='o', 
                    title=f"{name}", 
                    ylabel=METRICS[metric], 
                    logx=True,
                    logy=True,
                    grid=True,
        )
    plt.figure.savefig(f"{prefix}/{name}.png")
def plot(thicket, kind=None, metric=None, prefix=None):
    """Prepares dataframe for plotting and calls appropriate plotting function
    
    Arguments:
        thicket (Thicket): Thicket object
        kind (str): Type of plot to make
        metric (str): Metric to plot
        prefix (str): Prefix for output file
    
    Returns:
        df (DataFrame): Dataframe used for plotting
    """
    if kind is None:
        raise ValueError(f"kind must be specified from: {KINDS}")
    if metric is None:
        raise ValueError(f"metric must be specified from: {list(METRICS.keys())}")
    func = None
    if metric == "bandwidth":
        func = _derive_bandwidth
        if prefix is None:
            prefix = "graphs/graph_bandwidth"
    elif metric == "FLOPS":
        func = METHOD_NAME
        if prefix is None:
            prefix = "graphs/graph_flops"
    elif metric == "speedup":
        func = _derive_speedup
        if prefix is None:
            prefix = "graphs"
    elif metric == "throughput":
        func = _derive_throughput
        if prefix is None:
            prefix = "graphs/graph_throughput"
    elif metric == "time/rep":
        func = _derive_time
        if prefix is None:
            prefix = "graphs/graph_time"
    g_func = None
    if kind == "bar":
        g_func = _graph_bar
    elif kind == "hist":
        g_func = _graph_hist
    elif kind == "line":
        g_func = _graph_line
    # Make dir
    if not os.path.exists(prefix):
        os.makedirs(prefix)
    # Add calculated column to dataframe
    header_list = [h for h in thicket.dataframe.columns.get_level_values(0).unique() if "name" not in h]
    if metric == "speedup":
        thicket.dataframe[f"{header_list[0]}/{header_list[1]}", metric] = func(thicket, header_list)
    else:
        for header in header_list:
            thicket.dataframe[header, metric] = func(thicket, header)
    # Make copy
    df = thicket.dataframe.copy(deep=True)
    if kind == "bar" or kind == "hist":
        df.reset_index(inplace=True)
        drop_cols = [col for col in df.columns if "name" not in col and metric not in col]
        df.drop(columns=drop_cols, inplace=True)
        df.set_index([("name", "")], inplace=True)
        df.columns = df.columns.droplevel(1)
        g_func(df, metric, prefix)
    elif kind == "line":
        # Plot for each node
        for node in set(thicket.dataframe.index.get_level_values("node")):
            df = thicket.dataframe.loc[node]
            name = df[("name", "")].iloc[0]
            drop_cols = [col for col in df.columns if col[1] != metric or df[col].isnull().values.all()]
            df = df.drop(columns=drop_cols, axis=1)
            df.columns = df.columns.droplevel(1)
            g_func(df, metric, prefix, name)
    
    return df
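# Illustrative call (added example; assumes a Thicket object `tk` with the columns used above):
#   plot(tk, kind="bar", metric="FLOPS")   # writes graphs/graph_flops/bar_FLOPS.png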
 | 
| 112 | 
	apply max | 
	import inspect
from PYB11Generator import *
from FieldBase import FieldBase
from Field import Field
#-------------------------------------------------------------------------------
# Add numeric operations to a Field
#-------------------------------------------------------------------------------
@PYB11template("Dimension", "Value")
@PYB11pycppname("Field")
class ArithmeticField(FieldBase):
    PYB11typedefs = """
  using FieldType = Field<%(Dimension)s, %(Value)s>;
  using Scalar = typename FieldType::Scalar;
  using ScalarFieldType = Field<%(Dimension)s, Scalar>;
"""
    def __add__(self):
        return
    def __sub__(self):
        return
    def __iadd__(self):
        return
    def __isub__(self):
        return
    @PYB11pyname("__add__")
    def __add__V__(self, rhs="%(Value)s()"):
        return
    @PYB11pyname("__sub__")
    def __sub__V__(self, rhs="%(Value)s()"):
        return
    @PYB11pyname("__iadd__")
    def __iadd__V__(self, rhs="%(Value)s()"):
        return
    @PYB11pyname("__isub__")
    def __isub__V__(self, rhs="%(Value)s()"):
        return
    @PYB11implementation("[](const FieldType& self, const ScalarFieldType& rhs) { return self * rhs; }")
    @PYB11operator
    def __mul__(self, rhs="const ScalarFieldType&"):
        return "FieldType"
    @PYB11implementation("[](const FieldType& self, const ScalarFieldType& rhs) { return self / rhs; }")
    @PYB11operator
    def __truediv__(self, rhs="const ScalarFieldType&"):
        return "FieldType"
    @PYB11implementation("[](FieldType& self, const ScalarFieldType& rhs) { return self *= rhs; }")
    @PYB11operator
    def __imul__(self, rhs="const ScalarFieldType&"):
        return
    @PYB11implementation("[](FieldType& self, const ScalarFieldType& rhs) { return self /= rhs; }")
    @PYB11operator
    def __itruediv__(self, rhs="const ScalarFieldType&"):
        return
    @PYB11pyname("__mul__")
    def __mul__S__(self, rhs="Scalar()"):
        return
    @PYB11pyname("__truediv__")
    def __truediv__S__(self, rhs="Scalar()"):
        return
    @PYB11pyname("__imul__")
    def __imul__S__(self, rhs="Scalar()"):
        return
    @PYB11pyname("__itruediv__")
    def __itruediv__S__(self, rhs="Scalar()"):
        return
    @PYB11const
    def sumElements(self):
        "Return the sum of the elements in the Field."
        return
    @PYB11const
    def localSumElements(self):
        "Return the sum of the elements in the Field local to each processor."
        return
    #...........................................................................
    # Comparators
    def __gt__(self):
        return
    def __lt__(self):
        return
    def __ge__(self):
        return "bool"
    def __le__(self):
        return "bool"
    def __gt__(self, rhs="%(Value)s()"):
        "Greater than comparison with a %(Value)s"
        return "bool"
    def __lt__(self, rhs="%(Value)s()"):
        "Less than comparison with a %(Value)s"
        return "bool"
    def __ge__(self, rhs="%(Value)s()"):
        "Greater than or equal comparison with a %(Value)s"
        return "bool"
    def __le__(self, rhs="%(Value)s()"):
        "Less than or equal comparison with a %(Value)s"
        return "bool"
    def applyMin(self):
        "Enforce a floor on the values of the Field."
        return
    def METHOD_NAME(self):
        "Enforce a ceiling on the values of the Field."
        return
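    # Illustrative Python-side use once the bindings are generated (added example;
    # construction of the Field instance `f` is assumed):
    #   f.applyMin(0.0)   # every element becomes at least 0.0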
    @PYB11const
    def min(self):
        "Return the mimimum value in the Field."
        return
    @PYB11const
    def max(self):
        "Return the maximum value in the Field."
        return
    @PYB11const
    def localMin(self):
        "Return the mimimum value in the Field local to each processor."
        return
    @PYB11const
    def localMax(self):
        "Return the maximum value in the Field local to each processor."
        return
#-------------------------------------------------------------------------------
# Inject base field methods
#-------------------------------------------------------------------------------
PYB11inject(Field, ArithmeticField) | 
| 113 | 
	test flow single protocol default random port | 
	import itertools
import os.path
import pytest
import requests as req
from docarray import Document, DocumentArray
from jina import Client, Executor, Flow, requests
from jina.helper import random_port
PROTOCOLS = ['grpc', 'http', 'websocket']
cur_dir = os.path.dirname(__file__)
class MyExecutor(Executor):
    @requests
    def foo(self, docs: DocumentArray, **kwargs):
        for doc in docs:
            doc.text = 'processed'
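# Note (added): the parametrization below pairs every combination of one to three
# protocols with a freshly drawn random port per protocol, e.g. ports=[p1, p2]
# alongside protocols=['grpc', 'http'].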
@pytest.mark.parametrize(
    'ports,protocols',
    [
        *[
            ([random_port(), random_port(), random_port()], list(protocols))
            for protocols in itertools.combinations(PROTOCOLS, r=3)
        ],
        *[
            ([random_port(), random_port()], list(protocols))
            for protocols in itertools.combinations(PROTOCOLS, r=2)
        ],
        *[
            ([random_port()], list(protocols))
            for protocols in itertools.combinations(PROTOCOLS, r=1)
        ],
    ],
)
def test_flow_multiprotocol(ports, protocols):
    flow = Flow().config_gateway(port=ports, protocol=protocols).add(uses=MyExecutor)
    with flow:
        for port, protocol in zip(ports, protocols):
            client = Client(port=port, protocol=protocol)
            docs = client.post('/', inputs=[Document()])
            for doc in docs:
                assert doc.text == 'processed'
@pytest.mark.parametrize(
    'protocols',
    [
        list(protocols)
        for protocols in itertools.chain(
            itertools.combinations(PROTOCOLS, r=3),
            itertools.combinations(PROTOCOLS, r=2),
        )
    ],
)
def test_flow_multiprotocol_default_random_ports(protocols):
    flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
    with flow:
        for port, protocol in zip(flow.port, protocols):
            client = Client(port=port, protocol=protocol)
            docs = client.post('/', inputs=[Document()])
            for doc in docs:
                assert doc.text == 'processed'
@pytest.mark.parametrize(
    'protocols',
    [
        ['grpc'],
        ['http'],
        ['websocket'],
    ],
)
def METHOD_NAME(protocols):
    flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
    with flow:
        for protocol in protocols:
            client = Client(port=flow.port, protocol=protocol)
            docs = client.post('/', inputs=[Document()])
            for doc in docs:
                assert doc.text == 'processed'
def test_flow_multiprotocol_aliases():
    ports = [random_port(), random_port(), random_port()]
    protocols = PROTOCOLS
    flow = Flow().config_gateway(ports=ports, protocols=protocols).add(uses=MyExecutor)
    with flow:
        for port, protocol in zip(ports, protocols):
            client = Client(port=port, protocol=protocol)
            docs = client.post('/', inputs=[Document()])
            for doc in docs:
                assert doc.text == 'processed'
def test_flow_multiprotocol_yaml():
    flow = Flow.load_config(os.path.join(cur_dir, 'yaml/multi-protocol.yml'))
    with flow:
        for port, protocol in zip([12345, 12344, 12343], ['grpc', 'http', 'websocket']):
            client = Client(port=port, protocol=protocol)
            client.post('/', inputs=[Document()])
def test_flow_multiprotocol_ports_protocols_mismatch():
    flow = Flow().config_gateway(port=[random_port(), random_port()], protocol=['grpc', 'http', 'websocket'])
    with pytest.raises(ValueError) as err_info:
        with flow:
            pass
    assert (
        'You need to specify as much protocols as ports if you want to use a jina built-in gateway'
        in err_info.value.args[0]
    )
def test_flow_multiprotocol_with_monitoring():
    port_monitoring = random_port()
    ports = [random_port(), random_port(), random_port()]
    protocols = PROTOCOLS
    flow = Flow().config_gateway(
        port=ports, protocol=protocols, monitoring=True, port_monitoring=port_monitoring
    )
    with flow:
        for port, protocol in zip(ports, protocols):
            client = Client(port=port, protocol=protocol)
            client.post('/', inputs=[Document()])
        resp = req.get(f'http://localhost:{port_monitoring}/')
        assert resp.status_code == 200
        assert (
            'jina_successful_requests_total{runtime_name="gateway/rep-0"} 3.0'
            in str(resp.content)
        )
def test_flow_multiprotocol_with_tracing():
    ports = [random_port(), random_port(), random_port()]
    protocols = PROTOCOLS
    flow = Flow().config_gateway(port=ports, protocol=protocols, tracing=True)
    with flow:
        for port, protocol in zip(ports, protocols):
            client = Client(port=port, protocol=protocol)
            client.post('/', inputs=[Document()]) | 
| 114 | 
	add events to session | 
	import logging
import json
from typing import Dict, List, Optional
from moonstreamdb.blockchain import AvailableBlockchainType, get_label_model
from moonstreamdb.models import Base
from moonworm.crawler.function_call_crawler import ContractFunctionCall  # type: ignore
from sqlalchemy.orm import Session
from ..settings import CRAWLER_LABEL
from .event_crawler import Event
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def _event_to_label(
    blockchain_type: AvailableBlockchainType, event: Event, label_name=CRAWLER_LABEL
) -> Base:
    """
    Creates a label model.
    """
    label_model = get_label_model(blockchain_type)
    sanityzed_label_data = json.loads(
        json.dumps(
            {
                "type": "event",
                "name": event.event_name,
                "args": event.args,
            }
        ).replace(r"\u0000", "")
    )
    label = label_model(
        label=label_name,
        label_data=sanityzed_label_data,
        address=event.address,
        block_number=event.block_number,
        block_timestamp=event.block_timestamp,
        transaction_hash=event.transaction_hash,
        log_index=event.log_index,
    )
    return label
def _function_call_to_label(
    blockchain_type: AvailableBlockchainType,
    function_call: ContractFunctionCall,
    label_name=CRAWLER_LABEL,
) -> Base:
    """
    Creates a label model.
    """
    label_model = get_label_model(blockchain_type)
    sanityzed_label_data = json.loads(
        json.dumps(
            {
                "type": "tx_call",
                "name": function_call.function_name,
                "caller": function_call.caller_address,
                "args": function_call.function_args,
                "status": function_call.status,
                "gasUsed": function_call.gas_used,
            }
        ).replace(r"\u0000", "")
    )
    label = label_model(
        label=label_name,
        label_data=sanityzed_label_data,
        address=function_call.contract_address,
        block_number=function_call.block_number,
        transaction_hash=function_call.transaction_hash,
        block_timestamp=function_call.block_timestamp,
    )
    return label
def get_last_labeled_block_number(
    db_session: Session,
    blockchain_type: AvailableBlockchainType,
    label_name=CRAWLER_LABEL,
) -> Optional[int]:
    label_model = get_label_model(blockchain_type)
    block_number = (
        db_session.query(label_model.block_number)
        .filter(label_model.label == label_name)
        .order_by(label_model.block_number.desc())
        .limit(1)
        .one_or_none()
    )
    return block_number[0] if block_number else None
def get_first_labeled_block_number(
    db_session: Session,
    blockchain_type: AvailableBlockchainType,
    address: str,
    label_name=CRAWLER_LABEL,
    only_events: bool = False,
) -> Optional[int]:
    label_model = get_label_model(blockchain_type)
    block_number_query = (
        db_session.query(label_model.block_number)
        .filter(label_model.label == label_name)
        .filter(label_model.address == address)
    )
    function_call_block_numbers = (
        block_number_query.filter(label_model.log_index == None)
        .order_by(label_model.block_number)
        .limit(50)
        .all()
    )
    event_block_numbers = (
        block_number_query.filter(label_model.log_index != None)
        .order_by(label_model.block_number)
        .limit(50)
        .all()
    )
    if only_events:
        return event_block_numbers[0][0] if event_block_numbers else None
    else:
        event_block_number = event_block_numbers[0][0] if event_block_numbers else -1
        function_call_block_number = (
            function_call_block_numbers[0][0] if function_call_block_numbers else -1
        )
        max_block_number = max(event_block_number, function_call_block_number)
        return max_block_number if max_block_number != -1 else None
def commit_session(db_session: Session) -> None:
    """
    Save labels in the database.
    """
    try:
        logger.info("Committing session to database")
        db_session.commit()
    except Exception as e:
        logger.error(f"Failed to save labels: {e}")
        db_session.rollback()
        raise e
def METHOD_NAME(
    db_session: Session,
    events: List[Event],
    blockchain_type: AvailableBlockchainType,
    label_name=CRAWLER_LABEL,
) -> None:
    label_model = get_label_model(blockchain_type)
    events_hashes_to_save = [event.transaction_hash for event in events]
    existing_labels = (
        db_session.query(label_model.transaction_hash, label_model.log_index)
        .filter(
            label_model.label == label_name,
            label_model.log_index != None,
            label_model.transaction_hash.in_(events_hashes_to_save),
        )
        .all()
    )
    existing_labels_transactions = []
    existing_log_index_by_tx_hash: Dict[str, List[int]] = {}
    for label in existing_labels:
        if label[0] not in existing_labels_transactions:
            existing_labels_transactions.append(label[0])
            existing_log_index_by_tx_hash[label[0]] = []
        existing_log_index_by_tx_hash[label[0]].append(label[1])
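    # Deduplication (added note): an event is saved only if its transaction hash is new,
    # or if the hash is known but this particular log_index has not been labeled yet.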
    labels_to_save = []
    for event in events:
        if event.transaction_hash not in existing_labels_transactions:
            labels_to_save.append(_event_to_label(blockchain_type, event, label_name))
        elif (
            event.log_index not in existing_log_index_by_tx_hash[event.transaction_hash]
        ):
            labels_to_save.append(_event_to_label(blockchain_type, event, label_name))
    logger.info(f"Saving {len(labels_to_save)} event labels to session")
    db_session.add_all(labels_to_save)
def add_function_calls_to_session(
    db_session: Session,
    function_calls: List[ContractFunctionCall],
    blockchain_type: AvailableBlockchainType,
    label_name=CRAWLER_LABEL,
) -> None:
    label_model = get_label_model(blockchain_type)
    transactions_hashes_to_save = [
        function_call.transaction_hash for function_call in function_calls
    ]
    existing_labels = (
        db_session.query(label_model.transaction_hash)
        .filter(
            label_model.label == label_name,
            label_model.log_index == None,
            label_model.transaction_hash.in_(transactions_hashes_to_save),
        )
        .all()
    )
    existing_labels_transactions = [label[0] for label in existing_labels]
    labels_to_save = [
        _function_call_to_label(blockchain_type, function_call)
        for function_call in function_calls
        if function_call.transaction_hash not in existing_labels_transactions
    ]
    logger.info(f"Saving {len(labels_to_save)} labels to session")
    db_session.add_all(labels_to_save) | 
| 115 | 
	raise exc | 
	import asyncio
import asynctest
from stig.client.errors import AuthError, ConnectionError
from stig.client.poll import RequestPoller
class TestRequestPoller(asynctest.ClockedTestCase):
    def setUp(self):
        self.mock_request_args = None
        self.mock_request_kwargs = None
        self.mock_request_calls = 0
    def make_poller(self, *args, **kwargs):
        rp = RequestPoller(*args, **kwargs)
        def METHOD_NAME(exc):
            raise exc
        rp.on_error(METHOD_NAME, autoremove=False)
        return rp
    async def mock_request(self, *args, **kwargs):
        self.mock_request_args = args
        self.mock_request_kwargs = kwargs
        self.mock_request_calls += 1
        return self.mock_request_calls
    async def test_start_stop(self):
        rp = self.make_poller(self.mock_request)
        self.assertEqual(rp.running, False)
        await rp.start()
        self.assertEqual(rp.running, True)
        await rp.start()
        self.assertEqual(rp.running, True)
        await rp.stop()
        self.assertEqual(rp.running, False)
        await rp.start()
        self.assertEqual(rp.running, True)
        await rp.stop()
        self.assertEqual(rp.running, False)
        await rp.stop()
        self.assertEqual(rp.running, False)
    async def test_request_args(self):
        rp = self.make_poller(self.mock_request, 1, 2, 3, foo='bar')
        await rp.start()
        await self.advance(0)
        self.assertEqual(self.mock_request_args, (1, 2, 3))
        self.assertEqual(self.mock_request_kwargs, {'foo': 'bar'})
        await rp.stop()
    async def test_interval(self):
        rp = self.make_poller(self.mock_request, interval=10)
        self.assertEqual(rp.interval, 10)
        rp.interval = 5
        self.assertEqual(rp.interval, 5)
        await rp.start()
        await self.advance(0)
        self.assertEqual(self.mock_request_calls, 1)
        await self.advance(rp.interval * 2)
        self.assertEqual(self.mock_request_calls, 3)
        await self.advance(rp.interval * 3)
        self.assertEqual(self.mock_request_calls, 6)
        await rp.stop()
    async def test_callbacks(self):
        rp = self.make_poller(self.mock_request)
        status = None
        def cb1(calls):
            nonlocal status
            if calls is None:
                status = None
            elif calls % 2 == 0:
                status = 'Even number of calls: %d' % calls
        def cb2(calls):
            nonlocal status
            if calls is None:
                status = None
            elif calls % 2 != 0:
                status = 'Uneven number of calls: %d' % calls
        rp.on_response(cb1)
        await rp.start()
        await self.advance(0)
        self.assertEqual(status, None)
        await self.advance(rp.interval)
        self.assertEqual(status, 'Even number of calls: 2')
        await self.advance(rp.interval)
        self.assertEqual(status, 'Even number of calls: 2')
        await self.advance(rp.interval)
        self.assertEqual(status, 'Even number of calls: 4')
        rp.on_response(cb2)
        await self.advance(rp.interval)
        self.assertEqual(status, 'Uneven number of calls: 5')
        await self.advance(rp.interval)
        self.assertEqual(status, 'Even number of calls: 6')
        await self.advance(rp.interval)
        self.assertEqual(status, 'Uneven number of calls: 7')
        await rp.stop()
    async def test_callback_gets_None_when_stopped(self):
        rp = self.make_poller(self.mock_request)
        status = None
        def cb(calls):
            nonlocal status
            status = '%s calls' % calls
        rp.on_response(cb)
        await rp.start()
        await self.advance(0)
        self.assertEqual(status, '1 calls')
        await self.advance(rp.interval)
        self.assertEqual(status, '2 calls')
        await rp.stop()
        await self.advance(0)
        self.assertEqual(status, 'None calls')
    async def test_request_raises_ClientError(self):
        # Connection fails after a few successful requests
        requests_before_failure = 3
        requests_before_different_failure = 6
        requests = 0
        async def mock_request():
            nonlocal requests
            requests += 1
            if requests > requests_before_different_failure:
                raise AuthError('Another error')
            elif requests > requests_before_failure:
                raise ConnectionError('Server unreachable')
            else:
                return requests
        rp = RequestPoller(mock_request)
        # Collect responses
        responses = []
        def handle_response(response):
            if response is None:
                responses.append('no response')
            else:
                responses.append('response #%d' % response)
        rp.on_response(handle_response)
        # Collect exceptions
        errors = []
        def handle_error(exc):
            errors.append(exc)
        rp.on_error(handle_error)
        await rp.start()
        await self.advance(rp.interval * (requests_before_failure + 5))
        self.assertEqual(rp.running, True)
        self.assertEqual(len(responses), 9)
        self.assertEqual(responses, ['response #1',
                                     'response #2',
                                     'response #3',
                                     'no response',
                                     'no response',
                                     'no response',
                                     'no response',
                                     'no response',
                                     'no response'])
        self.assertEqual(len(errors), 2)
        self.assertIsInstance(errors[0], ConnectionError)
        self.assertEqual(str(errors[0]), 'Failed to connect: Server unreachable')
        self.assertIsInstance(errors[1], AuthError)
        self.assertEqual(str(errors[1]), 'Authentication failed: Another error')
        await rp.stop()
    async def test_changing_request(self):
        status = []
        async def request1():
            status.append('{}: request1() called'.format(asyncio.get_event_loop().time()))
        async def request2(a, b):
            status.append('{}: request2({}, {}) called'.format(asyncio.get_event_loop().time(), a, b))
        rp = self.make_poller(request1)
        await rp.start()
        await self.advance(0)
        self.assertEqual(status, ['%d: request1() called' % 0])
        # Setting the request restarts the internal polling loop and resets
        # loop.time() to 0 for some reason.
        rp.set_request(request2, 'one', 2)
        await self.advance(rp.interval * 3)
        self.assertEqual(status, ['%d: request1() called' % 0,
                                  '%d: request2(one, 2) called' % (rp.interval * 1),
                                  '%d: request2(one, 2) called' % (rp.interval * 2),
                                  '%d: request2(one, 2) called' % (rp.interval * 3)])
        await rp.stop()
    async def test_manual_polling(self):
        rp = self.make_poller(self.mock_request)
        await rp.start()
        self.assertEqual(self.mock_request_calls, 0)
        await self.advance(0)
        self.assertEqual(self.mock_request_calls, 1)
        rp.poll()
        await self.advance(0)
        self.assertEqual(self.mock_request_calls, 2)
        rp.poll()
        await self.advance(0)
        self.assertEqual(self.mock_request_calls, 3)
        await rp.stop() | 
| 116 | 
	get next week | 
	import datetime
from collections.abc import Sequence
from typing import Any, TypeVar
from django.db import models
from django.db.models.query import QuerySet
from django.http import HttpRequest, HttpResponse
from django.utils.datastructures import _IndexableCollection
from django.views.generic.base import View
from django.views.generic.detail import BaseDetailView, SingleObjectTemplateResponseMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
from typing_extensions import TypeAlias
_M = TypeVar("_M", bound=models.Model)
class YearMixin:
    year_format: str
    year: str | None
    def get_year_format(self) -> str: ...
    def get_year(self) -> str: ...
    def get_next_year(self, date: datetime.date) -> datetime.date | None: ...
    def get_previous_year(self, date: datetime.date) -> datetime.date | None: ...
class MonthMixin:
    month_format: str
    month: str | None
    def get_month_format(self) -> str: ...
    def get_month(self) -> str: ...
    def get_next_month(self, date: datetime.date) -> datetime.date | None: ...
    def get_previous_month(self, date: datetime.date) -> datetime.date | None: ...
class DayMixin:
    day_format: str
    day: str | None
    def get_day_format(self) -> str: ...
    def get_day(self) -> str: ...
    def get_next_day(self, date: datetime.date) -> datetime.date | None: ...
    def get_previous_day(self, date: datetime.date) -> datetime.date | None: ...
class WeekMixin:
    week_format: str
    week: str | None
    def get_week_format(self) -> str: ...
    def get_week(self) -> str: ...
    def METHOD_NAME(self, date: datetime.date) -> datetime.date | None: ...
    def get_previous_week(self, date: datetime.date) -> datetime.date | None: ...
class DateMixin:
    date_field: str | None
    allow_future: bool
    def get_date_field(self) -> str: ...
    def get_allow_future(self) -> bool: ...
    @property
    def uses_datetime_field(self) -> bool: ...
DatedItems: TypeAlias = tuple[_IndexableCollection[datetime.date] | None, _IndexableCollection[_M], dict[str, Any]]
class BaseDateListView(MultipleObjectMixin[_M], DateMixin, View):
    date_list_period: str
    def get(self, request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse: ...
    def get_dated_items(self) -> DatedItems: ...
    def get_ordering(self) -> str | Sequence[str]: ...
    def get_dated_queryset(self, **lookup: Any) -> models.query.QuerySet[_M]: ...
    def get_date_list_period(self) -> str: ...
    def get_date_list(
        self, queryset: models.query.QuerySet, date_type: str | None = ..., ordering: str = ...
    ) -> models.query.QuerySet: ...
class BaseArchiveIndexView(BaseDateListView[_M]):
    context_object_name: str
    def get_dated_items(self) -> DatedItems[_M]: ...
class ArchiveIndexView(MultipleObjectTemplateResponseMixin, BaseArchiveIndexView):
    template_name_suffix: str
class BaseYearArchiveView(YearMixin, BaseDateListView[_M]):
    date_list_period: str
    make_object_list: bool
    def get_dated_items(self) -> DatedItems[_M]: ...
    def get_make_object_list(self) -> bool: ...
class YearArchiveView(MultipleObjectTemplateResponseMixin, BaseYearArchiveView):
    template_name_suffix: str
class BaseMonthArchiveView(YearMixin, MonthMixin, BaseDateListView[_M]):
    date_list_period: str
    def get_dated_items(self) -> DatedItems[_M]: ...
class MonthArchiveView(MultipleObjectTemplateResponseMixin, BaseMonthArchiveView):
    template_name_suffix: str
class BaseWeekArchiveView(YearMixin, WeekMixin, BaseDateListView[_M]):
    def get_dated_items(self) -> DatedItems[_M]: ...
class WeekArchiveView(MultipleObjectTemplateResponseMixin, BaseWeekArchiveView):
    template_name_suffix: str
class BaseDayArchiveView(YearMixin, MonthMixin, DayMixin, BaseDateListView[_M]):
    def get_dated_items(self) -> DatedItems[_M]: ...
class DayArchiveView(MultipleObjectTemplateResponseMixin, BaseDayArchiveView):
    template_name_suffix: str
class BaseTodayArchiveView(BaseDayArchiveView[_M]):
    def get_dated_items(self) -> DatedItems[_M]: ...
class TodayArchiveView(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView):
    template_name_suffix: str
class BaseDateDetailView(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView[_M]):
    def get_object(self, queryset: QuerySet[_M] | None = ...) -> _M: ...
class DateDetailView(SingleObjectTemplateResponseMixin, BaseDateDetailView):
    template_name_suffix: str
def timezone_today() -> datetime.date: ... | 
| 117 | 
	method | 
	# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "network express-route auth create",
)
class Create(AAZCommand):
    """Create a new link authorization for an ExpressRoute circuit.
    :example: Create a new link authorization for an ExpressRoute circuit.
        az network express-route auth create --circuit-name MyCircuit -g MyResourceGroup -n MyAuthorization
    """
    _aaz_info = {
        "version": "2022-01-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/expressroutecircuits/{}/authorizations/{}", "2022-01-01"],
        ]
    }
    AZ_SUPPORT_NO_WAIT = True
    def _handler(self, command_args):
        super()._handler(command_args)
        return self.build_lro_poller(self._execute_operations, self._output)
    _args_schema = None
    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="Authorization name.",
            required=True,
        )
        _args_schema.circuit_name = AAZStrArg(
            options=["--circuit-name"],
            help="ExpressRoute circuit name.",
            required=True,
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        # define Arg Group "AuthorizationParameters"
        # define Arg Group "Properties"
        return cls._args_schema
    def _execute_operations(self):
        self.pre_operations()
        yield self.ExpressRouteCircuitAuthorizationsCreateOrUpdate(ctx=self.ctx)()
        self.post_operations()
    @register_callback
    def pre_operations(self):
        pass
    @register_callback
    def post_operations(self):
        pass
    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result
    class ExpressRouteCircuitAuthorizationsCreateOrUpdate(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"
        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200_201,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200, 201]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200_201,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            return self.on_error(session.http_response)
        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}",
                **self.url_parameters
            )
        @property
        def METHOD_NAME(self):
            return "PUT"
        @property
        def error_format(self):
            return "ODataV4Format"
        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "authorizationName", self.ctx.args.name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "circuitName", self.ctx.args.circuit_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters
        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-01-01",
                    required=True,
                ),
            }
            return parameters
        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Content-Type", "application/json",
                ),
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters
        @property
        def content(self):
            _content_value, _builder = self.new_content_builder(
                self.ctx.args,
                typ=AAZObjectType,
                typ_kwargs={"flags": {"required": True, "client_flatten": True}}
            )
            _builder.set_prop("name", AAZStrType, ".name")
            _builder.set_prop("properties", AAZObjectType, typ_kwargs={"flags": {"client_flatten": True}})
            return self.serialize_content(_content_value)
        def on_200_201(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200_201
            )
        _schema_on_200_201 = None
        @classmethod
        def _build_schema_on_200_201(cls):
            if cls._schema_on_200_201 is not None:
                return cls._schema_on_200_201
            cls._schema_on_200_201 = AAZObjectType()
            _schema_on_200_201 = cls._schema_on_200_201
            _schema_on_200_201.etag = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200_201.id = AAZStrType()
            _schema_on_200_201.name = AAZStrType()
            _schema_on_200_201.properties = AAZObjectType(
                flags={"client_flatten": True},
            )
            _schema_on_200_201.type = AAZStrType(
                flags={"read_only": True},
            )
            properties = cls._schema_on_200_201.properties
            properties.authorization_key = AAZStrType(
                serialized_name="authorizationKey",
            )
            properties.authorization_use_status = AAZStrType(
                serialized_name="authorizationUseStatus",
            )
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
                flags={"read_only": True},
            )
            return cls._schema_on_200_201
class _CreateHelper:
    """Helper class for Create"""
__all__ = ["Create"] | 
| 118 | 
	test list outputs only | 
	import json
from dvc.cli import parse_args
from dvc.commands.ls import CmdList
def _test_cli(mocker, *args):
    cli_args = parse_args(["list", *args])
    assert cli_args.func == CmdList
    cmd = cli_args.func(cli_args)
    m = mocker.patch("dvc.repo.Repo.ls")
    assert cmd.run() == 0
    return m
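# Added note: the helper above parses the `dvc list` CLI arguments, runs CmdList with
# dvc.repo.Repo.ls mocked out, and returns the mock so tests can assert on forwarded kwargs.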
def test_list(mocker):
    url = "local_dir"
    m = _test_cli(mocker, url)
    m.assert_called_once_with(
        url,
        None,
        recursive=False,
        rev=None,
        dvc_only=False,
        config=None,
        remote=None,
        remote_config=None,
    )
def test_list_recursive(mocker):
    url = "local_dir"
    m = _test_cli(mocker, url, "-R")
    m.assert_called_once_with(
        url,
        None,
        recursive=True,
        rev=None,
        dvc_only=False,
        config=None,
        remote=None,
        remote_config=None,
    )
def test_list_git_ssh_rev(mocker):
    url = "git@github.com:repo"
    m = _test_cli(mocker, url, "--rev", "123")
    m.assert_called_once_with(
        url,
        None,
        recursive=False,
        rev="123",
        dvc_only=False,
        config=None,
        remote=None,
        remote_config=None,
    )
def test_list_targets(mocker):
    url = "local_dir"
    target = "subdir"
    m = _test_cli(mocker, url, target)
    m.assert_called_once_with(
        url,
        target,
        recursive=False,
        rev=None,
        dvc_only=False,
        config=None,
        remote=None,
        remote_config=None,
    )
def METHOD_NAME(mocker):
    url = "local_dir"
    m = _test_cli(mocker, url, None, "--dvc-only")
    m.assert_called_once_with(
        url,
        None,
        recursive=False,
        rev=None,
        dvc_only=True,
        config=None,
        remote=None,
        remote_config=None,
    )
def test_list_config(mocker):
    url = "local_dir"
    m = _test_cli(
        mocker,
        url,
        None,
        "--config",
        "myconfig",
        "--remote",
        "myremote",
        "--remote-config",
        "k1=v1",
        "k2=v2",
    )
    m.assert_called_once_with(
        url,
        None,
        recursive=False,
        rev=None,
        dvc_only=False,
        config="myconfig",
        remote="myremote",
        remote_config={"k1": "v1", "k2": "v2"},
    )
def test_show_json(mocker, capsys):
    cli_args = parse_args(["list", "local_dir", "--json"])
    assert cli_args.func == CmdList
    cmd = cli_args.func(cli_args)
    result = [{"key": "val"}]
    mocker.patch("dvc.repo.Repo.ls", return_value=result)
    assert cmd.run() == 0
    out, _ = capsys.readouterr()
    assert json.dumps(result) in out
def test_show_colors(mocker, capsys, monkeypatch):
    cli_args = parse_args(["list", "local_dir"])
    assert cli_args.func == CmdList
    cmd = cli_args.func(cli_args)
    monkeypatch.setenv("LS_COLORS", "ex=01;32:rs=0:di=01;34:*.xml=01;31:*.dvc=01;33:")
    result = [
        {"isdir": False, "isexec": 0, "isout": False, "path": ".dvcignore"},
        {"isdir": False, "isexec": 0, "isout": False, "path": ".gitignore"},
        {"isdir": False, "isexec": 0, "isout": False, "path": "README.md"},
        {"isdir": True, "isexec": 0, "isout": True, "path": "data"},
        {"isdir": False, "isexec": 0, "isout": True, "path": "structure.xml"},
        {
            "isdir": False,
            "isexec": 0,
            "isout": False,
            "path": "structure.xml.dvc",
        },
        {"isdir": True, "isexec": 0, "isout": False, "path": "src"},
        {"isdir": False, "isexec": 1, "isout": False, "path": "run.sh"},
    ]
    mocker.patch("dvc.repo.Repo.ls", return_value=result)
    assert cmd.run() == 0
    out, _ = capsys.readouterr()
    entries = out.splitlines()
    assert entries == [
        ".dvcignore",
        ".gitignore",
        "README.md",
        "\x1b[01;34mdata\x1b[0m",
        "\x1b[01;31mstructure.xml\x1b[0m",
        "\x1b[01;33mstructure.xml.dvc\x1b[0m",
        "\x1b[01;34msrc\x1b[0m",
        "\x1b[01;32mrun.sh\x1b[0m",
    ]
def test_list_alias():
    cli_args = parse_args(["ls", "local_dir"])
    assert cli_args.func == CmdList | 
| 119 | 
	flatten | 
	from typing import List, Optional, NamedTuple
from mlagents.torch_utils import torch
import numpy as np
from mlagents.trainers.torch_entities.utils import ModelUtils
from mlagents.trainers.buffer import AgentBuffer, BufferKey
from mlagents_envs.base_env import _ActionTupleBase
class LogProbsTuple(_ActionTupleBase):
    """
    An object whose fields correspond to the log probs of actions of different types.
    Continuous and discrete are numpy arrays with dimensions (n_agents, continuous_size)
    and (n_agents, discrete_size), respectively. Note that this also holds when the
    continuous or discrete size is zero.
    """
    @property
    def discrete_dtype(self) -> np.dtype:
        """
        The dtype of a discrete log probability.
        """
        return np.float32
    @staticmethod
    def empty_log_probs() -> "LogProbsTuple":
        """
        Generates a dummy LogProbsTuple
        """
        return LogProbsTuple()
class ActionLogProbs(NamedTuple):
    """
    A NamedTuple containing the tensor for continuous log probs and list of tensors for
    discrete log probs of individual actions as well as all the log probs for an entire branch.
    Utility functions provide numpy <=> tensor conversions to be used by the optimizers.
    :param continuous_tensor: Torch tensor corresponding to log probs of continuous actions
    :param discrete_list: List of Torch tensors each corresponding to log probs of the discrete actions that were
    sampled.
    :param all_discrete_list: List of Torch tensors, one per discrete action branch, each holding
    the log probs of every action in that branch, including actions that were not sampled.
    """
    continuous_tensor: torch.Tensor
    discrete_list: Optional[List[torch.Tensor]]
    all_discrete_list: Optional[List[torch.Tensor]]
    @property
    def discrete_tensor(self):
        """
        Returns the discrete log probs list as a stacked tensor
        """
        return torch.stack(self.discrete_list, dim=-1)
    @property
    def all_discrete_tensor(self):
        """
        Returns the discrete log probs of each branch as a tensor
        """
        return torch.cat(self.all_discrete_list, dim=1)
    def to_log_probs_tuple(self) -> LogProbsTuple:
        """
        Returns a LogProbsTuple. Only adds if tensor is not None. Otherwise,
        LogProbsTuple uses a default.
        """
        log_probs_tuple = LogProbsTuple()
        if self.continuous_tensor is not None:
            continuous = ModelUtils.to_numpy(self.continuous_tensor)
            log_probs_tuple.add_continuous(continuous)
        if self.discrete_list is not None:
            discrete = ModelUtils.to_numpy(self.discrete_tensor)
            log_probs_tuple.add_discrete(discrete)
        return log_probs_tuple
    def _to_tensor_list(self) -> List[torch.Tensor]:
        """
        Returns the tensors in the ActionLogProbs as a flat List of torch Tensors. This
        is private and serves as a utility for self.flatten()
        """
        tensor_list: List[torch.Tensor] = []
        if self.continuous_tensor is not None:
            tensor_list.append(self.continuous_tensor)
        if self.discrete_list is not None:
            tensor_list.append(self.discrete_tensor)
        return tensor_list
    def METHOD_NAME(self) -> torch.Tensor:
        """
        A utility method that returns all log probs in ActionLogProbs as a flattened tensor.
        This is useful for algorithms like PPO which can treat all log probs in the same way.
        """
        return torch.cat(self._to_tensor_list(), dim=1)
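    # Illustrative shapes (added note, assumed setup): with a continuous tensor of shape
    # (n, 2) and 3 discrete branches, the concatenation above returns shape (n, 2 + 3).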
    @staticmethod
    def from_buffer(buff: AgentBuffer) -> "ActionLogProbs":
        """
        A static method that accesses continuous and discrete log probs fields in an AgentBuffer
        and constructs the corresponding ActionLogProbs from the retrieved np arrays.
        """
        continuous: torch.Tensor = None
        discrete: List[torch.Tensor] = None  # type: ignore
        if BufferKey.CONTINUOUS_LOG_PROBS in buff:
            continuous = ModelUtils.list_to_tensor(buff[BufferKey.CONTINUOUS_LOG_PROBS])
        if BufferKey.DISCRETE_LOG_PROBS in buff:
            discrete_tensor = ModelUtils.list_to_tensor(
                buff[BufferKey.DISCRETE_LOG_PROBS]
            )
            # This will keep discrete_list = None which enables flatten()
            if discrete_tensor.shape[1] > 0:
                discrete = [
                    discrete_tensor[..., i] for i in range(discrete_tensor.shape[-1])
                ]
        return ActionLogProbs(continuous, discrete, None) | 
| 120 | 
	mock io call | 
	"""
Benchmark for async tree workload, which calls asyncio.gather() on a tree
(6 levels deep, 6 branches per level) with the leaf nodes simulating some
(potentially) async work (depending on the benchmark variant). Benchmark
variants include:
1) "none": No actual async work in the async tree.
2) "io": All leaf nodes simulate async IO workload (async sleep 50ms).
3) "memoization": All leaf nodes simulate async IO workload with 90% of 
                  the data memoized
4) "cpu_io_mixed": Half of the leaf nodes simulate CPU-bound workload and 
                   the other half simulate the same workload as the 
                   "memoization" variant.
"""
import asyncio
import math
import random
from memray_helper import get_tracker
import pyperf
NUM_RECURSE_LEVELS = 6
NUM_RECURSE_BRANCHES = 6
RANDOM_SEED = 0
IO_SLEEP_TIME = 0.05
MEMOIZABLE_PERCENTAGE = 90
CPU_PROBABILITY = 0.5
FACTORIAL_N = 500
class AsyncTree:
    def __init__(self):
        self.cache = {}
        # set to deterministic random, so that the results are reproducible
        random.seed(RANDOM_SEED)
    async def METHOD_NAME(self):
        await asyncio.sleep(IO_SLEEP_TIME)
    async def workload_func(self):
        raise NotImplementedError("To be implemented by each variant's derived class.")
    async def recurse(self, recurse_level):
        if recurse_level == 0:
            await self.workload_func()
            return
        await asyncio.gather(
            *[self.recurse(recurse_level - 1) for _ in range(NUM_RECURSE_BRANCHES)]
        )
    async def run(self):
        with get_tracker():
            await self.recurse(NUM_RECURSE_LEVELS)
class NoneAsyncTree(AsyncTree):
    async def workload_func(self):
        return
class IOAsyncTree(AsyncTree):
    async def workload_func(self):
        await self.METHOD_NAME()
class MemoizationAsyncTree(AsyncTree):
    async def workload_func(self):
        # deterministic random, seed set in AsyncTree.__init__()
        data = random.randint(1, 100)
        if data <= MEMOIZABLE_PERCENTAGE:
            if self.cache.get(data):
                return data
            self.cache[data] = True
        await self.METHOD_NAME()
        return data
class CpuIoMixedAsyncTree(MemoizationAsyncTree):
    async def workload_func(self):
        # deterministic random, seed set in AsyncTree.__init__()
        if random.random() < CPU_PROBABILITY:
            # mock cpu-bound call
            return math.factorial(FACTORIAL_N)
        else:
            return await MemoizationAsyncTree.workload_func(self)
def add_metadata(runner):
    runner.metadata["description"] = "Async tree workloads."
    runner.metadata["async_tree_recurse_levels"] = NUM_RECURSE_LEVELS
    runner.metadata["async_tree_recurse_branches"] = NUM_RECURSE_BRANCHES
    runner.metadata["async_tree_random_seed"] = RANDOM_SEED
    runner.metadata["async_tree_io_sleep_time"] = IO_SLEEP_TIME
    runner.metadata["async_tree_memoizable_percentage"] = MEMOIZABLE_PERCENTAGE
    runner.metadata["async_tree_cpu_probability"] = CPU_PROBABILITY
    runner.metadata["async_tree_factorial_n"] = FACTORIAL_N
def add_cmdline_args(cmd, args):
    cmd.append(args.benchmark)
def add_parser_args(parser):
    parser.add_argument(
        "benchmark",
        choices=BENCHMARKS,
        help="""\
Determines which benchmark to run. Options:
1) "none": No actual async work in the async tree.
2) "io": All leaf nodes simulate async IO workload (async sleep 50ms).
3) "memoization": All leaf nodes simulate async IO workload with 90% of 
                  the data memoized
4) "cpu_io_mixed": Half of the leaf nodes simulate CPU-bound workload and 
                   the other half simulate the same workload as the 
                   "memoization" variant.
""",
    )
BENCHMARKS = {
    "none": NoneAsyncTree,
    "io": IOAsyncTree,
    "memoization": MemoizationAsyncTree,
    "cpu_io_mixed": CpuIoMixedAsyncTree,
}
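# Illustrative invocation (added example; script filename assumed):
#   python async_tree_benchmark.py io
# runs the IO variant under pyperf with the memray_helper tracker active in AsyncTree.run().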
if __name__ == "__main__":
    runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
    add_metadata(runner)
    add_parser_args(runner.argparser)
    args = runner.parse_args()
    benchmark = args.benchmark
    async_tree_class = BENCHMARKS[benchmark]
    async_tree = async_tree_class()
    runner.bench_async_func(f"async_tree_{benchmark}", async_tree.run) | 
| 121 | 
	test check | 
	# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import pytest
from six import PY2
from datadog_checks.dev.utils import get_metadata_metrics
from datadog_checks.kong import Kong
from .common import HERE, METRICS_URL
pytestmark = [pytest.mark.unit, pytest.mark.skipif(PY2, reason='Test only available on Python 3')]
EXPECTED_METRICS = {
    'kong.bandwidth.count': 'monotonic_count',
    'kong.http.consumer.status.count': 'monotonic_count',
    'kong.http.status.count': 'monotonic_count',
    'kong.latency.bucket': 'monotonic_count',
    'kong.latency.count': 'monotonic_count',
    'kong.latency.sum': 'monotonic_count',
    'kong.memory.lua.shared_dict.bytes': 'gauge',
    'kong.memory.lua.shared_dict.total_bytes': 'gauge',
    'kong.memory.workers.lua.vms.bytes': 'gauge',
    'kong.nginx.http.current_connections': 'gauge',
    'kong.nginx.stream.current_connections': 'gauge',
    'kong.stream.status.count': 'monotonic_count',
}
EXPECTED_METRICS_v3 = {
    'kong.bandwidth.bytes.count': 'monotonic_count',
    'kong.http.requests.count': 'monotonic_count',
    'kong.kong.latency.ms.bucket': 'monotonic_count',
    'kong.kong.latency.ms.count': 'monotonic_count',
    'kong.kong.latency.ms.sum': 'monotonic_count',
    'kong.memory.lua.shared_dict.bytes': 'gauge',
    'kong.memory.lua.shared_dict.total_bytes': 'gauge',
    'kong.memory.workers.lua.vms.bytes': 'gauge',
    'kong.nginx.connections.total': 'gauge',
    'kong.nginx.requests.total': 'gauge',
    'kong.nginx.timers': 'gauge',
    'kong.request.latency.ms.bucket': 'monotonic_count',
    'kong.request.latency.ms.count': 'monotonic_count',
    'kong.request.latency.ms.sum': 'monotonic_count',
    'kong.upstream.latency.ms.bucket': 'monotonic_count',
    'kong.upstream.latency.ms.count': 'monotonic_count',
    'kong.upstream.latency.ms.sum': 'monotonic_count',
}
def get_fixture_path(filename):
    return os.path.join(HERE, 'fixtures', filename)
def test_check_v3(aggregator, dd_run_check, mock_http_response):
    mock_http_response(file_path=get_fixture_path('prometheus-v3.txt'))
    instance = {
        'openmetrics_endpoint': METRICS_URL,
        'extra_metrics': [{'kong_memory_workers_lua_vms_bytes': 'memory.workers.lua.vms.bytes'}],
    }
    check = Kong('kong', {}, [instance])
    dd_run_check(check)
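    # Each expected metric is asserted together with its submission type: the string in
    # EXPECTED_METRICS_v3 (e.g. 'gauge') is upper-cased and resolved to the aggregator's
    # type constant (e.g. aggregator.GAUGE) via getattr.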
    for metric_name, metric_type in EXPECTED_METRICS_v3.items():
        aggregator.assert_metric(metric_name, metric_type=getattr(aggregator, metric_type.upper()))
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
    aggregator.assert_service_check(
        'kong.datastore.reachable', status=Kong.OK, tags=['endpoint:{}'.format(METRICS_URL)], count=1
    )
def METHOD_NAME(aggregator, dd_run_check, mock_http_response):
    mock_http_response(file_path=get_fixture_path('prometheus.txt'))
    instance = {
        'openmetrics_endpoint': METRICS_URL,
        'extra_metrics': [{'kong_memory_workers_lua_vms_bytes': 'memory.workers.lua.vms.bytes'}],
    }
    check = Kong('kong', {}, [instance])
    dd_run_check(check)
    aggregator.assert_service_check(
        'kong.openmetrics.health', status=Kong.OK, tags=['endpoint:{}'.format(METRICS_URL)], count=1
    )
    for metric_name, metric_type in EXPECTED_METRICS.items():
        aggregator.assert_metric(metric_name, metric_type=getattr(aggregator, metric_type.upper()))
    aggregator.assert_all_metrics_covered()
    aggregator.assert_service_check(
        'kong.datastore.reachable', status=Kong.OK, tags=['endpoint:{}'.format(METRICS_URL)], count=1
    )
    assert len(aggregator.service_checks('kong.upstream.target.health')) == 3
    aggregator.assert_service_check(
        'kong.upstream.target.health',
        status=Kong.OK,
        tags=['address:localhost:1002', 'endpoint:{}'.format(METRICS_URL), 'target:target2', 'upstream:upstream2'],
        count=1,
    )
    aggregator.assert_service_check(
        'kong.upstream.target.health',
        status=Kong.CRITICAL,
        tags=['address:localhost:1003', 'endpoint:{}'.format(METRICS_URL), 'target:target3', 'upstream:upstream3'],
        count=1,
    )
    aggregator.assert_service_check(
        'kong.upstream.target.health',
        status=Kong.CRITICAL,
        tags=['address:localhost:1004', 'endpoint:{}'.format(METRICS_URL), 'target:target4', 'upstream:upstream4'],
        count=1,
    ) | 
| 122 | 
	test templatetags get related category pages draft | 
	"""Test suite for the GetRelatedCategories template tag."""
from django.test import RequestFactory
from cms.api import add_plugin
from cms.test_utils.testcases import CMSTestCase
from richie.apps.core.factories import PageFactory
from richie.apps.courses.factories import CategoryFactory
class GetRelatedCategoriesTemplateTagsTestCase(CMSTestCase):
    """
    Integration tests to validate the behavior of the `get_related_category_pages` template tag.
    """
    @staticmethod
    def _attach_categories(page, category_page_draft, category_page_published):
        """
        Not a test. Utility method to easily create and attach a draft and a published
        category to a page passed as argument.

        """
        placeholder = page.placeholders.all()[0]
        add_plugin(placeholder, "CategoryPlugin", "en", page=category_page_draft)
        add_plugin(placeholder, "CategoryPlugin", "en", page=category_page_published)
    # pylint: disable=too-many-locals
    def METHOD_NAME(self):
        """
        On a draft page, the "get_related_category_pages" template tag should inject in the
        context, all categories related to a queryset of pages via a CategoryPlugin.
        """
        page_main = PageFactory(
            template="richie/single_column.html", should_publish=True
        )
        page1, page2 = PageFactory.create_batch(
            2, template="richie/single_column.html", should_publish=True
        )
        (
            category_draft_page1_draft,
            category_draft_page1_published,
            category_draft_page2_draft,
            category_draft_page2_published,
        ) = [c.extended_object for c in CategoryFactory.create_batch(4)]
        (
            category_published_page1_draft,
            category_published_page1_published,
            category_published_page2_draft,
            category_published_page2_published,
        ) = [
            c.extended_object
            for c in CategoryFactory.create_batch(4, should_publish=True)
        ]
        self._attach_categories(
            page1, category_draft_page1_draft, category_published_page1_draft
        )
        self._attach_categories(
            page1.get_public_object(),
            category_draft_page1_published,
            category_published_page1_published,
        )
        self._attach_categories(
            page2, category_draft_page2_draft, category_published_page2_draft
        )
        self._attach_categories(
            page2.get_public_object(),
            category_draft_page2_published,
            category_published_page2_published,
        )
        request = RequestFactory().get("/")
        template = (
            "{% load cms_tags category_tags %}"
            "{% get_related_category_pages pages as categories %}"
            "{% for category in categories %}{{ category.extended_object.id }}{% endfor %}"
        )
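        # The template renders the ids of the related category pages back to back, so each
        # assertion below compares the output against the concatenated ids of the expected
        # categories (or against "" when no category should be returned).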
        # 1. Test categories present on the draft page
        # - Linked with one of the pages
        with self.assertNumQueries(1):
            output = self.render_template_obj(
                template, {"current_page": page_main, "pages": [page1]}, request
            )
        expected = [category_draft_page1_draft, category_published_page1_draft]
        self.assertEqual(output, "".join([str(c.id) for c in expected]))
        # - Linked with either of the 2 pages
        with self.assertNumQueries(1):
            output = self.render_template_obj(
                template, {"current_page": page_main, "pages": [page1, page2]}, request
            )
        expected = [
            category_draft_page1_draft,
            category_draft_page2_draft,
            category_published_page1_draft,
            category_published_page2_draft,
        ]
        self.assertEqual(output, "".join([str(c.id) for c in expected]))
        # - Linked with a page in a different publication status
        with self.assertNumQueries(1):
            output = self.render_template_obj(
                template,
                {"current_page": page_main, "pages": [page1.get_public_object()]},
                request,
            )
        expected = [category_draft_page1_draft, category_published_page1_draft]
        self.assertEqual(output, "")
        # 2. Test categories on the public page
        current_page = page_main.get_public_object()
        # - Linked with one of the pages
        with self.assertNumQueries(1):
            output = self.render_template_obj(
                template,
                {"current_page": current_page, "pages": [page1.get_public_object()]},
                request,
            )
        self.assertEqual(
            output, str(category_published_page1_published.get_public_object().id)
        )
        # - Linked with either of the 2 pages
        with self.assertNumQueries(1):
            output = self.render_template_obj(
                template,
                {
                    "current_page": current_page,
                    "pages": [page1.get_public_object(), page2.get_public_object()],
                },
                request,
            )
        expected = [
            category_published_page1_published.get_public_object(),
            category_published_page2_published.get_public_object(),
        ]
        self.assertEqual(output, "".join([str(c.id) for c in expected]))
        # - Linked with a page in a different publication status
        with self.assertNumQueries(1):
            output = self.render_template_obj(
                template,
                {"current_page": current_page, "pages": [page1]},
                request,
            )
        self.assertEqual(output, "") | 
| 123 | 
	test non critical cleanups not called | 
	# pylint: disable=unused-argument,unused-variable,redefined-outer-name
import gossip
import pytest
import slash
import slash.hooks
from .conftest import Checkpoint
def test_interruption(interrupted_suite, interrupted_index):
    interrupted_suite.run(expect_interruption=True)
def test_interruption_added_to_result(interrupted_suite, interrupted_index):
    caught = []
    @gossip.register('slash.interruption_added')
    def interruption_added(result, exception):
        caught.append(exception)
    summary = interrupted_suite.run(expect_interruption=True)
    assert len(caught) == 1
    [err] = caught              # pylint: disable=unbalanced-tuple-unpacking
    assert err.exception_type is KeyboardInterrupt
def test_interruption_triggers_gossip(request, interrupted_suite, interrupted_test):
    test_id = {'value': None}
    @gossip.register('slash.test_interrupt')
    def skip():
        test_id['value'] = slash.test.__slash__.id
    @request.addfinalizer
    def cleanup():
        skip.gossip.unregister()
    summary = interrupted_suite.run(expect_interruption=True)
    assert test_id['value'] is not None
    for result in summary.get_all_results_for_test(interrupted_test):
        assert result.test_metadata.id == test_id['value']
def test_critical_cleanups_called(interrupted_suite, interrupted_test):
    cleanup = interrupted_test.add_deferred_event(
        'slash.add_critical_cleanup', 'critical_cleanup')
    summary = interrupted_suite.run(expect_interruption=True)
    assert cleanup in summary.events
def METHOD_NAME(interrupted_suite, interrupted_test):
    cleanup = interrupted_test.add_cleanup()
    summary = interrupted_suite.run(expect_interruption=True)
    assert cleanup not in summary.events
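# Together with test_critical_cleanups_called above, this shows the distinction exercised by
# these two tests: on interruption, cleanups registered via slash.add_critical_cleanup still
# run, while regular cleanups are skipped.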
def test_sigterm_interrupt(suite, suite_test):
    suite_test.append_line('raise slash.exceptions.TerminatedException()')
    suite_test.expect_interruption()
    for test in suite.iter_all_after(suite_test):
        test.expect_deselect()
    suite.run(expect_interruption=True)
@pytest.mark.parametrize('hook_name', ['session_start', 'test_start'])
def test_sigterm_on_hook(suite, hook_name):
    @gossip.register('slash.{}'.format(hook_name))
    def session_start():  # pylint: disable=unused-variable
        raise slash.exceptions.TerminatedException('Terminated by signal')
    assert suite
    for index, test in enumerate(suite):
        if index == 0 and hook_name == 'test_start':
            # first test should be interrupted...
            test.expect_interruption()
        else:
            test.expect_deselect()
    result = suite.run(expect_interruption=True)
def test_test_end_called_for_interrupted_test(interrupted_suite, interrupted_test):
    ended = []
    @gossip.register('slash.test_end')
    def test_end():
        ended.append(slash.context.test.__slash__.id)
    s = interrupted_suite.run(expect_interruption=True)
    result = s[interrupted_test]
    assert result.test_metadata.id in ended
def test_ayalas(interrupted_suite, interrupted_test, interrupted_index, config_override, tmpdir):
    config_override('log.format', 'file: {record.message}')
    config_override('log.console_format', 'console: {record.message}')
    config_override('log.root', str(tmpdir))
    callback = Checkpoint()
    slash.hooks.log_file_closed.register(callback) # pylint: disable=no-member
    result = interrupted_suite.run(expect_interruption=True)
    num_closed_log_files = interrupted_index + 2  # One for each test that ran (the index is zero-based) + the session log
    assert callback.called_count == num_closed_log_files
def test_session_interruption_in_start(suite, suite_test, session_interrupt):
    @suite.slashconf.append_body
    def __code__():
        @slash.hooks.session_start.register # pylint: disable=no-member
        def session_cleanup():
            raise KeyboardInterrupt()
    for test in suite:
        test.expect_deselect()
    suite.run(expect_interruption=True)
    assert session_interrupt.called_count == 1
def test_interrupt_hooks_should_be_called_once(suite, suite_test, is_last_test, session_interrupt, test_interrupt_callback):
    @suite_test.append_body
    def __code__():
        @slash.add_critical_cleanup
        def cleanup():
            raise KeyboardInterrupt('A')
        raise KeyboardInterrupt('B')
    suite_test.expect_interruption()
    for t in suite.iter_all_after(suite_test, assert_has_more=not is_last_test):
        t.expect_deselect()
    result = suite.run(expect_interruption=True)
    assert test_interrupt_callback.called_count == 1
    assert session_interrupt.called_count == 1
    assert result.session.results.global_result.is_interrupted()
def test_interrupted_with_custom_exception(suite, suite_test, request):
    import test
    class CustomException(Exception):
        pass
    test.__interruption_exception__ = CustomException
    prev_interruption_exceptions = slash.exceptions.INTERRUPTION_EXCEPTIONS
    slash.exceptions.INTERRUPTION_EXCEPTIONS += (CustomException,)
    @request.addfinalizer
    def cleanup():
        del test.__interruption_exception__
        slash.exceptions.INTERRUPTION_EXCEPTIONS = prev_interruption_exceptions
    suite_test.append_line('import test')
    suite_test.append_line('raise test.__interruption_exception__()')
    suite_test.expect_interruption()
    for t in suite.iter_all_after(suite_test):
        t.expect_deselect()
    results = suite.run(expect_interruption=True)
def test_test_interrupt_hook_exception(suite_builder):
    # pylint: disable=reimported,redefined-outer-name
    @suite_builder.first_file.add_code
    def __code__():
        import slash
        @slash.hooks.test_interrupt.register # pylint: disable=no-member
        def test_interrupt(**_):
            1/0 # pylint: disable=pointless-statement
        def test_1():
            raise KeyboardInterrupt()
        def test_2():
            pass
    [res] = suite_builder.build().run().assert_results(1)
    assert res.is_interrupted()
@pytest.mark.parametrize('hook_name', ['before_session_cleanup', 'session_start', 'before_session_start'])
def test_session_scope_interruption(hook_name, suite, checkpoint):
    @gossip.register('slash.{}'.format(hook_name))
    def hook(*_, **__):
        raise KeyboardInterrupt()
    @gossip.register('slash.session_interrupt')
    def interrupt(*_, **__):
        checkpoint()
    if 'session_start' in hook_name:
        for test in suite:
            test.expect_deselect()
    else:
        assert hook_name == 'before_session_cleanup'
        suite[-1].expect_interruption()
    results = suite.run(expect_interruption=True)
    assert results.session.results.global_result.is_interrupted()
    assert checkpoint.called
@pytest.fixture
def session_interrupt():
    callback = Checkpoint()
    slash.hooks.session_interrupt.register(callback) # pylint: disable=no-member
    return callback
@pytest.fixture
def test_interrupt_callback():
    callback = Checkpoint()
    slash.hooks.test_interrupt.register(callback) # pylint: disable=no-member
    return callback
@pytest.fixture
def interrupted_suite(suite, interrupted_index):
    for index, test in enumerate(suite):
        if index == interrupted_index:
            test.append_line('raise KeyboardInterrupt()')
            test.expect_interruption()
        elif index > interrupted_index:
            test.expect_deselect()
    return suite
@pytest.fixture
def interrupted_test(interrupted_suite, interrupted_index):
    return interrupted_suite[interrupted_index]
@pytest.fixture
def interrupted_index(suite):
    return int(len(suite) // 2) | 
| 124 | 
	test create blob outside workdir | 
	# Copyright 2010-2023 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file.  (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING.  If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""Tests for Blob objects."""
import io
from pathlib import Path
import pytest
import pygit2
from . import utils
BLOB_SHA = 'a520c24d85fbfc815d385957eed41406ca5a860b'
BLOB_CONTENT = """hello world
hola mundo
bonjour le monde
""".encode()
BLOB_NEW_CONTENT = b'foo bar\n'
BLOB_FILE_CONTENT = b'bye world\n'
BLOB_PATCH = r"""diff --git a/file b/file
index a520c24..95d09f2 100644
--- a/file
+++ b/file
@@ -1,3 +1 @@
-hello world
-hola mundo
-bonjour le monde
+hello world
\ No newline at end of file
"""
BLOB_PATCH_2 = """diff --git a/file b/file
index a520c24..d675fa4 100644
--- a/file
+++ b/file
@@ -1,3 +1 @@
-hello world
-hola mundo
-bonjour le monde
+foo bar
"""
BLOB_PATCH_DELETED = """diff --git a/file b/file
deleted file mode 100644
index a520c24..0000000
--- a/file
+++ /dev/null
@@ -1,3 +0,0 @@
-hello world
-hola mundo
-bonjour le monde
"""
def test_read_blob(testrepo):
    blob = testrepo[BLOB_SHA]
    assert blob.hex == BLOB_SHA
    sha = blob.id.hex
    assert sha == BLOB_SHA
    assert isinstance(blob, pygit2.Blob)
    assert not blob.is_binary
    assert pygit2.GIT_OBJ_BLOB == blob.type
    assert BLOB_CONTENT == blob.data
    assert len(BLOB_CONTENT) == blob.size
    assert BLOB_CONTENT == blob.read_raw()
def test_create_blob(testrepo):
    blob_oid = testrepo.create_blob(BLOB_NEW_CONTENT)
    blob = testrepo[blob_oid]
    assert isinstance(blob, pygit2.Blob)
    assert pygit2.GIT_OBJ_BLOB == blob.type
    assert blob_oid == blob.id
    assert utils.gen_blob_sha1(BLOB_NEW_CONTENT) == blob_oid.hex
    assert BLOB_NEW_CONTENT == blob.data
    assert len(BLOB_NEW_CONTENT) == blob.size
    assert BLOB_NEW_CONTENT == blob.read_raw()
    blob_buffer = memoryview(blob)
    assert len(BLOB_NEW_CONTENT) == len(blob_buffer)
    assert BLOB_NEW_CONTENT == blob_buffer
    def set_content():
        blob_buffer[:2] = b'hi'
    with pytest.raises(TypeError): set_content()
def test_create_blob_fromworkdir(testrepo):
    blob_oid = testrepo.create_blob_fromworkdir("bye.txt")
    blob = testrepo[blob_oid]
    assert isinstance(blob, pygit2.Blob)
    assert pygit2.GIT_OBJ_BLOB == blob.type
    assert blob_oid == blob.id
    assert utils.gen_blob_sha1(BLOB_FILE_CONTENT) == blob_oid.hex
    assert BLOB_FILE_CONTENT == blob.data
    assert len(BLOB_FILE_CONTENT) == blob.size
    assert BLOB_FILE_CONTENT == blob.read_raw()
def test_create_blob_fromworkdir_aspath(testrepo):
    blob_oid = testrepo.create_blob_fromworkdir(Path("bye.txt"))
    blob = testrepo[blob_oid]
    assert isinstance(blob, pygit2.Blob)
def METHOD_NAME(testrepo):
    with pytest.raises(KeyError):
        testrepo.create_blob_fromworkdir(__file__)
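# __file__ is an absolute path outside the test repository's working directory, hence the
# KeyError above; create_blob_fromdisk below accepts arbitrary filesystem paths instead.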
def test_create_blob_fromdisk(testrepo):
    blob_oid = testrepo.create_blob_fromdisk(__file__)
    blob = testrepo[blob_oid]
    assert isinstance(blob, pygit2.Blob)
    assert pygit2.GIT_OBJ_BLOB == blob.type
def test_create_blob_fromiobase(testrepo):
    with pytest.raises(TypeError):
        testrepo.create_blob_fromiobase('bad type')
    f = io.BytesIO(BLOB_CONTENT)
    blob_oid = testrepo.create_blob_fromiobase(f)
    blob = testrepo[blob_oid]
    assert isinstance(blob, pygit2.Blob)
    assert pygit2.GIT_OBJ_BLOB == blob.type
    assert blob_oid == blob.id
    assert BLOB_SHA == blob_oid.hex
def test_diff_blob(testrepo):
    blob = testrepo[BLOB_SHA]
    old_blob = testrepo['3b18e512dba79e4c8300dd08aeb37f8e728b8dad']
    patch = blob.diff(old_blob, old_as_path="hello.txt")
    assert len(patch.hunks) == 1
def test_diff_blob_to_buffer(testrepo):
    blob = testrepo[BLOB_SHA]
    patch = blob.diff_to_buffer("hello world")
    assert len(patch.hunks) == 1
def test_diff_blob_to_buffer_patch_patch(testrepo):
    blob = testrepo[BLOB_SHA]
    patch = blob.diff_to_buffer("hello world")
    assert patch.text == BLOB_PATCH
def test_diff_blob_to_buffer_delete(testrepo):
    blob = testrepo[BLOB_SHA]
    patch = blob.diff_to_buffer(None)
    assert patch.text == BLOB_PATCH_DELETED
def test_diff_blob_create(testrepo):
    old = testrepo[testrepo.create_blob(BLOB_CONTENT)]
    new = testrepo[testrepo.create_blob(BLOB_NEW_CONTENT)]
    patch = old.diff(new)
    assert patch.text == BLOB_PATCH_2
def test_blob_from_repo(testrepo):
    blob = testrepo[BLOB_SHA]
    patch_one = blob.diff_to_buffer(None)
    blob = testrepo[BLOB_SHA]
    patch_two = blob.diff_to_buffer(None)
    assert patch_one.text == patch_two.text | 
| 125 | 
	on textinput gain focus | 
	import random
from travertino.constants import COLUMN
import toga
from toga.style import Pack
WIDGETS_GROUP = toga.Group("Widgets", order=2)
FOCUS_ORDER_GROUP = toga.Group("Focus Order", order=3)
class ExampleFocusApp(toga.App):
    def startup(self):
        # Window class
        #   Main window of the application with title and size
        #   Also make the window non-resizable and non-minimizable.
        self.main_window = toga.MainWindow(
            title=self.name, size=(800, 500), resizeable=False, minimizable=False
        )
        self.a_button = toga.Button("A", on_press=self.on_button_press)
        self.b_button = toga.Button("B", on_press=self.on_button_press)
        self.c_button = toga.Button("C", on_press=self.on_button_press)
        self.text_input_focus_count = 0
        self.text_input = toga.TextInput(
            placeholder="I get focused on startup.",
            style=Pack(height=25, width=200, font_size=10),
            on_gain_focus=self.METHOD_NAME,
            on_lose_focus=self.on_textinput_lose_focus,
        )
        self.other_text_input = toga.TextInput(
            placeholder="A non-focussed text input.",
            style=Pack(height=25, width=200, font_size=10),
        )
        self.switch = toga.Switch("Switch", on_change=self.on_switch_toggle)
        self.info_label = toga.Label(
            "Use keyboard shortcuts to focus on the different widgets",
            style=Pack(font_size=10),
        )
        # Add the content on the main window
        self.main_window.content = toga.Box(
            style=Pack(direction=COLUMN),
            children=[
                toga.Box(children=[self.a_button, self.b_button, self.c_button]),
                toga.Box(children=[self.text_input]),
                toga.Box(children=[self.other_text_input]),
                toga.Box(children=[self.switch]),
                toga.Box(children=[self.info_label]),
            ],
        )
        self.commands.add(
            toga.Command(
                lambda widget: self.focus_with_label(self.a_button),
                text="Focus on A",
                shortcut=toga.Key.MOD_1 + "a",
                group=WIDGETS_GROUP,
            ),
            toga.Command(
                lambda widget: self.focus_with_label(self.b_button),
                text="Focus on B",
                shortcut=toga.Key.MOD_1 + "b",
                group=WIDGETS_GROUP,
            ),
            toga.Command(
                lambda widget: self.focus_with_label(self.c_button),
                text="Focus on C",
                shortcut=toga.Key.MOD_1 + "c",
                group=WIDGETS_GROUP,
            ),
            toga.Command(
                lambda widget: self.text_input.focus(),
                text="Focus on text input",
                shortcut=toga.Key.MOD_1 + "t",
                group=WIDGETS_GROUP,
            ),
            toga.Command(
                lambda widget: self.focus_with_label(self.switch),
                text="Focus on switch",
                shortcut=toga.Key.MOD_1 + "s",
                group=WIDGETS_GROUP,
            ),
            toga.Command(
                lambda widget: self.order_focus_by_appearance(),
                text="Order focus by appearance",
                shortcut=toga.Key.MOD_1 + "o",
                group=FOCUS_ORDER_GROUP,
            ),
            toga.Command(
                lambda widget: self.order_focus_by_reversed_appearance(),
                text="Order focus by reversed appearance",
                shortcut=toga.Key.MOD_1 + "r",
                group=FOCUS_ORDER_GROUP,
            ),
            toga.Command(
                lambda widget: self.shuffle_focus(),
                text="Shuffle focus order",
                shortcut=toga.Key.MOD_1 + "f",
                group=FOCUS_ORDER_GROUP,
            ),
        )
        # Show the main window
        self.main_window.show()
        self.text_input.focus()
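        # Focus is given to the text input only after the window is shown, which is what its
        # "I get focused on startup." placeholder refers to; the on_gain_focus handler then
        # updates info_label with the running focus count.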
    def on_button_press(self, widget: toga.Button):
        self.info_label.text = "{widget_text} was pressed!".format(
            widget_text=widget.text
        )
    def on_switch_toggle(self, widget: toga.Switch):
        on_off = "on" if widget.value else "off"
        self.info_label.text = f"Switch turned {on_off}!"
    def METHOD_NAME(self, widget: toga.TextInput):
        self.info_label.text = "TextInput has previously had focus " "{} times".format(
            self.text_input_focus_count
        )
    def on_textinput_lose_focus(self, widget: toga.TextInput):
        self.text_input_focus_count += 1
    def focus_with_label(self, widget: toga.Widget):
        widget.focus()
        self.info_label.text = f"{widget.text} is focused!"
    def order_focus_by_appearance(self):
        self.set_focus_order(list(range(1, 7)))
    def order_focus_by_reversed_appearance(self):
        self.set_focus_order(list(range(6, 0, -1)))
    def shuffle_focus(self):
        indices = list(range(1, 7))
        random.shuffle(indices)
        self.set_focus_order(indices)
    def set_focus_order(self, indices):
        assert len(indices) == 6
        self.a_button.tab_index = indices[0]
        self.b_button.tab_index = indices[1]
        self.c_button.tab_index = indices[2]
        self.text_input.tab_index = indices[3]
        self.other_text_input.tab_index = indices[4]
        self.switch.tab_index = indices[5]
def main():
    # Application class
    #   App name and namespace
    app = ExampleFocusApp("Focus", "org.beeware.widgets.focus")
    return app
if __name__ == "__main__":
    app = main()
    app.main_loop() | 
| 126 | 
	split and sort | 
	"""pytest configuration
Extends output capture as needed by pybind11: ignore constructors, optional unordered lines.
Adds docstring and exceptions message sanitizers.
"""
import contextlib
import difflib
import gc
import re
import textwrap
import pytest
# Early diagnostic for failed imports
import pybind11_tests
_long_marker = re.compile(r"([0-9])L")
_hexadecimal = re.compile(r"0x[0-9a-fA-F]+")
# Avoid collecting Python3 only files
collect_ignore = []
def _strip_and_dedent(s):
    """For triple-quote strings"""
    return textwrap.dedent(s.lstrip("\n").rstrip())
def METHOD_NAME(s):
    """For output which does not require specific line order"""
    return sorted(_strip_and_dedent(s).splitlines())
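# Illustrative example: METHOD_NAME("  b\n  a\n") first strips/dedents to "b\na", then
# returns the sorted lines ["a", "b"], so comparisons ignore line order.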
def _make_explanation(a, b):
    """Explanation for a failed assert -- the a and b arguments are List[str]"""
    return ["--- actual / +++ expected"] + [
        line.strip("\n") for line in difflib.ndiff(a, b)
    ]
class Output:
    """Basic output post-processing and comparison"""
    def __init__(self, string):
        self.string = string
        self.explanation = []
    def __str__(self):
        return self.string
    def __eq__(self, other):
        # Ignore constructor/destructor output which is prefixed with "###"
        a = [
            line
            for line in self.string.strip().splitlines()
            if not line.startswith("###")
        ]
        b = _strip_and_dedent(other).splitlines()
        if a == b:
            return True
        else:
            self.explanation = _make_explanation(a, b)
            return False
class Unordered(Output):
    """Custom comparison for output without strict line ordering"""
    def __eq__(self, other):
        a = METHOD_NAME(self.string)
        b = METHOD_NAME(other)
        if a == b:
            return True
        else:
            self.explanation = _make_explanation(a, b)
            return False
class Capture:
    def __init__(self, capfd):
        self.capfd = capfd
        self.out = ""
        self.err = ""
    def __enter__(self):
        self.capfd.readouterr()
        return self
    def __exit__(self, *args):
        self.out, self.err = self.capfd.readouterr()
    def __eq__(self, other):
        a = Output(self.out)
        b = other
        if a == b:
            return True
        else:
            self.explanation = a.explanation
            return False
    def __str__(self):
        return self.out
    def __contains__(self, item):
        return item in self.out
    @property
    def unordered(self):
        return Unordered(self.out)
    @property
    def stderr(self):
        return Output(self.err)
@pytest.fixture
def capture(capsys):
    """Extended `capsys` with context manager and custom equality operators"""
    return Capture(capsys)
class SanitizedString:
    def __init__(self, sanitizer):
        self.sanitizer = sanitizer
        self.string = ""
        self.explanation = []
    def __call__(self, thing):
        self.string = self.sanitizer(thing)
        return self
    def __eq__(self, other):
        a = self.string
        b = _strip_and_dedent(other)
        if a == b:
            return True
        else:
            self.explanation = _make_explanation(a.splitlines(), b.splitlines())
            return False
def _sanitize_general(s):
    s = s.strip()
    s = s.replace("pybind11_tests.", "m.")
    s = _long_marker.sub(r"\1", s)
    return s
def _sanitize_docstring(thing):
    s = thing.__doc__
    s = _sanitize_general(s)
    return s
@pytest.fixture
def doc():
    """Sanitize docstrings and add custom failure explanation"""
    return SanitizedString(_sanitize_docstring)
def _sanitize_message(thing):
    s = str(thing)
    s = _sanitize_general(s)
    s = _hexadecimal.sub("0", s)
    return s
@pytest.fixture
def msg():
    """Sanitize messages and add custom failure explanation"""
    return SanitizedString(_sanitize_message)
# noinspection PyUnusedLocal
def pytest_assertrepr_compare(op, left, right):
    """Hook to insert custom failure explanation"""
    if hasattr(left, "explanation"):
        return left.explanation
@contextlib.contextmanager
def suppress(exception):
    """Suppress the desired exception"""
    try:
        yield
    except exception:
        pass
def gc_collect():
    """Run the garbage collector twice (needed when running
    reference counting tests with PyPy)"""
    gc.collect()
    gc.collect()
def pytest_configure():
    pytest.suppress = suppress
    pytest.gc_collect = gc_collect
def pytest_report_header(config):
    del config  # Unused.
    assert (
        pybind11_tests.compiler_info is not None
    ), "Please update pybind11_tests.cpp if this assert fails."
    return (
        "C++ Info:"
        f" {pybind11_tests.compiler_info}"
        f" {pybind11_tests.cpp_std}"
        f" {pybind11_tests.PYBIND11_INTERNALS_ID}"
    ) | 
| 127 | 
	build | 
	#!/usr/bin/env python
#
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import fnmatch
import os
import shutil
import subprocess
import sys
import tempfile
srcdir = os.path.realpath(os.path.dirname(__file__)+'/../../..')
sys.path.insert(0, os.path.join(srcdir, 'deploy', 'packaging'))
import linux_build_packages as common
def getPackageFiles(buildroot, includes, excludes):
    files = list()
    for f in includes:
        f = buildroot+f
        if os.path.isdir(f):
            for root, dirs, filenams in os.walk(f):
                for filenam in filenams:
                    files.append(os.path.join(root, filenam))
        elif ('?' in f) or ('*' in f):
            dirnam = f
            while ('?' in dirnam) or ('*' in dirnam):
                dirnam = os.path.dirname(dirnam)
            for root, dirs, filenams in os.walk(dirnam):
                if fnmatch.fnmatch(root, f):
                    files = files + \
                        getPackageFiles(
                            buildroot, [root[len(buildroot):], ], excludes)
                else:
                    for filenam in filenams:
                        filenam = os.path.join(root, filenam)
                        if fnmatch.fnmatch(filenam, f):
                            files.append(filenam)
        else:
            try:
                os.stat(f)
                files.append(f)
            except:
                pass
    if len(excludes) > 0:
        hasuid = False
        for exclude in excludes:
            if '/xuid' in exclude:
                print("excluding: %s" % exclude)
                hasuid = True
        excludefiles = getPackageFiles(buildroot, excludes, [])
        if hasuid:
            print("Found %d" % len(excludefiles))
            for exclude in excludefiles:
                print("excluding: %s" % exclude)
        for exclude in excludefiles:
            if exclude in files:
                files.remove(exclude)
    return files
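# Illustrative call (hypothetical patterns): getPackageFiles(buildroot,
# ['/usr/local/mdsplus/lib/*.so*'], ['/usr/local/mdsplus/lib/libMdsShr*']) walks the
# buildroot, expands the wildcard includes into concrete files, then removes any file that is
# also matched by an exclude pattern.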
def doRequire(info, out, root, require):
    if 'external' in require.attrib:
        pkg = common.external_package(info, root, require.attrib['package'])
        if pkg:
            os.write(out, "Depends: %s\n" % pkg)
    else:
        info['reqpkg'] = require.attrib['package']
    os.write(
        out, "Depends: mdsplus%(bname)s-%(reqpkg)s (>= %(major)d.%(minor)d.%(release)d\n" % info)
def METHOD_NAME():
    info = common.get_info()
    root = common.get_root()
    debs = list()
    for package in root.iter('package'):
        pkg = package.attrib['name']
        if pkg == 'MDSplus':
            info['packagename'] = ""
        else:
            info['packagename'] = "-%s" % pkg
        info['description'] = package.attrib['description']
        info['tmpdir'] = tempfile.mkdtemp()
        try:
            os.mkdir("%(tmpdir)s/DEBIAN" % info)
            includes = list()
            for inc in package.iter('include'):
                for inctype in inc.attrib:
                    include = inc.attrib[inctype]
                    if inctype != "dironly":
                        includes.append(include)
            excludes = list()
            for exc in package.iter('exclude'):
                for exctype in exc.attrib:
                    excludes.append(exc.attrib[exctype])
            if package.find("exclude_staticlibs") is not None:
                excludes.append("/usr/local/mdsplus/lib/*.a")
            if package.find("include_staticlibs") is not None:
                includes.append("/usr/local/mdsplus/lib/*.a")
            files = getPackageFiles(info['buildroot'], includes, excludes)
            for f in files:
                filepath = f.replace('\\', '\\\\').replace("'", "\\'")
                relpath = filepath[len(info['buildroot'])+1:]
                target = "%s/%s/" % (info['tmpdir'], os.path.dirname(relpath))
                if subprocess.Popen("mkdir -p '%s'&&cp -a '%s' '%s'" % (target, filepath, target), shell=True).wait() != 0:
                    for k, v in info.items():
                        print("%s=%s" % (k, v))
                    print("filepath=" % filepath)
                    print("target=" % target)
                    raise Exception("Error building deb")
                sys.stdout.flush()
            depends = list()
            for require in package.iter("requires"):
                if 'external' in require.attrib:
                    pkg = common.external_package(
                        info, root, require.attrib['package'])
                    if pkg is not None:
                        depends.append(pkg)
                else:
                    depends.append(
                        "mdsplus%s-%s" % (info['bname'], require.attrib['package'].replace('_', '-')))
            if len(depends) == 0:
                info['depends'] = ''
            else:
                info['depends'] = "\nDepends: %s" % ','.join(depends)
            info['name'] = info['packagename'].replace('_', '-')
            f = open("%(tmpdir)s/DEBIAN/control" % info, "w")
            f.write("""Package: mdsplus%(bname)s%(name)s
Version: %(major)d.%(minor)d.%(release)d
Section: admin
Priority: optional
Architecture: %(arch)s%(depends)s
Maintainer: Tom Fredian <twf@www.mdsplus.org>
Description: %(description)s
""" % info)
            f.close()
            for s in ("preinst", "postinst", "prerm", "postrm"):
                script = package.find(s)
                if script is not None and ("type" not in script.attrib or script.attrib["type"] != "rpm"):
                    info['script'] = s
                    f = open("%(tmpdir)s/DEBIAN/%(script)s" % info, "w")
                    f.write("#!/bin/bash\n")
                    f.write("%s" % (script.text.replace(
                        "__INSTALL_PREFIX__", "/usr/local")))
                    f.close()
                    os.chmod("%(tmpdir)s/DEBIAN/%(script)s" % info, 0o775)
            info['debfile'] = "/release/%(flavor)s/DEBS/%(arch)s/mdsplus%(bname)s%(packagename)s_%(major)d.%(minor)d.%(release)d_%(arch)s.deb" % info
            if subprocess.Popen("dpkg-deb --build %(tmpdir)s %(debfile)s" % info, shell=True).wait() != 0:
                for k, v in info.items():
                    print("%s=%s" % (k, v))
                raise Exception("Problem building package")
            sys.stdout.flush()
            debs.append({"deb": info["debfile"], "arch": info["arch"]})
        finally:
            shutil.rmtree("%(tmpdir)s" % info)
if __name__ == "__main__":
    METHOD_NAME() | 
| 128 | 
	copy | 
	#!/usr/bin/env python
import logging
l = logging.getLogger("claripy.frontends.full_frontend")
from ..frontend import Frontend
_VALIDATE_BALANCER = False
class HybridFrontend(Frontend):
    def __init__(self, exact_frontend, approximate_frontend, approximate_first=False, **kwargs):
        Frontend.__init__(self, **kwargs)
        self._exact_frontend = exact_frontend
        self._approximate_frontend = approximate_frontend
        self._approximate_first = approximate_first
        if _VALIDATE_BALANCER:
            approximate_frontend._validation_frontend = self._exact_frontend
    def _blank_copy(self, c):
        c._exact_frontend = self._exact_frontend.blank_copy()
        c._approximate_frontend = self._approximate_frontend.blank_copy()
        c._approximate_first = self._approximate_first
        if _VALIDATE_BALANCER:
            c._approximate_frontend._validation_frontend = self._exact_frontend
    def METHOD_NAME(self, c):
        self._exact_frontend.METHOD_NAME(c._exact_frontend)
        self._approximate_frontend.METHOD_NAME(c._approximate_frontend)
        self._approximate_first = c._approximate_first
        if _VALIDATE_BALANCER:
            c._approximate_frontend._validation_frontend = self._exact_frontend
    #
    # Some passthroughs
    #
    @property
    def constraints(self):
        return self._exact_frontend.constraints
    @property
    def variables(self):
        return self._exact_frontend.variables
    #
    # Serialization support
    #
    def __getstate__(self):
        return (self._exact_frontend, self._approximate_frontend, super().__getstate__())
    def __setstate__(self, s):
        self._exact_frontend, self._approximate_frontend, base_state = s
        super().__setstate__(base_state)
    #
    # Hybrid solving
    #
    def _do_call(self, f_name, *args, **kwargs):
        exact = kwargs.pop("exact", True)
        # if approximating, try the approximation backend
        if exact is False:
            try:
                return False, getattr(self._approximate_frontend, f_name)(*args, **kwargs)
            except ClaripyFrontendError:
                pass
        # if that fails, try the exact backend
        return True, getattr(self._exact_frontend, f_name)(*args, **kwargs)
    def _hybrid_call(self, f_name, *args, **kwargs):
        _, solution = self._do_call(f_name, *args, **kwargs)
        return solution
    def _approximate_first_call(self, f_name, e, n, *args, **kwargs):
        exact_used, solutions = self._do_call(f_name, e, n + 1, exact=False, *args, **kwargs)
        if exact_used is False and len(solutions) > n:
            if any(getattr(c, "variables", set()) & e.variables for c in self.constraints):
                _, _solutions = self._do_call(f_name, e, n + 1, exact=True, *args, **kwargs)
                return _solutions[:n] if len(_solutions) < len(solutions) else solutions[:n]
        return solutions[:n]
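    # In approximate-first mode the call above asks the approximate backend for n + 1
    # solutions; only if it reports more than n and the expression shares variables with the
    # current constraints is the exact backend consulted, and the smaller solution set wins.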
    def satisfiable(self, extra_constraints=(), exact=None):
        return self._hybrid_call("satisfiable", extra_constraints=extra_constraints, exact=exact)
    def eval_to_ast(self, e, n, extra_constraints=(), exact=None):
        if self._approximate_first and exact is None and n > 2:
            return self._approximate_first_call("eval_to_ast", e, n, extra_constraints=extra_constraints)
        return self._hybrid_call("eval_to_ast", e, n, extra_constraints=extra_constraints, exact=exact)
    def eval(self, e, n, extra_constraints=(), exact=None):
        if self._approximate_first and exact is None and n > 2:
            return self._approximate_first_call("eval", e, n, extra_constraints=extra_constraints)
        return self._hybrid_call("eval", e, n, extra_constraints=extra_constraints, exact=exact)
    def batch_eval(self, e, n, extra_constraints=(), exact=None):
        if self._approximate_first and exact is None and n > 2:
            return self._approximate_first_call("batch_eval", e, n, extra_constraints=extra_constraints)
        return self._hybrid_call("batch_eval", e, n, extra_constraints=extra_constraints, exact=exact)
    def max(self, e, extra_constraints=(), signed=False, exact=None):
        return self._hybrid_call("max", e, extra_constraints=extra_constraints, signed=signed, exact=exact)
    def min(self, e, extra_constraints=(), signed=False, exact=None):
        return self._hybrid_call("min", e, extra_constraints=extra_constraints, signed=signed, exact=exact)
    def solution(self, e, v, extra_constraints=(), exact=None):
        return self._hybrid_call("solution", e, v, extra_constraints=extra_constraints, exact=exact)
    def is_true(self, e, extra_constraints=(), exact=None):
        return self._hybrid_call("is_true", e, extra_constraints=extra_constraints, exact=exact)
    def is_false(self, e, extra_constraints=(), exact=None):
        return self._hybrid_call("is_false", e, extra_constraints=extra_constraints, exact=exact)
    def unsat_core(self, extra_constraints=()):
        return self._hybrid_call("unsat_core", extra_constraints=extra_constraints)
    #
    # Lifecycle
    #
    def add(self, constraints):
        added = self._exact_frontend.add(constraints)
        self._approximate_frontend.add(constraints)
        return added
    def combine(self, others):
        other_exact = [o._exact_frontend for o in others]
        other_approximate = [o._approximate_frontend for o in others]
        new_exact = self._exact_frontend.combine(other_exact)
        new_approximate = self._approximate_frontend.combine(other_approximate)
        return HybridFrontend(new_exact, new_approximate)
    def merge(self, others, merge_conditions, common_ancestor=None):
        other_exact = [o._exact_frontend for o in others]
        other_approximate = [o._approximate_frontend for o in others]
        e_merged, new_exact = self._exact_frontend.merge(
            other_exact,
            merge_conditions,
            common_ancestor=common_ancestor._exact_frontend if common_ancestor is not None else None,
        )
        new_approximate = self._approximate_frontend.merge(
            other_approximate,
            merge_conditions,
            common_ancestor=common_ancestor._approximate_frontend if common_ancestor is not None else None,
        )[-1]
        return (e_merged, HybridFrontend(new_exact, new_approximate))
    def simplify(self):
        self._approximate_frontend.simplify()
        return self._exact_frontend.simplify()
    def downsize(self):
        self._exact_frontend.downsize()
        self._approximate_frontend.downsize()
    def finalize(self):
        self._exact_frontend.finalize()
        self._approximate_frontend.finalize()
    def split(self):
        results = []
        exacts = self._exact_frontend.split()
        for e in exacts:
            a = self._approximate_frontend.blank_copy()
            a.add(e.constraints)
            results.append(HybridFrontend(e, a))
        return results
from ..errors import ClaripyFrontendError | 
| 129 | 
	serialize results | 
	# Copyright (c) 2022, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import datasets
import numpy as np
from lm_eval.base import rf
from ..metrics import mean
from .common import HFTask
class each:
    def __init__(self, f):
        self.f = f
    def __rrshift__(self, other):
        return list(map(self.f, other))
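# Illustrative usage: [1, 2] >> each(lambda y: y * 10) evaluates to [10, 20]; lists do not
# implement __rshift__ for this type, so Python falls back to each.__rrshift__, giving the
# small map-style pipeline used in _collate_data below.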
class RACE(HFTask):
    VERSION = 0
    DATASET_PATH = "race"
    DATASET_NAME = "high"
    cache = {}
    letter_to_num = {"A": 0, "B": 1, "C": 2, "D": 3}
    def has_training_docs(self):
        return True
    def has_validation_docs(self):
        return True
    def has_test_docs(self):
        return True
    def _collate_data(self, set):
        if set in self.cache:
            return self.cache[set]
        # One big issue with HF's implementation of this dataset: it makes a
        # separate document for each question; meanwhile, in the GPT3 paper it
        # is shown that one document is made per passage.
        r = collections.defaultdict(list)
        for item in datasets.load_dataset(path=self.DATASET_PATH, name=self.DATASET_NAME)[set]:
            r[item["article"]].append(item)
        res = list(
            r.values()
            >> each(
                lambda x: {
                    "article": x[0]["article"],
                    "problems": x
                    >> each(lambda y: {"question": y["question"], "answer": y["answer"], "options": y["options"],}),
                }
            )
        )
        self.cache[set] = res
        return res
    def training_docs(self):
        return self._collate_data("train")
    def validation_docs(self):
        return self._collate_data("validation")
    def test_docs(self):
        return self._collate_data("test")
    def fewshot_description(self):
        # TODO: figure out description
        return ""
    @classmethod
    def get_answer_option(cls, problem):
        answer = cls.letter_to_num[problem["answer"]]
        return problem["options"][answer]
    @classmethod
    def last_problem(cls, doc):
        return doc["problems"][-1]
    def doc_to_text(self, doc):
        text = "Article: " + doc["article"] + "\n\n"
        for problem in doc["problems"][:-1]:
            question = "Question: " + problem["question"] + "\n"
            answer = "Answer: " + self.get_answer_option(problem) + "\n"
            text += question + answer
        text += "Question: " + self.last_problem(doc)["question"] + "\n" + "Answer:"
        return text
    def doc_to_target(self, doc):
        return " " + self.get_answer_option(self.last_problem(doc))
    def construct_requests(self, doc, ctx):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.
        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        """
        problem = self.last_problem(doc)
        ll_choices = [rf.loglikelihood(ctx, " " + problem["options"][i])[0] for i in range(4)] + [
            rf.loglikelihood("Answer:", " " + problem["options"][i])[0] for i in range(4)
        ]
        return ll_choices
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document
        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        gold = self.letter_to_num[self.last_problem(doc)["answer"]]
        context_conditional_ll = results[:4]
        context_free_ll = results[4:]
        assert len(context_free_ll) == len(context_conditional_ll)
        ll_gain = [ccl - cfl for ccl, cfl in zip(context_conditional_ll, context_free_ll)]
        pred = np.argmax(ll_gain)
        pred_raw_ll = np.argmax(results[:4])
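        # "acc" scores the argmax of the raw context-conditional log-likelihoods, while
        # "acc_norm" scores the argmax of the gain over the context-free ("Answer:" only)
        # log-likelihoods computed above.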
        return {
            "acc": int(pred_raw_ll == gold),
            "acc_norm": int(pred == gold),
        }
    def METHOD_NAME(self, doc, results):
        gold = self.letter_to_num[self.last_problem(doc)["answer"]]
        context_conditional_ll = results[:4]
        context_free_ll = results[4:]
        assert len(context_free_ll) == len(context_conditional_ll)
        ll_gain = [ccl - cfl for ccl, cfl in zip(context_conditional_ll, context_free_ll)]
        pred = np.argmax(ll_gain)
        return {
            "format": self.doc_to_text(doc),
            "gold_choice": self.last_problem(doc)["options"][gold],
            "model_choice": self.last_problem(doc)["options"][pred],
            "choices (ll, ull, ll_gain)": dict(
                zip(self.last_problem(doc)["options"], zip(results[:4], results[4:], ll_gain))
            ),
        }
    def aggregation(self):
        """
        :returns: {str: [float] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metrics
        """
        return {
            "acc": mean,
            "acc_norm": mean,
        }
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        return {
            "acc": True,
            "acc_norm": True,
        } | 
| 130 | 
	gettransactions | 
	# -*- coding: utf-8 -*-
#
#    BitcoinLib - Python Cryptocurrency Library
#    Litecore.io Client
#    © 2018-2022 October - 1200 Web Development <http://1200wd.com/>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
import logging
from datetime import datetime
from bitcoinlib.main import MAX_TRANSACTIONS
from bitcoinlib.services.baseclient import BaseClient
from bitcoinlib.transactions import Transaction
PROVIDERNAME = 'insightdash'
REQUEST_LIMIT = 50
_logger = logging.getLogger(__name__)
class InsightDashClient(BaseClient):
    def __init__(self, network, base_url, denominator, *args):
        super(self.__class__, self).__init__(network, PROVIDERNAME, base_url, denominator, *args)
    def compose_request(self, category, data, cmd='', variables=None, method='get', offset=0):
        url_path = category
        if data:
            url_path += '/' + data + '/' + cmd
        if variables is None:
            variables = {}
        variables.update({'from': offset, 'to': offset+REQUEST_LIMIT})
        return self.request(url_path, variables, method=method)
    def _convert_to_transaction(self, tx):
        if tx['confirmations']:
            status = 'confirmed'
        else:
            status = 'unconfirmed'
        fees = None if 'fees' not in tx else int(round(float(tx['fees']) * self.units, 0))
        value_in = 0 if 'valueIn' not in tx else tx['valueIn']
        isCoinbase = False
        if 'isCoinBase' in tx and tx['isCoinBase']:
            isCoinbase = True
        txdate = None
        if 'blocktime' in tx:
            txdate = datetime.utcfromtimestamp(tx['blocktime'])
        t = Transaction(locktime=tx['locktime'], version=tx['version'], network=self.network,
                        fee=fees, size=tx['size'], txid=tx['txid'],
                        date=txdate, confirmations=tx['confirmations'],
                        block_height=tx['blockheight'], status=status,
                        input_total=int(round(float(value_in) * self.units, 0)), coinbase=isCoinbase,
                        output_total=int(round(float(tx['valueOut']) * self.units, 0)))
        for ti in tx['vin']:
            if isCoinbase:
                t.add_input(prev_txid=32 * b'\0', output_n=4*b'\xff', unlocking_script=ti['coinbase'], index_n=ti['n'],
                            script_type='coinbase', sequence=ti['sequence'], value=0)
            else:
                value = int(round(float(ti['value']) * self.units, 0))
                t.add_input(prev_txid=ti['txid'], output_n=ti['vout'], unlocking_script=ti['scriptSig']['hex'],
                            index_n=ti['n'], value=value, sequence=ti['sequence'],
                            double_spend=False if ti['doubleSpentTxID'] is None else ti['doubleSpentTxID'],
                            strict=self.strict)
        for to in tx['vout']:
            value = int(round(float(to['value']) * self.units, 0))
            t.add_output(value=value, lock_script=to['scriptPubKey']['hex'],
                         spent=True if to['spentTxId'] else False, output_n=to['n'],
                         spending_txid=None if not to['spentTxId'] else to['spentTxId'],
                         spending_index_n=None if not to['spentIndex'] else to['spentIndex'], strict=self.strict)
        return t
    def getbalance(self, addresslist):
        balance = 0
        addresslist = self._addresslist_convert(addresslist)
        for a in addresslist:
            res = self.compose_request('addr', a.address, 'balance')
            balance += res
        return balance
    def getutxos(self, address, after_txid='', limit=MAX_TRANSACTIONS):
        address = self._address_convert(address)
        res = self.compose_request('addrs', address.address, 'utxo')
        txs = []
        for tx in res:
            if tx['txid'] == after_txid:
                break
            txs.append({
                'address': address.address_orig,
                'txid': tx['txid'],
                'confirmations': tx['confirmations'],
                'output_n': tx['vout'],
                'input_n': 0,
                'block_height': tx['height'],
                'fee': None,
                'size': 0,
                'value': tx['satoshis'],
                'script': tx['scriptPubKey'],
                'date': None
            })
        return txs[::-1][:limit]
    def gettransaction(self, tx_id):
        tx = self.compose_request('tx', tx_id)
        return self._convert_to_transaction(tx)
    def METHOD_NAME(self, address, after_txid='', limit=MAX_TRANSACTIONS):
        address = self._address_convert(address)
        res = self.compose_request('addrs', address.address, 'txs')
        txs = []
        txs_dict = res['items'][::-1]
        if after_txid:
            txs_dict = txs_dict[[t['txid'] for t in txs_dict].index(after_txid) + 1:]
        for tx in txs_dict[:limit]:
            if tx['txid'] == after_txid:
                break
            txs.append(self._convert_to_transaction(tx))
        return txs
    def getrawtransaction(self, tx_id):
        res = self.compose_request('rawtx', tx_id)
        return res['rawtx']
    def sendrawtransaction(self, rawtx):
        res = self.compose_request('tx', 'send', variables={'rawtx': rawtx}, method='post')
        return {
            'txid': res['txid'],
            'response_dict': res
        }
    # def estimatefee
    def blockcount(self):
        res = self.compose_request('status', '', variables={'q': 'getinfo'})
        return res['info']['blocks']
    def mempool(self, txid):
        res = self.compose_request('tx', txid)
        if res['confirmations'] == 0:
            return res['txid']
        return []
    def getblock(self, blockid, parse_transactions, page, limit):
        bd = self.compose_request('block', str(blockid))
        if parse_transactions:
            txs = []
            for txid in bd['tx'][(page-1)*limit:page*limit]:
                try:
                    txs.append(self.gettransaction(txid))
                except Exception as e:
                    _logger.error("Could not parse tx %s with error %s" % (txid, e))
        else:
            txs = bd['tx']
        block = {
            'bits': bd['bits'],
            'depth': bd['confirmations'],
            'hash': bd['hash'],
            'height': bd['height'],
            'merkle_root': bd['merkleroot'],
            'nonce': bd['nonce'],
            'prev_block': bd['previousblockhash'],
            'time': datetime.utcfromtimestamp(bd['time']),
            'total_txs': len(bd['tx']),
            'txs': txs,
            'version': bd['version'],
            'page': page,
            'pages': None if not limit else int(len(bd['tx']) // limit) + (len(bd['tx']) % limit > 0),
            'limit': limit
        }
        return block
    def isspent(self, txid, output_n):
        t = self.gettransaction(txid)
        return 1 if t.outputs[output_n].spent else 0
    def getinfo(self):
        info = self.compose_request('status', '')['info']
        return {
            'blockcount': info['blocks'],
            'chain': info['network'],
            'difficulty': int(float(info['difficulty'])),
            'hashrate': 0,
            'mempool_size': 0,
        } | 
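A minimal usage sketch for the provider client above (hedged: the endpoint URL, network name, denominator and address are illustrative placeholders, and in practice bitcoinlib usually selects providers through its Service layer rather than instantiating a client directly):

    # Hypothetical example; endpoint, network, denominator and address are placeholders.
    client = InsightDashClient(network='dash',
                               base_url='https://insight.example.org/api/',
                               denominator=100000000)
    # METHOD_NAME is the masked method labelled "gettransactions" above.
    for t in client.METHOD_NAME('XexampleAddress000000000000000000', limit=10):
        print(t.txid, t.confirmations)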
| 131 | 
	test voltage to uint16 | 
	import unittest
import numpy as np
try:
    import zhinst.utils
except ImportError:
    zhinst = None
from qupulse.utils.types import TimeType
from qupulse.hardware.util import voltage_to_uint16, find_positions, get_sample_times, not_none_indices, \
    zhinst_voltage_to_uint16
from tests.pulses.sequencing_dummies import DummyWaveform
class VoltageToBinaryTests(unittest.TestCase):
    def METHOD_NAME(self):
        with self.assertRaises(ValueError):
            voltage_to_uint16(np.zeros(0), 0, 0, 0)
        linspace_voltage = np.linspace(0, 1, 128)
        with self.assertRaises(ValueError):
            voltage_to_uint16(linspace_voltage, 0.9, 0, 1)
        with self.assertRaises(ValueError):
            voltage_to_uint16(linspace_voltage, 1.1, -1, 1)
        expected_data = np.arange(0, 128, dtype=np.uint16)
        received_data = voltage_to_uint16(linspace_voltage, 0.5, 0.5, 7)
        self.assertTrue(np.all(expected_data == received_data))
    def test_zero_level_14bit(self):
        zero_level = voltage_to_uint16(np.zeros(1), 0.5, 0., 14)
        self.assertEqual(zero_level, 8192)
class FindPositionTest(unittest.TestCase):
    def test_find_position(self):
        data = [2, 6, -24, 65, 46, 5, -10, 9]
        to_find = [54, 12, 5, -10, 45, 6, 2]
        positions = find_positions(data, to_find)
        self.assertEqual(positions.tolist(), [-1, -1, 5, 6, -1, 1, 0])
class SampleTimeCalculationTest(unittest.TestCase):
    def test_get_sample_times(self):
        sample_rate = TimeType.from_fraction(12, 10)
        wf1 = DummyWaveform(duration=TimeType.from_fraction(20, 12))
        wf2 = DummyWaveform(duration=TimeType.from_fraction(400000000001, 120000000000))
        wf3 = DummyWaveform(duration=TimeType.from_fraction(1, 10**15))
        expected_times = np.arange(4) / 1.2
        times, n_samples = get_sample_times([wf1, wf2], sample_rate_in_GHz=sample_rate)
        np.testing.assert_equal(expected_times, times)
        np.testing.assert_equal(n_samples, np.asarray([2, 4]))
        with self.assertRaises(AssertionError):
            get_sample_times([], sample_rate_in_GHz=sample_rate)
        with self.assertRaisesRegex(ValueError, "non integer length"):
            get_sample_times([wf1, wf2], sample_rate_in_GHz=sample_rate, tolerance=0.)
        with self.assertRaisesRegex(ValueError, "length <= zero"):
            get_sample_times([wf1, wf3], sample_rate_in_GHz=sample_rate)
    def test_get_sample_times_single_wf(self):
        sample_rate = TimeType.from_fraction(12, 10)
        wf = DummyWaveform(duration=TimeType.from_fraction(40, 12))
        expected_times = np.arange(4) / 1.2
        times, n_samples = get_sample_times(wf, sample_rate_in_GHz=sample_rate)
        np.testing.assert_equal(times, expected_times)
        np.testing.assert_equal(n_samples, np.asarray(4))
class NotNoneIndexTest(unittest.TestCase):
    def test_not_none_indices(self):
        self.assertEqual(([None, 0, 1, None, None, 2], 3),
                         not_none_indices([None, 'a', 'b', None, None, 'c']))
@unittest.skipIf(zhinst is None, "zhinst not installed")
class ZHInstVoltageToUint16Test(unittest.TestCase):
    def test_size_exception(self):
        with self.assertRaisesRegex(ValueError, "No input"):
            zhinst_voltage_to_uint16(None, None, (None, None, None, None))
        with self.assertRaisesRegex(ValueError, "dimension"):
            zhinst_voltage_to_uint16(np.zeros(192), np.zeros(191), (None, None, None, None))
        with self.assertRaisesRegex(ValueError, "dimension"):
            zhinst_voltage_to_uint16(np.zeros(192), None, (np.zeros(191), None, None, None))
    def test_range_exception(self):
        with self.assertRaisesRegex(ValueError, "invalid"):
            zhinst_voltage_to_uint16(2.*np.ones(192), None, (None, None, None, None))
        # this should work
        zhinst_voltage_to_uint16(None, None, (2. * np.ones(192), None, None, None))
    def test_zeros(self):
        combined = zhinst_voltage_to_uint16(None, np.zeros(192), (None, None, None, None))
        np.testing.assert_array_equal(np.zeros(3*192, dtype=np.uint16), combined)
    def test_full(self):
        ch1 = np.linspace(0, 1., num=192)
        ch2 = np.linspace(0., -1., num=192)
        markers = tuple(np.array(([1.] + [0.]*m) * 192)[:192] for m in range(1, 5))
        combined = zhinst_voltage_to_uint16(ch1, ch2, markers)
        marker_data = [sum(int(markers[m][idx] > 0) << m for m in range(4))
                       for idx in range(192)]
        marker_data = np.array(marker_data, dtype=np.uint16)
        expected = zhinst.utils.convert_awg_waveform(ch1, ch2, marker_data)
        np.testing.assert_array_equal(expected, combined) | 
| 132 | 
	adjust | 
	"""
This file defines some primitives to draw a circuit schematic on Widgets.
For now it is only used in IqManagerWidget.
"""
import os.path as osp
from qtpy import QtCore, QtWidgets, QtGui
IMAGE_PATH = osp.join(osp.split(osp.dirname(__file__))[0], "images")
class MyLabelSignal(QtWidgets.QLabel):
    pass
class MyItem(QtWidgets.QWidget):
    def __init__(self, widget_name, y, label, parent, x_offset=0):
        super(MyItem, self).__init__()
        self.lay = QtWidgets.QVBoxLayout()
        self.setLayout(self.lay)
        self.item = QtWidgets.QLabel(label)
        self.setStyleSheet('background-color:transparent')
        self.lay.addWidget(self.item)
        self.widget_name = widget_name
        self.y = y
        self.x_offset = x_offset
        self.parent = parent
        parent.graphic_items.append(self)
        self.item.setStyleSheet(
            "QLabel{border: 1px solid black; border-radius: 5px; "
            "font-size: 15px; background-color:white}")
        self.proxy = parent.scene.addWidget(self)
        self.proxy.setZValue(2)
    def item_x(self):
        return self.pos().x() + self.item.pos().x()
    def item_y(self):
        return self.pos().y() + self.item.pos().y()
    def move_to_right_position(self):
        widget = self.parent.module_widgets[0].attribute_widgets[
            self.widget_name]
        x = widget.pos().x() + widget.width()/2 + self.x_offset - \
            self.item.width()/2 + self.item.x()
        y = self.y*self.parent.view.height() - self.item.height()/2 + \
            self.item.y()
        self.move(x, y)
class MyLabel(MyItem):
    pass
class MyImage(MyItem):
    def __init__(self, widget_name, y, filename, label, parent, x_offset=0):
        super(MyImage, self).__init__(widget_name, y, label, parent, x_offset)
        self.pixmap = QtGui.QPixmap(osp.join(IMAGE_PATH, filename))
        self.item.setPixmap(self.pixmap)
        self.item.setFixedSize(self.pixmap.size())
        self.label = QtWidgets.QLabel(label)
        self.lay.addWidget(self.label)
        #self.setText(self.widget_name)
class Connection(object):
    arrow_height = 10
    arrow_width = 15
    margin = 15
    def __init__(self, widget1, widget2, h_first, parent, show_arrow=True):
        self.parent = parent
        self.widget_start = widget1
        self.widget_stop = widget2
        self.h_first = h_first
        self.show_arrow = show_arrow
        self.brush = QtGui.QBrush(QtCore.Qt.black)
        self.arrow = QtWidgets.QGraphicsPolygonItem()
        self.arrow.setBrush(self.brush)
        self.pen = QtGui.QPen(QtCore.Qt.black,
                              3,
                              QtCore.Qt.SolidLine,
                              QtCore.Qt.RoundCap,
                              QtCore.Qt.RoundJoin)
        self.line1 = QtWidgets.QGraphicsLineItem()
        self.line1.setPen(self.pen)
        self.line2 = QtWidgets.QGraphicsLineItem()
        self.line2.setPen(self.pen)
        self.line1.setZValue(1)
        self.line2.setZValue(1)
        self.parent.scene.addItem(self.line1)
        self.parent.scene.addItem(self.line2)
        self.parent.scene.addItem(self.arrow)
        self.METHOD_NAME()
    def METHOD_NAME(self):
        x1 = self.widget_start.item_x() + self.widget_start.item.width() / 2
        x2 = self.widget_stop.item_x() + self.widget_stop.item.width() / 2
        y1 = self.widget_start.item_y() + self.widget_start.item.height() / 2
        y2 = self.widget_stop.item_y() + self.widget_stop.item.height() / 2
        if self.h_first:
            self.line1.setLine(x1, y1, x1, y2)
            self.line2.setLine(x1, y2, x2, y2)
        else:
            self.line1.setLine(x1, y1, x2, y1)
            self.line2.setLine(x2, y1, x2, y2)
        if self.show_arrow:
            if self.h_first:
                x = x2 - self.widget_stop.width() / 2
                y = y2
                arrow = QtGui.QPolygonF(
                    [QtCore.QPoint(x - self.margin, y - self.arrow_height / 2),
                     QtCore.QPoint(x - self.margin, y + self.arrow_height / 2),
                     QtCore.QPoint(x - self.margin + self.arrow_width, y)])
            else:
                x = x2
                y = y2 - self.widget_stop.height() / 2
                if y2 < y1:
                    margin = - self.margin
                    arrow_width = - self.arrow_width
                    y = y2 + self.widget_stop.height() / 2
                else:
                    margin = self.margin
                    arrow_width = self.arrow_width
                arrow = QtGui.QPolygonF(
                    [QtCore.QPoint(x - self.arrow_height / 2, y - margin),
                     QtCore.QPoint(x + self.arrow_height / 2, y - margin),
                     QtCore.QPoint(x, y - margin + arrow_width)])
            self.arrow.setPolygon(arrow)
class MyFrame(QtWidgets.QFrame):
    def __init__(self, parent):
        super(MyFrame, self).__init__(parent)
        self.setStyleSheet("background-color: white;")
        self.parent = parent
        self.lower()
class MyFrameDrawing(QtWidgets.QFrame):
    def __init__(self, parent):
        super(MyFrameDrawing, self).__init__()
        self.setStyleSheet("background-color: white;")
        self.parent = parent
        self.lower()
        self.proxy = self.parent.scene.addWidget(self)
        self.proxy.setZValue(-1) | 
| 133 | 
	parse footnote reference | 
	#!/usr/bin/env python3
"""Convert Google Docs V1 API's JSON to Markdown.
"""
__copyright__ = "Copyright (C) 2019  Martin Blais"
__license__ = "GNU GPLv2"
from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
def _get(obj, path):
    for comp in path.split('/'):
        if comp not in obj:
            return
        obj = obj[comp]
    return obj
def _dispatch(table, elem):
    celem = elem.copy()
    celem.pop('startIndex', None)
    celem.pop('endIndex', None)
    assert len(celem) == 1
    etype, econtents = celem.popitem()
    return table[etype](econtents)
TextRun = collections.namedtuple('TextRun', 'text family')
def parse_TextRun(contents):
    family = _get(contents, 'textStyle/weightedFontFamily/fontFamily')
    return TextRun(contents['content'], family)
def parse_AutoText(contents):
    raise NotImplementedError
def parse_PageBreak(contents):
    pass
def parse_ColumnBreak(contents):
    raise NotImplementedError
def METHOD_NAME(contents):
    pass
    #raise NotImplementedError(pprint.pformat(contents))
def parse_HorizontalRule(contents):
    pass
def parse_Equation(contents):
    raise NotImplementedError
def parse_InlineObjectElement(contents):
    pass
    #raise NotImplementedError
_dispatch_Element = {
    'textRun': parse_TextRun,
    'autoText': parse_AutoText,
    'pageBreak': parse_PageBreak,
    'columnBreak': parse_ColumnBreak,
    'footnoteReference': METHOD_NAME,
    'horizontalRule': parse_HorizontalRule,
    'equation': parse_Equation,
    'inlineObjectElement': parse_InlineObjectElement,
}
def parse_Element(elem):
    return _dispatch(_dispatch_Element, elem)
def parse_SectionBreak(econtents):
    assert econtents.keys() == {'sectionStyle'}, econtents
def parse_Table(econtents):
    pass
    #raise NotImplementedError
def parse_Paragraph(paragraph):
    style = paragraph['paragraphStyle']['namedStyleType']
    # Compress runs of text together.
    parelems = []
    for element in paragraph['elements']:
        pelem = parse_Element(element)
        if isinstance(pelem, TextRun):
            last = parelems[-1] if parelems else None
            if last and isinstance(last, TextRun) and last.family == pelem.family:
                parelems.pop(-1)
                pelem = TextRun(last.text + pelem.text, last.family)
            parelems.append(pelem)
        else:
            assert pelem is None
    # Convert all the hard newlines to soft ones.
    parelems = [elem._replace(text=elem.text.replace('\x0b', '\n'))
                if isinstance(elem, TextRun)
                else elem
                for elem in parelems]
    return (style, parelems)
def parse_TableOfContents(econtents):
    assert econtents.keys() == {'content'}, econtents.keys()
_dispatch_StructuralElement = {
    'sectionBreak': parse_SectionBreak,
    'paragraph': parse_Paragraph,
    'table': parse_Table,
    'tableOfContents': parse_TableOfContents,
}
def parse_StructuralElement(selem):
    return _dispatch(_dispatch_StructuralElement, selem)
def parse_Body(body):
    assert set(body.keys()) == {'content'}
    return list(filter(None, [parse_StructuralElement(selem)
                              for selem in body['content']]))
def parse_Document(document):
    return (document['title'], parse_Body(document['body']))
def remove_default_fonts(body, default_font='Cambria'):
    """Remove text runs with the default font."""
    new_body = []
    for etype, runs in body:
        new_runs = []
        for run in runs:
            if run.family == default_font:
                run = run._replace(family=None)
            new_runs.append(run)
        new_body.append((etype, new_runs))
    return new_body
def merge_runs(body):
    """Merge consecutive text runs with the same font."""
    new_body = []
    for etype, runs in body:
        new_runs = []
        last_run = None
        for run in runs:
            if last_run is None:
                last_run = run
            elif run.family == last_run.family:
                run = last_run = run._replace(text=(last_run.text + run.text))
                new_runs.pop(-1)
            new_runs.append(run)
        new_body.append((etype, new_runs))
    return new_body
class Renderer:
    def __init__(self, outfile):
        self.file = outfile
    def TITLE(self, item):
        print("= {} =\n".format(item.text.strip()), file=self.file)
    def HEADING_1(self, item):
        print("== {} ==\n".format(item.text.strip()), file=self.file)
    def HEADING_2(self, item):
        print("=== {} ===\n".format(item.text.strip()), file=self.file)
    def HEADING_3(self, item):
        print("==== {} ====\n".format(item.text.strip()), file=self.file)
    def HEADING_4(self, item):
        print("===== {} =====\n".format(item.text.strip()), file=self.file)
    def HEADING_5(self, item):
        print("====== {} ======\n".format(item.text.strip()), file=self.file)
    def NORMAL_TEXT(self, item):
        if item.family == 'Consolas':
            lines = item.text.split('\n')
            print('\n'.join("   {}".format(line) for line in lines), file=self.file)
        else:
            print(textwrap.fill(item.text.strip(), 80), file=self.file)
        print(file=self.file)
def main():
    logging.basicConfig(level=logging.INFO, format='%(levelname)-8s: %(message)s')
    parser = argparse.ArgumentParser(description=__doc__.strip())
    parser.add_argument('--fileordir', action='store', default=os.getcwd(),
                        help="The JSON file or directory to process")
    args = parser.parse_args()
    if path.isfile(args.fileordir):
        filenames = [args.fileordir]
    else:
        filenames = [path.join(args.fileordir, x)
                     for x in os.listdir(args.fileordir)
                     if re.search(r'\.json$', x)]
    for filename in filenames:
        with open(filename, 'r') as infile:
            document = json.load(infile)
        title, body = parse_Document(document)
        for item in body:
            assert len(item) == 2
        body = remove_default_fonts(body)
        body = merge_runs(body)
        output_filename = filename.replace('.json', '.md')
        with open(output_filename, 'w') as outfile:
            renderer = Renderer(outfile)
            for etype, runs in body:
                fun = getattr(renderer, etype, None)
                if fun is None:
                    print(etype)
                else:
                    for run in runs:
                        fun(run)
            # print(title, file=outfile)
            # print(pprint.pformat(body), file=outfile)
if __name__ == '__main__':
    main() | 
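A hedged sketch of driving the parser above programmatically (the input path is a placeholder; everything else uses only functions defined in this file):

    # Hypothetical example; 'exported_doc.json' is a placeholder path for a document
    # fetched from the Google Docs V1 API.
    with open('exported_doc.json') as infile:
        document = json.load(infile)
    title, body = parse_Document(document)
    body = merge_runs(remove_default_fonts(body))
    for style, runs in body:
        print(style, ''.join(run.text for run in runs)[:60])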
| 134 | 
	download with login | 
	#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
    Wrapper script around common i18n tasks.
    1. Add new language: creates PO files and explains how to add them to Transifex.
    2. Update languages: regenerates the English PO files; Transifex picks them up automatically.
    3. Update translations: downloads translations from Transifex and compiles them.
'''
import os
import sys
import io
import tempfile
import types
import shutil
import contextlib
import twill
from twill import commands as tw, get_browser
from twill.errors import TwillAssertionError
from clint import args
from clint.textui import puts, colored, indent
from shell_command import shell_call
# List of languages we care about
LANGS = ['en', 'fr', 'es', 'it', 'nl', 'zh', 'ne', 'km']
I18N_APPS = ['onadata.apps.main', 'onadata.apps.viewer']
TX_LOGIN_URL = 'https://www.transifex.com/signin/'
REPO_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
class DownloadFailed(Exception):
    pass
@contextlib.contextmanager
def chdir(dirname):
    curdir = os.getcwd()
    try:
        os.chdir(dirname)
        yield
    finally:
        os.chdir(curdir)
def METHOD_NAME(url, login_url, login=None,
                        password=None, ext='',
                        username_field='username',
                        password_field='password',
                        form_id=1):
    ''' Download a URI from a website using Django by logging in first
        1. Logs in using the supplied login & password (if provided)
        2. Creates a temp file on disk, using the extension if provided
        3. Writes the content of the URI into the file '''
    # log-in to Django site
    if login and password:
        tw.go(login_url)
        tw.formvalue('%s' % form_id, username_field, login)
        tw.formvalue('%s' % form_id, password_field, password)
        tw.submit()
    # retrieve URI
    try:
        tw.go(url)
        tw.code('200')
    except TwillAssertionError:
        code = get_browser().get_code()
        # ensure we don't keep credentials
        tw.reset_browser()
        raise DownloadFailed("Unable to download %(url)s. "
                             "Received HTTP #%(code)s."
                             % {'url': url, 'code': code})
    buff = io.StringIO()
    twill.set_output(buff)
    try:
        tw.show()
    finally:
        twill.set_output(None)
        tw.reset_browser()
    # write file on disk
    suffix = '.%s' % ext if ext else ''
    fileh, filename = tempfile.mkstemp(suffix=suffix)
    os.write(fileh, buff.getvalue().encode('utf-8'))
    os.close(fileh)
    buff.close()
    return filename
def getlangs(lang):
    if not lang:
        return LANGS
    if isinstance(lang, list):
        return lang
    return [lang, ]
def add(lang):
    langs = getlangs(lang)
    puts("Adding %s" % ', '.join(langs))
    for loc in langs:
        with indent(2):
            puts("Generating PO for %s" % loc)
        shell_call("django-admin.py makemessages -l %(lang)s "
                   "-e py,html,email,txt" % {'lang': loc})
        for app in I18N_APPS:
            with indent(4):
                puts("Generating PO for app %s" % app)
            with chdir(os.path.join(REPO_ROOT, app)):
                shell_call("django-admin.py makemessages "
                           "-d djangojs -l %(lang)s" % {'lang': loc})
        puts(colored.green("sucesssfuly generated %s" % loc))
def update(user, password, lang=None):
    langs = getlangs(lang)
    puts("Updating %s" % ', '.join(langs))
    for loc in langs:
        with indent(2):
            puts("Downloading PO for %s" % loc)
        url = ('https://www.transifex.com/projects/p/formhub/'
               'resource/django/l/%(lang)s/download/for_use/' % {'lang': loc})
        try:
            tmp_po_file = METHOD_NAME(url, TX_LOGIN_URL,
                                              login=user, password=password,
                                              ext='po',
                                              username_field='identification',
                                              password_field='password',
                                              form_id=1)
            po_file = os.path.join(REPO_ROOT, 'locale', loc,
                                   'LC_MESSAGES', 'django.po')
            with indent(2):
                puts("Copying downloaded file to %s" % po_file)
            shutil.move(tmp_po_file, po_file)
        except Exception as e:
            puts(colored.red("Unable to update %s "
                             "from Transifex: %r" % (loc, e)))
        puts(colored.green("sucesssfuly retrieved %s" % loc))
    compile_mo(langs)
def compile_mo(lang=None):
    langs = getlangs(lang)
    puts("Compiling %s" % ', '.join(langs))
    for loc in langs:
        with indent(2):
            puts("Compiling %s" % loc)
        shell_call("django-admin.py compilemessages -l %(lang)s "
                   % {'lang': loc})
        for app in I18N_APPS:
            with indent(4):
                puts("Compiling app %s" % app)
            with chdir(os.path.join(REPO_ROOT, app)):
                shell_call("django-admin.py compilemessages -l %(lang)s"
                           % {'lang': loc})
        puts(colored.green("sucesssfuly compiled %s" % loc))
def usage(exit=True, code=1):
    print("i18n wrapper script for formhub.\n")
    with indent(4):
        puts(colored.yellow(",/i18ntool.py add --lang <lang>"))
        puts("Create required files for enabling translation "
             "of language with code <lang>\n")
        puts(colored.yellow("./i18ntool.py refresh [--lang <lang>]"))
        puts("Update the PO file for <lang> based on code.\n"
             "<lang> is optionnal as we only use EN and do "
             "all translations in Transifex.\n")
        puts(colored.yellow("./i18ntool.py update --user <tx_user> "
                            "--password <tx_pass> [--lang <lang>]"))
        puts("Downloads new PO files for <lang> (or all) from Transifex "
             "then compiles new MO files\n")
        puts(colored.yellow("./i18ntool.py compile [--lang <lang>]"))
        puts("Compiles all PO files for <lang> (or all) into MO files.\n"
             "Not required unless you want to.\n")
    if exit:
        sys.exit(code)
COMMANDS = {
    'add': add,
    'refresh': add,
    'update': update,
    'compile': compile_mo,
    'usage': usage,
    'help': usage
}
def main():
    try:
        command = COMMANDS.get(args.all.pop(0).lower(), usage)
    except IndexError:
        command = usage
    # fallback to usage.
    if command is usage:
        return command()
    # retrieve lang
    try:
        lang = args.grouped.get('--lang', []).pop(0)
        if lang not in LANGS:
            raise ValueError("Unknown lang code")
    except ValueError as e:
        puts(colored.red(str(e)))
        usage()
    except IndexError:
        lang = None
    # update cmd requires more args.
    if command is update:
        # extract user & password
        try:
            user = args.grouped.get('--user', []).pop(0)
            password = args.grouped.get('--password', []).pop(0)
        except IndexError:
            user = password = None
        if not user or not password:
            print(colored.red(
                "You need to provide Transifex.com credentials"))
            usage()
        return command(user, password, lang)
    # execute command with lang argument.
    return command(lang)
if __name__ == '__main__':
    main() | 
| 135 | 
	url | 
	# -*- coding: utf-8 -*-
from time import time
from uuid import uuid5, NAMESPACE_URL
from urllib.parse import urlencode
def clean(data):
    """Remove all keys where value is None"""
    return dict((k, v) for k, v in data.items() if v is not None)
DEFAULT_VARS = {
    "string": "",
    "integer": 0,
    "number": 0,
}
class Request(object):
    """Wraps a Swagger operation into a Postman Request"""
    def __init__(self, collection, path, params, method, operation):
        self.collection = collection
        self.path = path
        self.params = params
        self.method = method.upper()
        self.operation = operation
    @property
    def id(self):
        seed = str(" ".join((self.method, self.METHOD_NAME)))
        return str(uuid5(self.collection.uuid, seed))
    @property
    def METHOD_NAME(self):
        return self.collection.api.base_url.rstrip("/") + self.path
    @property
    def headers(self):
        headers = {}
        # Handle content-type
        if self.method != "GET":
            consumes = self.collection.api.__schema__.get("consumes", [])
            consumes = self.operation.get("consumes", consumes)
            if len(consumes):
                headers["Content-Type"] = consumes[-1]
        # Add all parameters headers
        for param in self.operation.get("parameters", []):
            if param["in"] == "header":
                headers[param["name"]] = param.get("default", "")
        # Add security headers if needed (global then local)
        for security in self.collection.api.__schema__.get("security", []):
            for key, header in self.collection.apikeys.items():
                if key in security:
                    headers[header] = ""
        for security in self.operation.get("security", []):
            for key, header in self.collection.apikeys.items():
                if key in security:
                    headers[header] = ""
        lines = [":".join(line) for line in headers.items()]
        return "\n".join(lines)
    @property
    def folder(self):
        if "tags" not in self.operation or len(self.operation["tags"]) == 0:
            return
        tag = self.operation["tags"][0]
        for folder in self.collection.folders:
            if folder.tag == tag:
                return folder.id
    def as_dict(self, urlvars=False):
        METHOD_NAME, variables = self.process_url(urlvars)
        return clean(
            {
                "id": self.id,
                "method": self.method,
                "name": self.operation["operationId"],
                "description": self.operation.get("summary"),
                "url": METHOD_NAME,
                "headers": self.headers,
                "collectionId": self.collection.id,
                "folder": self.folder,
                "pathVariables": variables,
                "time": int(time()),
            }
        )
    def process_url(self, urlvars=False):
        METHOD_NAME = self.METHOD_NAME
        path_vars = {}
        url_vars = {}
        params = dict((p["name"], p) for p in self.params)
        params.update(
            dict((p["name"], p) for p in self.operation.get("parameters", []))
        )
        if not params:
            return METHOD_NAME, None
        for name, param in params.items():
            if param["in"] == "path":
                METHOD_NAME = METHOD_NAME.replace("{%s}" % name, ":%s" % name)
                path_vars[name] = DEFAULT_VARS.get(param["type"], "")
            elif param["in"] == "query" and urlvars:
                default = DEFAULT_VARS.get(param["type"], "")
                url_vars[name] = param.get("default", default)
        if url_vars:
            METHOD_NAME = "?".join((METHOD_NAME, urlencode(url_vars)))
        return METHOD_NAME, path_vars
class Folder(object):
    def __init__(self, collection, tag):
        self.collection = collection
        self.tag = tag["name"]
        self.description = tag["description"]
    @property
    def id(self):
        return str(uuid5(self.collection.uuid, str(self.tag)))
    @property
    def order(self):
        return [r.id for r in self.collection.requests if r.folder == self.id]
    def as_dict(self):
        return clean(
            {
                "id": self.id,
                "name": self.tag,
                "description": self.description,
                "order": self.order,
                "collectionId": self.collection.id,
            }
        )
class PostmanCollectionV1(object):
    """Postman Collection (V1 format) serializer"""
    def __init__(self, api, swagger=False):
        self.api = api
        self.swagger = swagger
    @property
    def uuid(self):
        return uuid5(NAMESPACE_URL, self.api.base_url)
    @property
    def id(self):
        return str(self.uuid)
    @property
    def requests(self):
        if self.swagger:
            # First request is Swagger specifications
            yield Request(
                self,
                "/swagger.json",
                {},
                "get",
                {
                    "operationId": "Swagger specifications",
                    "summary": "The API Swagger specifications as JSON",
                },
            )
        # Then iter over API paths and methods
        for path, operations in self.api.__schema__["paths"].items():
            path_params = operations.get("parameters", [])
            for method, operation in operations.items():
                if method != "parameters":
                    yield Request(self, path, path_params, method, operation)
    @property
    def folders(self):
        for tag in self.api.__schema__["tags"]:
            yield Folder(self, tag)
    @property
    def apikeys(self):
        return dict(
            (name, secdef["name"])
            for name, secdef in self.api.__schema__.get("securityDefinitions").items()
            if secdef.get("in") == "header" and secdef.get("type") == "apiKey"
        )
    def as_dict(self, urlvars=False):
        return clean(
            {
                "id": self.id,
                "name": " ".join((self.api.title, self.api.version)),
                "description": self.api.description,
                "order": [r.id for r in self.requests if not r.folder],
                "requests": [r.as_dict(urlvars=urlvars) for r in self.requests],
                "folders": [f.as_dict() for f in self.folders],
                "timestamp": int(time()),
            }
        ) | 
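A minimal sketch of serializing an API with the classes above (hedged: `api` is assumed to be an object exposing the attributes the serializer reads, such as `__schema__`, `base_url`, `title`, `version` and `description`, e.g. a Flask-RESTPlus/RESTX `Api`; the output filename is a placeholder):

    import json
    # Hypothetical example; `api` must match the attributes used by the serializer above.
    collection = PostmanCollectionV1(api, swagger=True)
    with open('postman_collection.json', 'w') as fp:
        json.dump(collection.as_dict(urlvars=True), fp, indent=2)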
| 136 | 
	test save | 
	import unittest
from os.path import join
from rastervision.pipeline.file_system.utils import get_tmp_dir, file_exists
from rastervision.core.box import Box
from rastervision.core.data import (ClassConfig, IdentityCRSTransformer,
                                    ChipClassificationGeoJSONStore)
from rastervision.core.data.label.chip_classification_labels import (
    ClassificationLabel, ChipClassificationLabels)
from tests import data_file_path
class TestChipClassificationLabels(unittest.TestCase):
    def setUp(self):
        self.labels = ChipClassificationLabels()
        self.cell1 = Box.make_square(0, 0, 2)
        self.class_id1 = 1
        self.labels.set_cell(self.cell1, self.class_id1)
        self.cell2 = Box.make_square(0, 2, 2)
        self.class_id2 = 2
        self.labels.set_cell(self.cell2, self.class_id2)
    def test_get_cell(self):
        cell = Box.make_square(0, 2, 3)
        class_id = self.labels.get_cell_class_id(cell)
        self.assertEqual(class_id, None)
        class_id = self.labels.get_cell_class_id(self.cell1)
        self.assertEqual(class_id, self.class_id1)
        class_id = self.labels.get_cell_class_id(self.cell2)
        self.assertEqual(class_id, self.class_id2)
    def test_get_singleton_labels(self):
        labels = self.labels.get_singleton_labels(self.cell1)
        cells = labels.get_cells()
        self.assertEqual(len(cells), 1)
        class_id = labels.get_cell_class_id(self.cell1)
        self.assertEqual(class_id, self.class_id1)
    def test_get_cells(self):
        cells = self.labels.get_cells()
        self.assertEqual(len(cells), 2)
        # ordering of cells isn't known
        self.assertTrue((cells[0] == self.cell1 and cells[1] == self.cell2)
                        or (cells[1] == self.cell1 and cells[0] == self.cell2))
    def test_get_class_ids(self):
        cells = self.labels.get_cells()
        class_ids = self.labels.get_class_ids()
        # check that order of class_ids corresponds to order of cells
        if (cells[0] == self.cell1 and cells[1] == self.cell2):
            self.assertListEqual(class_ids, [1, 2])
        elif (cells[1] == self.cell1 and cells[0] == self.cell2):
            self.assertListEqual(class_ids, [2, 1])
    def test_extend(self):
        labels = ChipClassificationLabels()
        cell3 = Box.make_square(0, 4, 2)
        class_id3 = 1
        labels.set_cell(cell3, class_id3)
        self.labels.extend(labels)
        cells = self.labels.get_cells()
        self.assertEqual(len(cells), 3)
        self.assertTrue(cell3 in cells)
    def test_filter_by_aoi(self):
        aois = [Box.make_square(0, 0, 2).to_shapely()]
        filt_labels = self.labels.filter_by_aoi(aois)
        exp_labels = ChipClassificationLabels()
        cell1 = Box.make_square(0, 0, 2)
        class_id1 = 1
        exp_labels.set_cell(cell1, class_id1)
        self.assertEqual(filt_labels, exp_labels)
        aois = [Box.make_square(4, 4, 2).to_shapely()]
        filt_labels = self.labels.filter_by_aoi(aois)
        exp_labels = ChipClassificationLabels()
        self.assertEqual(filt_labels, exp_labels)
    def test_len(self):
        self.assertEqual(len(self.labels), 2)
    def test_get_values(self):
        values_expected = [
            ClassificationLabel(class_id=self.class_id1),
            ClassificationLabel(class_id=self.class_id2)
        ]
        self.assertListEqual(self.labels.get_values(), values_expected)
    def test_get_scores(self):
        self.assertIsNone(self.labels.get_cell_scores(self.cell1))
        self.assertIsNone(self.labels.get_cell_scores(Box(0, 0, 10, 10)))
    def METHOD_NAME(self):
        uri = data_file_path('bboxes.geojson')
        class_config = ClassConfig(names=['1', '2'])
        crs_transformer = IdentityCRSTransformer()
        ls = ChipClassificationGeoJSONStore(
            uri,
            class_config=class_config,
            crs_transformer=crs_transformer,
        )
        labels = ls.get_labels()
        with get_tmp_dir() as tmp_dir:
            save_uri = join(tmp_dir, 'labels.geojson')
            labels.save(save_uri, class_config, crs_transformer)
            self.assertTrue(file_exists(save_uri))
if __name__ == '__main__':
    unittest.main() | 
| 137 | 
	format value | 
	import base64
import hashlib
import os.path
import random
import string
import urllib
from django import forms
from django.conf import settings
from django.core.files.uploadedfile import UploadedFile
from django.forms import ClearableFileInput
from django.shortcuts import resolve_url
from django.template.defaultfilters import filesizeformat
from django.utils.safestring import mark_safe
from quizzes.types.base import QuestionHelper, BaseConfigForm, MISSING_ANSWER_HTML
from submission.models.codefile import FILENAME_TYPES, validate_filename
FILE_SECRET_LENGTH = 32
def new_file_secret():
    """
    A random secret for unauth access to uploaded files
    """
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(FILE_SECRET_LENGTH))
class CleanClearableFileInput(ClearableFileInput):
    template_name = 'quizzes/clean_clearable_file_input.html'
    def METHOD_NAME(self, value):
        # format as just the filename
        if value and value.name:
            _, filename = os.path.split(value.name)
            return filename
        else:
            return 'none'
    def value_from_datadict(self, data, files, name):
        # override to accept the case "clear + file upload" without ValidationError
        upload = super().value_from_datadict(data, files, name)
        if not self.is_required and forms.CheckboxInput().value_from_datadict(
                data, files, self.clear_checkbox_name(name)):
            #if upload:
            #    return FILE_INPUT_CONTRADICTION
            # False signals to clear any existing value, as opposed to just None
            return False
        return upload
class FileAnswerField(forms.FileField):
    widget = CleanClearableFileInput
    def __init__(self, max_size: int, filename: str, filename_type: str, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.max_size = max_size
        self.filename = filename
        self.filename_type = filename_type
    def clean(self, data, initial=None):
        cleaned = super().clean(data)
        if cleaned and cleaned.size > self.max_size * 1024:
            raise forms.ValidationError('Submitted files can be at most %i kilobytes in size.' % (self.max_size,))
        return cleaned
class FileAnswer(QuestionHelper):
    name = 'File Upload'
    class ConfigForm(BaseConfigForm):
        max_size = forms.IntegerField(initial=10000, min_value=0, max_value=settings.MAX_SUBMISSION_SIZE, help_text='Maximum file size that can be uploaded by the student, in kilobytes.')
        filename = forms.CharField(max_length=500, required=False, help_text='Required filename for submitted files. Interpreted as specified in the filename type. Blank for no restriction.')
        filename_type = forms.ChoiceField(choices=FILENAME_TYPES, required=True, initial='EXT', help_text='How should your filename be interpreted?')
    def get_entry_field(self, questionanswer=None, student=None):
        max_size = self.version.config.get('max_size', 10000)
        filename = self.version.config.get('filename', '')
        filename_type = self.version.config.get('filename_type', 'EXT')
        if questionanswer:
            initial = questionanswer.file
        else:
            initial = None
        helptext = None
        if filename:
            if filename_type == 'INS':
                helptext = "Filename must be “%s” (case doesn't matter)." % (filename,)
            elif filename_type == 'MAT':
                helptext = "Filename must be “%s” (case sensitive)." % (filename,)
            elif filename_type == 'EXT':
                helptext = "Filename must end with “%s”." % (filename,)
            elif filename_type == 'REX':
                helptext = "Filename must match the regular expression “%s”." % (filename,)
        field = FileAnswerField(required=False, max_length=100, max_size=max_size, filename=filename,
                                filename_type=filename_type, initial=initial, help_text=helptext,
                                validators=[lambda upfile: validate_filename(filename, filename_type, upfile.name)])
        field.widget.attrs.update({'class': 'file-answer'})
        return field
    def to_jsonable(self, cleaned_data):
        data = {}
        if isinstance(cleaned_data, UploadedFile):
            data['filename'] = cleaned_data.name
            data['size'] = cleaned_data.size
            data['content-type'] = cleaned_data.content_type
            data['charset'] = cleaned_data.charset
            data['secret'] = new_file_secret()
            h = hashlib.sha256()
            for c in cleaned_data.chunks(1000):
                h.update(c)
            data['sha256'] = h.hexdigest()
        return {'data': data, '_file': cleaned_data}
    @staticmethod
    def unchanged_answer(prev_ans, new_ans):
        return (new_ans['_file'] is None
                or ('sha256' in prev_ans['data'] and 'sha256' in new_ans['data']
                        and prev_ans['data']['sha256'] == new_ans['data']['sha256']
                        and 'filename' in prev_ans['data'] and 'filename' in new_ans['data']
                        and prev_ans['data']['filename'] == new_ans['data']['filename'])
        )
    def secret_url(self, questionanswer):
        return settings.BASE_ABS_URL + resolve_url(
            'offering:quiz:submitted_file',
            course_slug=self.question.quiz.activity.offering.slug,
            activity_slug=self.question.quiz.activity.slug,
            userid=questionanswer.student.person.userid_or_emplid(),
            answer_id=questionanswer.id,
            secret=questionanswer.answer['data'].get('secret', '?')
        )
    def is_blank(self, questionanswer):
        data = questionanswer.answer['data']
        return not ('filename' in data and 'secret' in data)
    def to_text(self, questionanswer):
        data = questionanswer.answer['data']
        if 'filename' in data and 'secret' in data:
            return self.secret_url(questionanswer)
        else:
            return None
    def to_html(self, questionanswer):
        data = questionanswer.answer['data']
        if 'filename' in data and 'secret' in data:
            html = '<p><a href="%s">%s</a> (%s)</p>' % (
                self.secret_url(questionanswer),
                data['filename'],
                filesizeformat(data['size']),
            )
        else:
            html = MISSING_ANSWER_HTML
        return mark_safe(html)
    # unused but maybe useful later?
    def to_data_url(self, questionanswer):
        size = questionanswer.answer['data']['size']
        if size < 1024 * 10:
            data = questionanswer.file.read()
            parts = [
                'data:',
                urllib.parse.quote(questionanswer.answer['data']['content-type']),
                ';base64,',
                urllib.parse.quote(base64.b64encode(data).decode('ascii'))
            ]
            content = ''.join(parts)
        else:
            content = 'file %i bytes, type %s' % (size, questionanswer.answer['data']['content-type'])
        return content | 
| 138 | 
	name | 
	# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
    'GetRouteFilterResult',
    'AwaitableGetRouteFilterResult',
    'get_route_filter',
    'get_route_filter_output',
]
@pulumi.output_type
class GetRouteFilterResult:
    """
    Route Filter Resource.
    """
    def __init__(__self__, etag=None, id=None, ipv6_peerings=None, location=None, METHOD_NAME=None, peerings=None, provisioning_state=None, rules=None, tags=None, type=None):
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if ipv6_peerings and not isinstance(ipv6_peerings, list):
            raise TypeError("Expected argument 'ipv6_peerings' to be a list")
        pulumi.set(__self__, "ipv6_peerings", ipv6_peerings)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", METHOD_NAME)
        if peerings and not isinstance(peerings, list):
            raise TypeError("Expected argument 'peerings' to be a list")
        pulumi.set(__self__, "peerings", peerings)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if rules and not isinstance(rules, list):
            raise TypeError("Expected argument 'rules' to be a list")
        pulumi.set(__self__, "rules", rules)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(METHOD_NAME="ipv6Peerings")
    def ipv6_peerings(self) -> Sequence['outputs.ExpressRouteCircuitPeeringResponse']:
        """
        A collection of references to express route circuit ipv6 peerings.
        """
        return pulumi.get(self, "ipv6_peerings")
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def peerings(self) -> Sequence['outputs.ExpressRouteCircuitPeeringResponse']:
        """
        A collection of references to express route circuit peerings.
        """
        return pulumi.get(self, "peerings")
    @property
    @pulumi.getter(METHOD_NAME="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the route filter resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def rules(self) -> Optional[Sequence['outputs.RouteFilterRuleResponse']]:
        """
        Collection of RouteFilterRules contained within a route filter.
        """
        return pulumi.get(self, "rules")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetRouteFilterResult(GetRouteFilterResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetRouteFilterResult(
            etag=self.etag,
            id=self.id,
            ipv6_peerings=self.ipv6_peerings,
            location=self.location,
            METHOD_NAME=self.METHOD_NAME,
            peerings=self.peerings,
            provisioning_state=self.provisioning_state,
            rules=self.rules,
            tags=self.tags,
            type=self.type)
def get_route_filter(expand: Optional[str] = None,
                     resource_group_name: Optional[str] = None,
                     route_filter_name: Optional[str] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRouteFilterResult:
    """
    Gets the specified route filter.
    Azure REST API version: 2023-02-01.
    :param str expand: Expands referenced express route bgp peering resources.
    :param str resource_group_name: The name of the resource group.
    :param str route_filter_name: The name of the route filter.
    """
    __args__ = dict()
    __args__['expand'] = expand
    __args__['resourceGroupName'] = resource_group_name
    __args__['routeFilterName'] = route_filter_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:network:getRouteFilter', __args__, opts=opts, typ=GetRouteFilterResult).value
    return AwaitableGetRouteFilterResult(
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        ipv6_peerings=pulumi.get(__ret__, 'ipv6_peerings'),
        location=pulumi.get(__ret__, 'location'),
        METHOD_NAME=pulumi.get(__ret__, 'name'),
        peerings=pulumi.get(__ret__, 'peerings'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        rules=pulumi.get(__ret__, 'rules'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_route_filter)
def get_route_filter_output(expand: Optional[pulumi.Input[Optional[str]]] = None,
                            resource_group_name: Optional[pulumi.Input[str]] = None,
                            route_filter_name: Optional[pulumi.Input[str]] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRouteFilterResult]:
    """
    Gets the specified route filter.
    Azure REST API version: 2023-02-01.
    :param str expand: Expands referenced express route bgp peering resources.
    :param str resource_group_name: The name of the resource group.
    :param str route_filter_name: The name of the route filter.
    """
    ... | 
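A hedged usage sketch for the lookup function above, inside a Pulumi program (the resource group and route filter names are illustrative placeholders for an existing Azure resource):

    import pulumi
    # Hypothetical example; names are placeholders for an existing Azure route filter.
    rf = get_route_filter(resource_group_name='example-rg',
                          route_filter_name='example-route-filter')
    pulumi.export('routeFilterLocation', rf.location)
    pulumi.export('routeFilterRuleCount', len(rf.rules or []))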
| 139 | 
	test handle message dump one | 
	from typing import List
import pytest
from mock import AsyncMock
from opentrons.drivers.rpi_drivers.types import USBPort
from opentrons.hardware_control.emulation.module_server import (
    helpers,
    ModuleStatusClient,
)
from opentrons.hardware_control.emulation.module_server import models
from opentrons.hardware_control.modules import ModuleAtPort
@pytest.fixture
def mock_callback() -> AsyncMock:
    """Callback mock."""
    return AsyncMock(spec=helpers.NotifyMethod)
@pytest.fixture
def mock_client() -> AsyncMock:
    """Mock client."""
    return AsyncMock(spec=ModuleStatusClient)
@pytest.fixture
def subject(mock_callback: AsyncMock, mock_client: AsyncMock) -> helpers.ModuleListener:
    """Test subject."""
    return helpers.ModuleListener(client=mock_client, notify_method=mock_callback)
@pytest.fixture
def connections() -> List[models.ModuleConnection]:
    """Connection models."""
    return [
        models.ModuleConnection(
            url=f"url{i}", module_type=f"module_type{i}", identifier=f"identifier{i}"
        )
        for i in range(5)
    ]
@pytest.fixture
def modules_at_port() -> List[ModuleAtPort]:
    """Connection models."""
    return [
        ModuleAtPort(
            port=f"url{i}",
            name=f"module_type{i}",
            usb_port=USBPort(name=f"identifier{i}", port_number=i + 1),
        )
        for i in range(5)
    ]
async def test_handle_message_connected_empty(
    subject: helpers.ModuleListener, mock_callback: AsyncMock
) -> None:
    """It should call the call back with the correct modules to add."""
    message = models.Message(status="connected", connections=[])
    await subject.handle_message(message=message)
    mock_callback.assert_called_once_with([], [])
async def test_handle_message_connected_one(
    subject: helpers.ModuleListener,
    mock_callback: AsyncMock,
    connections: List[models.ModuleConnection],
    modules_at_port: List[ModuleAtPort],
) -> None:
    """It should call the call back with the correct modules to add."""
    message = models.Message(status="connected", connections=connections[:1])
    await subject.handle_message(message=message)
    mock_callback.assert_called_once_with(modules_at_port[:1], [])
async def test_handle_message_connected_many(
    subject: helpers.ModuleListener,
    mock_callback: AsyncMock,
    connections: List[models.ModuleConnection],
    modules_at_port: List[ModuleAtPort],
) -> None:
    """It should call the call back with the correct modules to add."""
    message = models.Message(status="connected", connections=connections)
    await subject.handle_message(message=message)
    mock_callback.assert_called_once_with(modules_at_port, [])
async def test_handle_message_disconnected_empty(
    subject: helpers.ModuleListener, mock_callback: AsyncMock
) -> None:
    """It should call the call back with the correct modules to remove."""
    message = models.Message(status="disconnected", connections=[])
    await subject.handle_message(message=message)
    mock_callback.assert_called_once_with([], [])
async def test_handle_message_disconnected_one(
    subject: helpers.ModuleListener,
    mock_callback: AsyncMock,
    connections: List[models.ModuleConnection],
    modules_at_port: List[ModuleAtPort],
) -> None:
    """It should call the call back with the correct modules to remove."""
    message = models.Message(status="disconnected", connections=connections[:1])
    await subject.handle_message(message=message)
    mock_callback.assert_called_once_with([], modules_at_port[:1])
async def test_handle_message_disconnected_many(
    subject: helpers.ModuleListener,
    mock_callback: AsyncMock,
    connections: List[models.ModuleConnection],
    modules_at_port: List[ModuleAtPort],
) -> None:
    """It should call the call back with the correct modules to remove."""
    message = models.Message(status="disconnected", connections=connections)
    await subject.handle_message(message=message)
    mock_callback.assert_called_once_with([], modules_at_port)
async def test_handle_message_dump_empty(
    subject: helpers.ModuleListener, mock_callback: AsyncMock
) -> None:
    """It should call the call back with the correct modules to load."""
    message = models.Message(status="dump", connections=[])
    await subject.handle_message(message=message)
    mock_callback.assert_called_once_with([], [])
async def METHOD_NAME(
    subject: helpers.ModuleListener,
    mock_callback: AsyncMock,
    connections: List[models.ModuleConnection],
    modules_at_port: List[ModuleAtPort],
) -> None:
    """It should call the call back with the correct modules to load."""
    message = models.Message(status="dump", connections=connections[:1])
    await subject.handle_message(message=message)
    mock_callback.assert_called_once_with(modules_at_port[:1], [])
async def test_handle_message_dump_many(
    subject: helpers.ModuleListener,
    mock_callback: AsyncMock,
    connections: List[models.ModuleConnection],
    modules_at_port: List[ModuleAtPort],
) -> None:
    """It should call the call back with the correct modules to load."""
    message = models.Message(status="dump", connections=connections)
    await subject.handle_message(message=message)
    mock_callback.assert_called_once_with(modules_at_port, []) | 
| 140 | 
	get object from version | 
	from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import logging
from alembic.operations import Operations, MigrateOperation
from flask import current_app
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
LOGGER = logging.getLogger('alembic.env')
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
config.set_main_option('sqlalchemy.url',
                       current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
# The below code is for running plpgsql(PSQL for postgres) stored procedures safely
# please refer alembic cookbook http://alembic.zzzcomputing.com/en/latest/cookbook.html
# for more
class ReversibleOp(MigrateOperation):
    def __init__(self, target):
        self.target = target
    @classmethod
    def invoke_for_target(cls, operations, target):
        op = cls(target)
        return operations.invoke(op)
    def reverse(self):
        raise NotImplementedError()
    @classmethod
    def METHOD_NAME(cls, operations, ident):
        version, objname = ident.split(".")
        module = operations.get_context().script.get_revision(version).module
        obj = getattr(module, objname)
        return obj
    @classmethod
    def replace(cls, operations, target, replaces=None, replace_with=None):
        if replaces:
            old_obj = cls.METHOD_NAME(operations, replaces)
            drop_old = cls(old_obj).reverse()
            create_new = cls(target)
        elif replace_with:
            old_obj = cls.METHOD_NAME(operations, replace_with)
            drop_old = cls(target).reverse()
            create_new = cls(old_obj)
        else:
            raise TypeError("replaces or replace_with is required")
        operations.invoke(drop_old)
        operations.invoke(create_new)
@Operations.register_operation("create_or_replace_sp", "invoke_for_target")
@Operations.register_operation("replace_sp", "replace")
class CreateSPOp(ReversibleOp):
    def reverse(self):
        return DropSPOp(self.target)
@Operations.register_operation("drop_sp", "invoke_for_target")
class DropSPOp(ReversibleOp):
    def reverse(self):
        return CreateSPOp(self.target)
@Operations.implementation_for(CreateSPOp)
def create_or_replace_sp(operations, operation):
    operations.execute(
        "CREATE OR REPLACE FUNCTION %s %s" % (
            operation.target.name, operation.target.sqltext
        )
    )
@Operations.implementation_for(DropSPOp)
def drop_sp(operations, operation):
    operations.execute("DROP FUNCTION %s" % operation.target.name)
def run_migrations_offline():
    """Run migrations in 'offline' mode.
    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well.  By skipping the Engine creation
    we don't even need a DBAPI to be available.
    Calls to context.execute() here emit the given string to the
    script output.
    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(url=url)
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.
    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.readthedocs.org/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, 'autogenerate', False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                directives[:] = []
                LOGGER.info('No changes in schema detected.')
    engine = engine_from_config(config.get_section(config.config_ini_section),
                                prefix='sqlalchemy.',
                                poolclass=pool.NullPool)
    connection = engine.connect()
    context.configure(connection=connection,
                      target_metadata=target_metadata,
                      process_revision_directives=process_revision_directives,
                      compare_type=True,
                      **current_app.extensions['migrate'].configure_args)
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
 | 
| 141 | 
	parse downloads | 
	import re
from loguru import logger
from flexget import plugin
from flexget.components.sites.urlrewriting import UrlRewritingError
from flexget.event import event
from flexget.utils import requests
from flexget.utils.soup import get_soup
logger = logger.bind(name='serienjunkies')
regex_single_ep = re.compile(r'(S\d+E\d\d+)(?!-E)', re.I)
regex_multi_ep = re.compile(r'(?P<season>S\d\d)E(?P<startep>\d\d+)-E?(?P<stopep>\d\d+)', re.I)
regex_season = re.compile(r'(?<=\.|\-)S\d\d(?:[-\.]S\d\d)*(?!E\d\d+)', re.I)
regex_language_container = re.compile(r'Sprache')
regex_is_german = re.compile(r'german|deutsch', re.I)
regex_is_foreign = re.compile(
    r'englisc?h|französisch|japanisch|dänisch|norwegisch|niederländisch|ungarisch|italienisch|portugiesisch',
    re.I,
)
regex_is_subtitle = re.compile(r'Untertitel|Subs?|UT', re.I)
LANGUAGE = ['german', 'foreign', 'subtitle', 'dual']
HOSTER = ['ul', 'cz', 'so', 'all']
DEFAULT_LANGUAGE = 'dual'
DEFAULT_HOSTER = 'ul'
class UrlRewriteSerienjunkies:
    """
    Serienjunkies urlrewriter
    Version 1.0.2
    The language setting works like a whitelist: the selected language is
    required, but other languages may also be present.
    Configuration
    language: [german|foreign|subtitle|dual] default "dual"
    hoster: [ul|cz|so|all] default "ul"
    """
    schema = {
        'type': 'object',
        'properties': {
            'language': {'type': 'string', 'enum': LANGUAGE},
            'hoster': {'type': 'string', 'enum': HOSTER},
        },
        'additionalProperties': False,
    }
    # Since the urlrewriter relies on a config, we need to create a default one
    config = {'hoster': DEFAULT_HOSTER, 'language': DEFAULT_LANGUAGE}
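    # Example task configuration (a sketch based on the schema above, not taken
    # from the plugin documentation):
    #
    #   serienjunkies:
    #     language: german
    #     hoster: ul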
    def on_task_start(self, task, config):
        self.config = config
    # urlrewriter API
    def url_rewritable(self, task, entry):
        url = entry['url']
        if url.startswith('http://www.serienjunkies.org/') or url.startswith(
            'http://serienjunkies.org/'
        ):
            return True
        return False
    # urlrewriter API
    def url_rewrite(self, task, entry):
        series_url = entry['url']
        search_title = re.sub(r'\[.*\] ', '', entry['title'])
        download_urls = self.METHOD_NAME(series_url, search_title)
        if not download_urls:
            entry.reject('No Episode found')
        else:
            entry['url'] = download_urls[-1]
            entry['description'] = ", ".join(download_urls)
        # Debug Information
        logger.debug('TV Show URL: {}', series_url)
        logger.debug('Episode: {}', search_title)
        logger.debug('Download URL: {}', download_urls)
    @plugin.internet(logger)
    def METHOD_NAME(self, series_url, search_title):
        page = requests.get(series_url).content
        try:
            soup = get_soup(page)
        except Exception as e:
            raise UrlRewritingError(e)
        urls = []
        # find all titles
        episode_titles = self.find_all_titles(search_title)
        if not episode_titles:
            raise UrlRewritingError('Unable to find episode')
        for ep_title in episode_titles:
            # find matching download
            episode_title = soup.find('strong', text=re.compile(ep_title, re.I))
            if not episode_title:
                continue
            # find download container
            episode = episode_title.parent
            if not episode:
                continue
            # find episode language
            episode_lang = episode.find_previous('strong', text=re.compile('Sprache')).next_sibling
            if not episode_lang:
                logger.warning('No language found for: {}', series_url)
                continue
            # filter language
            if not self.check_language(episode_lang):
                logger.warning(
                    'languages not matching: {} <> {}', self.config['language'], episode_lang
                )
                continue
            # find download links
            links = episode.find_all('a')
            if not links:
                logger.warning('No links found for: {}', series_url)
                continue
            for link in links:
                if not link.has_attr('href'):
                    continue
                url = link['href']
                pattern = (
                    r'http:\/\/download\.serienjunkies\.org.*%s_.*\.html' % self.config['hoster']
                )
                if re.match(pattern, url) or self.config['hoster'] == 'all':
                    urls.append(url)
                else:
                    continue
        return urls
    def find_all_titles(self, search_title):
        search_titles = []
        # Check type
        if regex_multi_ep.search(search_title):
            logger.debug('Title seems to describe multiple episodes')
            first_ep = int(regex_multi_ep.search(search_title).group('startep'))
            last_ep = int(regex_multi_ep.search(search_title).group('stopep'))
            season = regex_multi_ep.search(search_title).group('season') + 'E'
            for i in range(first_ep, last_ep + 1):
                # ToDO: Umlaute , Mehrzeilig etc.
                search_titles.append(
                    regex_multi_ep.sub(
                        season + str(i).zfill(2) + '[\\\\w\\\\.\\\\(\\\\)]*', search_title
                    )
                )
        elif regex_season.search(search_title):
            logger.debug('Title seems to describe one or more seasons')
            search_string = regex_season.search(search_title).group(0)
            for s in re.findall(r'(?<!\-)S\d\d(?!\-)', search_string):
                search_titles.append(regex_season.sub(s + '[\\\\w\\\\.]*', search_title))
            for s in re.finditer(r'(?<!\-)S(\d\d)-S(\d\d)(?!\-)', search_string):
                season_start = int(s.group(1))
                season_end = int(s.group(2))
                for i in range(season_start, season_end + 1):
                    search_titles.append(
                        regex_season.sub('S' + str(i).zfill(2) + '[\\\\w\\\\.]*', search_title)
                    )
        else:
            logger.debug('Title seems to describe a single episode')
            search_titles.append(re.escape(search_title))
        return search_titles
    def check_language(self, languages):
        # Cut additional Subtitles
        languages = languages.split('|', 1)[0]
        language_list = re.split(r'[,&]', languages)
        try:
            if self.config['language'] == 'german':
                if regex_is_german.search(language_list[0]):
                    return True
            elif self.config['language'] == 'foreign':
                if (regex_is_foreign.search(language_list[0]) and len(language_list) == 1) or (
                    len(language_list) > 1 and not regex_is_subtitle.search(language_list[1])
                ):
                    return True
            elif self.config['language'] == 'subtitle':
                if len(language_list) > 1 and regex_is_subtitle.search(language_list[1]):
                    return True
            elif self.config['language'] == 'dual':
                if len(language_list) > 1 and not regex_is_subtitle.search(language_list[1]):
                    return True
        except (KeyError, re.error):
            pass
        return False
@event('plugin.register')
def register_plugin():
    plugin.register(
        UrlRewriteSerienjunkies, 'serienjunkies', interfaces=['urlrewriter', 'task'], api_ver=2
    ) | 
| 142 | 
	solve | 
	import numpy as np
from scipy.sparse import bmat
from scipy.sparse.linalg import spsolve
import vtk
import vtk.util.numpy_support as vnp
from fealpy.decorator import barycentric
from fealpy.functionspace import IsoLagrangeFiniteElementSpace
class PhaseFieldCrystalModel():
    def __init__(self, mesh, timeline, options):
        self.options = options
        self.timeline = timeline
        self.mesh = mesh 
        self.space = IsoLagrangeFiniteElementSpace(mesh, options['order'])
        self.A = self.space.stiff_matrix()
        self.M = self.space.mass_matrix()
        self.uh0 = self.space.function()
        self.uh1 = self.space.function()
        self.ftype = mesh.ftype
        self.itype = mesh.itype
        self.H = []
        self.G = []
    def options(
            self,
            c=1,
            s=0.3,
            epsilon=-1,
            order=1
            ):
        options = {
                'c': c,
                's': s,
                'epsilon': epsilon,
                'order': order
            }
        return options
    def set_init_solution(self, u):
        self.uh0[:] = u
    def get_current_left_matrix(self):
        dt = self.timeline.current_time_step_length()
        A = self.A
        M = self.M
        S = bmat([[M + dt*(M - 2*A), -dt*A], [A, M]], format='csr')
        return S
    def get_current_right_vector(self):
        dt = self.timeline.current_time_step_length()
        gdof = self.space.number_of_global_dofs()
        s = self.options['s']
        epsilon = self.options['epsilon']
        uh0 = self.uh0 
        M = self.M
        F = np.zeros((2*gdof, ), dtype=self.ftype)
        F[:gdof] = M@uh0
        F[:gdof] *= 1 - dt*epsilon
        @barycentric
        def f(bcs):
            val = uh0(bcs)
            return s*val**2/2 - val**3/6
        F[:gdof] += dt*self.space.source_vector(f)
        return F
    def one_step_solve(self):
        """
        Notes
        -----
            Solve for the numerical solution at one time step.
        """
        gdof = self.space.number_of_global_dofs()
        A = self.get_current_left_matrix()
        F = self.get_current_right_vector()
        x = spsolve(A, F)
        self.uh0[:] = x[:gdof]
        self.uh1[:] = x[gdof:]
    def post_process(self):
        area = np.sum(self.space.cellmeasure)
        self.uh0 -= self.space.integralalg.mesh_integral(self.uh0)/area
    def Hamilton(self):
        s = self.options['s']
        epsilon = self.options['epsilon']
        uh0 = self.uh0
        uh1 = self.uh1
        @barycentric
        def f0(bcs):
            val0 = uh0(bcs)
            val1 = uh1(bcs)
            val = (val0 + val1)**2/2
            val += epsilon*val0**2/2
            val -= s*val0**3/6
            val += val0**4/24
            return val
        H = self.space.integralalg.mesh_integral(f0)
        self.H.append(H)
        @barycentric
        def f1(bcs):
            val = uh0(bcs)
            return s*val**2/2 - val**3/6
        grad = -self.M*uh0 
        grad -= epsilon*self.M*uh0 
        grad += 2*self.A*uh0 
        grad += self.space.source_vector(f1) 
        grad += self.A*uh1
        self.G.append(np.linalg.norm(grad))
    def METHOD_NAME(self, disp=True, output=False, rdir='.', step=1, postprocess=False):
        """
        Notes
        -----
        Advance the solution through all time steps.
        """
        timeline = self.timeline
        dt = timeline.current_time_step_length()
        timeline.reset() # reset the time to zero
        if postprocess:
            self.post_process()
        self.Hamilton()
        if output:
            fname = rdir + '/step_'+ str(timeline.current).zfill(10) + '.vtu'
            print(fname)
            self.write_to_vtk(fname)
        if disp:
            print(timeline.current, "Current Hamilton energy ", self.H[-1], " with gradient ",
                    self.G[-1] )
            print("Max phase value:", np.max(self.uh0))
            print("Min phase value:", np.min(self.uh0))
        while not timeline.stop():
            self.one_step_solve()
            if postprocess:
                self.post_process()
            self.Hamilton()
            timeline.current += 1
            if disp:
                print("Current Hamilton energy ", self.H[-1], " with gradient ",
                        self.G[-1])
                print("Max phase value:", np.max(self.uh0))
                print("Min phase value:", np.min(self.uh0))
            if output & (timeline.current%step == 0):
                fname = rdir + '/step_'+ str(timeline.current).zfill(10) + '.vtu'
                print(fname)
                self.write_to_vtk(fname)
        timeline.reset()
    def write_to_vtk(self, fname):
        self.mesh.nodedata['uh0'] = self.uh0
        self.mesh.nodedata['uh1'] = self.uh1
        self.mesh.to_vtk(fname=fname)
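# A minimal driver sketch (hypothetical; `mesh`, `timeline` and `u0` are assumed
# to be a fealpy mesh, a timeline object providing reset()/stop()/current and
# current_time_step_length(), and an initial dof vector):
#
#   options = {'c': 1, 's': 0.3, 'epsilon': -1, 'order': 1}
#   model = PhaseFieldCrystalModel(mesh, timeline, options)
#   model.set_init_solution(u0)
#   model.METHOD_NAME(disp=True, output=False)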
 | 
| 143 | 
	main | 
	import os
import sys
from time import time
sys.path.append(".")
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor, ops
from mindspore.communication import get_group_size, get_rank, init
from mindspore.parallel._utils import _get_device_num, _get_gradients_mean
from mindcv.data import create_dataset, create_loader, create_transforms
from mindcv.loss import create_loss
from mindcv.models import create_model
from mindcv.optim import create_optimizer
from mindcv.utils import AllReduceSum
try:
    from mindspore import jit
except ImportError:
    from mindspore import ms_function as jit
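# Note: this script is meant to be started through a distributed launcher so
# that init()/get_group_size()/get_rank() succeed; a typical (hypothetical)
# invocation would be `mpirun -n 8 python this_script.py`.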
def METHOD_NAME():
    ms.set_seed(1)
    ms.set_context(mode=ms.PYNATIVE_MODE)
    # --------------------------- Prepare data -------------------------#
    # create dataset for train and val
    init()
    device_num = get_group_size()
    rank_id = get_rank()
    ms.set_auto_parallel_context(
        device_num=device_num,
        parallel_mode="data_parallel",
        gradients_mean=True,
    )
    num_classes = 10
    num_workers = 8
    data_dir = "/data/cifar-10-batches-bin"
    download = False if os.path.exists(data_dir) else True
    dataset_train = create_dataset(
        name="cifar10",
        root=data_dir,
        split="train",
        shuffle=True,
        download=download,
        num_shards=device_num,
        shard_id=rank_id,
        num_parallel_workers=num_workers,
    )
    dataset_test = create_dataset(
        name="cifar10",
        root=data_dir,
        split="test",
        shuffle=False,
        download=False,
        num_shards=device_num,
        shard_id=rank_id,
        num_parallel_workers=num_workers,
    )
    # create transform and get trans list
    trans_train = create_transforms(dataset_name="cifar10", is_training=True)
    trans_test = create_transforms(dataset_name="cifar10", is_training=False)
    # get data loader
    loader_train = create_loader(
        dataset=dataset_train,
        batch_size=64,
        is_training=True,
        num_classes=num_classes,
        transform=trans_train,
        num_parallel_workers=num_workers,
        drop_remainder=True,
    )
    loader_test = create_loader(
        dataset=dataset_test,
        batch_size=32,
        is_training=False,
        num_classes=num_classes,
        transform=trans_test,
    )
    num_batches = loader_train.get_dataset_size()
    print("Num batches: ", num_batches)
    # --------------------------- Build model -------------------------#
    network = create_model(model_name="resnet18", num_classes=num_classes, pretrained=False)
    loss = create_loss(name="CE")
    opt = create_optimizer(network.trainable_params(), opt="adam", lr=1e-3)
    # --------------------------- Training and monitoring -------------------------#
    epochs = 10
    for t in range(epochs):
        print(f"Epoch {t + 1}\n-------------------------------")
        save_path = f"./resnet18-{t + 1}_{num_batches}.ckpt"
        b = time()
        train_epoch(network, loader_train, loss, opt)
        print("Epoch time cost: ", time() - b)
        test_epoch(network, loader_test)
        if rank_id in [None, 0]:
            ms.save_checkpoint(network, save_path, async_save=True)
    print("Done!")
def train_epoch(network, dataset, loss_fn, optimizer):
    # Define forward function
    def forward_fn(data, label):
        logits = network(data)
        loss = loss_fn(logits, label)
        return loss, logits
    # Get gradient function
    grad_fn = ops.value_and_grad(forward_fn, None, optimizer.parameters, has_aux=True)
    mean = _get_gradients_mean()
    degree = _get_device_num()
    grad_reducer = nn.DistributedGradReducer(optimizer.parameters, mean, degree)
    # Define function of one-step training,
    @jit
    def train_step_parallel(data, label):
        (loss, _), grads = grad_fn(data, label)
        grads = grad_reducer(grads)
        loss = ops.depend(loss, optimizer(grads))
        return loss
    network.set_train()
    size = dataset.get_dataset_size()
    for batch, (data, label) in enumerate(dataset.create_tuple_iterator()):
        loss = train_step_parallel(data, label)
        if batch % 100 == 0:
            loss, current = loss.asnumpy(), batch
            print(f"loss: {loss:>7f}  [{current:>3d}/{size:>3d}]")
def test_epoch(network, dataset):
    network.set_train(False)
    total, correct = 0, 0
    for data, label in dataset.create_tuple_iterator():
        pred = network(data)
        total += len(data)
        if len(label.shape) == 1:
            correct += (pred.argmax(1) == label).asnumpy().sum()
        else:  # one-hot or soft label
            correct += (pred.argmax(1) == label.argmax(1)).asnumpy().sum()
    all_reduce = AllReduceSum()
    correct = all_reduce(Tensor(correct, ms.float32))
    total = all_reduce(Tensor(total, ms.float32))
    correct /= total
    acc = 100 * correct.asnumpy()
    print(f"Test Accuracy: {acc:>0.2f}% \n")
    return acc
if __name__ == "__main__":
    METHOD_NAME() | 
| 144 | 
	random bdims | 
	# Copyright 2021 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sparse test utilities."""
from collections.abc import Sequence
import functools
from typing import Any, Callable, Union
import numpy as np
import jax
from jax import lax
from jax._src import test_util as jtu
from jax._src.typing import DTypeLike
from jax import tree_util
from jax.util import safe_zip, split_list
from jax.experimental import sparse
import jax.numpy as jnp
def is_sparse(x):
  return isinstance(x, sparse.JAXSparse)
class SparseTestCase(jtu.JaxTestCase):
  def assertSparseArraysEquivalent(self, x, y, *, check_dtypes=True, atol=None,
                                   rtol=None, canonicalize_dtypes=True, err_msg=''):
    x_bufs, x_tree = tree_util.tree_flatten(x)
    y_bufs, y_tree = tree_util.tree_flatten(y)
    self.assertEqual(x_tree, y_tree)
    self.assertAllClose(x_bufs, y_bufs, check_dtypes=check_dtypes, atol=atol, rtol=rtol,
                        canonicalize_dtypes=canonicalize_dtypes, err_msg=err_msg)
  def _CheckAgainstDense(self, dense_op, sparse_op, args_maker, check_jit=True,
                         check_dtypes=True, tol=None, atol=None, rtol=None,
                         canonicalize_dtypes=True):
    """Check an operation against a dense equivalent"""
    sparse_args = args_maker()
    dense_args = tree_util.tree_map(sparse.todense, sparse_args, is_leaf=is_sparse)
    expected = dense_op(*dense_args)
    sparse_ans = sparse_op(*sparse_args)
    actual = tree_util.tree_map(sparse.todense, sparse_ans, is_leaf=is_sparse)
    self.assertAllClose(expected, actual, check_dtypes=check_dtypes,
                        atol=atol or tol, rtol=rtol or tol,
                        canonicalize_dtypes=canonicalize_dtypes)
    if check_jit:
      sparse_ans_jit = jax.jit(sparse_op)(*sparse_args)
      self.assertSparseArraysEquivalent(sparse_ans, sparse_ans_jit,
                                        atol=atol or tol, rtol=rtol or tol)
  def _CheckGradsSparse(self, dense_fun, sparse_fun, args_maker, *,
                        argnums=None, modes=('fwd', 'rev'), atol=None, rtol=None):
    assert all(mode in ['fwd', 'rev'] for mode in modes)
    args = args_maker()
    args_flat, tree = tree_util.tree_flatten(args)
    num_bufs = [len(tree_util.tree_flatten(arg)[0]) for arg in args]
    argnums_flat = np.cumsum([0, *num_bufs[:-1]]).tolist()
    if argnums is not None:
      argnums_flat = [argnums_flat[n] for n in argnums]
    def dense_fun_flat(*args_flat):
      args = tree_util.tree_unflatten(tree, args_flat)
      args_dense = tree_util.tree_map(sparse.todense, args, is_leaf=is_sparse)
      return dense_fun(*args_dense)
    def sparse_fun_flat(*args_flat):
      out = sparse_fun(*tree_util.tree_unflatten(tree, args_flat))
      return tree_util.tree_map(sparse.todense, out, is_leaf=is_sparse)
    if 'rev' in modes:
      result_de = jax.jacrev(dense_fun_flat, argnums=argnums_flat)(*args_flat)
      result_sp = jax.jacrev(sparse_fun_flat, argnums=argnums_flat)(*args_flat)
      self.assertAllClose(result_de, result_sp, atol=atol, rtol=rtol)
    if 'fwd' in modes:
      result_de = jax.jacfwd(dense_fun_flat, argnums=argnums_flat)(*args_flat)
      result_sp = jax.jacfwd(sparse_fun_flat, argnums=argnums_flat)(*args_flat)
      self.assertAllClose(result_de, result_sp, atol=atol, rtol=rtol)
  def METHOD_NAME(self, *args):
    rng = self.rng()
    return [rng.randint(0, arg + 1) for arg in args]
  def _CheckBatchingSparse(self, dense_fun, sparse_fun, args_maker, *, batch_size=3, bdims=None,
                           check_jit=False, check_dtypes=True, tol=None, atol=None, rtol=None,
                           canonicalize_dtypes=True):
    if bdims is None:
      bdims = self.METHOD_NAME(*(arg.n_batch if is_sparse(arg) else arg.ndim
                                   for arg in args_maker()))
    def concat(args, bdim):
      return sparse.sparsify(functools.partial(lax.concatenate, dimension=bdim))(args)
    def expand(arg, bdim):
      return sparse.sparsify(functools.partial(lax.expand_dims, dimensions=[bdim]))(arg)
    def batched_args_maker():
      args = list(zip(*(args_maker() for _ in range(batch_size))))
      return [arg[0] if bdim is None else concat([expand(x, bdim) for x in arg], bdim)
              for arg, bdim in safe_zip(args, bdims)]
    self._CheckAgainstDense(jax.vmap(dense_fun, bdims), jax.vmap(sparse_fun, bdims), batched_args_maker,
                            check_dtypes=check_dtypes, tol=tol, atol=atol, rtol=rtol, check_jit=check_jit,
                            canonicalize_dtypes=canonicalize_dtypes)
def _rand_sparse(shape: Sequence[int], dtype: DTypeLike, *,
                 rng: np.random.RandomState, rand_method: Callable[..., Any],
                 nse: Union[int, float], n_batch: int, n_dense: int,
                 sparse_format: str) -> Union[sparse.BCOO, sparse.BCSR]:
  if sparse_format not in ['bcoo', 'bcsr']:
    raise ValueError(f"Sparse format {sparse_format} not supported.")
  n_sparse = len(shape) - n_batch - n_dense
  if n_sparse < 0 or n_batch < 0 or n_dense < 0:
    raise ValueError(f"Invalid parameters: {shape=} {n_batch=} {n_sparse=}")
  if sparse_format == 'bcsr' and n_sparse != 2:
    raise ValueError("bcsr array must have 2 sparse dimensions; "
                     f"{n_sparse} is given.")
  batch_shape, sparse_shape, dense_shape = split_list(shape,
                                                      [n_batch, n_sparse])
  if 0 <= nse < 1:
    nse = int(np.ceil(nse * np.prod(sparse_shape)))
  nse_int = int(nse)
  data_rng = rand_method(rng)
  data_shape = (*batch_shape, nse_int, *dense_shape)
  data = jnp.array(data_rng(data_shape, dtype))
  int32 = np.dtype('int32')
  if sparse_format == 'bcoo':
    index_shape = (*batch_shape, nse_int, n_sparse)
    indices = jnp.array(
      rng.randint(0, sparse_shape, size=index_shape, dtype=int32))
    return sparse.BCOO((data, indices), shape=shape)
  else:
    index_shape = (*batch_shape, nse_int)
    indptr_shape = (*batch_shape, sparse_shape[0] + 1)
    indices = jnp.array(
      rng.randint(0, sparse_shape[1], size=index_shape, dtype=int32))
    indptr = jnp.sort(
      rng.randint(0, nse_int + 1, size=indptr_shape, dtype=int32), axis=-1)
    indptr = indptr.at[..., 0].set(0)
    return sparse.BCSR((data, indices, indptr), shape=shape)
def rand_bcoo(rng: np.random.RandomState,
              rand_method: Callable[..., Any]=jtu.rand_default,
              nse: Union[int, float]=0.5, n_batch: int=0, n_dense: int=0):
  """Generates a random BCOO array."""
  return functools.partial(_rand_sparse, rng=rng, rand_method=rand_method,
                           nse=nse, n_batch=n_batch, n_dense=n_dense,
                           sparse_format='bcoo')
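# A minimal usage sketch (values are hypothetical): the factory mirrors the jtu
# rand_* helpers, so the returned callable takes (shape, dtype).
#
#   sampler = rand_bcoo(np.random.RandomState(0), nse=0.2, n_batch=1)
#   mat = sampler((4, 3, 5), np.float32)   # BCOO with one leading batch dim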
def rand_bcsr(rng: np.random.RandomState,
              rand_method: Callable[..., Any]=jtu.rand_default,
              nse: Union[int, float]=0.5, n_batch: int=0, n_dense: int=0):
  """Generates a random BCSR array."""
  return functools.partial(_rand_sparse, rng=rng, rand_method=rand_method,
                           nse=nse, n_batch=n_batch, n_dense=n_dense,
                           sparse_format='bcsr') | 
| 145 | 
	test help advanced global | 
	# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import json
import re
import textwrap
from pants.testutil.pants_integration_test import run_pants
from pants.util.docutil import doc_url
def test_help() -> None:
    pants_run = run_pants(["help"])
    pants_run.assert_success()
    assert "Usage:" in pants_run.stdout
def test_help_global() -> None:
    pants_run = run_pants(["help", "global"])
    pants_run.assert_success()
    assert "--level" in pants_run.stdout
    assert "Global options" in pants_run.stdout
def METHOD_NAME() -> None:
    pants_run = run_pants(["help-advanced", "global"])
    pants_run.assert_success()
    assert "Global advanced options" in pants_run.stdout
    # Spot check to see that a global advanced option is printed
    assert "--loop-max" in pants_run.stdout
def test_help_targets() -> None:
    pants_run = run_pants(["help", "targets"])
    pants_run.assert_success()
    lines = [" ".join(line.split()) for line in pants_run.stdout.splitlines()]
    assert "archive A ZIP or TAR file containing loose files and code packages." in lines
    assert "to get help for a specific target" in pants_run.stdout
def test_help_subsystems() -> None:
    pants_run = run_pants(["--backend-packages=pants.backend.python", "help", "subsystems"])
    pants_run.assert_success()
    assert (
        "pex                     How Pants uses Pex to run Python subprocesses" in pants_run.stdout
    )
    assert "to get help for a specific subsystem" in pants_run.stdout
    assert not re.search(r"^test\s+", pants_run.stdout)
def test_help_specific_target() -> None:
    pants_run = run_pants(["help", "archive"])
    pants_run.assert_success()
    assert (
        textwrap.dedent(
            """
            `archive` target
            ----------------
            A ZIP or TAR file containing loose files and code packages.
            Activated by pants.core
            Valid fields:
            """
        )
        in pants_run.stdout
    )
    assert (
        textwrap.dedent(
            """
            format
                type: 'tar' | 'tar.bz2' | 'tar.gz' | 'tar.xz' | 'zip'
                required
                The type of archive file to be generated.
            """
        )
        in pants_run.stdout
    )
def test_help_goals() -> None:
    pants_run = run_pants(["help", "goals"])
    pants_run.assert_success()
    assert "to get help for a specific goal" in pants_run.stdout
    # Spot check a few core goals.
    for goal in ["filedeps", "list", "roots"]:
        assert goal in pants_run.stdout
def test_help_goals_only_show_implemented() -> None:
    # Some core goals, such as `./pants test`, require downstream implementations to work
    # properly. We should only show those goals when an implementation is provided.
    goals_that_need_implementation = ["fmt", "test"]
    command = ["--pants-config-files=[]", "help", "goals"]
    not_implemented_run = run_pants(["--backend-packages=[]", *command])
    not_implemented_run.assert_success()
    for goal in goals_that_need_implementation:
        assert goal not in not_implemented_run.stdout
    implemented_run = run_pants(
        [
            "--backend-packages=['pants.backend.python', 'pants.backend.python.lint.isort']",
            *command,
        ],
    )
    implemented_run.assert_success()
    for goal in goals_that_need_implementation:
        assert goal in implemented_run.stdout
def test_help_all() -> None:
    pants_run = run_pants(["--backend-packages=pants.backend.python", "help-all"])
    pants_run.assert_success()
    all_help = json.loads(pants_run.stdout)
    # Spot check the data.
    assert "name_to_goal_info" in all_help
    assert "test" in all_help["name_to_goal_info"]
    assert "scope_to_help_info" in all_help
    assert "" in all_help["scope_to_help_info"]
    assert "pytest" in all_help["scope_to_help_info"]
    assert len(all_help["scope_to_help_info"]["pytest"]["basic"]) > 0
def test_unknown_goal() -> None:
    pants_run = run_pants(["testx"])
    pants_run.assert_failure()
    assert "Unknown goal: testx" in pants_run.stdout
    assert "Did you mean test" in pants_run.stdout
def test_unknown_global_flags() -> None:
    pants_run = run_pants(["--pants-workdirx", "goals"])
    pants_run.assert_failure()
    assert "Unknown flag --pants-workdirx on global scope" in pants_run.stdout
    assert "Did you mean --pants-workdir" in pants_run.stdout
def test_unknown_scoped_flags() -> None:
    pants_run = run_pants(["test", "--forcex"])
    pants_run.assert_failure()
    assert "Unknown flag --forcex on test scope" in pants_run.stdout
    assert "Did you mean --force" in pants_run.stdout
def test_global_flag_in_scoped_position() -> None:
    pants_run = run_pants(
        ["test", "--pants-distdir=dist/"],
    )
    pants_run.assert_failure()
    assert "Unknown flag --pants-distdir on test scope" in pants_run.stdout
    assert "Did you mean to use the global --pants-distdir?" in pants_run.stdout
def test_help_provided_target_plugin_field() -> None:
    pants_run = run_pants(
        [
            "--backend-packages=['pants.backend.python', 'pants.backend.experimental.python']",
            "help",
            "python_distribution",
        ]
    )
    pants_run.assert_success()
    assert (
        textwrap.dedent(
            f"""
            `python_distribution` target
            ----------------------------
            A publishable Python setuptools distribution (e.g. an sdist or wheel).
            See {doc_url("python-distributions")}.
            Activated by pants.backend.python
            Valid fields:
            """
        )
        in pants_run.stdout
    )
    assert (
        textwrap.dedent(
            """
            skip_twine
                from: pants.backend.experimental.python
                type: bool
                default: False
                If true, don't publish this target's packages using Twine.
            tags
                type: Iterable[str] | None
                default: None
                Arbitrary strings to describe a target.
            """
        )
        in pants_run.stdout
    )
def test_help_ignore_specs() -> None:
    pants_run = run_pants(
        ["test", "src/python/pants/bsp/protocol_test.py", "--help"],
    )
    pants_run.assert_success()
    assert "`test` goal options" in pants_run.stdout | 
| 146 | 
	ltp bind payload | 
	# SPDX-License-Identifier: GPL-2.0-or-later
# This file is part of Scapy
# See https://scapy.net/ for more information
# Copyright 2012 (C) The MITRE Corporation
"""
.. centered::
    NOTICE
    This software/technical data was produced for the U.S. Government
    under Prime Contract No. NASA-03001 and JPL Contract No. 1295026
    and is subject to FAR 52.227-14 (6/87) Rights in Data General,
    and Article GP-51, Rights in Data  General, respectively.
    This software is publicly released under MITRE case #12-3054
"""
# scapy.contrib.description = Licklider Transmission Protocol (LTP)
# scapy.contrib.status = loads
from scapy.packet import Packet, bind_layers, bind_top_down
from scapy.fields import BitEnumField, BitField, BitFieldLenField, \
    ByteEnumField, ConditionalField, PacketListField, StrLenField
from scapy.layers.inet import UDP
from scapy.config import conf
from scapy.contrib.sdnv import SDNV2, SDNV2FieldLenField
# LTP https://tools.ietf.org/html/rfc5326
_ltp_flag_vals = {
    0: '0x0 Red data, NOT (Checkpoint, EORP or EOB)',
    1: '0x1 Red data, Checkpoint, NOT (EORP or EOB)',
    2: '0x2 Red data, Checkpoint, EORP, NOT EOB',
    3: '0x3 Red data, Checkpoint, EORP, EOB',
    4: '0x4 Green data, NOT EOB',
    5: '0x5 Green data, undefined',
    6: '0x6 Green data, undefined',
    7: '0x7 Green data, EOB',
    8: '0x8 Report segment',
    9: '0x9 Report-acknowledgment segment',
    10: '0xA Control segment, undefined',
    11: '0xB Control segment, undefined',
    12: '0xC Cancel segment from block sender',
    13: '0xD Cancel-acknowledgment segment to block sender',
    14: '0xE Cancel segment from block receiver',
    15: '0xF Cancel-acknowledgment segment to block receiver'}
_ltp_cancel_reasons = {
    0: 'USR_CNCLD  - Client service canceled session.',
    1: 'UNREACH    - Unreachable client service.',
    2: 'RLEXC      - Retransmission limit exceeded.',
    3: 'MISCOLORED - Received miscolored segment.',
    4: 'SYS_CNCLD  - System error condition.',
    5: 'RXMTCYCEXC - Exceeded the retransmission cycles limit.',
    6: 'RESERVED'}   # Reserved 0x06-0xFF
# LTP Extensions https://tools.ietf.org/html/rfc5327
_ltp_extension_tag = {
    0: 'LTP authentication extension',
    1: 'LTP cookie extension'
}
_ltp_data_segment = [0, 1, 2, 3, 4, 5, 6, 7]
_ltp_checkpoint_segment = [1, 2, 3]
_ltp_payload_conditions = {}
def METHOD_NAME(cls, lambd):
    """Bind payload class to the LTP packets.
    :param cls: the class to bind
    :param lambd: lambda that will be called to check
        whether or not the cls should be used
        ex: lambda pkt: ...
    """
    _ltp_payload_conditions[cls] = lambd
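# A minimal usage sketch (the layer name is hypothetical): register a class so
# it is used to dissect the payload of LTP data segments.
#
#   METHOD_NAME(SomeBundleLayer, lambda pkt: pkt.flags in _ltp_data_segment)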
class LTPex(Packet):
    name = "LTP Extension"
    fields_desc = [
        ByteEnumField("ExTag", 0, _ltp_extension_tag),
        SDNV2FieldLenField("ExLength", None, length_of="ExData"),
        # SDNV2FieldLenField
        StrLenField("ExData", "", length_from=lambda x: x.ExLength)
    ]
    def default_payload_class(self, pay):
        return conf.padding_layer
class LTPReceptionClaim(Packet):
    name = "LTP Reception Claim"
    fields_desc = [SDNV2("ReceptionClaimOffset", 0),
                   SDNV2("ReceptionClaimLength", 0)]
    def default_payload_class(self, pay):
        return conf.padding_layer
def _ltp_guess_payload(pkt, *args):
    for k, v in _ltp_payload_conditions.items():
        if v(pkt):
            return k
    return conf.raw_layer
class LTP(Packet):
    name = "LTP"
    fields_desc = [
        BitField('version', 0, 4),
        BitEnumField('flags', 0, 4, _ltp_flag_vals),
        SDNV2("SessionOriginator", 0),
        SDNV2("SessionNumber", 0),
        BitFieldLenField("HeaderExtensionCount", None, 4, count_of="HeaderExtensions"),  # noqa: E501
        BitFieldLenField("TrailerExtensionCount", None, 4, count_of="TrailerExtensions"),  # noqa: E501
        PacketListField("HeaderExtensions", [], LTPex, count_from=lambda x: x.HeaderExtensionCount),  # noqa: E501
        #
        # LTP segments containing data have a DATA header
        #
        ConditionalField(SDNV2("DATA_ClientServiceID", 0),
                         lambda x: x.flags in _ltp_data_segment),
        ConditionalField(SDNV2("DATA_PayloadOffset", 0),
                         lambda x: x.flags in _ltp_data_segment),
        ConditionalField(SDNV2FieldLenField("DATA_PayloadLength", None, length_of="LTP_Payload"),  # noqa: E501
                         lambda x: x.flags in _ltp_data_segment),
        #
        # LTP segments that are checkpoints will have a checkpoint serial number and report serial number.  # noqa: E501
        #
        ConditionalField(SDNV2("CheckpointSerialNo", 0),
                         lambda x: x.flags in _ltp_checkpoint_segment),
        #
        # For segments that are checkpoints or reception reports.
        #
        ConditionalField(SDNV2("ReportSerialNo", 0),
                         lambda x: x.flags in _ltp_checkpoint_segment \
                         or x.flags == 8),
        #
        # Then comes the actual payload for data carrying segments.
        #
        ConditionalField(PacketListField("LTP_Payload", None, next_cls_cb=_ltp_guess_payload,  # noqa: E501
                                         length_from=lambda x: x.DATA_PayloadLength),  # noqa: E501
                         lambda x: x.flags in _ltp_data_segment),
        #
        # Report ACKS acknowledge a particular report serial number.
        #
        ConditionalField(SDNV2("RA_ReportSerialNo", 0),
                         lambda x: x.flags == 9),
        #
        # Reception reports have the following fields,
        # excluding ReportSerialNo defined above.
        #
        ConditionalField(SDNV2("ReportCheckpointSerialNo", 0),
                         lambda x: x.flags == 8),
        ConditionalField(SDNV2("ReportUpperBound", 0),
                         lambda x: x.flags == 8),
        ConditionalField(SDNV2("ReportLowerBound", 0),
                         lambda x: x.flags == 8),
        ConditionalField(SDNV2FieldLenField("ReportReceptionClaimCount", None, count_of="ReportReceptionClaims"),  # noqa: E501
                         lambda x: x.flags == 8),
        ConditionalField(PacketListField("ReportReceptionClaims", [], LTPReceptionClaim,  # noqa: E501
                                         count_from=lambda x: x.ReportReceptionClaimCount),  # noqa: E501
                         lambda x: x.flags == 8 and (not x.ReportReceptionClaimCount or x.ReportReceptionClaimCount > 0)),  # noqa: E501
        #
        # Cancellation Requests
        #
        ConditionalField(ByteEnumField("CancelFromSenderReason",
                                       15, _ltp_cancel_reasons),
                         lambda x: x.flags == 12),
        ConditionalField(ByteEnumField("CancelFromReceiverReason",
                                       15, _ltp_cancel_reasons),
                         lambda x: x.flags == 14),
        #
        # Cancellation Acknowledgements
        #
        ConditionalField(SDNV2("CancelAckToBlockSender", 0),
                         lambda x: x.flags == 13),
        ConditionalField(SDNV2("CancelAckToBlockReceiver", 0),
                         lambda x: x.flags == 15),
        #
        # Finally, trailing extensions
        #
        PacketListField("TrailerExtensions", [], LTPex, count_from=lambda x: x.TrailerExtensionCount)  # noqa: E501
    ]
    def mysummary(self):
        return self.sprintf("LTP %SessionNumber%"), [UDP]
bind_top_down(UDP, LTP, sport=1113)
bind_top_down(UDP, LTP, dport=1113)
bind_top_down(UDP, LTP, sport=2113)
bind_top_down(UDP, LTP, dport=2113)
bind_layers(UDP, LTP, sport=1113, dport=1113) | 
| 147 | 
	gl debug wrapper | 
	# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""This module provides a (functional) API to OpenGL ES 2.0.
There are multiple backend implementations of this API, available as
submodules of this module. One can use one of the backends directly,
or call `gl.use_gl()` to select one. The backend system allows running
visualizations using Angle, WebGL, or other forms of remote rendering.
This is in part possible by the widespread availability of OpenGL ES 2.0.
All functions that this API provides accept and return Python arguments
(no ctypes is required); strings are real strings and you can pass
data as numpy arrays. In general the input arguments are not checked
(for performance reasons). Each function results in exactly one OpenGL
API call, except when using the pyopengl backend.
The functions do not have docstrings, but most IDE's should provide you
with the function signature. For more documentation see
http://www.khronos.org/opengles/sdk/docs/man/
"""
# NOTE: modules in this package that start with one underscore are
# autogenerated, and should not be edited.
from __future__ import division
import os
from ...util import config, logger
from ._constants import *  # noqa
from ._proxy import BaseGLProxy
# Variable that will hold the module corresponding to the current backend
# This variable is used in our proxy classes to call the right functions.
current_backend = None
class MainProxy(BaseGLProxy):
    """Main proxy for the GL ES 2.0 API.
    The functions in this namespace always call into the correct GL
    backend. Therefore these function objects can be safely stored for
    reuse. However, for efficiency it would probably be better to store the
    function name and then do ``func = getattr(gloo.gl, funcname)``.
    """
    def __call__(self, funcname, returns, *args):
        func = getattr(current_backend, funcname)
        return func(*args)
# Instantiate proxy objects
proxy = MainProxy()
def use_gl(target=None):
    """Let Vispy use the target OpenGL ES 2.0 implementation
    Also see ``vispy.use()``.
    Parameters
    ----------
    target : str
        The target GL backend to use. Default gl2 or es2, depending on the platform.
    Available backends:
    * gl2 - Use ES 2.0 subset of desktop (i.e. normal) OpenGL
    * gl+ - Use the desktop ES 2.0 subset plus all non-deprecated GL
      functions on your system (requires PyOpenGL)
    * es2 - Use the ES2 library (Angle/DirectX on Windows)
    * pyopengl2 - Use ES 2.0 subset of pyopengl (for fallback and testing)
    * dummy - Prevent usage of gloo.gl (for when rendering occurs elsewhere)
    You can use vispy's config option "gl_debug" to check for errors
    on each API call. Or, one can specify it as the target, e.g. "gl2
    debug". (Debug does not apply to 'gl+', since PyOpenGL has its own
    debug mechanism)
    """
    target = target or default_backend.__name__.split(".")[-1]
    target = target.replace('+', 'plus')
    # Get options
    target, _, options = target.partition(' ')
    debug = config['gl_debug'] or 'debug' in options
    # Select modules to import names from
    try:
        mod = __import__(target, globals(), level=1)
    except ImportError as err:
        msg = 'Could not import gl target "%s":\n%s' % (target, str(err))
        raise RuntimeError(msg)
    # Apply
    global current_backend
    current_backend = mod
    _clear_namespace()
    if 'plus' in target:
        # Copy PyOpenGL funcs, extra funcs, constants, no debug
        _copy_gl_functions(mod._pyopengl2, globals(), debug=debug)
        _copy_gl_functions(mod, globals(), True, debug=debug)
    else:
        _copy_gl_functions(mod, globals(), debug=debug)
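# A minimal usage sketch: select the desktop GL backend with per-call error
# checking enabled, as described in the docstring above.
#
#   use_gl('gl2 debug')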
def _clear_namespace():
    """Clear names that are not part of the strict ES API"""
    ok_names = set(default_backend.__dict__)
    ok_names.update(['gl2', 'glplus'])  # don't remove the module
    NS = globals()
    for name in list(NS.keys()):
        if name.lower().startswith('gl'):
            if name not in ok_names:
                del NS[name]
def _copy_gl_functions(source, dest, constants=False, debug=False):
    """Inject all objects that start with 'gl' from the source
    into the dest. source and dest can be dicts, modules or BaseGLProxy's.
    """
    # Get dicts
    if isinstance(source, BaseGLProxy):
        s = {}
        for key in dir(source):
            s[key] = getattr(source, key)
        source = s
    elif not isinstance(source, dict):
        source = source.__dict__
    if not isinstance(dest, dict):
        dest = dest.__dict__
    # Copy names
    funcnames = [name for name in source.keys() if name.startswith('gl')]
    for name in funcnames:
        if debug and name != 'glGetError':
            dest[name] = make_debug_wrapper(source[name])
        else:
            dest[name] = source[name]
    # Copy constants
    if constants:
        constnames = [name for name in source.keys() if name.startswith('GL_')]
        for name in constnames:
            dest[name] = source[name]
def _arg_repr(arg):
    """Get a useful (and not too large) represetation of an argument."""
    r = repr(arg)
    max = 40
    if len(r) > max:
        if hasattr(arg, 'shape'):
            r = 'array:' + 'x'.join([repr(s) for s in arg.shape])
        else:
            r = r[:max-3] + '...'
    return r
def make_debug_wrapper(fn):
    def METHOD_NAME(*args):
        # Log function call
        argstr = ', '.join(map(_arg_repr, args))
        logger.debug("%s(%s)" % (fn.__name__, argstr))
        # Call function
        ret = fn(*args)
        # Log return value
        if ret is not None:
            if fn.__name__ == 'glReadPixels':
                logger.debug(" <= %s[%s]" % (type(ret), len(ret)))
            else:
                logger.debug(" <= %s" % repr(ret))
        # Check for errors (raises if an error occurred)
        check_error(fn.__name__)
        # Return
        return ret
    METHOD_NAME.__name__ = fn.__name__ + '_debug_wrapper'
    # Store reference to wrapped function just for introspection
    METHOD_NAME._wrapped_function = fn
    return METHOD_NAME
def check_error(when='periodic check'):
    """Check this from time to time to detect GL errors.
    Parameters
    ----------
    when : str
        Shown in the exception to help the developer determine when
        this check was done.
    """
    errors = []
    while True:
        err = glGetError()
        if err == GL_NO_ERROR or (errors and err == errors[-1]):
            break
        errors.append(err)
    if errors:
        msg = ', '.join([repr(ENUM_MAP.get(e, e)) for e in errors])
        err = RuntimeError('OpenGL got errors (%s): %s' % (when, msg))
        err.errors = errors
        err.err = errors[-1]  # pyopengl compat
        raise err
def _fix_osmesa_gl_lib_if_testing():
    """
    This function checks if we are running a test with the osmesa backend and
    fixes the GL library if needed.
    Since we have to fix the VISPY_GL_LIB *before* any import from the gl
    module, we have to run this here.
    Test discovery utilities (like pytest) will try to import modules
    before running tests, so we have to modify the GL lib really early.
    The other solution would be to setup pre-run hooks for the test utility,
    but there doesn't seem to be a standard way to do that (e.g. conftest.py
    for py.test)
    """
    test_name = os.getenv('_VISPY_TESTING_APP', None)
    if test_name == 'osmesa':
        from ...util.osmesa_gl import fix_osmesa_gl_lib
        fix_osmesa_gl_lib()
_fix_osmesa_gl_lib_if_testing()
# Load default gl backend
from . import gl2 as default_backend  # noqa
if default_backend._lib is None:  # Probably Android or RPi
    from . import es2 as default_backend  # noqa
# Call use to start using our default backend
use_gl() | 
| 148 | 
	test one of every n export versions | 
	# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.utils.gc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from six.moves import xrange  # pylint: disable=redefined-builtin
from tensorflow.contrib.learn.python.learn.utils import gc
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.util import compat
def _create_parser(base_dir):
  # create a simple parser that pulls the export_version from the directory.
  def parser(path):
    # Modify the path object so the regex matches Windows-style paths
    if os.name == "nt":
      match = re.match(
          r"^" + compat.as_str_any(base_dir).replace("\\", "/") + r"/(\d+)$",
          compat.as_str_any(path.path).replace("\\", "/"))
    else:
      match = re.match(r"^" + compat.as_str_any(base_dir) + r"/(\d+)$",
                       compat.as_str_any(path.path))
    if not match:
      return None
    return path._replace(export_version=int(match.group(1)))
  return parser
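# Illustrative note (not part of the original test): based on the regex above,
# with base_dir "/tmp/exports" the parser maps gc.Path("/tmp/exports/42", None)
# to a Path with export_version=42, and returns None for non-numeric entries
# such as "/tmp/exports/ignore".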
class GcTest(test_util.TensorFlowTestCase):
  def testLargestExportVersions(self):
    paths = [gc.Path("/foo", 8), gc.Path("/foo", 9), gc.Path("/foo", 10)]
    newest = gc.largest_export_versions(2)
    n = newest(paths)
    self.assertEqual(n, [gc.Path("/foo", 9), gc.Path("/foo", 10)])
  def testLargestExportVersionsDoesNotDeleteZeroFolder(self):
    paths = [gc.Path("/foo", 0), gc.Path("/foo", 3)]
    newest = gc.largest_export_versions(2)
    n = newest(paths)
    self.assertEqual(n, [gc.Path("/foo", 0), gc.Path("/foo", 3)])
  def testModExportVersion(self):
    paths = [
        gc.Path("/foo", 4),
        gc.Path("/foo", 5),
        gc.Path("/foo", 6),
        gc.Path("/foo", 9)
    ]
    mod = gc.mod_export_version(2)
    self.assertEqual(mod(paths), [gc.Path("/foo", 4), gc.Path("/foo", 6)])
    mod = gc.mod_export_version(3)
    self.assertEqual(mod(paths), [gc.Path("/foo", 6), gc.Path("/foo", 9)])
  def METHOD_NAME(self):
    paths = [
        gc.Path("/foo", 0),
        gc.Path("/foo", 1),
        gc.Path("/foo", 3),
        gc.Path("/foo", 5),
        gc.Path("/foo", 6),
        gc.Path("/foo", 7),
        gc.Path("/foo", 8),
        gc.Path("/foo", 33)
    ]
    one_of = gc.one_of_every_n_export_versions(3)
    self.assertEqual(
        one_of(paths), [
            gc.Path("/foo", 3),
            gc.Path("/foo", 6),
            gc.Path("/foo", 8),
            gc.Path("/foo", 33)
        ])
  def testOneOfEveryNExportVersionsZero(self):
    # Zero is a special case since it gets rolled into the first interval.
    # Test that here.
    paths = [gc.Path("/foo", 0), gc.Path("/foo", 4), gc.Path("/foo", 5)]
    one_of = gc.one_of_every_n_export_versions(3)
    self.assertEqual(one_of(paths), [gc.Path("/foo", 0), gc.Path("/foo", 5)])
  def testUnion(self):
    paths = []
    for i in xrange(10):
      paths.append(gc.Path("/foo", i))
    f = gc.union(gc.largest_export_versions(3), gc.mod_export_version(3))
    self.assertEqual(
        f(paths), [
            gc.Path("/foo", 0),
            gc.Path("/foo", 3),
            gc.Path("/foo", 6),
            gc.Path("/foo", 7),
            gc.Path("/foo", 8),
            gc.Path("/foo", 9)
        ])
  def testNegation(self):
    paths = [
        gc.Path("/foo", 4),
        gc.Path("/foo", 5),
        gc.Path("/foo", 6),
        gc.Path("/foo", 9)
    ]
    mod = gc.negation(gc.mod_export_version(2))
    self.assertEqual(mod(paths), [gc.Path("/foo", 5), gc.Path("/foo", 9)])
    mod = gc.negation(gc.mod_export_version(3))
    self.assertEqual(mod(paths), [gc.Path("/foo", 4), gc.Path("/foo", 5)])
  def testPathsWithParse(self):
    base_dir = os.path.join(test.get_temp_dir(), "paths_parse")
    self.assertFalse(gfile.Exists(base_dir))
    for p in xrange(3):
      gfile.MakeDirs(os.path.join(base_dir, "%d" % p))
    # add a base_directory to ignore
    gfile.MakeDirs(os.path.join(base_dir, "ignore"))
    self.assertEqual(
        gc.get_paths(base_dir, _create_parser(base_dir)), [
            gc.Path(os.path.join(base_dir, "0"), 0),
            gc.Path(os.path.join(base_dir, "1"), 1),
            gc.Path(os.path.join(base_dir, "2"), 2)
        ])
  def testMixedStrTypes(self):
    temp_dir = compat.as_bytes(test.get_temp_dir())
    for sub_dir in ["str", b"bytes", u"unicode"]:
      base_dir = os.path.join(
          (temp_dir
           if isinstance(sub_dir, bytes) else temp_dir.decode()), sub_dir)
      self.assertFalse(gfile.Exists(base_dir))
      gfile.MakeDirs(os.path.join(compat.as_str_any(base_dir), "42"))
      gc.get_paths(base_dir, _create_parser(base_dir))
if __name__ == "__main__":
  test.main() | 
| 149 | 
	compile | 
	#!/usr/bin/env python3
# Contest Management System - http://cms-dev.github.io/
# Copyright © 2010-2012 Giovanni Mascellani <mascellani@poisson.phc.unipi.it>
# Copyright © 2010-2018 Stefano Maggiolo <s.maggiolo@gmail.com>
# Copyright © 2010-2012 Matteo Boscariol <boscarim@hotmail.com>
# Copyright © 2012-2013 Luca Wehrstedt <luca.wehrstedt@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""Task type for output only tasks.
"""
import logging
from cms.grading.ParameterTypes import ParameterTypeChoice
from . import TaskType, eval_output
logger = logging.getLogger(__name__)
# Dummy function to mark translatable string.
def N_(message):
    return message
class OutputOnly(TaskType):
    """Task type class for output only tasks, with submission composed
    of testcase_number text files, to be evaluated diffing or using a
    comparator.
    Parameters are a list of string with one element (for future
    possible expansions), which maybe 'diff' or 'comparator', meaning that
    the evaluation is done via white diff or via a comparator.
    """
    # Codename of the checker, if it is used.
    CHECKER_CODENAME = "checker"
    # Template for the filename of the output files provided by the user; %s
    # represents the testcase codename.
    USER_OUTPUT_FILENAME_TEMPLATE = "output_%s.txt"
    # Constants used in the parameter definition.
    OUTPUT_EVAL_DIFF = "diff"
    OUTPUT_EVAL_CHECKER = "comparator"
    # Other constants to specify the task type behaviour and parameters.
    ALLOW_PARTIAL_SUBMISSION = True
    _EVALUATION = ParameterTypeChoice(
        "Output evaluation",
        "output_eval",
        "",
        {OUTPUT_EVAL_DIFF: "Outputs compared with white diff",
         OUTPUT_EVAL_CHECKER: "Outputs are compared by a comparator"})
    ACCEPTED_PARAMETERS = [_EVALUATION]
    @property
    def name(self):
        """See TaskType.name."""
        # TODO add some details if a comparator is used, etc...
        return "Output only"
    testable = False
    def __init__(self, parameters):
        super().__init__(parameters)
        self.output_eval = self.parameters[0]
    def get_compilation_commands(self, unused_submission_format):
        """See TaskType.get_compilation_commands."""
        return None
    def get_user_managers(self):
        """See TaskType.get_user_managers."""
        return []
    def get_auto_managers(self):
        """See TaskType.get_auto_managers."""
        return []
    def _uses_checker(self):
        return self.output_eval == OutputOnly.OUTPUT_EVAL_CHECKER
    @staticmethod
    def _get_user_output_filename(job):
        return OutputOnly.USER_OUTPUT_FILENAME_TEMPLATE % \
            job.operation.testcase_codename
    def METHOD_NAME(self, job, file_cacher):
        """See TaskType.compile."""
        # No compilation needed.
        job.success = True
        job.compilation_success = True
        job.text = [N_("No compilation needed")]
        job.plus = {}
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        user_output_filename = self._get_user_output_filename(job)
        # Since we allow partial submission, if the file is not
        # present we report that the outcome is 0.
        if user_output_filename not in job.files:
            job.success = True
            job.outcome = "0.0"
            job.text = [N_("File not submitted")]
            job.plus = {}
            return
        # First and only step: eval the user output.
        box_success, outcome, text = eval_output(
            file_cacher, job,
            OutputOnly.CHECKER_CODENAME if self._uses_checker() else None,
            user_output_digest=job.files[user_output_filename].digest)
        # Fill in the job with the results.
        job.success = box_success
        job.outcome = str(outcome) if outcome is not None else None
        job.text = text
        # There is no actual evaluation, so no statistics.
        job.plus = {} if box_success else None | 
| 150 | 
	extract module names | 
	from __future__ import annotations
from typing import Iterable, Set
import mypy.types as types
from mypy.types import TypeVisitor
from mypy.util import split_module_names
def METHOD_NAME(type_name: str | None) -> list[str]:
    """Returns the module names of a fully qualified type name."""
    if type_name is not None:
        # Discard the first one, which is just the qualified name of the type
        possible_module_names = split_module_names(type_name)
        return possible_module_names[1:]
    else:
        return []
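# Illustrative note (assumption about mypy internals): if split_module_names("a.b.SomeClass")
# yields ["a.b.SomeClass", "a.b", "a"], then METHOD_NAME("a.b.SomeClass") returns
# ["a.b", "a"], i.e. only the candidate module prefixes.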
class TypeIndirectionVisitor(TypeVisitor[Set[str]]):
    """Returns all module references within a particular type."""
    def __init__(self) -> None:
        self.cache: dict[types.Type, set[str]] = {}
        self.seen_aliases: set[types.TypeAliasType] = set()
    def find_modules(self, typs: Iterable[types.Type]) -> set[str]:
        self.seen_aliases.clear()
        return self._visit(typs)
    def _visit(self, typ_or_typs: types.Type | Iterable[types.Type]) -> set[str]:
        typs = [typ_or_typs] if isinstance(typ_or_typs, types.Type) else typ_or_typs
        output: set[str] = set()
        for typ in typs:
            if isinstance(typ, types.TypeAliasType):
                # Avoid infinite recursion for recursive type aliases.
                if typ in self.seen_aliases:
                    continue
                self.seen_aliases.add(typ)
            if typ in self.cache:
                modules = self.cache[typ]
            else:
                modules = typ.accept(self)
                self.cache[typ] = set(modules)
            output.update(modules)
        return output
    def visit_unbound_type(self, t: types.UnboundType) -> set[str]:
        return self._visit(t.args)
    def visit_any(self, t: types.AnyType) -> set[str]:
        return set()
    def visit_none_type(self, t: types.NoneType) -> set[str]:
        return set()
    def visit_uninhabited_type(self, t: types.UninhabitedType) -> set[str]:
        return set()
    def visit_erased_type(self, t: types.ErasedType) -> set[str]:
        return set()
    def visit_deleted_type(self, t: types.DeletedType) -> set[str]:
        return set()
    def visit_type_var(self, t: types.TypeVarType) -> set[str]:
        return self._visit(t.values) | self._visit(t.upper_bound) | self._visit(t.default)
    def visit_param_spec(self, t: types.ParamSpecType) -> set[str]:
        return self._visit(t.upper_bound) | self._visit(t.default)
    def visit_type_var_tuple(self, t: types.TypeVarTupleType) -> set[str]:
        return self._visit(t.upper_bound) | self._visit(t.default)
    def visit_unpack_type(self, t: types.UnpackType) -> set[str]:
        return t.type.accept(self)
    def visit_parameters(self, t: types.Parameters) -> set[str]:
        return self._visit(t.arg_types)
    def visit_instance(self, t: types.Instance) -> set[str]:
        out = self._visit(t.args)
        if t.type:
            # Uses of a class depend on everything in the MRO,
            # as changes to classes in the MRO can add types to methods,
            # change property types, change the MRO itself, etc.
            for s in t.type.mro:
                out.update(split_module_names(s.module_name))
            if t.type.metaclass_type is not None:
                out.update(split_module_names(t.type.metaclass_type.type.module_name))
        return out
    def visit_callable_type(self, t: types.CallableType) -> set[str]:
        out = self._visit(t.arg_types) | self._visit(t.ret_type)
        if t.definition is not None:
            out.update(METHOD_NAME(t.definition.fullname))
        return out
    def visit_overloaded(self, t: types.Overloaded) -> set[str]:
        return self._visit(t.items) | self._visit(t.fallback)
    def visit_tuple_type(self, t: types.TupleType) -> set[str]:
        return self._visit(t.items) | self._visit(t.partial_fallback)
    def visit_typeddict_type(self, t: types.TypedDictType) -> set[str]:
        return self._visit(t.items.values()) | self._visit(t.fallback)
    def visit_literal_type(self, t: types.LiteralType) -> set[str]:
        return self._visit(t.fallback)
    def visit_union_type(self, t: types.UnionType) -> set[str]:
        return self._visit(t.items)
    def visit_partial_type(self, t: types.PartialType) -> set[str]:
        return set()
    def visit_type_type(self, t: types.TypeType) -> set[str]:
        return self._visit(t.item)
    def visit_type_alias_type(self, t: types.TypeAliasType) -> set[str]:
        return self._visit(types.get_proper_type(t)) | 
| 151 | 
	create tiles | 
	# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Test desispec.specstatus
"""
import unittest, os
import numpy as np
from astropy.table import Table
from desispec.specstatus import update_specstatus
class TestSpecStatus(unittest.TestCase):
    """Test desispec.specstatus
    """
    @classmethod
    def setUpClass(cls):
        pass
    @classmethod
    def tearDownClass(cls):
        pass
    def METHOD_NAME(self, n):
        """
        Create a test tiles table with n rows
        """
        tiles = Table()
        tiles['TILEID'] = np.arange(n, dtype=int)+1
        tiles['LASTNIGHT'] = np.ones(n, dtype=int) * 20201010
        tiles['EFFTIME_SPEC'] = np.ones(n) * 1000
        tiles['GOALTIME'] = np.ones(n) * 1000
        tiles['MINTFRAC'] = np.ones(n) * 0.85
        return tiles
    def _create_specstatus(self, n):
        """
        Create a test specstatus table with n rows
        """
        specstatus = self.METHOD_NAME(n)
        specstatus['USER'] = 'test'
        specstatus['QA'] = 'none'
        specstatus['OVERRIDE'] = np.zeros(n, dtype=int)
        specstatus['ZDONE'] = 'false'
        specstatus['QANIGHT'] = np.zeros(n, dtype=int)
        specstatus['ARCHIVEDATE'] = np.zeros(n, dtype=int)
        return specstatus
    def test_add(self):
        """Test adding a new tile"""
        specstatus = self._create_specstatus(3)
        tiles = self.METHOD_NAME(4)
        self.assertNotIn(4, specstatus['TILEID'])
        newstatus = update_specstatus(specstatus, tiles)
        self.assertEqual(len(newstatus), 4)
        self.assertIn(4, newstatus['TILEID'])
    def test_update(self):
        """Test updating a tile due to new LASTNIGHT"""
        specstatus = self._create_specstatus(3)
        tiles = self.METHOD_NAME(3)
        tiles['LASTNIGHT'][0] += 1
        tiles['EFFTIME_SPEC'][0] += 1
        tiles['EFFTIME_SPEC'][1] += 2   #- but not updating LASTNIGHT for this
        orig_lastnight = specstatus['LASTNIGHT'][0]
        orig_efftime = specstatus['EFFTIME_SPEC'][0]
        newstatus = update_specstatus(specstatus, tiles, update_only=True)
        #- new status has updated EFFTIME_SPEC because LASTNIGHT was new
        self.assertEqual(newstatus['LASTNIGHT'][0], tiles['LASTNIGHT'][0])
        self.assertEqual(newstatus['EFFTIME_SPEC'][0], tiles['EFFTIME_SPEC'][0])
        #- but other entries are unchanged
        self.assertEqual(newstatus['LASTNIGHT'][1], specstatus['LASTNIGHT'][1])
        self.assertEqual(newstatus['EFFTIME_SPEC'][1], specstatus['EFFTIME_SPEC'][1])
        #- and original specstatus is unchanged
        self.assertEqual(specstatus['LASTNIGHT'][0], orig_lastnight)
        self.assertEqual(specstatus['EFFTIME_SPEC'][1], orig_efftime)
    def test_noqa_update(self):
        """Even if tiles has QA info, don't update specstatus with it"""
        specstatus = self._create_specstatus(3)
        tiles = self.METHOD_NAME(3)
        tiles['QA'] = 'good'
        tiles['LASTNIGHT'] += 1
        specstatus['QA'] = 'none'
        newstatus = update_specstatus(specstatus, tiles)
        self.assertTrue(np.all(newstatus['QA'] == 'none'))
    def test_update_all(self):
        """test updating non-QA for all tiles even if LASTNIGHT isn't new"""
        specstatus = self._create_specstatus(3)
        tiles = self.METHOD_NAME(3)
        self.assertTrue(np.all(tiles['EFFTIME_SPEC'] == specstatus['EFFTIME_SPEC']))
        specstatus['QA'] = 'none'
        tiles['EFFTIME_SPEC'] += 1  #- should be updated
        tiles['QA'] = 'good'        #- should be skipped
        newstatus = update_specstatus(specstatus, tiles)
        #- LASTNIGHT didn't change
        self.assertTrue(np.all(newstatus['LASTNIGHT'] == specstatus['LASTNIGHT']))
        self.assertTrue(np.all(newstatus['LASTNIGHT'] == tiles['LASTNIGHT']))
        #- but EFFTIME_SPEC did
        self.assertTrue(np.all(newstatus['EFFTIME_SPEC'] != specstatus['EFFTIME_SPEC']))
        self.assertTrue(np.all(newstatus['EFFTIME_SPEC'] == tiles['EFFTIME_SPEC']))
        #- and QA did not
        self.assertTrue(np.all(newstatus['QA'] == specstatus['QA']))
        self.assertTrue(np.all(newstatus['QA'] != tiles['QA']))
    def test_update_subset(self):
        """Test that it's ok to update a subset of the specstatus tiles"""
        specstatus = self._create_specstatus(5)
        tiles = self.METHOD_NAME(2)
        tiles['LASTNIGHT'] += 1
        tiles['EFFTIME_SPEC'] += 1
        newstatus = update_specstatus(specstatus, tiles)
        self.assertEqual(len(newstatus), len(specstatus))
        self.assertTrue(np.all(tiles['EFFTIME_SPEC'] == newstatus['EFFTIME_SPEC'][0:2]))
        tiles['TILEID'][0] = 1000
        newstatus = update_specstatus(specstatus, tiles)
        self.assertEqual(len(newstatus), len(specstatus)+1)
        self.assertIn(1000, newstatus['TILEID'])
    def test_update_lastnight(self):
        """update_all should update LASTNIGHT, even if it became earlier"""
        specstatus = self._create_specstatus(3)
        tiles = self.METHOD_NAME(3)
        tiles['LASTNIGHT'] -= 1
        newstatus = update_specstatus(specstatus, tiles, update_only=True)
        self.assertTrue(np.all(newstatus['LASTNIGHT'] == specstatus['LASTNIGHT']))
        self.assertTrue(np.all(newstatus['LASTNIGHT'] != tiles['LASTNIGHT']))
        newstatus = update_specstatus(specstatus, tiles, update_only=False)
        self.assertTrue(np.all(newstatus['LASTNIGHT'] != specstatus['LASTNIGHT']))
        self.assertTrue(np.all(newstatus['LASTNIGHT'] == tiles['LASTNIGHT']))
def test_suite():
    """Allows testing of only this module with the command::
        python setup.py test -m <modulename>
    """
    return unittest.defaultTestLoader.loadTestsFromName(__name__)
#- run all unit tests in this file
if __name__ == '__main__':
    unittest.main() | 
| 152 | 
	test repeated qubit | 
	# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the optimization transform ``merge_amplitude_embedding``.
"""
import pytest
from pennylane import numpy as np
import pennylane as qml
from pennylane.transforms.optimization import merge_amplitude_embedding
from pennylane._device import DeviceError
class TestMergeAmplitudeEmbedding:
    """Test that amplitude embedding gates are combined into a single."""
    def test_multi_amplitude_embedding(self):
        """Test that the transformation is working correctly by joining two AmplitudeEmbedding."""
        def qfunc():
            qml.AmplitudeEmbedding([0.0, 1.0], wires=0)
            qml.AmplitudeEmbedding([0.0, 1.0], wires=1)
            qml.Hadamard(wires=0)
            qml.Hadamard(wires=0)
            qml.state()
        transformed_qfunc = merge_amplitude_embedding(qfunc)
        ops = qml.tape.make_qscript(transformed_qfunc)().operations
        assert len(ops) == 3
        # Check that the solution is as expected.
        dev = qml.device("default.qubit", wires=2)
        assert np.allclose(qml.QNode(transformed_qfunc, dev)()[-1], 1)
    def METHOD_NAME(self):
        """Check that AmplitudeEmbedding cannot be applied if the qubit has already been used."""
        def qfunc():
            qml.CNOT(wires=[0.0, 1.0])
            qml.AmplitudeEmbedding([0.0, 1.0], wires=1)
        transformed_qfunc = merge_amplitude_embedding(qfunc)
        dev = qml.device("default.qubit", wires=2)
        qnode = qml.QNode(transformed_qfunc, dev)
        with pytest.raises(DeviceError, match="applied in the same qubit"):
            qnode()
    def test_decorator(self):
        """Check that the decorator works."""
        @merge_amplitude_embedding
        def qfunc():
            qml.AmplitudeEmbedding([0, 1, 0, 0], wires=[0, 1])
            qml.AmplitudeEmbedding([0, 1], wires=2)
            return qml.state()
        dev = qml.device("default.qubit", wires=3)
        qnode = qml.QNode(qfunc, dev)
        assert qnode()[3] == 1.0
    def test_broadcasting(self):
        """Test that merging preserves the batch dimension"""
        dev = qml.device("default.qubit", wires=3)
        @qml.qnode(dev)
        @qml.transforms.merge_amplitude_embedding
        def qnode():
            qml.AmplitudeEmbedding([[1, 0], [0, 1]], wires=0)
            qml.AmplitudeEmbedding([1, 0], wires=1)
            qml.AmplitudeEmbedding([[0, 1], [1, 0]], wires=2)
            return qml.state()
        res = qnode()
        assert qnode.tape.batch_size == 2
        # |001> and |100>
        expected = np.array([[0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0]])
        assert np.allclose(res, expected)
class TestMergeAmplitudeEmbeddingInterfaces:
    """Test that merging amplitude embedding operations works in all interfaces."""
    @pytest.mark.autograd
    def test_merge_amplitude_embedding_autograd(self):
        """Test QNode in autograd interface."""
        def qfunc(amplitude):
            qml.AmplitudeEmbedding(amplitude, wires=0)
            qml.AmplitudeEmbedding(amplitude, wires=1)
            return qml.state()
        dev = qml.device("default.qubit", wires=2)
        optimized_qfunc = qml.transforms.merge_amplitude_embedding(qfunc)
        optimized_qnode = qml.QNode(optimized_qfunc, dev)
        amplitude = np.array([0.0, 1.0], requires_grad=True)
        # Check the state |11> is being generated.
        assert optimized_qnode(amplitude)[-1] == 1
    @pytest.mark.torch
    def test_merge_amplitude_embedding_torch(self):
        """Test QNode in torch interface."""
        import torch
        def qfunc(amplitude):
            qml.AmplitudeEmbedding(amplitude, wires=0)
            qml.AmplitudeEmbedding(amplitude, wires=1)
            return qml.state()
        dev = qml.device("default.qubit", wires=2)
        optimized_qfunc = qml.transforms.merge_amplitude_embedding(qfunc)
        optimized_qnode = qml.QNode(optimized_qfunc, dev)
        amplitude = torch.tensor([0.0, 1.0], requires_grad=True)
        # Check the state |11> is being generated.
        assert optimized_qnode(amplitude)[-1] == 1
    @pytest.mark.tf
    def test_merge_amplitude_embedding_tf(self):
        """Test QNode in tensorflow interface."""
        import tensorflow as tf
        def qfunc(amplitude):
            qml.AmplitudeEmbedding(amplitude, wires=0)
            qml.AmplitudeEmbedding(amplitude, wires=1)
            return qml.state()
        dev = qml.device("default.qubit", wires=2)
        optimized_qfunc = qml.transforms.merge_amplitude_embedding(qfunc)
        optimized_qnode = qml.QNode(optimized_qfunc, dev)
        amplitude = tf.Variable([0.0, 1.0])
        # Check the state |11> is being generated.
        assert optimized_qnode(amplitude)[-1] == 1
    @pytest.mark.jax
    def test_merge_amplitude_embedding_jax(self):
        """Test QNode in JAX interface."""
        from jax import numpy as jnp
        from jax.config import config
        config.update("jax_enable_x64", True)
        def qfunc(amplitude):
            qml.AmplitudeEmbedding(amplitude, wires=0)
            qml.AmplitudeEmbedding(amplitude, wires=1)
            return qml.state()
        dev = qml.device("default.qubit", wires=2)
        optimized_qfunc = qml.transforms.merge_amplitude_embedding(qfunc)
        optimized_qnode = qml.QNode(optimized_qfunc, dev)
        amplitude = jnp.array([0.0, 1.0], dtype=jnp.float64)
        # Check the state |11> is being generated.
        assert optimized_qnode(amplitude)[-1] == 1 | 
| 153 | 
	create subdomain id | 
	import logging
import os
import re
import shortuuid
from libs.python.helperJson import addKeyValuePair, saveJsonToFile
from libs.python.helperServices import BTPSERVICE
log = logging.getLogger(__name__)
def getTimingsForStatusRequest(btpUsecase, thisService):
    search_every_x_seconds = btpUsecase.repeatstatusrequest
    usecaseTimeout = btpUsecase.repeatstatustimeout
    # If the service has defined its own time to repeat a status request, take that time instead
    if isinstance(thisService, BTPSERVICE):
        if thisService.repeatstatusrequest is not None:
            search_every_x_seconds = thisService.repeatstatusrequest
        if thisService.repeatstatustimeout is not None:
            usecaseTimeout = thisService.repeatstatustimeout
    else:
        if "repeatstatusrequest" in thisService:
            search_every_x_seconds = thisService["repeatstatusrequest"]
        if "repeatstatustimeout" in thisService:
            usecaseTimeout = thisService["repeatstatustimeout"]
    return search_every_x_seconds, usecaseTimeout
def getServiceByServiceName(btpUsecase, serviceName):
    for service in btpUsecase.definedServices:
        if service.name == serviceName:
            return service
    return None
def getNamingPattern(btpUsecase, prefix, suffix):
    result = None
    if prefix is None:
        prefix = ""
    if suffix is None:
        suffix = ""
    result = prefix + suffix
    result = re.sub(r"[^\w\s]", "-", result)
    result = result.replace(" ", "-").lower()
    result = result.replace("_", "-").lower()
    return result
def getNamingPatternForServiceSuffix(btpUsecase):
    result = getNamingPattern(btpUsecase, " instance ", None)
    return result
def createDirectoryName(btpUsecase):
    result = None
    if btpUsecase.directoryname is not None and btpUsecase.directoryname != "":
        result = btpUsecase.directoryname.strip()
    else:
        result = "BTP setup automator (Directory)"
    btpUsecase.accountMetadata = addKeyValuePair(
        btpUsecase.accountMetadata, "directory", result
    )
    return result
def createSubaccountName(btpUsecase):
    result = None
    if btpUsecase.subaccountname is not None and btpUsecase.subaccountname != "":
        result = btpUsecase.subaccountname.strip()
    else:
        result = "BTP setup automator (" + btpUsecase.region + ")"
    btpUsecase.accountMetadata = addKeyValuePair(
        btpUsecase.accountMetadata, "subaccount", result
    )
    return result
def createInstanceName(btpUsecase, service):
    result = "instance"
    if service.category != "CF_CUP_SERVICE":
        if service.instancename is not None:
            return service.instancename
        else:
            result = (
                service.name + "-" + service.plan + "-" + btpUsecase.suffixinstancename
            )
        result = re.sub(r"[^\w\s]", "-", result)
        result = result.replace("--", "-")
        result = result.replace("_", "-")
        if result[len(result) - 1] == "-":
            result = result[:-1]
        result = result[:40]
    else:
        result += "-" + service.name
    return result
def METHOD_NAME(btpUsecase):
    result = None
    if btpUsecase.subdomain is not None and btpUsecase.subdomain != "":
        result = btpUsecase.subdomain.strip()
    else:
        result = (
            btpUsecase.accountMetadata["subaccount"]
            + "-"
            + shortuuid.ShortUUID().random(length=10)
        )
    result = re.sub(r"[^\w\s]", "-", result)
    result = result.replace(" ", "-")
    result = result.replace("_", "-")
    result = result.replace("--", "-")
    if result[len(result) - 1] == "-":
        result = result[:-1]
    result = result[:60].lower()
    btpUsecase.accountMetadata = addKeyValuePair(
        btpUsecase.accountMetadata, "subdomain", result
    )
    return result
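# Illustrative (assumed) example of the normalization above: a subaccount named
# "BTP setup automator (eu10)" plus a 10-character random suffix would yield a
# subdomain along the lines of "btp-setup-automator-eu10-<suffix>".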
def createOrgName(btpUsecase, envName):
    result = None
    if envName == "cloudfoundry":
        if btpUsecase.org is not None and btpUsecase.org != "":
            result = btpUsecase.org
        else:
            result = "cf-" + btpUsecase.accountMetadata["subdomain"]
    if envName == "kymaruntime":
        result = "kyma-" + btpUsecase.accountMetadata["subdomain"]
    if envName != "kymaruntime" and envName != "cloudfoundry":
        result = envName + "-" + btpUsecase.accountMetadata["subdomain"]
    result = re.sub(r"[^\w\s]", "-", result)
    result = result.replace(" ", "-").lower()
    result = result.replace("_", "-").lower()
    result = result.replace("--", "-").lower()
    result = result[:60]
    return result
def buildUrltoSubaccount(btpUsecase):
    region = btpUsecase.region
    url = ""
    if btpUsecase.accountMetadata["licenseType"] == "TRIAL":
        url = "https://cockpit.hanatrial.ondemand.com/trial/#/"
    else:
        url = "https://cockpit." + region + ".hana.ondemand.com/cockpit/#/"
    url += "globalaccount/" + btpUsecase.accountMetadata["global_account_id"] + "/"
    url += (
        "subaccount/"
        + btpUsecase.accountMetadata["subaccountid"]
        + "/service-instances"
    )
    return url
def getDictWithEnvVariables(btpUsecase):
    result = None
    if btpUsecase.envvariables is not None and len(btpUsecase.envvariables) > 0:
        for variable, value in btpUsecase.envvariables.items():
            os.environ[variable] = value
    if btpUsecase.accountMetadata is not None and len(btpUsecase.accountMetadata) > 0:
        for variable, value in btpUsecase.accountMetadata.items():
            if isinstance(value, str):
                os.environ[variable.upper()] = value
    result = dict(os.environ)
    return result
def getEnvVariableValue(variable):
    result = None
    if os.environ.get(variable):
        result = os.environ[variable]
    return result
def save_collected_metadata(btpUsecase):
    accountMetadata = btpUsecase.accountMetadata
    filename = btpUsecase.metadatafile
    saveJsonToFile(filename, accountMetadata) | 
| 154 | 
	graph positional encoder | 
	from typing import Tuple, Union, Optional, Dict, Any, OrderedDict
from copy import deepcopy
import numpy as np
import torch
from scipy.sparse import spmatrix
from collections import OrderedDict as OderedDictClass
from graphium.features.spectral import compute_laplacian_pe
from graphium.features.rw import compute_rwse
from graphium.features.electrostatic import compute_electrostatic_interactions
from graphium.features.commute import compute_commute_distances
from graphium.features.graphormer import compute_graphormer_distances
from graphium.features.transfer_pos_level import transfer_pos_level
def get_all_positional_encodings(
    adj: Union[np.ndarray, spmatrix],
    num_nodes: int,
    pos_kwargs: Optional[Dict] = None,
) -> Tuple["OrderedDict[str, np.ndarray]"]:
    r"""
    Get the positional encodings for the features.
    Parameters:
        adj [num_nodes, num_nodes]: Adjacency matrix of the graph
        num_nodes: Number of nodes in the graph
        pos_kwargs: keyword arguments for the function `graph_positional_encoder`
            used to generate the positional encodings for the node features.
    Returns:
        pe_dict: Dictionary of positional and structural encodings
    """
    pos_kwargs = {} if pos_kwargs is None else pos_kwargs
    pe_dict = OderedDictClass()
    # Initialize cache
    cache = {}
    # Get the positional encoding for the features
    if len(pos_kwargs) > 0:
        for pos_name, this_pos_kwargs in pos_kwargs["pos_types"].items():
            this_pos_kwargs = deepcopy(this_pos_kwargs)
            pos_type = this_pos_kwargs.pop("pos_type", None)
            pos_level = this_pos_kwargs.pop("pos_level", None)
            this_pe, cache = METHOD_NAME(
                deepcopy(adj),
                num_nodes,
                pos_type=pos_type,
                pos_level=pos_level,
                pos_kwargs=this_pos_kwargs,
                cache=cache,
            )
            if pos_level == "node":
                pe_dict.update({f"{pos_type}": this_pe})
            else:
                pe_dict.update({f"{pos_level}_{pos_type}": this_pe})
    return pe_dict
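# Illustrative (assumed) shape of `pos_kwargs`, based on how it is consumed above:
# pos_kwargs = {"pos_types": {"lap_pe": {"pos_type": "laplacian_eigvec", "pos_level": "node", ...}}}
# where any remaining keys in each entry are forwarded to the individual encoder.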
def METHOD_NAME(
    adj: Union[np.ndarray, spmatrix],
    num_nodes: int,
    pos_type: Optional[str] = None,
    pos_level: Optional[str] = None,
    pos_kwargs: Optional[Dict[str, Any]] = None,
    cache: Optional[Dict[str, Any]] = None,
) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]:
    r"""
    Get a positional encoding that depends on the parameters.
    Parameters:
        adj [num_nodes, num_nodes]: Adjacency matrix of the graph
        num_nodes: Number of nodes in the graph
        pos_type: The type of positional encoding to use. If None, it must be provided by `pos_kwargs["pos_type"]`. Supported types are:
            - laplacian_eigvec              \
            - laplacian_eigval               \  -> cache connected comps. & eigendecomp.
            - rwse
            - electrostatic                 \
            - commute                        \  -> cache pinvL
            - graphormer
        pos_level: Positional level to output. If None, it must be provided by `pos_kwargs["pos_level"]`.
            - node
            - edge
            - nodepair
            - graph
        pos_kwargs: Extra keyword arguments for the positional encoding. Can include the keys pos_type and pos_level.
        cache: Dictionary of cached objects
    Returns:
        pe: Positional or structural encoding
        cache: Updated dictionary of cached objects
    """
    pos_kwargs = deepcopy(pos_kwargs)
    if pos_kwargs is None:
        pos_kwargs = {}
    if cache is None:
        cache = {}
    # Get the positional type
    pos_type2 = pos_kwargs.pop("pos_type", None)
    if pos_type is None:
        pos_type = pos_type2
    if pos_type2 is not None:
        assert (
            pos_type == pos_type2
        ), f"The positional type must be the same in `pos_type` and `pos_kwargs['pos_type']`. Provided: {pos_type} and {pos_type2}"
    assert pos_type is not None, "Either `pos_type` or `pos_kwargs['pos_type']` must be provided."
    # Get the positional level
    pos_level2 = pos_kwargs.pop("pos_level", None)
    if pos_level is None:
        pos_level = pos_level2
    if pos_level2 is not None:
        assert (
            pos_level == pos_level2
        ), f"The positional level must be the same in `pos_level` and `pos_kwargs['pos_level']`. Provided: {pos_level} and {pos_level2}"
    assert pos_level is not None, "Either `pos_level` or `pos_kwargs['pos_level']` must be provided."
    # Convert to numpy array
    if isinstance(adj, torch.sparse.Tensor):
        adj = adj.to_dense().numpy()
    elif isinstance(adj, torch.Tensor):
        adj = adj.numpy()
    adj = adj.astype(np.float64)
    # Calculate positional encoding
    if pos_type == "laplacian_eigvec":
        _, pe, base_level, cache = compute_laplacian_pe(adj, cache=cache, **pos_kwargs)
    elif pos_type == "laplacian_eigval":
        pe, _, base_level, cache = compute_laplacian_pe(adj, cache=cache, **pos_kwargs)
    elif pos_type == "rw_return_probs":
        pe, base_level, cache = compute_rwse(
            adj.astype(np.float32), num_nodes=num_nodes, cache=cache, pos_type=pos_type, **pos_kwargs
        )
    elif pos_type == "rw_transition_probs":
        pe, base_level, cache = compute_rwse(
            adj.astype(np.float32), num_nodes=num_nodes, cache=cache, pos_type=pos_type, **pos_kwargs
        )
    elif pos_type == "electrostatic":
        pe, base_level, cache = compute_electrostatic_interactions(adj, cache, **pos_kwargs)
    elif pos_type == "commute":
        pe, base_level, cache = compute_commute_distances(adj, num_nodes, cache, **pos_kwargs)
    elif pos_type == "graphormer":
        pe, base_level, cache = compute_graphormer_distances(adj, num_nodes, cache, **pos_kwargs)
    else:
        raise ValueError(f"Unknown `pos_type`: {pos_type}")
    # Convert to float32 and convert between different pos levels
    if isinstance(pe, (list, tuple)):
        pe = [this_pe.astype(np.float32) for this_pe in pe]
        pe = [transfer_pos_level(this_pe, base_level, pos_level, adj, num_nodes, cache) for this_pe in pe]
    else:
        pe = np.real(pe).astype(np.float32)
        pe = transfer_pos_level(pe, base_level, pos_level, adj, num_nodes, cache)
    return pe, cache | 
| 155 | 
	validate name | 
	from rest_framework import fields, serializers
from .primitive_serializers import FilterBlobSerializer, FilterPreviewSerializer
from .media import api_safely_get_medium_object
from .models import DestinationConfig, Filter, Media, NotificationProfile, TimeRecurrence, Timeslot
class TimeRecurrenceSerializer(serializers.ModelSerializer):
    ALL_DAY_KEY = "all_day"
    days = fields.MultipleChoiceField(choices=TimeRecurrence.Day.choices)
    class Meta:
        model = TimeRecurrence
        fields = [
            "days",
            "start",
            "end",
        ]
    def validate(self, attrs: dict):
        if attrs["start"] >= attrs["end"]:
            raise serializers.ValidationError("'start' must be before 'end'.")
        return attrs
    def to_internal_value(self, data: dict):
        if data.get(self.ALL_DAY_KEY):
            data["start"] = TimeRecurrence.DAY_START
            data["end"] = TimeRecurrence.DAY_END
        return super().to_internal_value(data)
    def to_representation(self, instance: TimeRecurrence):
        instance_dict = super().to_representation(instance)
        # `days` is initially represented as a set; this converts it into a sorted list
        # (`days` is stored sorted in the DB - see `TimeRecurrence.save()`)
        instance_dict["days"] = sorted(instance_dict["days"])
        if instance_dict["start"] == str(TimeRecurrence.DAY_START) and instance_dict["end"] == str(
            TimeRecurrence.DAY_END
        ):
            instance_dict[self.ALL_DAY_KEY] = True
        return instance_dict
class TimeslotSerializer(serializers.ModelSerializer):
    time_recurrences = TimeRecurrenceSerializer(many=True)
    class Meta:
        model = Timeslot
        fields = [
            "pk",
            "name",
            "time_recurrences",
        ]
        # "user" isn't in the list of fields so we can't use a UniqueTogetherValidator
    def METHOD_NAME(self, name):
        owner = self.context["request"].user
        qs = Timeslot.objects.filter(user=owner, name=name)
        if not qs.exists():  # create
            return name
        instance = getattr(self, "instance", None)  # update
        if instance and qs.filter(pk=instance.pk).exists():
            return name
        raise serializers.ValidationError(
            f'The name "{name}" is already in use for a another timeslot owned by user {owner}.'
        )
    def create(self, validated_data: dict):
        time_recurrences_data = validated_data.pop("time_recurrences")
        timeslot = Timeslot.objects.create(**validated_data)
        for time_recurrence_data in time_recurrences_data:
            TimeRecurrence.objects.create(timeslot=timeslot, **time_recurrence_data)
        return timeslot
    def update(self, timeslot: Timeslot, validated_data: dict):
        time_recurrences_data = validated_data.pop("time_recurrences", None)
        name = validated_data.pop("name", None)
        if name:
            timeslot.name = name
            timeslot.save()
        # Replace existing time recurrences with posted time recurrences
        if time_recurrences_data is not None:
            timeslot.time_recurrences.all().delete()
            for time_recurrence_data in time_recurrences_data:
                TimeRecurrence.objects.create(timeslot=timeslot, **time_recurrence_data)
        return timeslot
class FilterSerializer(serializers.ModelSerializer):
    filter = FilterBlobSerializer(required=False)
    class Meta:
        model = Filter
        fields = [
            "pk",
            "name",
            "filter",
        ]
class MediaSerializer(serializers.ModelSerializer):
    class Meta:
        model = Media
        fields = [
            "slug",
            "name",
        ]
class JSONSchemaSerializer(serializers.Serializer):
    json_schema = serializers.JSONField()
class ResponseDestinationConfigSerializer(serializers.ModelSerializer):
    media = MediaSerializer()
    suggested_label = serializers.SerializerMethodField(method_name="get_suggested_label")
    class Meta:
        model = DestinationConfig
        fields = [
            "pk",
            "media",
            "label",
            "suggested_label",
            "settings",
        ]
    def get_suggested_label(self, destination: DestinationConfig) -> str:
        medium = api_safely_get_medium_object(destination.media.slug)
        return f"{destination.media.name}: {medium.get_label(destination)}"
class RequestDestinationConfigSerializer(serializers.ModelSerializer):
    class Meta:
        model = DestinationConfig
        fields = [
            "media",
            "label",
            "settings",
        ]
    def validate(self, attrs: dict):
        if self.instance and "media" in attrs.keys() and not attrs["media"].slug == self.instance.media.slug:
            raise serializers.ValidationError("Media cannot be updated, only settings.")
        if "settings" in attrs.keys():
            if type(attrs["settings"]) != dict:
                raise serializers.ValidationError("Settings has to be a dictionary.")
            if self.instance:
                medium = api_safely_get_medium_object(self.instance.media.slug)
            else:
                medium = api_safely_get_medium_object(attrs["media"].slug)
            attrs["settings"] = medium.validate(self, attrs, self.context["request"].user)
        return attrs
    def update(self, destination: DestinationConfig, validated_data: dict):
        medium = api_safely_get_medium_object(destination.media.slug)
        updated_destination = medium.update(destination, validated_data)
        if updated_destination:
            return updated_destination
        return super().update(destination, validated_data)
class DuplicateDestinationSerializer(serializers.Serializer):
    is_duplicate = serializers.BooleanField(read_only=True)
class ResponseNotificationProfileSerializer(serializers.ModelSerializer):
    timeslot = TimeslotSerializer()
    filters = FilterSerializer(many=True)
    destinations = ResponseDestinationConfigSerializer(many=True)
    class Meta:
        model = NotificationProfile
        fields = [
            "pk",
            "name",
            "timeslot",
            "filters",
            "destinations",
            "active",
        ]
class RequestNotificationProfileSerializer(serializers.ModelSerializer):
    class Meta:
        model = NotificationProfile
        fields = [
            "name",
            "timeslot",
            "filters",
            "destinations",
            "active",
        ]
    def validate(self, attrs: dict):
        if attrs["timeslot"].user != self.context["request"].user:
            raise serializers.ValidationError("The user of 'timeslot' must be the same as the requesting user.")
        return attrs | 
| 156 | 
	get dates | 
	#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import datetime
import difflib
import glob
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
    "filenames",
    help="list of files to check, all files if unspecified",
    nargs='*')
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
    "--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
    "--boilerplate-dir", default=default_boilerplate_dir)
parser.add_argument(
    "-v", "--verbose",
    help="give verbose output regarding why a file does not pass",
    action="store_true")
args = parser.parse_args()
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
def get_refs():
    refs = {}
    for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
        extension = os.path.basename(path).split(".")[1]
        ref_file = open(path, 'r')
        ref = ref_file.read().splitlines()
        ref_file.close()
        refs[extension] = ref
    return refs
def is_generated_file(filename, data, regexs):
    for d in skipped_ungenerated_files:
        if d in filename:
            return False
    p = regexs["generated"]
    return p.search(data)
def file_passes(filename, refs, regexs):
    try:
        f = open(filename, 'r')
    except Exception as exc:
        print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
        return False
    data = f.read()
    f.close()
    # determine if the file is automatically generated
    generated = is_generated_file(filename, data, regexs)
    basename = os.path.basename(filename)
    extension = file_extension(filename)
    if generated:
        if extension == "go":
            extension = "generatego"
        elif extension == "bzl":
            extension = "generatebzl"
    if extension != "":
        ref = refs[extension]
    else:
        ref = refs[basename]
    # remove extra content from the top of files
    if extension == "go" or extension == "generatego":
        p = regexs["go_build_constraints"]
        (data, found) = p.subn("", data, 1)
    elif extension in ["sh", "py"]:
        p = regexs["shebang"]
        (data, found) = p.subn("", data, 1)
    data = data.splitlines()
    # if our test file is smaller than the reference it surely fails!
    if len(ref) > len(data):
        print('File %s smaller than reference (%d < %d)' %
              (filename, len(data), len(ref)),
              file=verbose_out)
        return False
    # trim our file to the same number of lines as the reference file
    data = data[:len(ref)]
    p = regexs["year"]
    for d in data:
        if p.search(d):
            if generated:
                print('File %s has the YEAR field, but it should not be in generated file' %
                      filename, file=verbose_out)
            else:
                print('File %s has the YEAR field, but missing the year of date' %
                      filename, file=verbose_out)
            return False
    if not generated:
        # Replace all occurrences of the regex "2014|2015|2016|2017|2018" with "YEAR"
        p = regexs["date"]
        for i, d in enumerate(data):
            (data[i], found) = p.subn('YEAR', d)
            if found != 0:
                break
    # if we don't match the reference at this point, fail
    if ref != data:
        print("Header in %s does not match reference, diff:" %
              filename, file=verbose_out)
        if args.verbose:
            print(file=verbose_out)
            for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
                print(line, file=verbose_out)
            print(file=verbose_out)
        return False
    return True
def file_extension(filename):
    return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = []
# list all the files contain 'DO NOT EDIT', but are not generated
skipped_ungenerated_files = ['hack/boilerplate/boilerplate.py']
def normalize_files(files):
    newfiles = []
    for pathname in files:
        if any(x in pathname for x in skipped_dirs):
            continue
        newfiles.append(pathname)
    for i, pathname in enumerate(newfiles):
        if not os.path.isabs(pathname):
            newfiles[i] = os.path.join(args.rootdir, pathname)
    return newfiles
def get_files(extensions):
    files = []
    if len(args.filenames) > 0:
        files = args.filenames
    else:
        for root, dirs, walkfiles in os.walk(args.rootdir):
            # don't visit certain dirs. This is just a performance improvement
            # as we would prune these later in normalize_files(). But doing it
            # cuts down the amount of filesystem walking we do and cuts down
            # the size of the file list
            for d in skipped_dirs:
                if d in dirs:
                    dirs.remove(d)
            for name in walkfiles:
                pathname = os.path.join(root, name)
                files.append(pathname)
    files = normalize_files(files)
    outfiles = []
    for pathname in files:
        basename = os.path.basename(pathname)
        extension = file_extension(pathname)
        if extension in extensions or basename in extensions:
            outfiles.append(pathname)
    return outfiles
def METHOD_NAME():
    years = datetime.datetime.now().year
    return '(%s)' % '|'.join((str(year) for year in range(2014, years+1)))
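# Illustrative: when run in 2018, the function above returns the regex string
# "(2014|2015|2016|2017|2018)".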
def get_regexs():
    regexs = {}
    # Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
    regexs["year"] = re.compile('YEAR')
    # get_dates returns a regex matching any year from 2014 up to the current year, e.g. "(2014|2015|2016|2017|2018)";
    # company holder names can be anything
    regexs["date"] = re.compile(METHOD_NAME())
    # strip // +build \n\n build constraints
    regexs["go_build_constraints"] = re.compile(
            r"^(// \+build.*\n|//go:build.*\n)+\n", re.MULTILINE)
    # strip #!.* from scripts
    regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
    # Search for generated files
    regexs["generated"] = re.compile('DO NOT EDIT')
    return regexs
def main():
    regexs = get_regexs()
    refs = get_refs()
    filenames = get_files(refs.keys())
    for filename in filenames:
        if not file_passes(filename, refs, regexs):
            print(filename, file=sys.stdout)
    return 0
if __name__ == "__main__":
    sys.exit(main()) | 
| 157 | 
	get channel ids | 
	from __future__ import absolute_import, division, print_function
import NuRadioReco.framework.base_station
import NuRadioReco.framework.channel
import NuRadioReco.framework.sim_channel
import collections
try:
    import cPickle as pickle
except ImportError:
    import pickle
import logging
logger = logging.getLogger('SimStation')
class SimStation(NuRadioReco.framework.base_station.BaseStation):
    def __init__(self, station_id):
        NuRadioReco.framework.base_station.BaseStation.__init__(self, station_id)
        self.__magnetic_field_vector = None
        self.__simulation_weight = None
        self.__channels = collections.OrderedDict()
    def get_magnetic_field_vector(self):
        return self.__magnetic_field_vector
    def set_magnetic_field_vector(self, magnetic_field_vector):
        self.__magnetic_field_vector = magnetic_field_vector
    def get_simulation_weight(self):
        return self.__simulation_weight
    def set_simulation_weight(self, simulation_weight):
        self.__simulation_weight = simulation_weight
    def iter_channels(self):
        for channel in self.__channels.values():
            yield channel
    def add_channel(self, channel):
        """
        adds a NuRadioReco.framework.sim_channel to the SimStation object
        """
        if not isinstance(channel, NuRadioReco.framework.sim_channel.SimChannel):
            raise AttributeError("channel needs to be of type NuRadioReco.framework.sim_channel")
        if channel.get_unique_identifier() in self.__channels:
            raise AttributeError(f"channel with the unique identifier {channel.get_unique_identifier()} is already present in SimStation")
        self.__channels[channel.get_unique_identifier()] = channel
    def get_channel(self, unique_identifier):
        """
        returns channel identified by the triple (channel_id, shower_id, ray_tracing_id)
        """
        return self.__channels[unique_identifier]
    def METHOD_NAME(self):
        """
        returns a list with the channel IDs of all simChannels of the simStation
        """
        channel_ids = []
        for unique_identifier in self.__channels.keys():
            if unique_identifier[0] not in channel_ids:
                channel_ids.append(unique_identifier[0])
        channel_ids.sort()
        return channel_ids
    def get_shower_ids(self):
        """
        returns a list with the shower IDs of all simChannels of the simStation
        """
        shower_ids = []
        for unique_identifier in self.__channels.keys():
            if unique_identifier[1] not in shower_ids:
                shower_ids.append(unique_identifier[1])
        shower_ids.sort()
        return shower_ids
    def get_ray_tracing_ids(self):
        """
        returns a list with the raytracing IDs of all simChannels of the simStation
        """
        ray_tracing_ids = []
        for unique_identifier in self.__channels.keys():
            if unique_identifier[2] not in ray_tracing_ids:
                ray_tracing_ids.append(unique_identifier[2])
        ray_tracing_ids.sort()
        return ray_tracing_ids
    def get_channels_by_channel_id(self, channel_id):
        """
        returns all simChannels that have the given channel_id
        """
        for channel in self.__channels.values():
            if channel.get_id() == channel_id:
                yield channel
    def get_channels_by_shower_id(self, shower_id):
        """
        returns all simChannels that have the given shower_id
        """
        for channel in self.__channels.values():
            if channel.get_shower_id() == shower_id:
                yield channel
    def get_channels_by_ray_tracing_id(self, ray_tracing_id):
        """
        returns all simChannels that have the given ray_tracing_id
        """
        for channel in self.__channels.values():
            if channel.get_ray_tracing_solution_id() == ray_tracing_id:
                yield channel
    def serialize(self, save_channel_traces, save_efield_traces):
        base_station_pkl = NuRadioReco.framework.base_station.BaseStation.serialize(self, save_efield_traces=save_efield_traces)
        channels_pkl = []
        for channel in self.iter_channels():
            channels_pkl.append(channel.serialize(save_trace=save_channel_traces))
        data = {'__magnetic_field_vector': self.__magnetic_field_vector,
                '__simulation_weight': self.__simulation_weight,
                'channels': channels_pkl,
                'base_station': base_station_pkl}
        return pickle.dumps(data, protocol=4)
    def deserialize(self, data_pkl):
        data = pickle.loads(data_pkl)
        NuRadioReco.framework.base_station.BaseStation.deserialize(self, data['base_station'])
        self.__magnetic_field_vector = data['__magnetic_field_vector']
        self.__simulation_weight = data['__simulation_weight']
        if 'channels' in data.keys():
            for channel_pkl in data['channels']:
                channel = NuRadioReco.framework.sim_channel.SimChannel(0, 0, 0)
                channel.deserialize(channel_pkl)
                self.add_channel(channel) | 
| 158 | 
	update attribute | 
	"""Map from manufacturer to standard clusters for electric heating thermostats."""
import logging
from zigpy.profiles import zha
import zigpy.types as t
from zigpy.zcl.clusters.general import Basic, Groups, Ota, Scenes, Time
from zhaquirks.const import (
    DEVICE_TYPE,
    ENDPOINTS,
    INPUT_CLUSTERS,
    MODELS_INFO,
    OUTPUT_CLUSTERS,
    PROFILE_ID,
)
from zhaquirks.tuya import (
    TuyaManufClusterAttributes,
    TuyaThermostat,
    TuyaThermostatCluster,
    TuyaUserInterfaceCluster,
)
# info from https://github.com/zigpy/zha-device-handlers/pull/538#issuecomment-723334124
# https://github.com/Koenkk/zigbee-herdsman-converters/blob/master/converters/fromZigbee.js#L239
# and https://github.com/Koenkk/zigbee-herdsman-converters/blob/master/converters/common.js#L113
MOESBHT_TARGET_TEMP_ATTR = 0x0210  # [0,0,0,21] target room temp (degree)
MOESBHT_TEMPERATURE_ATTR = 0x0218  # [0,0,0,200] current room temp (decidegree)
MOESBHT_SCHEDULE_MODE_ATTR = 0x0403  # [1] false [0] true   /!\ inverted
MOESBHT_MANUAL_MODE_ATTR = 0x0402  # [1] false [0] true /!\ inverted
MOESBHT_ENABLED_ATTR = 0x0101  # [0] off [1] on
MOESBHT_RUNNING_MODE_ATTR = 0x0424  # [1] idle [0] heating /!\ inverted
MOESBHT_CHILD_LOCK_ATTR = 0x0128  # [0] unlocked [1] child-locked
_LOGGER = logging.getLogger(__name__)
class MoesBHTManufCluster(TuyaManufClusterAttributes):
    """Manufacturer Specific Cluster of some electric heating thermostats."""
    attributes = {
        MOESBHT_TARGET_TEMP_ATTR: ("target_temperature", t.uint32_t, True),
        MOESBHT_TEMPERATURE_ATTR: ("temperature", t.uint32_t, True),
        MOESBHT_SCHEDULE_MODE_ATTR: ("schedule_mode", t.uint8_t, True),
        MOESBHT_MANUAL_MODE_ATTR: ("manual_mode", t.uint8_t, True),
        MOESBHT_ENABLED_ATTR: ("enabled", t.uint8_t, True),
        MOESBHT_RUNNING_MODE_ATTR: ("running_mode", t.uint8_t, True),
        MOESBHT_CHILD_LOCK_ATTR: ("child_lock", t.uint8_t, True),
    }
    def METHOD_NAME(self, attrid, value):
        super().METHOD_NAME(attrid, value)
        if attrid == MOESBHT_TARGET_TEMP_ATTR:
            self.endpoint.device.thermostat_bus.listener_event(
                "temperature_change",
                "occupied_heating_setpoint",
                value * 100,  # degree to centidegree
            )
        elif attrid == MOESBHT_TEMPERATURE_ATTR:
            self.endpoint.device.thermostat_bus.listener_event(
                "temperature_change",
                "local_temperature",
                value * 10,  # decidegree to centidegree
            )
        elif attrid == MOESBHT_SCHEDULE_MODE_ATTR:
            if value == 0:  # value is inverted
                self.endpoint.device.thermostat_bus.listener_event(
                    "program_change", "scheduled"
                )
        elif attrid == MOESBHT_MANUAL_MODE_ATTR:
            if value == 0:  # value is inverted
                self.endpoint.device.thermostat_bus.listener_event(
                    "program_change", "manual"
                )
        elif attrid == MOESBHT_ENABLED_ATTR:
            self.endpoint.device.thermostat_bus.listener_event("enabled_change", value)
        elif attrid == MOESBHT_RUNNING_MODE_ATTR:
            # value is inverted
            self.endpoint.device.thermostat_bus.listener_event(
                "state_change", 1 - value
            )
        elif attrid == MOESBHT_CHILD_LOCK_ATTR:
            self.endpoint.device.ui_bus.listener_event("child_lock_change", value)
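# Illustrative note (assumption, not part of the quirk): a Tuya report of
# MOESBHT_TEMPERATURE_ATTR with value 215 (decidegrees, i.e. 21.5 C) is relayed
# above as a "temperature_change" event carrying 2150 centidegrees, while the
# inverted MOESBHT_RUNNING_MODE_ATTR value 0 (heating) is relayed via
# "state_change" as 1.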
class MoesBHTThermostat(TuyaThermostatCluster):
    """Thermostat cluster for some electric heating controllers."""
    def map_attribute(self, attribute, value):
        """Map standardized attribute value to dict of manufacturer values."""
        if attribute == "occupied_heating_setpoint":
            # centidegree to degree
            return {MOESBHT_TARGET_TEMP_ATTR: round(value / 100)}
        if attribute == "system_mode":
            if value == self.SystemMode.Off:
                return {MOESBHT_ENABLED_ATTR: 0}
            if value == self.SystemMode.Heat:
                return {MOESBHT_ENABLED_ATTR: 1}
            self.error("Unsupported value for SystemMode")
        elif attribute == "programing_oper_mode":
            # values are inverted
            if value == self.ProgrammingOperationMode.Simple:
                return {MOESBHT_MANUAL_MODE_ATTR: 0, MOESBHT_SCHEDULE_MODE_ATTR: 1}
            if value == self.ProgrammingOperationMode.Schedule_programming_mode:
                return {MOESBHT_MANUAL_MODE_ATTR: 1, MOESBHT_SCHEDULE_MODE_ATTR: 0}
            self.error("Unsupported value for ProgrammingOperationMode")
        return super().map_attribute(attribute, value)
    def program_change(self, mode):
        """Programming mode change."""
        if mode == "manual":
            value = self.ProgrammingOperationMode.Simple
        else:
            value = self.ProgrammingOperationMode.Schedule_programming_mode
        self.METHOD_NAME(
            self.attributes_by_name["programing_oper_mode"].id, value
        )
    def enabled_change(self, value):
        """System mode change."""
        if value == 0:
            mode = self.SystemMode.Off
        else:
            mode = self.SystemMode.Heat
        self.METHOD_NAME(self.attributes_by_name["system_mode"].id, mode)
class MoesBHTUserInterface(TuyaUserInterfaceCluster):
    """HVAC User interface cluster for tuya electric heating thermostats."""
    _CHILD_LOCK_ATTR = MOESBHT_CHILD_LOCK_ATTR
class MoesBHT(TuyaThermostat):
    """Tuya thermostat for devices like the Moes BHT-002GCLZB valve and BHT-003GBLZB Electric floor heating."""
    signature = {
        #  endpoint=1 profile=260 device_type=81 device_version=1 input_clusters=[0, 4, 5, 61184],
        #  output_clusters=[10, 25]
        MODELS_INFO: [
            ("_TZE200_aoclfnxz", "TS0601"),
            ("_TZE200_2ekuz3dz", "TS0601"),
            ("_TZE200_ye5jkfsb", "TS0601"),
            ("_TZE200_u9bfwha0", "TS0601"),
        ],
        ENDPOINTS: {
            1: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
                INPUT_CLUSTERS: [
                    Basic.cluster_id,
                    Groups.cluster_id,
                    Scenes.cluster_id,
                    TuyaManufClusterAttributes.cluster_id,
                ],
                OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
            }
        },
    }
    replacement = {
        ENDPOINTS: {
            1: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.THERMOSTAT,
                INPUT_CLUSTERS: [
                    Basic.cluster_id,
                    Groups.cluster_id,
                    Scenes.cluster_id,
                    MoesBHTManufCluster,
                    MoesBHTThermostat,
                    MoesBHTUserInterface,
                ],
                OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
            }
        }
    } | 
| 159 | 
	parse txt | 
	# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Iterable
import numpy as np
from scipy.io import loadmat
from ..logging import logger
from ..model.file.base import File
from ..utils.path import split_ext
from .spreadsheet import read_spreadsheet
AnyFile = File | Path | str
def _extract_value(x: Any):
    if isinstance(x, np.ndarray):
        return _extract_value(x[0])
    return x
@dataclass
class ConditionFile:
    conditions: list[str]
    onsets: list[list[float]]
    durations: list[list[float]]
    def __init__(
        self,
        data: Iterable[tuple[AnyFile, str] | AnyFile] | AnyFile | None = None,
    ):
        self.conditions = list()
        self.onsets = list()
        self.durations = list()
        if isinstance(data, Iterable) and not isinstance(data, str):
            for x in data:
                self.parse(x)
        elif data is not None:
            self.parse(data)
    def parse(self, data: tuple[AnyFile, str] | AnyFile) -> None:
        path: Path | None = None
        condition: str | None = None
        extension: str | None = None
        # We have received a tuple of an FSL condition file and a condition name.
        if isinstance(data, tuple):
            data, condition = data
            extension = ".txt"
        # Ensure that we have a Path object. Extract the extension if necessary.
        if isinstance(data, (Path, str)):
            path = Path(data)
            if extension is None:
                _, extension = split_ext(path)
        elif isinstance(data, File):
            file = data
            path = Path(file.path)
            if extension is None:
                extension = file.extension
            if condition is None:
                condition = file.tags.get("condition")
        if extension == ".mat":
            self.parse_mat(path)
        elif extension == ".tsv":
            self.parse_tsv(path)
        elif extension == ".txt" or isinstance(condition, str):
            if not isinstance(condition, str):
                raise ValueError(f'Missing condition name for file "{path}"')
            self.METHOD_NAME(condition, path)
        else:
            raise ValueError(
                f'Cannot read condition file "{path}" with extension "{extension}"'
            )
    def parse_tsv(self, path: Path | str) -> None:
        data_frame = read_spreadsheet(path)
        if "trial_type" not in data_frame.columns:
            logger.warning(f'No "trial_type" column in "{path}"')
            return
        data_frame = data_frame.astype(dict(trial_type=str), copy=False)
        groupby = data_frame.groupby(by="trial_type")
        onsets_mapping = groupby["onset"].apply(list).to_dict()
        durations_mapping = groupby["duration"].apply(list).to_dict()
        for condition in groupby.groups.keys():
            assert isinstance(condition, str)
            self.conditions.append(condition)
            self.onsets.append(onsets_mapping[condition])
            self.durations.append(durations_mapping[condition])
    def parse_mat(self, path: Path | str) -> None:
        data = loadmat(path)
        if data is None:
            logger.warning(f'Cannot read condition file "{path}"')
            return
        names = np.squeeze(data["names"])
        durations = np.squeeze(data["durations"])
        onsets = np.squeeze(data["onsets"])
        for i, name in enumerate(names):
            condition = _extract_value(name)
            onset = np.ravel(onsets[i])
            duration = np.ravel(durations[i])
            # ensure type and shape
            coerce = np.zeros((onset.size, 2))
            coerce[:, 0] = onset
            coerce[:, 1] = duration
            self.conditions.append(condition)
            self.onsets.append(coerce[:, 0].tolist())
            self.durations.append(coerce[:, 1].tolist())
    def METHOD_NAME(self, condition: str, path: Path | str) -> None:
        self.conditions.append(condition)
        try:
            data_frame = read_spreadsheet(path)
            data_frame.rename(
                columns=dict(
                    zip(list(data_frame.columns)[:2], ["onsets", "durations"])
                ),
                inplace=True,
            )
            self.onsets.append(data_frame.onsets.tolist())
            self.durations.append(data_frame.durations.tolist())
        except Exception as e:  # unreadable or empty file
            logger.warning(f'Cannot read condition file "{path}"', exc_info=e)
            self.onsets.append([])  # fail gracefully
            self.durations.append([])
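    # Hedged usage sketch (file names are hypothetical, not from the original
    # module):
    #
    #     cf = ConditionFile(data=[("go_onsets.txt", "go"), ("stop_onsets.txt", "stop")])
    #     conditions, onsets, durations = cf.select(["go", "stop"])
    #
    # Each FSL-style text file is read as a spreadsheet whose first two columns
    # are taken as the onsets and durations of the named condition.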
    def select(self, conditions: list[str]):
        conditions = list(map(str, conditions))  # make traits-free
        onsets = list()
        durations = list()
        for condition in conditions:
            if condition in self.conditions:
                i = self.conditions.index(condition)
                onsets.append(self.onsets[i])
                durations.append(self.durations[i])
            else:
                onsets.append(list())
                durations.append(list())
        return conditions, onsets, durations | 
| 160 | 
	get requests 24hrs ago | 
	import datetime
import logging
from django.conf import settings
from memoized import memoized
from django.db import models
from corehq.apps.domain.models import Domain
log = logging.getLogger(__name__)
class RegistrationRequest(models.Model):
    tos_confirmed = models.BooleanField(default=False)
    request_time = models.DateTimeField()
    request_ip = models.CharField(max_length=31, null=True)
    activation_guid = models.CharField(max_length=126, unique=True)
    confirm_time = models.DateTimeField(null=True)
    confirm_ip = models.CharField(max_length=31, null=True)
    domain = models.CharField(max_length=255, null=True)
    new_user_username = models.CharField(max_length=255, null=True)
    requesting_user_username = models.CharField(max_length=255, null=True)
    couch_id = models.CharField(max_length=126, null=True, db_index=True)
    class Meta:
        db_table = "registration_registrationrequest"
    @property
    @memoized
    def project(self):
        return Domain.get_by_name(self.domain)
    @classmethod
    def get_by_guid(cls, guid):
        return RegistrationRequest.objects.filter(activation_guid=guid).first()
    @classmethod
    def get_requests_today(cls):
        today = datetime.datetime.utcnow()
        yesterday = today - datetime.timedelta(1)
        return RegistrationRequest.objects.filter(
            request_time__gte=yesterday.isoformat(),
            request_time__lte=today.isoformat(),
        ).count()
    @classmethod
    def METHOD_NAME(cls):
        today = datetime.datetime.utcnow()
        yesterday = today - datetime.timedelta(1)
        join_on_start = datetime.datetime(
            yesterday.year, yesterday.month, yesterday.day, yesterday.hour, 0, 0, 0)
        join_on_end = datetime.datetime(
            yesterday.year, yesterday.month, yesterday.day, yesterday.hour, 59, 59, 59)
        requests = RegistrationRequest.objects.filter(
            request_time__gte=join_on_start,
            request_time__lte=join_on_end,
            confirm_time__isnull=True
        )
        return [req for req in requests if req.new_user_username == req.requesting_user_username]
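    # Hedged note (assumption): the query above narrows to self-registrations
    # (new_user_username == requesting_user_username) created during the same
    # clock hour exactly one day ago that are still unconfirmed, e.g. to drive
    # a follow-up reminder task.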
    @classmethod
    def get_request_for_username(cls, username):
        return RegistrationRequest.objects.filter(new_user_username=username).first()
class AsyncSignupRequest(models.Model):
    """
    Use this model to store information from signup or invitation forms when
    the user is redirected to login elsewhere (like SSO) but the signup/invitation
    process must resume when they return.
    NOTE: The reason we use this instead of storing data in request.session
    is that during the SAML handshake for SSO, the Identity Provider
    acknowledges the handshake by posting to a view that is CSRF exempt.
    For security reasons, Django wipes the session data during this process.
    """
    username = models.CharField(max_length=255, db_index=True)
    invitation = models.ForeignKey('users.Invitation', null=True, blank=True, on_delete=models.SET_NULL)
    phone_number = models.CharField(max_length=126, null=True, blank=True)
    project_name = models.CharField(max_length=255, null=True, blank=True)
    atypical_user = models.BooleanField(default=False)
    persona = models.CharField(max_length=128, null=True, blank=True)
    persona_other = models.TextField(null=True, blank=True)
    additional_hubspot_data = models.JSONField(null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True)
    @classmethod
    def get_by_username(cls, username):
        try:
            return cls.objects.get(username=username)
        except cls.MultipleObjectsReturned:
            # this would have to be a weird edge case where an error occurred
            # during the signup process. We should log and then triage.
            log.error(
                f"Fetched multiple AsyncSignupRequests for {username}. "
                f"Please check for errors in any auth backends that might "
                f"be interrupting the sign up workflows."
            )
            return cls.objects.filter(username=username).first()
        except cls.DoesNotExist:
            return None
    @classmethod
    def create_from_registration_form(cls, reg_form, additional_hubspot_data=None):
        """
        Creates an AsyncSignupRequest to store registration form details
        when a user is signing up for an account on HQ and must navigate
        away in the middle of the process
        :param reg_form: RegisterWebUserForm
        :return: AsyncSignupRequest
        """
        username = reg_form.cleaned_data['email']
        async_signup, _ = cls.objects.get_or_create(username=username)
        async_signup.phone_number = reg_form.cleaned_data['phone_number']
        async_signup.project_name = reg_form.cleaned_data['project_name']
        async_signup.atypical_user = reg_form.cleaned_data.get('atypical_user', False)
        # SaaS analytics related
        if settings.IS_SAAS_ENVIRONMENT:
            persona = reg_form.cleaned_data['persona']
            persona_other = reg_form.cleaned_data['persona_other']
            additional_hubspot_data = additional_hubspot_data or {}
            additional_hubspot_data.update({
                'buyer_persona': persona,
                'buyer_persona_other': persona_other,
            })
            async_signup.persona = persona
            async_signup.persona_other = persona_other
            async_signup.additional_hubspot_data = additional_hubspot_data
        async_signup.save()
        return async_signup
    @classmethod
    def create_from_invitation(cls, invitation):
        """
        Creates an AsyncSignupRequest to store invitation details when a user
        is accepting an invitation on HQ and must navigate away in the middle
        of the process to sign in or perform another action
        :param invitation: Invitation
        :return: AsyncSignupRequest
        """
        async_signup, _ = cls.objects.get_or_create(username=invitation.email)
        async_signup.invitation = invitation
        async_signup.save()
        return async_signup
    @classmethod
    def clear_data_for_username(cls, username):
        """
        This makes sure that any outstanding AsyncSignupRequest associated with
        username is deleted.
        :param username: string
        """
        cls.objects.filter(username=username).delete() | 
| 161 | 
	test zero mean | 
	from unittest import TestCase
import numpy as np
from aspire.basis import FFBBasis2D
from aspire.covariance import BatchedRotCov2D, RotCov2D
from aspire.noise import WhiteNoiseAdder
from aspire.operators import RadialCTFFilter
from aspire.source.simulation import Simulation
from aspire.utils import utest_tolerance
class BatchedRotCov2DTestCase(TestCase):
    """
    Tests batched cov2d without providing any CTF filters.
    """
    filters = None
    ctf_idx = None
    ctf_fb = None
    def setUp(self):
        n = 32
        L = 8
        self.dtype = np.float32
        self.noise_var = 0.1848
        # Initial noise filter to generate noise images.
        # The noise variance is deliberately set far from the value used to
        # calculate the covariance matrix and CWF coefficients, in order to
        # exercise the code that rebuilds a positive definite covariance matrix.
        noise_adder = WhiteNoiseAdder(var=self.noise_var * 0.001)
        self.src = Simulation(
            L,
            n,
            unique_filters=self.filters,
            dtype=self.dtype,
            noise_adder=noise_adder,
        )
        self.basis = FFBBasis2D((L, L), dtype=self.dtype)
        self.coeff = self.basis.evaluate_t(self.src.images[:])
        self.cov2d = RotCov2D(self.basis)
        self.bcov2d = BatchedRotCov2D(self.src, self.basis, batch_size=7)
    def tearDown(self):
        pass
    def blk_diag_allclose(self, blk_diag_a, blk_diag_b, atol=None):
        if atol is None:
            atol = utest_tolerance(self.dtype)
        close = True
        for blk_a, blk_b in zip(blk_diag_a, blk_diag_b):
            close = close and np.allclose(blk_a, blk_b, atol=atol)
        return close
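    # Hedged note (assumption): the covariance estimates compared below are
    # block diagonal (one block per angular index of the Fourier-Bessel basis),
    # so the helper above checks the batched and non-batched results block by
    # block rather than as a single dense matrix.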
    def testMeanCovar(self):
        # Test basic functionality against RotCov2D.
        mean_cov2d = self.cov2d.get_mean(
            self.coeff, ctf_fb=self.ctf_fb, ctf_idx=self.ctf_idx
        )
        covar_cov2d = self.cov2d.get_covar(
            self.coeff,
            mean_coeff=mean_cov2d,
            ctf_fb=self.ctf_fb,
            ctf_idx=self.ctf_idx,
            noise_var=self.noise_var,
        )
        mean_bcov2d = self.bcov2d.get_mean()
        covar_bcov2d = self.bcov2d.get_covar(noise_var=self.noise_var)
        self.assertTrue(
            np.allclose(mean_cov2d, mean_bcov2d, atol=utest_tolerance(self.dtype))
        )
        self.assertTrue(
            self.blk_diag_allclose(
                covar_cov2d, covar_bcov2d, atol=utest_tolerance(self.dtype)
            )
        )
    def METHOD_NAME(self):
        # Make sure it works with zero mean (pure second moment).
        zero_coeff = np.zeros((self.basis.count,), dtype=self.dtype)
        covar_cov2d = self.cov2d.get_covar(
            self.coeff, mean_coeff=zero_coeff, ctf_fb=self.ctf_fb, ctf_idx=self.ctf_idx
        )
        covar_bcov2d = self.bcov2d.get_covar(mean_coeff=zero_coeff)
        self.assertTrue(
            self.blk_diag_allclose(
                covar_cov2d, covar_bcov2d, atol=utest_tolerance(self.dtype)
            )
        )
    def testAutoMean(self):
        # Make sure it automatically calls get_mean if needed.
        covar_cov2d = self.cov2d.get_covar(
            self.coeff, ctf_fb=self.ctf_fb, ctf_idx=self.ctf_idx
        )
        covar_bcov2d = self.bcov2d.get_covar()
        self.assertTrue(
            self.blk_diag_allclose(
                covar_cov2d, covar_bcov2d, atol=utest_tolerance(self.dtype)
            )
        )
    def testShrink(self):
        # Make sure it properly shrinks the right-hand side if specified.
        covar_est_opt = {
            "shrinker": "frobenius_norm",
            "verbose": 0,
            "max_iter": 250,
            "iter_callback": [],
            "store_iterates": False,
            "rel_tolerance": 1e-12,
            "precision": self.dtype,
        }
        covar_cov2d = self.cov2d.get_covar(
            self.coeff,
            ctf_fb=self.ctf_fb,
            ctf_idx=self.ctf_idx,
            covar_est_opt=covar_est_opt,
        )
        covar_bcov2d = self.bcov2d.get_covar(covar_est_opt=covar_est_opt)
        self.assertTrue(self.blk_diag_allclose(covar_cov2d, covar_bcov2d))
    def testAutoBasis(self):
        # Make sure basis is automatically created if not specified.
        nbcov2d = BatchedRotCov2D(self.src)
        covar_bcov2d = self.bcov2d.get_covar()
        covar_nbcov2d = nbcov2d.get_covar()
        self.assertTrue(
            self.blk_diag_allclose(
                covar_bcov2d, covar_nbcov2d, atol=utest_tolerance(self.dtype)
            )
        )
    def testCWFCoeff(self):
        # Calculate CWF coefficients using Cov2D base class
        mean_cov2d = self.cov2d.get_mean(
            self.coeff, ctf_fb=self.ctf_fb, ctf_idx=self.ctf_idx
        )
        covar_cov2d = self.cov2d.get_covar(
            self.coeff,
            ctf_fb=self.ctf_fb,
            ctf_idx=self.ctf_idx,
            noise_var=self.noise_var,
            make_psd=True,
        )
        coeff_cov2d = self.cov2d.get_cwf_coeffs(
            self.coeff,
            self.ctf_fb,
            self.ctf_idx,
            mean_coeff=mean_cov2d,
            covar_coeff=covar_cov2d,
            noise_var=self.noise_var,
        )
        # Calculate CWF coefficients using Batched Cov2D class
        mean_bcov2d = self.bcov2d.get_mean()
        covar_bcov2d = self.bcov2d.get_covar(noise_var=self.noise_var, make_psd=True)
        coeff_bcov2d = self.bcov2d.get_cwf_coeffs(
            self.coeff,
            self.ctf_fb,
            self.ctf_idx,
            mean_bcov2d,
            covar_bcov2d,
            noise_var=self.noise_var,
        )
        self.assertTrue(
            self.blk_diag_allclose(
                coeff_cov2d,
                coeff_bcov2d,
                atol=utest_tolerance(self.dtype),
            )
        )
    def testCWFCoeffCleanCTF(self):
        """
        Test case of clean images (coeff_clean and noise_var=0)
        while using a non Identity CTF.
        This case may come up when a developer switches between
        clean and dirty images.
        """
        # Calculate CWF coefficients using Cov2D base class
        mean_cov2d = self.cov2d.get_mean(
            self.coeff, ctf_fb=self.ctf_fb, ctf_idx=self.ctf_idx
        )
        covar_cov2d = self.cov2d.get_covar(
            self.coeff,
            ctf_fb=self.ctf_fb,
            ctf_idx=self.ctf_idx,
            noise_var=self.noise_var,
            make_psd=True,
        )
        coeff_cov2d = self.cov2d.get_cwf_coeffs(
            self.coeff,
            self.ctf_fb,
            self.ctf_idx,
            mean_coeff=mean_cov2d,
            covar_coeff=covar_cov2d,
            noise_var=0,
        )
        # Calculate CWF coefficients using Batched Cov2D class
        mean_bcov2d = self.bcov2d.get_mean()
        covar_bcov2d = self.bcov2d.get_covar(noise_var=self.noise_var, make_psd=True)
        coeff_bcov2d = self.bcov2d.get_cwf_coeffs(
            self.coeff,
            self.ctf_fb,
            self.ctf_idx,
            mean_bcov2d,
            covar_bcov2d,
            noise_var=0,
        )
        self.assertTrue(
            self.blk_diag_allclose(
                coeff_cov2d,
                coeff_bcov2d,
                atol=utest_tolerance(self.dtype),
            )
        )
class BatchedRotCov2DTestCaseCTF(BatchedRotCov2DTestCase):
    """
    Tests batched cov2d with CTF information.
    """
    @property
    def filters(self):
        return [
            RadialCTFFilter(5, 200, defocus=d, Cs=2.0, alpha=0.1)
            for d in np.linspace(1.5e4, 2.5e4, 7)
        ]
    @property
    def ctf_idx(self):
        return self.src.filter_indices
    @property
    def ctf_fb(self):
        return [f.fb_mat(self.basis) for f in self.src.unique_filters] | 
| 162 | 
	test02 chi2 thin back side | 
	import pytest
import drjit as dr
import mitsuba as mi
def test00_construction_and_lobes(variant_scalar_rgb):
    # By default the BSDF should only have 2 lobes, no anisotropic / transmission
    b = mi.load_dict({
        'type': 'principledthin',
    })
    assert b.component_count() == 3
    assert mi.has_flag(b.flags(), mi.BSDFFlags.DiffuseReflection)
    assert mi.has_flag(b.flags(), mi.BSDFFlags.GlossyReflection)
    assert not mi.has_flag(b.flags(), mi.BSDFFlags.GlossyTransmission)
    assert not mi.has_flag(b.flags(), mi.BSDFFlags.Anisotropic)
    # Adding anisotropy via the traverse mechanism
    p = mi.traverse(b)
    p['anisotropic.value'] = 0.5
    p.update()
    assert mi.has_flag(b.flags(), mi.BSDFFlags.Anisotropic)
    b = mi.load_dict({
        'type': 'principledthin',
        'spec_trans': 0.5,
    })
    assert b.component_count() == 4
    assert mi.has_flag(b.flags(), mi.BSDFFlags.DiffuseReflection)
    assert mi.has_flag(b.flags(), mi.BSDFFlags.GlossyReflection)
    assert mi.has_flag(b.flags(), mi.BSDFFlags.GlossyTransmission)
    assert not mi.has_flag(b.flags(), mi.BSDFFlags.Anisotropic)
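# Hedged note (assumption): the chi^2 tests below draw BSDF samples for a fixed
# incident direction wi and compare the sampled histogram over the sphere with
# the analytic pdf; the back-side variant repeats the front-side test with wi
# flipped below the surface and different transmission weights.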
def test01_chi2_thin_front_side(variants_vec_backends_once_rgb):
    # front_side thin
    xml = """<float name="roughness" value="0.6"/>
             <float name="anisotropic" value="0.5"/>
             <float name="spec_trans" value="0.4"/>
             <float name="eta" value="1.3296"/>
             <float name="diff_trans" value="0.6"/>
          """
    wi = dr.normalize(mi.ScalarVector3f([1, 0, 1]))
    sample_func, pdf_func = mi.chi2.BSDFAdapter("principledthin", xml, wi=wi)
    chi2 = mi.chi2.ChiSquareTest(
        domain=mi.chi2.SphericalDomain(),
        sample_func=sample_func,
        pdf_func=pdf_func,
        sample_dim=3
    )
    assert chi2.run()
def METHOD_NAME(variants_vec_backends_once_rgb):
    # back side thin
    xml = """<float name="roughness" value="0.6"/>
             <float name="anisotropic" value="0.5"/>
             <float name="spec_trans" value="0.6"/>
             <float name="eta" value="1.3296"/>
             <float name="diff_trans" value="0.9"/>
        """
    wi = dr.normalize(mi.ScalarVector3f([1, 0, -1]))
    sample_func, pdf_func = mi.chi2.BSDFAdapter("principledthin", xml, wi=wi)
    chi2 = mi.chi2.ChiSquareTest(
        domain=mi.chi2.SphericalDomain(),
        sample_func=sample_func,
        pdf_func=pdf_func,
        sample_dim=3
    )
    assert chi2.run()
def test03_eval_pdf_thin(variant_scalar_rgb):
    # The true values are defined by the first implementation in order to
    # prevent unwanted changes.
    pdf_true = [
        0.18230389058589935,
        0.17071931064128876,
        0.1604636013507843,
        0.15113641321659088,
        0.1424213945865631,
        0.13407252728939056,
        0.1259022206068039,
        0.1177704930305481,
        0.10957615822553635,
        0.1012495756149292,
        0.09274674206972122,
        0.0840444564819336,
        0.07513649761676788,
        0.06603056192398071,
        0.05674567073583603,
        0.04731012135744095,
        0.03775978833436966,
        0.028136683627963066,
        0.01848774217069149,
        0.008863822557032108]
    evaluate_true = [
        0.04944333806633949,
        0.04872140288352966,
        0.047846242785453796,
        0.0468028225004673,
        0.04557877406477928,
        0.04416417330503464,
        0.042551249265670776,
        0.04073421657085419,
        0.03870923072099686,
        0.036474503576755524,
        0.03403010591864586,
        0.0313769206404686,
        0.028513599187135696,
        0.025431113317608833,
        0.02210502326488495,
        0.01848825439810753,
        0.014510626904666424,
        0.01009628176689148,
        0.005216196645051241,
        3.899415020768372e-18]
    bsdf = mi.load_string("""<bsdf version='2.0.0' type='principledthin'>
                      <float name="eta" value="1.5"/>
                      <float name="anisotropic" value="0.5"/>
                      <float name="sheen" value="0.5"/>
                      <float name="sheen_tint" value="0.2"/>
                      <float name="spec_trans" value="0.5"/>
                      <float name="flatness" value="0.5"/>
                      <float name="diff_trans" value="0.6"/>
                      </bsdf>
                      """)
    si = mi.SurfaceInteraction3f()
    si.p = [0, 0, 0]
    si.n = [0, 0, 1]
    si.wi = [1, 0, 1]
    si.sh_frame = mi.Frame3f(si.n)
    ctx = mi.BSDFContext()
    pdf = []
    evaluate = []
    for i in range(20):
        theta = i / 19.0 * (dr.pi / 2)
        wo = [dr.sin(theta), 0, dr.cos(theta)]
        assert dr.allclose(bsdf.pdf(ctx, si, wo=wo), pdf_true[i])
        assert dr.allclose(bsdf.eval(ctx, si, wo=wo)[0], evaluate_true[i]) | 
| 163 | 
	tau y | 
	# Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from spinn_utilities.overrides import overrides
from spinn_front_end_common.utilities.constants import (
    BYTES_PER_SHORT, BYTES_PER_WORD)
from spynnaker.pyNN.data import SpynnakerDataView
from spynnaker.pyNN.models.neuron.plasticity.stdp.common import (
    get_exp_lut_array)
from spynnaker.pyNN.models.neuron.plasticity.stdp.timing_dependence import (
    AbstractTimingDependence)
from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure import (
    SynapseStructureWeightOnly)
class TimingDependencePfisterSpikeTriplet(AbstractTimingDependence):
    """
    A timing dependence STDP rule based on spike triplets.
    Jean-Pascal Pfister, Wulfram Gerstner. Triplets of Spikes in a Model of
    Spike Timing-Dependent Plasticity. *Journal of Neuroscience*,
    20 September 2006, 26 (38) 9673-9682; DOI: 10.1523/JNEUROSCI.1425-06.2006
    """
    __slots__ = [
        "__synapse_structure",
        "__tau_minus",
        "__tau_minus_data",
        "__tau_plus",
        "__tau_plus_data",
        "__tau_x",
        "__tau_x_data",
        "__tau_y",
        "__tau_y_data",
        "__a_plus",
        "__a_minus"]
    __PARAM_NAMES = ('tau_plus', 'tau_minus', 'tau_x', 'tau_y')
    # noinspection PyPep8Naming
    def __init__(self, tau_plus, tau_minus, tau_x, METHOD_NAME, A_plus, A_minus):
        r"""
        :param float tau_plus: :math:`\tau_+`
        :param float tau_minus: :math:`\tau_-`
        :param float tau_x: :math:`\tau_x`
        :param float tau_y: :math:`\tau_y`
        :param float A_plus: :math:`A^+`
        :param float A_minus: :math:`A^-`
        """
        self.__tau_plus = tau_plus
        self.__tau_minus = tau_minus
        self.__tau_x = tau_x
        self.__tau_y = METHOD_NAME
        self.__a_plus = A_plus
        self.__a_minus = A_minus
        self.__synapse_structure = SynapseStructureWeightOnly()
        ts = SpynnakerDataView.get_simulation_time_step_ms()
        self.__tau_plus_data = get_exp_lut_array(ts, self.__tau_plus)
        self.__tau_minus_data = get_exp_lut_array(ts, self.__tau_minus)
        self.__tau_x_data = get_exp_lut_array(ts, self.__tau_x, shift=2)
        self.__tau_y_data = get_exp_lut_array(ts, self.__tau_y, shift=2)
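    # Hedged usage sketch (parameter values are illustrative only, not taken
    # from this codebase):
    #
    #     timing = TimingDependencePfisterSpikeTriplet(
    #         16.8, 33.7, 101.0, 114.0, A_plus=0.005, A_minus=0.005)
    #
    # The four exponential look-up tables built in __init__ are pre-computed
    # per simulation time step so the SpiNNaker binary can evaluate the
    # triplet traces cheaply.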
    @property
    def tau_plus(self):
        r"""
        :math:`\tau_+`
        :rtype: float
        """
        return self.__tau_plus
    @property
    def tau_minus(self):
        r"""
        :math:`\tau_-`
        :rtype: float
        """
        return self.__tau_minus
    @property
    def tau_x(self):
        r"""
        :math:`\tau_x`
        :rtype: float
        """
        return self.__tau_x
    @property
    def METHOD_NAME(self):
        r"""
        :math:`\tau_y`
        :rtype: float
        """
        return self.__tau_y
    @property
    def A_plus(self):
        r"""
        :math:`A^+`
        :rtype: float
        """
        return self.__a_plus
    @A_plus.setter
    def A_plus(self, new_value):
        self.__a_plus = new_value
    @property
    def A_minus(self):
        r"""
        :math:`A^-`
        :rtype: float
        """
        return self.__a_minus
    @A_minus.setter
    def A_minus(self, new_value):
        self.__a_minus = new_value
    @overrides(AbstractTimingDependence.is_same_as)
    def is_same_as(self, timing_dependence):
        if not isinstance(
                timing_dependence, TimingDependencePfisterSpikeTriplet):
            return False
        return (
            (self.__tau_plus == timing_dependence.tau_plus) and
            (self.__tau_minus == timing_dependence.tau_minus) and
            (self.__tau_x == timing_dependence.tau_x) and
            (self.__tau_y == timing_dependence.METHOD_NAME))
    @property
    def vertex_executable_suffix(self):
        """
        The suffix to be appended to the vertex executable for this rule.
        :rtype: str
        """
        return "pfister_triplet"
    @property
    def pre_trace_n_bytes(self):
        """
        The number of bytes used by the pre-trace of the rule per neuron.
        :rtype: int
        """
        # Triplet rule trace entries consists of two 16-bit traces - R1 and R2
        # (Note: this is the pre-trace size, not the post-trace size)
        return BYTES_PER_SHORT * 2
    @overrides(AbstractTimingDependence.get_parameters_sdram_usage_in_bytes)
    def get_parameters_sdram_usage_in_bytes(self):
        lut_array_words = (
            len(self.__tau_plus_data) + len(self.__tau_minus_data) +
            len(self.__tau_x_data) + len(self.__tau_y_data))
        return lut_array_words * BYTES_PER_WORD
    @property
    def n_weight_terms(self):
        """
        The number of weight terms expected by this timing rule.
        :rtype: int
        """
        return 2
    @overrides(AbstractTimingDependence.write_parameters)
    def write_parameters(
            self, spec, global_weight_scale, synapse_weight_scales):
        # Write lookup tables
        spec.write_array(self.__tau_plus_data)
        spec.write_array(self.__tau_minus_data)
        spec.write_array(self.__tau_x_data)
        spec.write_array(self.__tau_y_data)
    @property
    def synaptic_structure(self):
        """
        The synaptic structure of the plastic part of the rows.
        :rtype: AbstractSynapseStructure
        """
        return self.__synapse_structure
    @overrides(AbstractTimingDependence.get_parameter_names)
    def get_parameter_names(self):
        return self.__PARAM_NAMES | 
| 164 | 
	parse bytes | 
	#!/usr/bin/env python3
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is meant to be used to analyze memory profiles created by the Prow binaries when
# the --profile-memory-usage flag is passed. The interval of profiling can be set with the
# --memory-profile-interval flag. This tool can also be used on the output of the sidecar utility
# when the sidecar.Options.WriteMemoryProfile option has been set. The tools will write sequential
# profiles into a directory, from which this script can load the data, create time series and
# visualize them.
import os
import pathlib
import subprocess
import sys
from datetime import datetime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.font_manager import FontProperties
if len(sys.argv) != 2:
    print("[ERROR] Expected the directory containing profiles as the only argument.")
    print("Usage: {} ./path/to/profiles/".format(sys.argv[0]))
    sys.exit(1)
profile_dir = sys.argv[1]
def METHOD_NAME(value):
    # we will either see a raw number or one with a suffix
    value = value.decode("utf-8")
    if not value.endswith("B"):
        return float(value)
    suffix = value[-2:]
    multiple = 1
    if suffix == "KB":
        multiple = 1024
    elif suffix == "MB":
        multiple = 1024 * 1024
    elif suffix == "GB":
        multiple = 1024 * 1024 * 1024
    else:
        # plain "B" suffix (e.g. "512B"): strip only the single trailing marker
        return float(value[:-1])
    return float(value[:-2]) * multiple
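# Illustrative expectations for the parser above (assumed examples, not
# captured pprof output):
#     b"64MB" -> 67108864.0 bytes
#     b"42"   -> 42.0 (values without a byte suffix pass through unchanged)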
overall_name = "overall".encode("utf-8")
dates_by_name = {overall_name: []}
flat_usage_over_time = {overall_name: []}
cumulative_usage_over_time = {overall_name: []}
max_usage = 0
for subdir, dirs, files in os.walk(profile_dir):
    for file in files:
        full_path = os.path.join(subdir, file)
        date = datetime.fromtimestamp(pathlib.Path(full_path).stat().st_mtime)
        output = subprocess.run(
            ["go", "tool", "pprof", "-top", "-inuse_space", full_path],
            check=True, stdout=subprocess.PIPE
        )
        # The output of go tool pprof will look like:
        #
        # File: sidecar
        # Type: inuse_space
        # Time: Mar 19, 2021 at 10:30am (PDT)
        # Showing nodes accounting for 66.05MB, 100% of 66.05MB total
        #       flat  flat%   sum%        cum   cum%
        #       64MB 96.90% 96.90%       64MB 96.90%  google.golang.org/api/internal/gensupport...
        #
        # We want to parse all of the lines after the header and metadata.
        lines = output.stdout.splitlines()
        usage = METHOD_NAME(lines[3].split()[-2])
        if usage > max_usage:
            max_usage = usage
        data_index = 0
        for i in range(len(lines)):
            if lines[i].split()[0].decode("utf-8") == "flat":
                data_index = i + 1
                break
        flat_overall = 0
        cumulative_overall = 0
        for line in lines[data_index:]:
            parts = line.split()
            name = parts[5]
            if name not in dates_by_name:
                dates_by_name[name] = []
            dates_by_name[name].append(date)
            if name not in flat_usage_over_time:
                flat_usage_over_time[name] = []
            flat_usage = METHOD_NAME(parts[0])
            flat_usage_over_time[name].append(flat_usage)
            flat_overall += flat_usage
            if name not in cumulative_usage_over_time:
                cumulative_usage_over_time[name] = []
            cumulative_usage = METHOD_NAME(parts[3])
            cumulative_usage_over_time[name].append(cumulative_usage)
            cumulative_overall += cumulative_usage
        dates_by_name[overall_name].append(date)
        flat_usage_over_time[overall_name].append(flat_overall)
        cumulative_usage_over_time[overall_name].append(cumulative_overall)
plt.rcParams.update({'font.size': 22})
fig = plt.figure(figsize=(30, 18))
plt.subplots_adjust(right=0.7)
ax = plt.subplot(211)
for name in dates_by_name:
    dates = mdates.date2num(dates_by_name[name])
    values = flat_usage_over_time[name]
    # we only want to show the top couple callsites, or our legend gets noisy
    if max(values) > 0.01 * max_usage:
        ax.plot_date(dates, values,
                     label="{} (max: {:,.0f}MB)".format(name.decode("utf-8"), max(values) / (1024 * 1024)),
                     linestyle='solid')
    else:
        ax.plot_date(dates, values, linestyle='solid')
ax.set_yscale('log')
ax.set_ylim(bottom=10*1024*1024)
formatter = ticker.FuncFormatter(lambda y, pos: '{:,.0f}'.format(y / (1024 * 1024)) + 'MB')
ax.yaxis.set_major_formatter(formatter)
plt.xlabel("Time")
plt.ylabel("Flat Space In Use (bytes)")
plt.title("Space In Use By Callsite")
fontP = FontProperties()
fontP.set_size('xx-small')
plt.legend(bbox_to_anchor=(1, 1), loc='upper left', prop=fontP)
ax = plt.subplot(212)
for name in dates_by_name:
    dates = mdates.date2num(dates_by_name[name])
    values = cumulative_usage_over_time[name]
    # we only want to show the top couple callsites, or our legend gets noisy
    if max(values) > 0.01 * max_usage:
        ax.plot_date(dates, values,
                     label="{} (max: {:,.0f}MB)".format(name.decode("utf-8"), max(values) / (1024 * 1024)),
                     linestyle='solid')
    else:
        ax.plot_date(dates, values, linestyle='solid')
ax.set_yscale('log')
ax.set_ylim(bottom=10*1024*1024)
ax.yaxis.set_major_formatter(formatter)
plt.xlabel("Time")
plt.ylabel("Cumulative Space In Use (bytes)")
fontP = FontProperties()
fontP.set_size('xx-small')
plt.legend(bbox_to_anchor=(1, 1), loc='upper left', prop=fontP)
plt.show() | 
| 165 | 
	analyser osmosis full | 
	#!/usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
##                                                                       ##
## Copyrights Frédéric Rodrigo 2011                                      ##
##                                                                       ##
## This program is free software: you can redistribute it and/or modify  ##
## it under the terms of the GNU General Public License as published by  ##
## the Free Software Foundation, either version 3 of the License, or     ##
## (at your option) any later version.                                   ##
##                                                                       ##
## This program is distributed in the hope that it will be useful,       ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of        ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         ##
## GNU General Public License for more details.                          ##
##                                                                       ##
## You should have received a copy of the GNU General Public License     ##
## along with this program.  If not, see <http://www.gnu.org/licenses/>. ##
##                                                                       ##
###########################################################################
from modules.OsmoseTranslation import T_
from .Analyser_Osmosis import Analyser_Osmosis
sql10 = """
SELECT
    id,
    regexp_replace(ST_IsValidReason(ST_MakePolygon(linestring)), '[^[]+\\[([^]]+).*', 'POINT(\\1)'),
    ST_IsValidReason(ST_MakePolygon(linestring)) AS detail
FROM
    {0}ways
WHERE
    NOT is_polygon AND
    NOT (tags?'roller_coaster' AND tags->'roller_coaster' = 'track') AND -- permit self-intersecting ways
    NOT (tags?'highway' AND tags->'highway' = 'raceway') AND -- permit self-intersecting ways
    nodes[1] = nodes[array_length(nodes,1)] AND
    ST_NumPoints(linestring) > 3 AND
    ST_IsClosed(linestring) AND
    NOT ST_IsValid(ST_MakePolygon(linestring))
"""
sql20 = """
CREATE TEMP TABLE {0}_{1}_relation_linestrings AS
SELECT
  relations.id,
  ST_LineMerge(ST_Collect(linestring)) AS linestring
FROM
  {0}relations AS relations
  JOIN relation_members ON
    relation_members.relation_id = relations.id AND
    relation_members.member_type = 'W' AND
    relation_members.member_role = 'outer'
  JOIN {1}ways AS ways ON
    ways.id = relation_members.member_id AND
    ST_NumPoints(ways.linestring) >= 2
WHERE
  relations.tags?'type' AND
  relations.tags->'type' IN ('multipolygon', 'boundary')
GROUP BY
  relations.id
"""
sql21 = """
SELECT
    id,
    regexp_replace(ST_IsValidReason(ST_MakePolygon(linestring)), '[^[]+\\[([^]]+).*', 'POINT(\\1)'),
    ST_IsValidReason(ST_MakePolygon(linestring)) AS detail
FROM
    {0}_{1}_relation_linestrings
WHERE
    (ST_NumGeometries(linestring) IS NULL OR ST_NumGeometries(linestring) = 1) AND
    ST_NumPoints(linestring) > 3 AND
    ST_IsClosed(linestring) AND
    NOT ST_IsValid(ST_MakePolygon(linestring))
"""
class Analyser_Osmosis_Polygon(Analyser_Osmosis):
    def __init__(self, config, logger = None):
        Analyser_Osmosis.__init__(self, config, logger)
        doc = dict(
            detail = T_(
'''The polygon intersects itself. The marker points directly to the
error area of the crossing.'''),
            fix = T_(
'''Find where the polygon intersects itself (i.e. where it forms an '8') and
correct the geometry into a single loop (a '0') by removing nodes, changing
the order of the nodes, adding new nodes, or creating multiple polygons.'''),
            trap = T_(
'''Make sure the nodes to move do not belong to another way.'''),
            example = T_(
''''''))
        self.classs_change[1] = self.def_class(item = 1040, level = 1, tags = ['geom', 'fix:chair'], title = T_('Invalid polygon'), **doc)
        self.classs_change[2] = self.def_class(item = 1040, level = 1, tags = ['geom', 'fix:chair'], title = T_('Invalid multipolygon'), **doc)
        self.callback10 = lambda res: {"class":1, "data":[self.way_full, self.positionAsText], "text": {"en": res[2]}}
        self.callback20 = lambda res: {"class":2, "data":[self.relation, self.positionAsText], "text": {"en": res[2]}}
    def METHOD_NAME(self):
        self.run(sql10.format(""), self.callback10)
        self.run(sql20.format("", ""))
        self.run(sql21.format("", ""), self.callback20)
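    # Hedged note (assumption): the empty format arguments above run the checks
    # against the full ways/relations tables, while analyser_osmosis_diff below
    # restricts the same SQL to the touched_/not_touched_ table prefixes.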
    def analyser_osmosis_diff(self):
        self.run(sql10.format("touched_"), self.callback10)
        self.run(sql20.format("touched_", ""))
        self.run(sql21.format("touched_", ""), self.callback20)
        self.run(sql20.format("not_touched_", "touched_"))
        self.run(sql21.format("not_touched_", "touched_"), self.callback20) | 
| 166 | 
	fc name | 
	# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
#
# Copyright (C) 2006 Red Hat 
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
Utilities for dealing with the compilation of modules and creation
of module trees.
"""
import re
import tempfile
try:
    from subprocess import getstatusoutput
except ImportError:
    from commands import getstatusoutput
import os
import os.path
import shutil
import selinux
from . import defaults
def is_valid_name(modname):
    """Check that a module name is valid.
    """
    m = re.findall(r"[^a-zA-Z0-9_\-\.]", modname)
    if len(m) == 0 and modname[0].isalpha():
        return True
    else:
        return False
class ModuleTree:
    def __init__(self, modname):
        self.modname = modname
        self.dirname = None
    def dir_name(self):
        return self.dirname
    def te_name(self):
        return self.dirname + "/" + self.modname + ".te"
    def METHOD_NAME(self):
        return self.dirname + "/" + self.modname + ".fc"
    def if_name(self):
        return self.dirname + "/" + self.modname + ".if"
    def package_name(self):
        return self.dirname + "/" + self.modname + ".pp"
    def makefile_name(self):
        return self.dirname + "/Makefile"
    def create(self, parent_dirname, makefile_include=None):
        self.dirname = parent_dirname + "/" + self.modname
        os.mkdir(self.dirname)
        fd = open(self.makefile_name(), "w")
        if makefile_include:
            fd.write("include " + makefile_include)
        else:
            fd.write("include " + defaults.refpolicy_makefile())
        fd.close()
        # Create empty files for the standard refpolicy
        # module files
        open(self.te_name(), "w").close()
        open(self.METHOD_NAME(), "w").close()
        open(self.if_name(), "w").close()
def modname_from_sourcename(sourcename):
    return os.path.splitext(os.path.split(sourcename)[1])[0]
class ModuleCompiler:
    """ModuleCompiler eases running of the module compiler.
    The ModuleCompiler class encapsulates running the commandline
    module compiler (checkmodule) and module packager (semodule_package).
    You are likely interested in the create_module_package method.
    
    Several options are controlled via parameters (these only affect the
    non-refpol builds):
    
     .mls          [boolean] Generate an MLS module (by passing -M to
                   checkmodule). True to generate an MLS module, false
                   otherwise.
                   
     .module       [boolean] Generate a module instead of a base module.
                   True to generate a module, false to generate a base.
                   
     .checkmodule  [string] Fully qualified path to the module compiler.
                   Default is /usr/bin/checkmodule.
                   
     .semodule_package [string] Fully qualified path to the module
                   packager. Defaults to /usr/bin/semodule_package.
     .output       [file object] File object used to write verbose
                   output of the compilation and packaging process.
    """
    def __init__(self, output=None):
        """Create a ModuleCompiler instance, optionally with an
        output file object for verbose output of the compilation process.
        """
        self.mls = selinux.is_selinux_mls_enabled()
        self.module = True
        self.checkmodule = "/usr/bin/checkmodule"
        self.semodule_package = "/usr/bin/semodule_package"
        self.output = output
        self.last_output = ""
        self.refpol_makefile = defaults.refpolicy_makefile()
        self.make = "/usr/bin/make"
    def o(self, str):
        if self.output:
            self.output.write(str + "\n")
        self.last_output = str
    def run(self, command):
        self.o(command)
        rc, output = getstatusoutput(command)
        self.o(output)
        
        return rc
    
    def gen_filenames(self, sourcename):
        """Generate the module and policy package filenames from
        a source file name. The source file must be in the form
        of "foo.te". This will generate "foo.mod" and "foo.pp".
        
        Returns a tuple with (modname, policypackage).
        """
        splitname = sourcename.split(".")
        if len(splitname) < 2:
            raise RuntimeError("invalid sourcefile name %s (must end in .te)", sourcename)
        # Handle other periods in the filename correctly
        basename = ".".join(splitname[0:-1])
        modname = basename + ".mod"
        packagename = basename + ".pp"
        
        return (modname, packagename)
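    # Hedged example (assumed, not from the original module): extra dots in the
    # stem are preserved, so "my.policy.te" yields ("my.policy.mod", "my.policy.pp").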
    def create_module_package(self, sourcename, refpolicy=True):
        """Create a module package saved in a packagename from a
        sourcename.
        The create_module_package creates a module package saved in a
        file named sourcename (.pp is the standard extension) from a
        source file (.te is the standard extension). The source file
        should contain SELinux policy statements appropriate for a
        base or non-base module (depending on the setting of .module).
        Only file names are accepted, not open file objects or
        descriptors because the command line SELinux tools are used.
        On error a RuntimeError will be raised with a descriptive
        error message.
        """
        if refpolicy:
            self.refpol_build(sourcename)
        else:
            modname, packagename = self.gen_filenames(sourcename)
            self.compile(sourcename, modname)
            self.package(modname, packagename)
            os.unlink(modname)
            
    def refpol_build(self, sourcename):
        # Compile
        command = self.make + " -f " + self.refpol_makefile
        rc = self.run(command)
        # Raise an error if the process failed
        if rc != 0:
            raise RuntimeError("compilation failed:\n%s" % self.last_output)
        
    def compile(self, sourcename, modname):
        s = [self.checkmodule]
        if self.mls:
            s.append("-M")
        if self.module:
            s.append("-m")
        s.append("-o")
        s.append(modname)
        s.append(sourcename)
        rc = self.run(" ".join(s))
        if rc != 0:
            raise RuntimeError("compilation failed:\n%s" % self.last_output)
    def package(self, modname, packagename):
        s = [self.semodule_package]
        s.append("-o")
        s.append(packagename)
        s.append("-m")
        s.append(modname)
        
        rc = self.run(" ".join(s))
        if rc != 0:
            raise RuntimeError("packaging failed [%s]" % self.last_output)
        
     | 
| 167 | 
	payload encryption cryptobox | 
	# automatically generated by the FlatBuffers compiler, do not modify
# namespace: proto
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class PublisherFeatures(object):
    __slots__ = ['_tab']
    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = PublisherFeatures()
        x.Init(buf, n + offset)
        return x
    @classmethod
    def GetRootAsPublisherFeatures(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    # PublisherFeatures
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)
    # PublisherFeatures
    def PublisherIdentification(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False
    # PublisherFeatures
    def PublisherExclusion(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False
    # PublisherFeatures
    def SubscriberBlackwhiteListing(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False
    # PublisherFeatures
    def AcknowledgeEventReceived(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False
    # PublisherFeatures
    def PayloadTransparency(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False
    # PublisherFeatures
    def METHOD_NAME(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False
def PublisherFeaturesStart(builder): builder.StartObject(6)
def Start(builder):
    return PublisherFeaturesStart(builder)
def PublisherFeaturesAddPublisherIdentification(builder, publisherIdentification): builder.PrependBoolSlot(0, publisherIdentification, 0)
def AddPublisherIdentification(builder, publisherIdentification):
    return PublisherFeaturesAddPublisherIdentification(builder, publisherIdentification)
def PublisherFeaturesAddPublisherExclusion(builder, publisherExclusion): builder.PrependBoolSlot(1, publisherExclusion, 0)
def AddPublisherExclusion(builder, publisherExclusion):
    return PublisherFeaturesAddPublisherExclusion(builder, publisherExclusion)
def PublisherFeaturesAddSubscriberBlackwhiteListing(builder, subscriberBlackwhiteListing): builder.PrependBoolSlot(2, subscriberBlackwhiteListing, 0)
def AddSubscriberBlackwhiteListing(builder, subscriberBlackwhiteListing):
    return PublisherFeaturesAddSubscriberBlackwhiteListing(builder, subscriberBlackwhiteListing)
def PublisherFeaturesAddAcknowledgeEventReceived(builder, acknowledgeEventReceived): builder.PrependBoolSlot(3, acknowledgeEventReceived, 0)
def AddAcknowledgeEventReceived(builder, acknowledgeEventReceived):
    return PublisherFeaturesAddAcknowledgeEventReceived(builder, acknowledgeEventReceived)
def PublisherFeaturesAddPayloadTransparency(builder, payloadTransparency): builder.PrependBoolSlot(4, payloadTransparency, 0)
def AddPayloadTransparency(builder, payloadTransparency):
    return PublisherFeaturesAddPayloadTransparency(builder, payloadTransparency)
def PublisherFeaturesAddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox): builder.PrependBoolSlot(5, payloadEncryptionCryptobox, 0)
def AddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox):
    return PublisherFeaturesAddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox)
def PublisherFeaturesEnd(builder): return builder.EndObject()
def End(builder):
    return PublisherFeaturesEnd(builder) | 
| 168 | 
	get fullpath | 
	################################################################################
 # Copyright (C) 2023 Maxim Integrated Products, Inc., All Rights Reserved.
 #
 # Permission is hereby granted, free of charge, to any person obtaining a
 # copy of this software and associated documentation files (the "Software"),
 # to deal in the Software without restriction, including without limitation
 # the rights to use, copy, modify, merge, publish, distribute, sublicense,
 # and/or sell copies of the Software, and to permit persons to whom the
 # Software is furnished to do so, subject to the following conditions:
 #
 # The above copyright notice and this permission notice shall be included
 # in all copies or substantial portions of the Software.
 #
 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 # IN NO EVENT SHALL MAXIM INTEGRATED BE LIABLE FOR ANY CLAIM, DAMAGES
 # OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 # OTHER DEALINGS IN THE SOFTWARE.
 #
 # Except as contained in this notice, the name of Maxim Integrated
 # Products, Inc. shall not be used except as stated in the Maxim Integrated
 # Products, Inc. Branding Policy.
 #
 # The mere transfer of this software does not imply any licenses
 # of trade secrets, proprietary technology, copyrights, patents,
 # trademarks, maskwork rights, or any other form of intellectual
 # property whatsoever. Maxim Integrated Products, Inc. retains all
 # ownership rights.
 #
 ###############################################################################
copy_right='''/******************************************************************************
 * Copyright (C) 2023 Maxim Integrated Products, Inc., All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL MAXIM INTEGRATED BE LIABLE FOR ANY CLAIM, DAMAGES
 * OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Except as contained in this notice, the name of Maxim Integrated
 * Products, Inc. shall not be used except as stated in the Maxim Integrated
 * Products, Inc. Branding Policy.
 *
 * The mere transfer of this software does not imply any licenses
 * of trade secrets, proprietary technology, copyrights, patents,
 * trademarks, maskwork rights, or any other form of intellectual
 * property whatsoever. Maxim Integrated Products, Inc. retains all
 * ownership rights.
 *
 ******************************************************************************/
'''
import sys
import os
import re
def METHOD_NAME(file_dir, file_name):
    if file_dir == "":
        return file_name
    if os.name == "posix":
        return file_dir + '/' + file_name
    if os.name == "nt":
        return file_dir + '\\' + file_name
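# Hedged illustration of METHOD_NAME above: on POSIX,
# METHOD_NAME("packets", "scp.1.host.hello_request.bin") returns
# "packets/scp.1.host.hello_request.bin"; with file_dir == "" the bare
# file name is returned unchanged (os.path.join would behave the same way).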
def parse_scpcmd_file(filename):
    f = open(filename, "r")
    packet_list_file = f.read()
    f.close()
    file_dir = os.path.dirname(filename)
    packets = []
    # Get number of packets to send
    for line in packet_list_file.split("\n"):
        file_name = line.strip()
        if file_name == '':
            continue
        s_m = re.search(r'(\w+[_-]*\w+)\.(\d+)\.(\w+[_-]*\w+)\.((\w+[_-]*)*)\.\w+', file_name)
        if s_m is not None:
            id = s_m.group(2)
            cmd = s_m.group(4)
            way_str = s_m.group(3)
        else:
            print("error: wrong filename: " + file_name)
            raise ValueError("wrong filename: " + file_name)
        if way_str == 'bl':
            is_tx = 0 # bl will send this to the host
        elif way_str == 'host':
            is_tx = 1 # host will send this to target
        else:
            print("error: wrong filename: " + file_name)
            raise ValueError("wrong filename: " + file_name)
        # read packet data
        data_str = ''
        with open(METHOD_NAME(file_dir, file_name), 'rb') as f:
            data_bin = f.read()
            for i, x in enumerate(data_bin):
                if (i != 0) and ((i % 16) == 0):
                    data_str += '\n'
                data_str += '0x{:02x}, '.format(x)
            data_str = data_str[:-2] # remove last comma
        if data_str is None:
            print("Error : Unable to read file packet : " + file_dir)
            raise ValueError("unable to read packet file in: " + file_dir)
        if cmd in ("hello_request",):
            packet_type = 1
        elif cmd in ("hello_reply",):
            packet_type = 2
        elif cmd in ("erase_mem", "del_mem"):
            packet_type = 3
        elif cmd in ("erase_mem_response", "del_mem_response"):
            packet_type = 4
        elif cmd in ("write_mem",):
            packet_type = 5   
        elif cmd in ("write_mem_response",):
            packet_type = 6   
        elif cmd in ("dump",):
            packet_type = 7
        elif cmd in ("write_crk_response",):
            packet_type = 8     
        else:
            packet_type = 0
        arr_name = f'scp_{id}_{cmd}'
        arr_len = len(data_bin)
        packets.append((packet_type, is_tx, arr_name, arr_len, data_str))
    return packets
if __name__ == "__main__":
    if len(sys.argv) > 3:
        packet_list = sys.argv[1]
        chip = sys.argv[2]
        image = sys.argv[3]
    elif len(sys.argv) > 2:
        packet_list = sys.argv[1]
        chip = sys.argv[2]
        image = 'fw'
    elif len(sys.argv) > 1:
        packet_list = sys.argv[1]
        chip = 'MAX32520KIT'
        image = 'fw'
    else:
        print('Usage error, please pass packet.list file as parameter')
        exit(-1)
    packets_data = parse_scpcmd_file(packet_list)
    target_file_name = f'scp_{chip}_{image}.c'
    with open(target_file_name, 'w') as f:
        f.write(copy_right)
        f.write('\n\n')
        scp_packets_arr = f'const scp_packet_struct scp_{chip}_{image}[] = {{ \n'
        for t, d, name, l, data in packets_data:
            f.write(f'static const unsigned char {name}[] = {{ {data} }};')
            f.write('\n\n')
            scp_packets_arr += f'{{ {t:<2}, {d:<2}, {l:<8}, {name} }},\n'
        scp_packets_arr += f'{{ {0:<2}, {0:<2}, {0:<8}, 0 }} \n'  # sentinel entry marking the end of the packet list
        scp_packets_arr += '};'  # end of array
        f.write('''
typedef struct {
	unsigned char type; // packet type: 1:hello_request, 2:hello_reply, 3:erase/del_mem, ...
    unsigned char is_tx;// 1: From host to target, 0: From target to host
    unsigned short len;
    const unsigned char *data;
} scp_packet_struct;\n\n
''')
        f.write(scp_packets_arr)
        f.write('\n')
        print(target_file_name + " generated.") | 
| 169 | 
	test get dockerrun happy case | 
	# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ebcli.objects.exceptions import ValidationError
from mock import patch
from unittest import TestCase
import ebcli.containers.dockerrun as dr
INVALID_VERSION = '3.5'
MOCK_IMG_NAME = 'janedoe/image'
MOCK_DOCKERRUN_PATH = '/home/local/ANT/user/hello/Dockerrun.aws.json'
MOCK_DOCKERRUN_CONTENTS = '{}'
MOCK_DOCKERRUN = {}
MOCK_PORT = '5000'
JSON_TRUE = "true"
MOCK_AUTH_KEY = '.dockerfg'
MOCK_AUTH_BUCKET = 'bucket'
MOCK_LOGDIR = '.elasticbeanstalk/logs/local'
class TestDockerrun(TestCase):
    def test_validate_dockerrun_v1_missing_version(self):
        drun = _make_mock_dockerrun()
        self.assertRaises(ValidationError, dr.validate_dockerrun_v1, drun, False)
        self.assertRaises(ValidationError, dr.validate_dockerrun_v1, drun, True)
    def test_validate_dockerrun_v1_invalid_version(self):
        drun = _make_mock_dockerrun(INVALID_VERSION)
        self.assertRaises(ValidationError, dr.validate_dockerrun_v1, drun, False)
        self.assertRaises(ValidationError, dr.validate_dockerrun_v1, drun, True)
    def test_validate_dockerrun_v1_valid_version(self):
        dockerrun = _make_mock_dockerrun(dr.VERSION_ONE)
        try:
            dr.validate_dockerrun_v1(dockerrun, False)
        except Exception:
            self.fail('Expected no exceptions raised.')
    def test_validate_dockerrun_v1_no_img(self):
        drun = _make_mock_dockerrun(dr.VERSION_ONE)
        self.assertRaises(ValidationError, dr.validate_dockerrun_v1, drun, True)
    def test_validate_dockerrun_v1_no_img_name(self):
        drun = _make_mock_dockerrun(dr.VERSION_ONE, img_update=JSON_TRUE)
        self.assertRaises(ValidationError, dr.validate_dockerrun_v1, drun, True)
    def test_validate_dockerrun_v1_no_port(self):
        drun = _make_mock_dockerrun(dr.VERSION_ONE, MOCK_IMG_NAME, JSON_TRUE)
        self.assertRaises(ValidationError, dr.validate_dockerrun_v1, drun, True)
    def test_validate_dockerrun_v1_has_port(self):
        drun = _make_mock_dockerrun(dr.VERSION_ONE, MOCK_IMG_NAME,
                                    JSON_TRUE, MOCK_PORT)
        try:
            dr.validate_dockerrun_v1(drun, True)
        except Exception:
            self.fail('Expected no exceptions raised.')
    def test_validate_dockerrun_v2_no_dockerrun(self):
        self.assertRaises(ValidationError, dr.validate_dockerrun_v2, None)
    def test_validate_dockerrun_v2_invalid_version(self):
        drun = _make_mock_dockerrun(dr.VERSION_ONE)
        self.assertRaises(ValidationError, dr.validate_dockerrun_v2, drun)
    def test_validate_dockerrun_v2_valid_version(self):
        drun = _make_mock_dockerrun(dr.VERSION_TWO)
        try:
            dr.validate_dockerrun_v2(drun)
        except Exception:
            self.fail('Expected no exceptions raised.')
    def test_require_docker_pull_with_no_dockerrun(self):
        self.assertTrue(dr.require_docker_pull(None),
                        'Expected pull True when no dockerrun is provided')
    def test_require_docker_pull_with_missing_img(self):
        dockerrun = _make_mock_dockerrun()
        self.assertTrue(dr.require_docker_pull(dockerrun),
                        'Expected pull True when no Image is provided')
    def test_require_docker_pull_with_img_true(self):
        dockerrun = _make_mock_dockerrun(img_update=JSON_TRUE)
        self.assertTrue(dr.require_docker_pull(dockerrun),
                        'Expected pull True when Image.Update=' + JSON_TRUE)
    def test_require_docker_pull_with_img_false(self):
        dockerrun = _make_mock_dockerrun(img_update=dr.JSON_FALSE)
        msg = 'Expected pull False when Image.Update=' + dr.JSON_FALSE
        self.assertFalse(dr.require_docker_pull(dockerrun), msg)
    @patch('ebcli.containers.dockerrun.fileoperations.get_json_dict')
    def METHOD_NAME(self, get_json_dict):
        get_json_dict.return_value = {}
        self.assertEqual({}, dr.get_dockerrun(MOCK_DOCKERRUN_PATH))
    @patch('ebcli.containers.dockerrun.fileoperations.get_json_dict')
    def test_get_dockerrun_ioerror_case(self, get_json_dict):
        get_json_dict.side_effect = IOError
        self.assertIsNone(dr.get_dockerrun(MOCK_DOCKERRUN_PATH))
    @patch('ebcli.containers.dockerrun.fileoperations.get_json_dict')
    def test_get_dockerrun_valueerror_case(self, get_json_dict):
        get_json_dict.side_effect = ValueError
        self.assertRaises(ValidationError, dr.get_dockerrun,
                          MOCK_DOCKERRUN_PATH)
    def test_require_auth_download_when_dockerrun_none(self):
        self.assertFalse(dr.require_auth_download(None))
    def test_require_auth_download_key_and_bucket_exists(self):
        dockerrun = _make_mock_dockerrun(auth_key=MOCK_AUTH_KEY,
                                         auth_bucket=MOCK_AUTH_BUCKET,
                                         version=dr.VERSION_ONE)
        self.assertTrue(dr.require_auth_download(dockerrun))
    def test_require_auth_download_key_and_bucket_not_exists(self):
        self.assertFalse(dr.require_auth_download({}))
    def test_get_auth_key(self):
        dockerrun = _make_mock_dockerrun(auth_key=MOCK_AUTH_KEY, version=dr.VERSION_ONE)
        self.assertEqual(MOCK_AUTH_KEY, dr.get_auth_key(dockerrun))
    def test_get_auth_key_keyerror(self):
        self.assertRaises(KeyError, dr.get_auth_key, {})
    def test_get_auth_bucket_name(self):
        dockerrun = _make_mock_dockerrun(auth_bucket=MOCK_AUTH_BUCKET,
                                         version=dr.VERSION_ONE)
        self.assertEqual(MOCK_AUTH_BUCKET, dr.get_auth_bucket_name(dockerrun))
    def test_get_auth_bucket_name_keyerror(self):
        self.assertRaises(KeyError, dr.get_auth_bucket_name, {})
    def test_get_logdir(self):
        dockerrun = _make_mock_dockerrun(logdir=MOCK_LOGDIR)
        self.assertEqual(MOCK_LOGDIR, dr.get_logdir(dockerrun))
    def test_get_logdir_none_dockerrun(self):
        self.assertIsNone(dr.get_logdir(None))
    def test_get_logdir_key_missing_dockerrun(self):
        self.assertIsNone(dr.get_logdir({}))
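# Hedged note on the helper below: only the keys whose arguments are supplied
# end up in the mock, e.g. _make_mock_dockerrun(dr.VERSION_ONE, MOCK_IMG_NAME,
# JSON_TRUE, MOCK_PORT) builds the version/image/port entries exercised by the
# v1 validation tests above.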
def _make_mock_dockerrun(version=None, img_name=None, img_update=None,
                         port=None, auth_key=None, auth_bucket=None,
                         logdir=None):
    dockerrun = {}
    if version:
        dockerrun[dr.VERSION_KEY] = version
    if img_name or img_update:
        dockerrun[dr.IMG_KEY] = {}
    if img_name:
        dockerrun[dr.IMG_KEY][dr.IMG_NAME_KEY] = img_name
    if img_update:
        dockerrun[dr.IMG_KEY][dr.IMG_UPDATE_KEY] = img_update
    if port:
        dockerrun[dr.PORTS_KEY] = [{dr.CONTAINER_PORT_KEY: port}]
    if auth_key or auth_bucket:
        dockerrun[dr.AUTH_KEY] = {}
    if auth_key:
        dockerrun[dr.AUTH_KEY][dr.AUTHKEY_KEY] = auth_key
    if auth_bucket:
        dockerrun[dr.AUTH_KEY][dr.AUTH_BUCKET_KEY] = auth_bucket
    if logdir:
        dockerrun[dr.LOGGING_KEY] = logdir
    return dockerrun | 
| 170 | 
	reduce mean | 
	# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import pgl.math as math
class Message(object):
    """This implement Message for graph.recv.
    Args:
        msg: A dictionary provided by send function.
        segment_ids: The id that the message belongs to.
   
    """
    def __init__(self, msg, segment_ids):
        self._segment_ids = segment_ids
        self._msg = msg
    def reduce(self, msg, pool_type="sum"):
        """This method reduce message by given `pool_type`.
        Now, this method only supports default reduce function, 
        with ('sum', 'mean', 'max', 'min').
        Args:
            feature (paddle.Tensor): feature with first dim as num_edges.
            pool_type (str): 'sum', 'mean', 'max', 'min' built-in receive function.
        Returns:
 
            Returns a paddle.Tensor with the first dim the same as the largest segment_id.
        """
        outputs = math.segment_pool(
            msg, self._segment_ids, pool_type=pool_type)
        return outputs
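    # Hedged illustration: with segment_ids = [0, 0, 1] and
    # msg = [[1.], [3.], [5.]], reduce(msg, pool_type="mean") returns
    # [[2.], [5.]] -- one row per destination segment id.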
    def reduce_sum(self, msg):
        """This method reduce message by sum. 
        Args:
            feature (paddle.Tensor): feature with first dim as num_edges.
        Returns:
 
            Returns a paddle.Tensor with the first dim the same as the largest segment_id.
        """
        return math.segment_sum(msg, self._segment_ids)
    def METHOD_NAME(self, msg):
        """This method reduce message by mean. 
        Args:
            feature (paddle.Tensor): feature with first dim as num_edges.
        Returns:
 
            Returns a paddle.Tensor with the first dim the same as the largest segment_id.
        """
        return math.segment_mean(msg, self._segment_ids)
    def reduce_max(self, msg):
        """This method reduce message by max. 
        Args:
            feature (paddle.Tensor): feature with first dim as num_edges.
        Returns:
 
            Returns a paddle.Tensor with the first dim the same as the largest segment_id.
        """
        return math.segment_max(msg, self._segment_ids)
    def reduce_min(self, msg):
        """This method reduce message by min. 
        Args:
            feature (paddle.Tensor): feature with first dim as num_edges.
        Returns:
 
            Returns a paddle.Tensor with the first dim the same as the largest segment_id.
        """
        return math.segment_min(msg, self._segment_ids)
    def edge_expand(self, msg):
        """This is the inverse method for reduce.
        Args:
            msg (paddle.Tensor): A reduced message. 
        Returns:
 
            Returns a paddle.Tensor with the first dim the same as the num_edges.
        Examples:
            .. code-block:: python
                import numpy as np
                import pgl
                import paddle
                num_nodes = 5
                edges = [ (0, 1), (1, 2), (3, 4)]
                feature = np.random.randn(5, 100)
                edge_feature = np.random.randn(3, 100)
                graph = pgl.Graph(num_nodes=num_nodes,
                        edges=edges,
                        node_feat={
                            "feature": feature
                        },
                        edge_feat={
                            "edge_feature": edge_feature
                        })
                graph.tensor() 
                def send_func(src_feat, dst_feat, edge_feat):
                    return { "out": src_feat["feature"] }
                message = graph.send(send_func, src_feat={"feature": graph.node_feat["feature"]})
                def recv_func(msg):
                    value = msg["out"]
                    max_value = msg.reduce_max(value)
                    # Gather the max_value corresponding to each edge's destination node.
                    max_value = msg.edge_expand(max_value)
                    value = value - max_value
                    return msg.reduce_sum(value)
                   
                out = graph.recv(recv_func, message)
        """
        return paddle.gather(msg, self._segment_ids, axis=0)
    def reduce_softmax(self, msg):
        """This method reduce message by softmax. 
        Args:
            feature (paddle.Tensor): feature with first dim as num_edges.
        Returns:
 
            Returns a paddle.Tensor with the first dim the same as the largest segment_id.
        """
        return math.segment_softmax(msg, self._segment_ids)
    def __getitem__(self, key):
        return self._msg[key] | 
| 171 | 
	next | 
	"""A readline()-style interface to the parts of a multipart message.
The MultiFile class makes each part of a multipart message "feel" like
an ordinary file, as long as you use fp.readline().  Allows recursive
use, for nested multipart messages.  Probably best used together
with module mimetools.
Suggested use:
real_fp = open(...)
fp = MultiFile(real_fp)
"read some lines from fp"
fp.push(separator)
while 1:
        "read lines from fp until it returns an empty string" (A)
        if not fp.next(): break
fp.pop()
"read remaining lines from fp until it returns an empty string"
The latter sequence may be used recursively at (A).
It is also allowed to use multiple push()...pop() sequences.
If seekable is given as 0, the class code will not do the bookkeeping
it normally attempts in order to make seeks relative to the beginning of the
current file part.  This may be useful when using MultiFile with a non-
seekable stream object.
"""
from warnings import warn
warn("the multifile module has been deprecated since Python 2.5",
        DeprecationWarning, stacklevel=2)
del warn
__all__ = ["MultiFile","Error"]
class Error(Exception):
    pass
class MultiFile:
    seekable = 0
    def __init__(self, fp, seekable=1):
        self.fp = fp
        self.stack = []
        self.level = 0
        self.last = 0
        if seekable:
            self.seekable = 1
            self.start = self.fp.tell()
            self.posstack = []
    def tell(self):
        if self.level > 0:
            return self.lastpos
        return self.fp.tell() - self.start
    def seek(self, pos, whence=0):
        here = self.tell()
        if whence:
            if whence == 1:
                pos = pos + here
            elif whence == 2:
                if self.level > 0:
                    pos = pos + self.lastpos
                else:
                    raise Error, "can't use whence=2 yet"
        if not 0 <= pos <= here or \
                        self.level > 0 and pos > self.lastpos:
            raise Error, 'bad MultiFile.seek() call'
        self.fp.seek(pos + self.start)
        self.level = 0
        self.last = 0
    def readline(self):
        if self.level > 0:
            return ''
        line = self.fp.readline()
        # Real EOF?
        if not line:
            self.level = len(self.stack)
            self.last = (self.level > 0)
            if self.last:
                raise Error, 'sudden EOF in MultiFile.readline()'
            return ''
        assert self.level == 0
        # Fast check to see if this is just data
        if self.is_data(line):
            return line
        else:
            # Ignore trailing whitespace on marker lines
            marker = line.rstrip()
        # No?  OK, try to match a boundary.
        # Return the line (unstripped) if we don't.
        for i, sep in enumerate(reversed(self.stack)):
            if marker == self.section_divider(sep):
                self.last = 0
                break
            elif marker == self.end_marker(sep):
                self.last = 1
                break
        else:
            return line
        # We only get here if we see a section divider or EOM line
        if self.seekable:
            self.lastpos = self.tell() - len(line)
        self.level = i+1
        if self.level > 1:
            raise Error,'Missing endmarker in MultiFile.readline()'
        return ''
    def readlines(self):
        list = []
        while 1:
            line = self.readline()
            if not line: break
            list.append(line)
        return list
    def read(self): # Note: no size argument -- read until EOF only!
        return ''.join(self.readlines())
    def METHOD_NAME(self):
        while self.readline(): pass
        if self.level > 1 or self.last:
            return 0
        self.level = 0
        self.last = 0
        if self.seekable:
            self.start = self.fp.tell()
        return 1
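    # Hedged illustration: METHOD_NAME() skips the rest of the current part and
    # returns 1 if another part follows a "--<separator>" divider, or 0 once the
    # "--<separator>--" end marker has been seen.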
    def push(self, sep):
        if self.level > 0:
            raise Error, 'bad MultiFile.push() call'
        self.stack.append(sep)
        if self.seekable:
            self.posstack.append(self.start)
            self.start = self.fp.tell()
    def pop(self):
        if self.stack == []:
            raise Error, 'bad MultiFile.pop() call'
        if self.level <= 1:
            self.last = 0
        else:
            abslastpos = self.lastpos + self.start
        self.level = max(0, self.level - 1)
        self.stack.pop()
        if self.seekable:
            self.start = self.posstack.pop()
            if self.level > 0:
                self.lastpos = abslastpos - self.start
    def is_data(self, line):
        return line[:2] != '--'
    def section_divider(self, str):
        return "--" + str
    def end_marker(self, str):
        return "--" + str + "--" | 
| 172 | 
	compute shapes | 
	# Copyright (c) 2020  PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
import paddle
from paddle.fluid import layers
from paddle2onnx.legacy.op_mapper import CustomPaddleOp, register_custom_paddle_op
from paddle2onnx.legacy.op_mapper import OpMapper as op_mapper
from paddle2onnx.legacy.op_mapper import mapper_helper
class AnchorGenerator(CustomPaddleOp):
    def __init__(self, node, **kw):
        super(AnchorGenerator, self).__init__(node)
        #self.x_shape = node.input_shape('Input', 0)
        self.anchor_sizes = node.attr('anchor_sizes')
        self.aspect_ratios = node.attr('aspect_ratios')
        self.offset = node.attr('offset')
        self.strides = node.attr('stride')
        self.variances = node.attr('variances')
        self.shapes = self.METHOD_NAME()
    def METHOD_NAME(self):
        shapes = list()
        for r in range(len(self.aspect_ratios)):
            ar = self.aspect_ratios[r]
            for s in range(len(self.anchor_sizes)):
                anchor_size = self.anchor_sizes[s]
                area = self.strides[0] * self.strides[1]
                area_ratios = area / ar
                base_w = np.floor(np.sqrt(area_ratios) + 0.5)
                base_h = np.floor(base_w * ar + 0.5)
                scale_w = anchor_size / self.strides[0]
                scale_h = anchor_size / self.strides[1]
                w = scale_w * base_w
                h = scale_h * base_h
                shapes.append([
                    -0.5 * (w - 1), -0.5 * (h - 1), 0.5 * (w - 1), 0.5 * (h - 1)
                ])
        return shapes
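    # Hedged worked example for METHOD_NAME: with strides=[16, 16],
    # aspect_ratio=1.0 and anchor_size=64 -> area=256, base_w=base_h=16,
    # scale_w=scale_h=4, so w=h=64 and the base box is
    # [-31.5, -31.5, 31.5, 31.5], centred on the origin.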
    def forward(self):
        input_feature = self.input('Input', 0)
        input_shape = paddle.shape(input_feature)
        n, c, h, w = paddle.tensor.split(input_shape, num_or_sections=4)
        x_ctr = paddle.arange(start=0, end=w, step=1, dtype=input_feature.dtype)
        y_ctr = paddle.arange(start=0, end=h, step=1, dtype=input_feature.dtype)
        x_ctr = x_ctr * self.strides[0] + self.offset * (self.strides[0] - 1)
        y_ctr = y_ctr * self.strides[1] + self.offset * (self.strides[1] - 1)
        tensor_one = paddle.ones(shape=[1], dtype='int64')
        tensor_len_shape = paddle.full(
            shape=[1], fill_value=len(self.shapes), dtype='int64')
        x_ctr = paddle.reshape(x_ctr, shape=(1, -1))
        y_ctr = paddle.reshape(y_ctr, shape=(1, -1))
        x_ctr = paddle.tile(x_ctr, repeat_times=(h, tensor_one))
        y_ctr = paddle.tile(y_ctr, repeat_times=(w, tensor_one))
        y_ctr = paddle.transpose(y_ctr, perm=[1, 0])
        centers = paddle.stack([x_ctr, y_ctr], axis=-1)
        centers = paddle.tensor.unsqueeze(centers, axis=[2])
        centers = paddle.tile(centers, repeat_times=(1, 1, len(self.shapes), 2))
        shape_tensor = paddle.assign(np.array(self.shapes).astype('float32'))
        anchors = centers + shape_tensor
        variance_tensor = paddle.assign(
            np.asarray(self.variances).astype('float32'))
        vars = paddle.reshape(variance_tensor, shape=[1, 1, 1, -1])
        vars = paddle.tile(
            vars, repeat_times=(h, w, tensor_len_shape, tensor_one))
        return {'Anchors': [anchors], 'Variances': [vars]}
@op_mapper('anchor_generator')
class Anchors_generator:
    @classmethod
    def opset_1(cls, graph, node, **kw):
        node = graph.make_node(
            'anchor_generator',
            inputs=node.input('Input'),
            outputs=node.output('Anchors') + node.output('Variances'),
            anchor_sizes = node.attr('anchor_sizes'),
            aspect_ratios = node.attr('aspect_ratios'),
            offset = node.attr('offset'),
            strides = node.attr('stride'),
            variances = node.attr('variances'),
            domain = 'custom')
register_custom_paddle_op('anchor_generator', AnchorGenerator) | 
| 173 | 
	cmake args | 
	# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Pumi(CMakePackage):
    """SCOREC RPI's Parallel Unstructured Mesh Infrastructure (PUMI).
    An efficient distributed mesh data structure and methods to support
    parallel adaptive analysis including general mesh-based operations,
    such as mesh entity creation/deletion, adjacency and geometric
    classification, iterators, arbitrary (field) data attachable to mesh
    entities, efficient communication involving entities duplicated
    across multiple tasks, migration of mesh entities between tasks,
    and dynamic load balancing."""
    homepage = "https://www.scorec.rpi.edu/pumi"
    git = "https://github.com/SCOREC/core.git"
    maintainers("cwsmith")
    tags = ["e4s"]
    # We will use the scorec/core master branch as the 'nightly' version
    # of pumi in spack.  The master branch is more stable than the
    # scorec/core develop branch and we prefer not to expose spack users
    # to the added instability.
    version("master", submodules=True, branch="master")
    version(
        "2.2.7", submodules=True, commit="a295720d7b4828282484f2b78bac1f6504512de4"
    )  # tag 2.2.7
    version("2.2.6", commit="4dd330e960b1921ae0d8d4039b8de8680a20d993")  # tag 2.2.6
    version("2.2.5", commit="73c16eae073b179e45ec625a5abe4915bc589af2")  # tag 2.2.5
    version("2.2.4", commit="8072fdbafd53e0c9a63248a269f4cce5000a4a8e")  # tag 2.2.4
    version("2.2.3", commit="d200cb366813695d0f18b514d8d8ecc382cb79fc")  # tag 2.2.3
    version("2.2.2", commit="bc34e3f7cfd8ab314968510c71486b140223a68f")  # tag 2.2.2
    version("2.2.1", commit="cd826205db21b8439026db1f6af61a8ed4a18564")  # tag 2.2.1
    version("2.2.0", commit="8c7e6f13943893b2bc1ece15003e4869a0e9634f")  # tag 2.2.0
    version("2.1.0", commit="840fbf6ec49a63aeaa3945f11ddb224f6055ac9f")
    variant("int64", default=False, description="Enable 64bit mesh entity ids")
    variant("shared", default=False, description="Build shared libraries")
    variant("zoltan", default=False, description="Enable Zoltan Features")
    variant("fortran", default=False, description="Enable FORTRAN interface")
    variant("testing", default=False, description="Enable all tests")
    variant(
        "simmodsuite",
        default="none",
        values=("none", "base", "kernels", "full"),
        description="Enable Simmetrix SimModSuite Support: 'base' enables "
        "the minimum set of functionality, 'kernels' adds CAD kernel "
        "support to 'base', and 'full' enables all functionality.",
    )
    variant(
        "simmodsuite_version_check",
        default=True,
        description="Enable check of Simmetrix SimModSuite version. "
        "Disable the check for testing new versions.",
    )
    depends_on("mpi")
    depends_on("cmake@3:", type="build")
    depends_on("zoltan", when="+zoltan")
    depends_on("zoltan+int64", when="+zoltan+int64")
    simbase = "+base"
    simkernels = simbase + "+parasolid+acis+discrete"
    simfull = (
        simkernels
        + "+abstract+adv+advmodel\
                            +import+paralleladapt+parallelmesh"
    )
    depends_on("simmetrix-simmodsuite" + simbase, when="simmodsuite=base")
    depends_on("simmetrix-simmodsuite" + simkernels, when="simmodsuite=kernels")
    depends_on("simmetrix-simmodsuite" + simfull, when="simmodsuite=full")
    def METHOD_NAME(self):
        spec = self.spec
        args = [
            "-DSCOREC_CXX_WARNINGS=OFF",
            self.define_from_variant("ENABLE_ZOLTAN", "zoltan"),
            "-DCMAKE_C_COMPILER=%s" % spec["mpi"].mpicc,
            "-DCMAKE_CXX_COMPILER=%s" % spec["mpi"].mpicxx,
            self.define_from_variant("BUILD_SHARED_LIBS", "shared"),
            self.define_from_variant("PUMI_FORTRAN_INTERFACE", "fortran"),
            "-DMDS_ID_TYPE=%s" % ("long" if "+int64" in spec else "int"),
            "-DSKIP_SIMMETRIX_VERSION_CHECK=%s"
            % ("ON" if "~simmodsuite_version_check" in spec else "OFF"),
            self.define_from_variant("IS_TESTING", "testing"),
            "-DMESHES=%s" % join_path(self.stage.source_path, "pumi-meshes"),
        ]
        if spec.satisfies("fortran"):
            args += ["-DCMAKE_Fortran_COMPILER=%s" % spec["mpi"].mpifc]
        if spec.satisfies("@2.2.3"):
            args += ["-DCMAKE_CXX_STANDARD=11"]
        if self.spec.variants["simmodsuite"].value != "none":
            args.append("-DENABLE_SIMMETRIX=ON")
            mpi_id = spec["mpi"].name + spec["mpi"].version.up_to(1).string
            args.append("-DSIM_MPI=" + mpi_id)
            if self.spec.variants["simmodsuite"].value in ["kernels", "full"]:
                args.append("-DENABLE_SIMMETRIX=ON")
                args.append("-DSIM_PARASOLID=ON")
                args.append("-DSIM_ACIS=ON")
                args.append("-DSIM_DISCRETE=ON")
        return args
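    # Hedged example: a spec such as pumi+zoltan+shared+fortran would add
    # -DENABLE_ZOLTAN=ON, -DBUILD_SHARED_LIBS=ON and the MPI Fortran compiler
    # wrapper to the argument list built above.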
    def test(self):
        if self.spec.version <= Version("2.2.6"):
            return
        exe = "uniform"
        options = ["../testdata/pipe.dmg", "../testdata/pipe.smb", "pipe_unif.smb"]
        expected = "mesh pipe_unif.smb written"
        description = "testing pumi uniform mesh refinement"
        self.run_test(exe, options, expected, purpose=description, work_dir=self.prefix.bin)
        mpiexec = Executable(join_path(self.spec["mpi"].prefix.bin, "mpiexec")).command
        mpiopt = ["-n", "2"]
        exe = ["split"]
        options = ["../testdata/pipe.dmg", "../testdata/pipe.smb", "pipe_2_.smb", "2"]
        expected = "mesh pipe_2_.smb written"
        description = "testing pumi mesh partitioning"
        self.run_test(
            mpiexec,
            mpiopt + exe + options,
            expected,
            purpose=description,
            work_dir=self.prefix.bin,
        ) | 
| 174 | 
	seek | 
	#!/usr/bin/env python3
# 
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
import contextlib
import logging
from typing import (
    Iterator,
    Set
)
from ..abc import CodeStreamAPI
from ..validation import (
    validate_is_bytes,
)
from ..vm.opcode_values import (
    PUSH1,
    PUSH32,
    STOP,
)
class CodeStream(CodeStreamAPI):
    __slots__ = ['_length_cache', '_raw_code_bytes', 'invalid_positions', 'valid_positions']
    logger = logging.getLogger('eth.vm.CodeStream')
    def __init__(self, code_bytes: bytes) -> None:
        validate_is_bytes(code_bytes, title="CodeStream bytes")
        # in order to avoid method overhead when setting/accessing pc, we no longer fence
        # the pc (Program Counter) into 0 <= pc <= len(code_bytes). We now let it float free.
        # NOTE: Setting pc to a negative value has undefined behavior.
        self.program_counter = 0
        self._raw_code_bytes = code_bytes
        self._length_cache = len(code_bytes)
        self.invalid_positions: Set[int] = set()
        self.valid_positions: Set[int] = set()
    def read(self, size: int) -> bytes:
        old_program_counter = self.program_counter
        target_program_counter = old_program_counter + size
        self.program_counter = target_program_counter
        return self._raw_code_bytes[old_program_counter:target_program_counter]
    @property
    def pc(self) -> int:
        return self.program_counter - 1
    def __len__(self) -> int:
        return self._length_cache
    def __getitem__(self, i: int) -> int:
        return self._raw_code_bytes[i]
    def __iter__(self) -> Iterator[int]:
        # a very performance-sensitive method
        pc = self.program_counter
        while pc < self._length_cache:
            opcode = self._raw_code_bytes[pc]
            self.program_counter = pc + 1
            yield opcode
            # a read might have adjusted the pc during the last yield
            pc = self.program_counter
        yield STOP
    def peek(self) -> int:
        pc = self.program_counter
        if pc < self._length_cache:
            return self._raw_code_bytes[pc]
        else:
            return STOP
    @contextlib.contextmanager
    def METHOD_NAME(self, program_counter: int) -> Iterator['CodeStream']:
        anchor_pc = self.program_counter
        self.program_counter = program_counter
        try:
            yield self
        finally:
            self.program_counter = anchor_pc
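    # Hedged usage sketch of the context manager above:
    #     with code_stream.METHOD_NAME(jump_target) as cs:
    #         next_opcode = cs.peek()
    #     # program_counter is restored to its previous value on exit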
    def _potentially_disqualifying_opcode_positions(self, position: int) -> Iterator[int]:
        # Look at the last 32 positions (from 1 byte back to 32 bytes back).
        # Don't attempt to look at negative positions.
        deepest_lookback = min(32, position)
        # iterate in reverse, because PUSH32 is more common than others
        for bytes_back in range(deepest_lookback, 0, -1):
            earlier_position = position - bytes_back
            opcode = self._raw_code_bytes[earlier_position]
            if PUSH1 + (bytes_back - 1) <= opcode <= PUSH32:
                # that PUSH1, if two bytes back, isn't disqualifying
                # PUSH32 in any of the bytes back is disqualifying
                yield earlier_position
    def is_valid_opcode(self, position: int) -> bool:
        if position >= self._length_cache:
            return False
        elif position in self.invalid_positions:
            return False
        elif position in self.valid_positions:
            return True
        else:
            # An opcode is not valid, iff it is the "data" following a PUSH_
            # So we look at the previous 32 bytes (PUSH32 being the largest) to see if there
            # is a PUSH_ before the opcode in this position.
            for disqualifier in self._potentially_disqualifying_opcode_positions(position):
                # Now that we found a PUSH_ before this position, we check if *that* PUSH is valid
                if self.is_valid_opcode(disqualifier):
                    # If the PUSH_ valid, then the current position is invalid
                    self.invalid_positions.add(position)
                    return False
                # Otherwise, keep looking for other potentially disqualifying PUSH_ codes
            # We didn't find any valid PUSH_ opcodes in the 32 bytes before position; it's valid
            self.valid_positions.add(position)
            return True | 
| 175 | 
	set path | 
	from typing import Any, Optional, Union
from AnyQt.QtWidgets import (
    QGraphicsItem, QGraphicsObject, QStyleOptionGraphicsItem, QWidget
)
from AnyQt.QtGui import (
    QPainterPath, QPainterPathStroker, QBrush, QPen, QPainter, QColor
)
from AnyQt.QtCore import Qt, QPointF, QRectF
from AnyQt.QtCore import pyqtSignal as Signal
class GraphicsPathObject(QGraphicsObject):
    """A QGraphicsObject subclass implementing an interface similar to
    QGraphicsPathItem, and also adding a positionChanged() signal
    """
    positionChanged = Signal([], ["QPointF"])
    def __init__(self, parent=None, **kwargs):
        # type: (Optional[QGraphicsItem], Any) -> None
        super().__init__(parent, **kwargs)
        self.setFlag(QGraphicsObject.ItemSendsGeometryChanges)
        self.__path = QPainterPath()
        self.__brush = QBrush(Qt.NoBrush)
        self.__pen = QPen()
        self.__boundingRect = None  # type: Optional[QRectF]
    def METHOD_NAME(self, path):
        # type: (QPainterPath) -> None
        """Set the items `path` (:class:`QPainterPath`).
        """
        if self.__path != path:
            self.prepareGeometryChange()
            # Need to store a copy of object so the shape can't be mutated
            # without properly updating the geometry.
            self.__path = QPainterPath(path)
            self.__boundingRect = None
            self.update()
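    # Hedged usage sketch (QPainterPath API as in Qt):
    #     p = QPainterPath()
    #     p.addEllipse(0, 0, 10, 10)
    #     item.METHOD_NAME(p)  # the item stores a copy, so later edits to p
    #                          # do not affect the item's geometry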
    def path(self):
        # type: () -> QPainterPath
        """Return the items path.
        """
        return QPainterPath(self.__path)
    def setBrush(self, brush):
        # type: (Union[QBrush, QColor, Qt.GlobalColor, Qt.BrushStyle]) -> None
        """Set the items `brush` (:class:`QBrush`)
        """
        if not isinstance(brush, QBrush):
            brush = QBrush(brush)
        if self.__brush != brush:
            self.__brush = QBrush(brush)
            self.update()
    def brush(self):
        # type: () -> QBrush
        """Return the items brush.
        """
        return QBrush(self.__brush)
    def setPen(self, pen):
        # type: (Union[QPen, QBrush, Qt.PenStyle]) -> None
        """Set the items outline `pen` (:class:`QPen`).
        """
        if not isinstance(pen, QPen):
            pen = QPen(pen)
        if self.__pen != pen:
            self.prepareGeometryChange()
            self.__pen = QPen(pen)
            self.__boundingRect = None
            self.update()
    def pen(self):
        # type: () -> QPen
        """Return the items pen.
        """
        return QPen(self.__pen)
    def paint(self, painter, option, widget=None):
        # type: (QPainter, QStyleOptionGraphicsItem, Optional[QWidget]) -> None
        if self.__path.isEmpty():
            return
        painter.save()
        painter.setPen(self.__pen)
        painter.setBrush(self.__brush)
        painter.drawPath(self.__path)
        painter.restore()
    def boundingRect(self):
        # type: () -> QRectF
        if self.__boundingRect is None:
            br = self.__path.controlPointRect()
            pen_w = self.__pen.widthF()
            self.__boundingRect = br.adjusted(-pen_w, -pen_w, pen_w, pen_w)
        return QRectF(self.__boundingRect)
    def shape(self):
        # type: () -> QPainterPath
        return shapeFromPath(self.__path, self.__pen)
    def itemChange(self, change, value):
        # type: (QGraphicsItem.GraphicsItemChange, Any) -> Any
        if change == QGraphicsObject.ItemPositionHasChanged:
            self.positionChanged.emit()
            self.positionChanged[QPointF].emit(value)
        return super().itemChange(change, value)
def shapeFromPath(path, pen):
    # type: (QPainterPath, QPen) -> QPainterPath
    """Create a QPainterPath shape from the `path` drawn with `pen`.
    """
    stroker = QPainterPathStroker()
    stroker.setCapStyle(pen.capStyle())
    stroker.setJoinStyle(pen.joinStyle())
    stroker.setMiterLimit(pen.miterLimit())
    stroker.setWidth(max(pen.widthF(), 1e-9))
    shape = stroker.createStroke(path)
    shape.addPath(path)
    return shape | 
| 176 | 
	test copy hbm2ddr | 
	# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
from dace import subsets as sbs, dtypes, memlet as mem
import dace
import numpy as np
from dace.codegen.targets.fpga import _FPGA_STORAGE_TYPES
from dace.dtypes import StorageType
from dace.fpga_testing import fpga_test, xilinx_test
# A test checking copies involving Multibank-arrays using HBM and DDR in some way
def mkc(sdfg: dace.SDFG,
        state_before,
        src_name,
        dst_name,
        src_storage=None,
        dst_storage=None,
        src_shape=None,
        dst_shape=None,
        copy_expr=None,
        src_loc=None,
        dst_loc=None):
    """
    Helper MaKe_Copy that creates and appends states performing exactly one copy. If a provided
    arrayname already exists it will use the old array, and ignore all newly passed values
    """
    if copy_expr is None:
        copy_expr = src_name
    if (state_before == None):
        state = sdfg.add_state(is_start_state=True)
    else:
        state = sdfg.add_state_after(state_before)
    def mkarray(name, shape, storage, loc):
        if (name in sdfg.arrays):
            return sdfg.arrays[name]
        is_transient = False
        if (storage in _FPGA_STORAGE_TYPES):
            is_transient = True
        arr = sdfg.add_array(name, shape, dace.int32, storage, transient=is_transient)
        if loc is not None:
            arr[1].location["memorytype"] = loc[0]
            arr[1].location["bank"] = loc[1]
        return arr
    a = mkarray(src_name, src_shape, src_storage, src_loc)
    b = mkarray(dst_name, dst_shape, dst_storage, dst_loc)
    aAcc = state.add_access(src_name)
    bAcc = state.add_access(dst_name)
    edge = state.add_edge(aAcc, None, bAcc, None, mem.Memlet(copy_expr))
    a_np_arr, b_np_arr = None, None
    if src_shape is not None:
        try:
            a_np_arr = np.zeros(src_shape, dtype=np.int32)
        except:
            pass
    if dst_shape is not None:
        try:
            b_np_arr = np.zeros(dst_shape, dtype=np.int32)
        except:
            pass
    return (state, a_np_arr, b_np_arr)
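# Hedged illustration of mkc: the first call in copy_multibank_1_mem_type below,
# mkc(sdfg, None, "a", "x", StorageType.Default, StorageType.FPGA_Global,
#     [3, 4, 4], [3, 4, 4], "a", None, (mem_type, "0:3")),
# creates the start state, a host array "a", a 3-bank FPGA-global array "x"
# and an edge copying "a" into "x".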
# Note: usually there are only 4 DDR banks but many more HBM banks.
# Since the tests run in simulation mode, this should not be an issue.
def copy_multibank_1_mem_type(mem_type):
    sdfg = dace.SDFG("copy_multibank_1_mem_type_" + mem_type)
    s, a, _ = mkc(sdfg, None, "a", "x", StorageType.Default, StorageType.FPGA_Global, [3, 4, 4], [3, 4, 4], "a", None,
                  (mem_type, "0:3"))
    s, _, _ = mkc(sdfg, s, "x", "y", None, StorageType.FPGA_Global, None, [2, 4, 4, 4],
                  "x[1, 1:4, 1:4]->1, 1:4, 1:4, 1", None, (mem_type, "3:5"))
    s, _, _ = mkc(sdfg, s, "y", "z", None, StorageType.FPGA_Global, None, [1, 4, 4, 4],
                  "y[1, 0:4, 0:4, 0:4]->0, 0:4, 0:4, 0:4", None, (mem_type, "5:6"))
    s, _, _ = mkc(sdfg, s, "z", "w", None, StorageType.FPGA_Global, None, [1, 4, 4, 4], "z", None, (mem_type, "6:7"))
    s, _, c = mkc(sdfg, s, "w", "c", None, StorageType.Default, None, [1, 4, 4, 4], "w")
    a.fill(1)
    a[1, 0:4, 1] += 2
    a[1, 1, 0:4] += 2
    expect = np.copy(c)
    expect.fill(1)
    expect[0, 1:5, 1, 1] += 2
    expect[0, 1, 1:5, 1] += 2
    sdfg(a=a, c=c)
    assert np.allclose(c[0, 1:4, 1:4, 1], expect[0, 1:4, 1:4, 1])
    return sdfg
def copy_multibank_2_mem_type(mem_type_1, mem_type_2):
    sdfg = dace.SDFG("copy_multibank_2_mem_type_" + mem_type_1 + "_" + mem_type_2)
    s, a, _ = mkc(sdfg, None, "a", "x", StorageType.Default, StorageType.FPGA_Global, [3, 5, 5], [3, 5, 5], "a", None,
                  (mem_type_1, "0:3"))
    s, _, _ = mkc(sdfg, s, "x", "d1", None, StorageType.FPGA_Global, None, [3, 5, 5], "x[2, 0:5, 0:5]->1, 0:5, 0:5",
                  None, (mem_type_2, "1:4"))
    s, _, _ = mkc(sdfg, s, "d1", "y", None, StorageType.FPGA_Global, None, [1, 7, 7], "d1[1, 0:5,0:5]->0, 2:7, 2:7",
                  None, (mem_type_1, "3:4"))
    s, _, c = mkc(sdfg, s, "y", "c", None, StorageType.Default, None, [1, 7, 7], "y")
    a.fill(1)
    a[2, 2:4, 2:4] += 3
    expect = np.copy(c)
    expect.fill(1)
    expect[0, 4:6, 4:6] += 3
    sdfg(a=a, c=c)
    assert np.allclose(c[2:7], expect[2:7])
    return sdfg
@xilinx_test()
def test_copy_hbm2hbm():
    return copy_multibank_1_mem_type(mem_type="hbm")
@xilinx_test()
def test_copy_ddr2ddr():
    return copy_multibank_1_mem_type(mem_type="ddr")
@xilinx_test()
def METHOD_NAME():
    return copy_multibank_2_mem_type(mem_type_1="hbm", mem_type_2="ddr")
@xilinx_test()
def test_copy_ddr2hbm():
    return copy_multibank_2_mem_type(mem_type_1="ddr", mem_type_2="hbm")
if __name__ == "__main__":
    test_copy_hbm2hbm(None)  # HBM to HBM to HBM
    test_copy_ddr2ddr(None)  # DDR to DDR to DDR
    METHOD_NAME(None)  # HBM to DDR to HBM
    test_copy_ddr2hbm(None)  # DDR to HBM to DDR | 
| 177 | 
	get next | 
	# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
    ClientAuthenticationError,
    HttpResponseError,
    ResourceExistsError,
    ResourceNotFoundError,
    ResourceNotModifiedError,
    map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
    from typing import Literal  # pylint: disable=no-name-in-module, ungrouped-imports
else:
    from typing_extensions import Literal  # type: ignore  # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    region_id: str, subscription_id: str, *, sku_id: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-04-01"))  # type: Literal["2019-04-01"]
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/providers/Microsoft.VMwareCloudSimple/locations/{regionId}/availabilities",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "regionId": _SERIALIZER.url("region_id", region_id, "str"),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    if sku_id is not None:
        _params["skuId"] = _SERIALIZER.query("sku_id", sku_id, "str")
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class SkusAvailabilityOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.vmwarecloudsimple.VMwareCloudSimple`'s
        :attr:`skus_availability` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs):
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list(self, region_id: str, sku_id: Optional[str] = None, **kwargs: Any) -> Iterable["_models.SkuAvailability"]:
        """Implements SkuAvailability List method.
        Returns list of available resources in region.
        :param region_id: The region Id (westus, eastus). Required.
        :type region_id: str
        :param sku_id: sku id, if no sku is passed availability for all skus will be returned. Default
         value is None.
        :type sku_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SkuAvailability or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.vmwarecloudsimple.models.SkuAvailability]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )  # type: Literal["2019-04-01"]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.SkuAvailabilityListResponse]
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_request(
                    region_id=region_id,
                    subscription_id=self._config.subscription_id,
                    sku_id=sku_id,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize("SkuAvailabilityListResponse", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def METHOD_NAME(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.CSRPError, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(METHOD_NAME, extract_data)
    list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.VMwareCloudSimple/locations/{regionId}/availabilities"}  # type: ignore | 
| 178 | 
	add | 
	from __future__ import annotations
from pathlib import Path
from typing import Dict, Optional, Text, Any, List
import rasa.shared.utils.io
from rasa.engine.graph import ExecutionContext, GraphComponent
from rasa.engine.storage.storage import ModelStorage
from rasa.engine.storage.resource import Resource
class AddInputs(GraphComponent):
    @classmethod
    def create(
        cls,
        config: Dict,
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
        **kwargs: Any,
    ) -> AddInputs:
        return cls()
    def METHOD_NAME(self, i1: Any, i2: Any) -> int:
        return int(i1) + int(i2)
class SubtractByX(GraphComponent):
    @staticmethod
    def get_default_config() -> Dict[Text, Any]:
        return {"x": 0}
    def __init__(self, x: int) -> None:
        self._x = x
    @classmethod
    def create(
        cls,
        config: Dict,
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
        **kwargs: Any,
    ) -> SubtractByX:
        return cls(config["x"])
    def subtract_x(self, i: Any) -> int:
        return int(i) - self._x
class AssertComponent(GraphComponent):
    def __init__(self, value_to_assert: Any) -> None:
        self._value_to_assert = value_to_assert
    @classmethod
    def create(
        cls,
        config: Dict,
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
        **kwargs: Any,
    ) -> AssertComponent:
        return cls(config["value_to_assert"])
    def run_assert(self, i: Any) -> CacheableText:
        assert i == self._value_to_assert
        return CacheableText("")
class ProvideX(GraphComponent):
    def __init__(self) -> None:
        self.x = 1
    @classmethod
    def create(
        cls,
        config: Dict,
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
        x: Optional[int] = None,
        **kwargs: Any,
    ) -> ProvideX:
        instance = cls()
        if x:
            instance.x = x
        return instance
    @classmethod
    def create_with_2(
        cls,
        config: Dict,
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
        **kwargs: Any,
    ) -> ProvideX:
        return cls.create(
            config, model_storage, resource, execution_context, 2, **kwargs
        )
    def provide(self) -> int:
        return self.x
class FileReader(GraphComponent):
    def __init__(self, file_path: Path) -> None:
        self._file_path = file_path
    @classmethod
    def create(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
    ) -> FileReader:
        return cls(Path(config["file_path"]))
    def read(self) -> CacheableText:
        return CacheableText(self._file_path.read_text())
class ExecutionContextAware(GraphComponent):
    def __init__(self, execution_context: ExecutionContext) -> None:
        self._execution_context = execution_context
    @classmethod
    def create(
        cls,
        config: Dict,
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
        **kwargs: Any,
    ) -> ExecutionContextAware:
        return cls(execution_context)
    def get_execution_context(self) -> ExecutionContext:
        return self._execution_context
class PersistableTestComponent(GraphComponent):
    def __init__(
        self,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        eager_instantiated_value: Any = None,
    ) -> None:
        self._model_storage = model_storage
        self._resource = resource
        self._config = config
        self._wrap_cacheable = self._config.get("wrap_output_in_cacheable", False)
        self._eager_instantiated_value = eager_instantiated_value
    @classmethod
    def create(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
    ) -> PersistableTestComponent:
        assert model_storage
        assert resource
        return cls(config, model_storage, resource)
    @classmethod
    def load(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
        **kwargs: Any,
    ) -> PersistableTestComponent:
        assert model_storage
        assert resource
        with model_storage.read_from(resource) as directory:
            eager_instantiated_value = rasa.shared.utils.io.read_json_file(
                directory / "test.json"
            )
        return cls(config, model_storage, resource, eager_instantiated_value)
    def supported_languages(self) -> List[Text]:
        return []
    @staticmethod
    def required_packages() -> List[Text]:
        return []
    def train(self) -> Resource:
        with self._model_storage.write_to(self._resource) as directory:
            rasa.shared.utils.io.dump_obj_as_json_to_file(
                directory / "test.json", self._config["test_value"]
            )
            sub_dir = directory / "sub_dir"
            sub_dir.mkdir()
            rasa.shared.utils.io.dump_obj_as_json_to_file(
                sub_dir / "test.json", self._config.get("test_value_for_sub_directory")
            )
        return self._resource
    def run_train_process(self) -> Any:
        if self._wrap_cacheable:
            return CacheableText(self._eager_instantiated_value)
        return self._eager_instantiated_value
    def run_inference(self) -> Any:
        if self._wrap_cacheable:
            return CacheableText(self._eager_instantiated_value)
        return self._eager_instantiated_value
class CacheableText:
    def __init__(self, text: Text) -> None:
        self.text = text
    def to_cache(self, directory: Path, model_storage: ModelStorage) -> None:
        rasa.shared.utils.io.write_text_file(self.text, directory / "my_file.txt")
    @classmethod
    def from_cache(
        cls,
        node_name: Text,
        directory: Path,
        model_storage: ModelStorage,
        output_fingerprint: Text,
    ) -> CacheableText:
        text = rasa.shared.utils.io.read_file(directory / "my_file.txt")
        return cls(text=text)
    def __repr__(self) -> Text:
        return self.text
    def __int__(self) -> int:
        return int(self.text)
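# Minimal round-trip sketch for the cache helpers above (assumes a writable
# `tmp_dir: Path` and a `model_storage` instance; the names are illustrative):
#
#   cached = CacheableText("42")
#   cached.to_cache(tmp_dir, model_storage)        # writes tmp_dir / "my_file.txt"
#   restored = CacheableText.from_cache("node", tmp_dir, model_storage, "fingerprint")
#   assert int(restored) == 42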
class CacheableComponent(GraphComponent):
    @staticmethod
    def get_default_config() -> Dict[Text, Any]:
        return {"prefix": "Hello "}
    def __init__(self, prefix: Text):
        self.prefix = prefix
    @classmethod
    def create(
        cls,
        config: Dict,
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
        **kwargs: Any,
    ) -> CacheableComponent:
        return cls(config["prefix"])
    def run(self, suffix: Text):
        return CacheableText(self.prefix + str(suffix)) | 
| 179 | 
	read profile | 
	import collections
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Sequence
_CONTRACT_PAT = re.compile(r"(\w+) <Contract>")
_FUNCTION_PAT = re.compile(r".+─ (\w+)\s+-\s+avg:\s+(\d+)")
_ANSI_ESCAPE_PAT = re.compile(
    r"""
    \x1B  # ESC
    (?:   # 7-bit C1 Fe (except CSI)
        [@-Z\\-_]
    |     # or [ for CSI, followed by a control sequence
        \[
        [0-?]*  # Parameter bytes
        [ -/]*  # Intermediate bytes
        [@-~]   # Final byte
    )
""",
    re.VERBOSE,
)
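# Example profile lines these patterns are meant to match (illustrative, derived
# from the regexes above rather than from a captured report):
#   "MyToken <Contract>"                  -> _CONTRACT_PAT captures "MyToken"
#   "   ├─ transfer      -  avg:  51234"  -> _FUNCTION_PAT captures ("transfer", "51234")
# _ANSI_ESCAPE_PAT strips terminal color codes before the matching is attempted.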
@dataclass
class Profile:
    filename: str
    contracts: dict = field(default_factory=lambda: collections.defaultdict(dict))
def METHOD_NAME(filename: str) -> Profile:
    contract = None
    profile = Profile(filename=filename)
    with open(filename) as fp:
        for line in fp:
            line = _ANSI_ESCAPE_PAT.sub("", line)
            m = _CONTRACT_PAT.match(line)
            if m is not None:
                contract = m.group(1)
                continue
            if contract is None:
                continue
            m = _FUNCTION_PAT.match(line)
            if m is not None:
                function, gas = m.groups()
                profile.contracts[contract][function] = int(gas)
    return profile
def _compute_max_name_width(contracts: dict) -> int:
    """Return the maximum width needed by the function name column."""
    return max(
        len(function) for contract, functions in contracts.items() for function in functions
    )
def _compute_max_gas_width(contracts: dict, profile: Profile) -> int:
    """Return the maximum width needed by the gas column for the given profile."""
    max_gas_width = 0
    for functions in contracts.values():
        for profiles in functions.values():
            gas = profiles.get(profile.filename, 0)
            max_gas_width = max(max_gas_width, len(f"{gas:,}"))
    return max_gas_width
def _transform_profiles(profiles: Sequence[Profile]) -> dict:
    # Transform the profile data into a nested mapping:
    # contracts -> functions -> profiles.
    # defaultdict -> defaultdict -> dict.
    contracts: dict = collections.defaultdict(lambda: collections.defaultdict(dict))
    for profile in profiles:
        for contract, functions in profile.contracts.items():
            for function, gas in functions.items():
                contracts[contract][function][profile.filename] = gas
    return contracts
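# Shape of the returned mapping (illustrative file names and gas values):
#   {"MyToken": {"transfer": {"old-profile.txt": 51234, "new-profile.txt": 50110}}}
# i.e. each function maps every profile filename to the gas value recorded for it.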
_PREFIX_TERMINAL = "   └─"
_PREFIX_NONTERMINAL = "   ├─"
def _print_profiles(contracts: dict, profiles: Sequence[Profile]) -> None:
    # Print header.
    max_name_width = _compute_max_name_width(contracts)
    fmt = "{prefix} {function:<{width}} - avg: "
    dummy = fmt.format(prefix=_PREFIX_TERMINAL, function="-", width=max_name_width)
    print(" " * len(dummy), end="")
    for profile in profiles:
        max_gas_width = _compute_max_gas_width(contracts, profile)
        print("{0:>{width}}".format(profile.filename, width=max_gas_width), end=" │ ")
    print("{0:>{width}}".format("difference", width=max_gas_width), end=" │ ")
    print()
    # Print gas values.
    for contract, functions in contracts.items():
        print(contract)
        items = tuple(functions.items())
        for idx, (function, func_profiles) in enumerate(items):
            prefix = _PREFIX_TERMINAL if idx == len(items) - 1 else _PREFIX_NONTERMINAL
            print(fmt.format(prefix=prefix, function=function, width=max_name_width), end="")
            for profile in profiles:
                max_gas_width = _compute_max_gas_width(contracts, profile)
                gas = func_profiles.get(profile.filename)
                if gas is not None:
                    print("  {0:>{width},}  ".format(gas, width=max_gas_width), end="")
                else:
                    print("  {0:>{width}}  ".format("-", width=max_gas_width), end="")
            # Print the difference.
            gas_old = func_profiles.get(profiles[0].filename)
            gas_new = func_profiles.get(profiles[1].filename)
            if gas_old is None and gas_new is None:
                diff = "-"
            elif gas_old is None and gas_new is not None:
                diff = "{0:+,}".format(gas_new)
            elif gas_old is not None and gas_new is None:
                diff = "{0:+,}".format(-gas_old)
            elif gas_old is not None and gas_new is not None:
                gas_diff = gas_new - gas_old
                diff = "{0:+,}".format(gas_diff) if gas_diff != 0 else "0"
            print("  {0:>{width}}  ".format(diff, width=max_gas_width), end="")
            print()
if len(sys.argv) < 3:
    print("Usage:\n\t %s <profile1> <profile2>" % os.path.basename(sys.argv[0]))
    sys.exit(1)
profiles = METHOD_NAME(sys.argv[1]), METHOD_NAME(sys.argv[2])
contracts = _transform_profiles(profiles)
_print_profiles(contracts, profiles) | 
| 180 | 
	render report | 
	from akvo.rsr.models import Project
from akvo.utils import ensure_decimal
from akvo.rsr.decorators import with_download_indicator
from datetime import datetime
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from pyexcelerate import Workbook, Style, Font, Fill, Color, Alignment
from pyexcelerate.Borders import Borders
from pyexcelerate.Border import Border
from . import utils
@login_required
@with_download_indicator
def METHOD_NAME(request, project_id):
    queryset = Project.objects.prefetch_related(
        'results', 'results__indicators', 'results__indicators__periods')
    project = get_object_or_404(queryset, pk=project_id)
    in_eutf_hierarchy = project.in_eutf_hierarchy()
    wb = Workbook()
    ws = wb.new_sheet('ResultsTable')
    ws.set_col_style(1, Style(size=75))
    ws.set_col_style(2, Style(size=75))
    ws.set_col_style(3, Style(size=41))
    ws.set_col_style(4, Style(size=18.5))
    ws.set_col_style(5, Style(size=34))
    ws.set_col_style(6, Style(size=37.5))
    ws.set_col_style(7, Style(size=47.5))
    ws.set_col_style(8, Style(size=20))
    ws.set_col_style(9, Style(size=20))
    ws.set_col_style(10, Style(size=34))
    ws.set_col_style(11, Style(size=20))
    ws.set_col_style(12, Style(size=20))
    ws.set_col_style(13, Style(size=20))
    ws.set_col_style(14, Style(size=24))
    ws.set_col_style(15, Style(size=20.5))
    ws.set_col_style(16, Style(size=30))
    ws.set_col_style(17, Style(size=22))
    ws.set_col_style(18, Style(size=21))
    # r1
    ws.set_row_style(1, Style(size=36))
    ws.set_cell_style(1, 1, Style(
        font=Font(bold=True, size=18, color=Color(255, 255, 255)),
        fill=Fill(background=Color(32, 56, 100)),
        alignment=Alignment(horizontal='center')
    ))
    ws.set_cell_value(1, 1, 'Project Results and Indicators simple table report')
    # r2
    ws.set_row_style(2, Style(size=36))
    for i in range(1, 19):
        ws.set_cell_style(2, i, Style(
            font=Font(bold=True, size=14),
            fill=Fill(background=Color(214, 234, 248)),
            alignment=Alignment(horizontal='center'),
            borders=Borders(top=Border(color=Color(0, 0, 0)), bottom=Border(color=Color(0, 0, 0)))
        ))
    ws.set_cell_value(2, 1, 'Project name')
    ws.set_cell_value(2, 2, 'Project subtitle')
    ws.set_cell_value(2, 3, 'Result title')
    ws.set_cell_value(2, 4, 'Result type')
    ws.set_cell_value(2, 5, 'Result description')
    ws.set_cell_value(2, 6, 'Indicator title')
    ws.set_cell_value(2, 7, 'Indicator description')
    ws.set_cell_value(2, 8, 'Baseline year')
    ws.set_cell_value(2, 9, 'Baseline value')
    ws.set_cell_value(2, 10, 'Baseline comment')
    ws.set_cell_value(2, 11, 'Period start')
    ws.set_cell_value(2, 12, 'Period end')
    ws.set_cell_value(2, 13, 'Target value')
    ws.set_cell_value(2, 14, 'Target comment')
    ws.set_cell_value(2, 15, 'Actual value')
    ws.set_cell_value(2, 16, 'Actual comment')
    ws.set_cell_value(2, 17, 'Type')
    ws.set_cell_value(2, 18, 'Aggregation status')
    # r3
    row = 3
    ws.set_cell_value(row, 1, project.title)
    ws.set_cell_value(row, 2, project.subtitle)
    prev_type = ''
    curr_type = ''
    prev_agg_status = ''
    curr_agg_status = ''
    prev_indicator_type = ''
    curr_indicator_type = ''
    for result in project.results.exclude(type__exact='').all():
        ws.set_cell_value(row, 3, result.title)
        curr_type = result.iati_type().name
        if curr_type != prev_type:
            ws.set_cell_value(row, 4, curr_type)
            prev_type = curr_type
        ws.set_cell_style(row, 5, Style(alignment=Alignment(wrap_text=True)))
        ws.set_cell_value(row, 5, result.description)
        curr_agg_status = 'Yes' if result.aggregation_status else 'No'
        if curr_agg_status != prev_agg_status:
            ws.set_cell_value(row, 18, curr_agg_status)
            prev_agg_status = curr_agg_status
        for indicator in result.indicators.all():
            ws.set_cell_style(row, 6, Style(alignment=Alignment(wrap_text=True)))
            ws.set_cell_value(row, 6, indicator.title)
            ws.set_cell_style(row, 7, Style(alignment=Alignment(wrap_text=True)))
            ws.set_cell_value(row, 7, indicator.description)
            ws.set_cell_value(row, 8, indicator.baseline_year)
            ws.set_cell_value(row, 9, indicator.baseline_value)
            ws.set_cell_style(row, 10, Style(alignment=Alignment(wrap_text=True)))
            ws.set_cell_value(row, 10, indicator.baseline_comment)
            curr_indicator_type = 'Qualitative' if indicator.type == '2' else 'Quantitative'
            if curr_indicator_type != prev_indicator_type:
                ws.set_cell_value(row, 17, curr_indicator_type)
                prev_indicator_type = curr_indicator_type
            for period in indicator.periods.all():
                ws.set_cell_value(row, 11, utils.get_period_start(period, in_eutf_hierarchy))
                ws.set_cell_value(row, 12, utils.get_period_end(period, in_eutf_hierarchy))
                ws.set_cell_value(row, 13, period.target_value)
                ws.set_cell_style(row, 14, Style(alignment=Alignment(wrap_text=True)))
                ws.set_cell_value(row, 14, period.target_comment)
                ws.set_cell_value(row, 15, ensure_decimal(period.actual_value))
                ws.set_cell_style(row, 16, Style(alignment=Alignment(wrap_text=True)))
                ws.set_cell_value(row, 16, period.actual_comment)
                ws.set_row_style(row, Style(size=68))
                row += 1
    filename = '{}-{}-eutf-project-results-indicators-report.xlsx'.format(
        datetime.today().strftime('%Y%b%d'), project.id)
    return utils.make_excel_response(wb, filename) | 
| 181 | 
	get client | 
	#
# (C) Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import os
import shutil
import time
import logging
from requests.exceptions import SSLError as TooManyConnectionsError
from io import BytesIO
from google.api_core import exceptions as google_exceptions
from google.cloud import storage
from google.cloud.exceptions import NotFound
from lithops.constants import STORAGE_CLI_MSG
from lithops.storage.utils import StorageNoSuchKeyError
logger = logging.getLogger(__name__)
TIMEOUT = 5
class GCPStorageBackend:
    def __init__(self, gcp_storage_config):
        logger.debug("Creating GCP Storage client")
        self.credentials_path = gcp_storage_config.get('credentials_path')
        self.region = gcp_storage_config['region']
        if self.credentials_path and os.path.isfile(self.credentials_path):
            logger.debug(f'Getting GCP credentials from {self.credentials_path}')
            self.client = storage.Client.from_service_account_json(self.credentials_path)
        else:
            logger.debug('Getting GCP credentials from the environment')
            self.client = storage.Client()
        msg = STORAGE_CLI_MSG.format('Google Cloud Storage')
        logger.info(f"{msg} - Region: {self.region}")
    def METHOD_NAME(self):
        return self.client
    def create_bucket(self, bucket_name):
        """
        Create a bucket if it does not exist
        """
        try:
            bucket = self.client.bucket(bucket_name)
            bucket.storage_class = "STANDARD"
            self.client.create_bucket(bucket, location=self.region)
        except google_exceptions.Conflict:
            pass
    def put_object(self, bucket_name, key, data):
        done = False
        while not done:
            try:
                bucket = self.client.get_bucket(bucket_name, timeout=TIMEOUT)
                blob = bucket.blob(blob_name=key)
                if hasattr(data, 'read'):
                    blob.upload_from_file(file_obj=data)
                else:
                    blob.upload_from_string(data=data)
                done = True
            except TooManyConnectionsError:
                time.sleep(0.1)
            except google_exceptions.NotFound:
                raise StorageNoSuchKeyError(bucket=bucket_name, key=key)
    def get_object(self, bucket_name, key, stream=False, extra_get_args={}):
        try:
            bucket = self.client.get_bucket(bucket_name, timeout=TIMEOUT)
            blob = bucket.blob(blob_name=key)
        except google_exceptions.NotFound:
            raise StorageNoSuchKeyError(bucket_name, key)
        if not blob.exists():
            raise StorageNoSuchKeyError(bucket_name, key)
        if extra_get_args and 'Range' in extra_get_args:
            start, end = re.findall(r'\d+', extra_get_args['Range'])
            start = int(start)
            end = int(end)
        else:
            start, end = None, None
        if stream:
            stream = BytesIO()
            # Download object to bytes buffer
            blob.download_to_file(stream, start=start, end=end)
            stream.seek(0)  # Return to the initial buffer position
            return stream
        else:
            return blob.download_as_string(start=start, end=end)
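    # Illustrative range read (an assumption about how callers pass the Range
    # header; not taken from the original sources):
    #   backend = GCPStorageBackend({'region': 'us-east1'})
    #   chunk = backend.get_object('my-bucket', 'my/key', stream=True,
    #                              extra_get_args={'Range': 'bytes=0-99'})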
    def upload_file(self, file_name, bucket, key=None, extra_args={}):
        """Upload a file
        :param file_name: File to upload
        :param bucket: Bucket to upload to
        :param key: object name. If not specified then file_name is used
        :return: True if file was uploaded, else False
        """
        # If S3 key was not specified, use file_name
        if key is None:
            key = os.path.basename(file_name)
        # Upload the file
        try:
            with open(file_name, 'rb') as in_file:
                self.put_object(bucket, key, in_file)
        except Exception as e:
            logging.error(e)
            return False
        return True
    def download_file(self, bucket, key, file_name=None, extra_args={}):
        """Download a file
        :param bucket: Bucket to download from
        :param key: object name to download
        :param file_name: Local file to write to. If not specified then key is used
        :return: True if file was downloaded, else False
        """
        # If file_name was not specified, use S3 key
        if file_name is None:
            file_name = key
        # Download the file
        try:
            dirname = os.path.dirname(file_name)
            if dirname and not os.path.exists(dirname):
                os.makedirs(dirname)
            with open(file_name, 'wb') as out:
                data_stream = self.get_object(bucket, key, stream=True)
                shutil.copyfileobj(data_stream, out)
        except Exception as e:
            logging.error(e)
            return False
        return True
    def head_object(self, bucket_name, key):
        try:
            bucket = self.client.get_bucket(bucket_name, timeout=TIMEOUT)
            blob = bucket.get_blob(blob_name=key)
        except google_exceptions.NotFound:
            raise StorageNoSuchKeyError(bucket_name, key)
        if blob is None:
            raise StorageNoSuchKeyError(bucket_name, key)
        response = {
            'LastModified': blob.updated,
            'ETag': blob.etag,
            'content-type': blob.content_type,
            'content-length': str(blob.size)
        }
        return response
    def delete_object(self, bucket_name, key):
        try:
            bucket = self.client.get_bucket(bucket_name, timeout=TIMEOUT)
        except google_exceptions.NotFound:
            raise StorageNoSuchKeyError(bucket_name, key)
        blob = bucket.get_blob(blob_name=key)
        if blob is None:
            raise StorageNoSuchKeyError(bucket_name, key)
        blob.delete()
    def delete_objects(self, bucket_name, key_list):
        bucket = self.client.get_bucket(bucket_name, timeout=TIMEOUT)
        try:
            bucket.delete_blobs(blobs=key_list)
        except google_exceptions.NotFound:
            pass
    def head_bucket(self, bucket_name):
        bucket = self.client.get_bucket(bucket_name, timeout=TIMEOUT)
        response = {
            'ResponseMetadata':
                {'HTTPStatusCode': 200,
                 'HTTPHeaders': {'content-type': 'application/xml',
                                 'server': 'GoogleStorage'}}
        }
        response['ResponseMetadata']['HTTPHeaders'].update(bucket._properties)
        return response
    def list_objects(self, bucket_name, prefix=None, match_pattern=None):
        try:
            bucket = self.client.get_bucket(bucket_name, timeout=TIMEOUT)
            page = bucket.list_blobs(prefix=prefix)
        except google_exceptions.ClientError:
            raise StorageNoSuchKeyError(bucket_name, '')
        return [{'Key': blob.name, 'Size': blob.size, 'LastModified': blob.updated} for blob in page]
    def list_keys(self, bucket_name, prefix=None):
        try:
            bucket = self.client.get_bucket(bucket_name, timeout=TIMEOUT)
            page = bucket.list_blobs(prefix=prefix)
        except google_exceptions.ClientError:
            raise StorageNoSuchKeyError(bucket_name, '')
        return [blob.name for blob in page] | 
| 182 | 
	get special fields | 
	import itertools
from django.utils.translation import gettext as _
from dimagi.ext import jsonobject
from corehq.apps.app_manager.app_schemas.case_properties import (
    get_all_case_properties_for_case_type,
)
from corehq.apps.case_importer.util import RESERVED_FIELDS
from corehq.apps.data_dictionary.util import get_values_hints_dict, get_deprecated_fields
from corehq.toggles import BULK_UPLOAD_DATE_OPENED
def _combine_field_specs(field_specs, exclude_fields):
    """
    Take a list of FieldSpec objects and return a sorted list where each field is
    unique, fields in exclude_fields are removed, and the first mention of a
    field in field_specs wins out over any repeats.
    """
    combined_field_specs = {}
    for field_spec in field_specs:
        field = field_spec.field
        if field not in exclude_fields and field not in combined_field_specs:
            combined_field_specs[field] = field_spec
    return sorted(list(combined_field_specs.values()), key=lambda field_spec: field_spec.field)
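# Worked example (hypothetical values) of the de-duplication above:
#   specs = [FieldSpec(field='name'), FieldSpec(field='name', show_in_menu=True),
#            FieldSpec(field='owner_id')]
#   _combine_field_specs(specs, exclude_fields={'owner_id'})
#   -> [FieldSpec(field='name')]   # first mention wins, excluded field dropped,
#                                  # result sorted by the `field` attribute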
def get_suggested_case_fields(domain, case_type, exclude=None):
    exclude_fields = set(RESERVED_FIELDS) | set(exclude or [])
    hints_dict = get_values_hints_dict(domain, case_type)
    deprecated_fields = get_deprecated_fields(domain, case_type)
    special_field_specs = (field_spec for field_spec in METHOD_NAME(domain))
    dynamic_field_specs = (
        FieldSpec(field=field, show_in_menu=True, values_hints=hints_dict[field],
                  deprecated=field in deprecated_fields)
        for field in get_all_case_properties_for_case_type(domain, case_type, exclude_deprecated_properties=False))
    return _combine_field_specs(
        itertools.chain(special_field_specs, dynamic_field_specs),
        exclude_fields=exclude_fields
    )
class FieldSpec(jsonobject.StrictJsonObject):
    field = jsonobject.StringProperty()
    description = jsonobject.StringProperty()
    show_in_menu = jsonobject.BooleanProperty(default=False)
    discoverable = jsonobject.BooleanProperty(default=True)
    values_hints = jsonobject.ListProperty()
    deprecated = jsonobject.BooleanProperty(default=False)
def METHOD_NAME(domain=None):
    special_fields = [
        FieldSpec(
            field='name',
            description=_("This field will be used to set the case's name."),
            show_in_menu=True),
        FieldSpec(
            field='owner_name',
            description=_("This field will assign the case to a new owner given by "
                          "Username, Group name, or Organization name."),
        ),
        FieldSpec(
            field='owner_id',
            description=_("This field will assign the case to a new owner given by "
                          "User ID, Group ID, or Organization ID.")),
        FieldSpec(
            field='external_id',
            description=_("This field will set the case's external_id")),
        FieldSpec(
            field='parent_external_id',
            description=_("This field will assign the case a new parent given by "
                          "the parent case's external_id. "
                          "You must use along with parent_type.")),
        FieldSpec(
            field='parent_id',
            description=_("This field will assign the case a new parent given by "
                          "the parent's Case ID. "
                          "You must use along with parent_type.")),
        FieldSpec(
            field='parent_type',
            description=_("Use to specify the parent's case type. "
                          "Usually used with parent_id or parent_external_id")),
        FieldSpec(
            field='parent_relationship_type',
            description=_("Whether the relationship with the parent is 'child' or "
                          "'extension'. Default value is 'child'"
                          "Values other than 'child' or 'extension' are invalid. "
                          "Used with parent_id or parent_external_id columns")),
        FieldSpec(
            field='parent_identifier',
            description=_("The index identifier when creating child/extension cases "
                          "Used with parent_id or parent_external_id columns")),
        FieldSpec(
            field='close',
            description=_("This field will be used to close cases. "
                          "Any case with 'yes' in this column will be closed.")),
    ]
    if domain and BULK_UPLOAD_DATE_OPENED.enabled(domain):
        special_fields.append(
            FieldSpec(
                field='date_opened',
                description=_(
                    "The date opened property for this case will be changed. "
                    "Please do not use unless you know what you are doing"
                )
            )
        )
    return special_fields | 
| 183 | 
	post | 
	import logging
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.utils.translation import gettext_lazy as _
from django.views.generic import UpdateView
from rdmo.core.views import ObjectPermissionMixin, RedirectViewMixin
from rdmo.questions.models import Catalog
from rdmo.tasks.models import Task
from rdmo.views.models import View
from ..forms import (ProjectForm, ProjectUpdateCatalogForm,
                     ProjectUpdateInformationForm, ProjectUpdateParentForm,
                     ProjectUpdateTasksForm, ProjectUpdateViewsForm)
from ..mixins import ProjectImportMixin
from ..models import Project
logger = logging.getLogger(__name__)
class ProjectUpdateView(ObjectPermissionMixin, RedirectViewMixin, UpdateView):
    model = Project
    queryset = Project.objects.all()
    form_class = ProjectForm
    permission_required = 'projects.change_project_object'
    def get_form_kwargs(self):
        catalogs = Catalog.objects.filter_current_site() \
                                  .filter_group(self.request.user) \
                                  .filter_availability(self.request.user)
        projects = Project.objects.filter_user(self.request.user)
        form_kwargs = super().get_form_kwargs()
        form_kwargs.update({
            'catalogs': catalogs,
            'projects': projects
        })
        return form_kwargs
class ProjectUpdateInformationView(ObjectPermissionMixin, RedirectViewMixin, UpdateView):
    model = Project
    queryset = Project.objects.all()
    form_class = ProjectUpdateInformationForm
    permission_required = 'projects.change_project_object'
class ProjectUpdateCatalogView(ObjectPermissionMixin, RedirectViewMixin, UpdateView):
    model = Project
    queryset = Project.objects.all()
    form_class = ProjectUpdateCatalogForm
    permission_required = 'projects.change_project_object'
    def get_form_kwargs(self):
        catalogs = Catalog.objects.filter_current_site() \
                                  .filter_group(self.request.user) \
                                  .filter_availability(self.request.user)
        form_kwargs = super().get_form_kwargs()
        form_kwargs.update({
            'catalogs': catalogs
        })
        return form_kwargs
class ProjectUpdateTasksView(ObjectPermissionMixin, RedirectViewMixin, UpdateView):
    model = Project
    queryset = Project.objects.all()
    form_class = ProjectUpdateTasksForm
    permission_required = 'projects.change_project_object'
    def get_form_kwargs(self):
        tasks = Task.objects.filter_current_site() \
                            .filter_catalog(self.object.catalog) \
                            .filter_group(self.request.user) \
                            .filter_availability(self.request.user)
        form_kwargs = super().get_form_kwargs()
        form_kwargs.update({
            'tasks': tasks
        })
        return form_kwargs
class ProjectUpdateViewsView(ObjectPermissionMixin, RedirectViewMixin, UpdateView):
    model = Project
    queryset = Project.objects.all()
    form_class = ProjectUpdateViewsForm
    permission_required = 'projects.change_project_object'
    def get_form_kwargs(self):
        views = View.objects.filter_current_site() \
                            .filter_catalog(self.object.catalog) \
                            .filter_group(self.request.user) \
                            .filter_availability(self.request.user)
        form_kwargs = super().get_form_kwargs()
        form_kwargs.update({
            'views': views
        })
        return form_kwargs
class ProjectUpdateParentView(ObjectPermissionMixin, RedirectViewMixin, UpdateView):
    model = Project
    queryset = Project.objects.all()
    form_class = ProjectUpdateParentForm
    permission_required = 'projects.change_project_object'
    def get_form_kwargs(self):
        projects = Project.objects.filter_user(self.request.user)
        form_kwargs = super().get_form_kwargs()
        form_kwargs.update({
            'projects': projects
        })
        return form_kwargs
class ProjectUpdateImportView(ProjectImportMixin, ObjectPermissionMixin, RedirectViewMixin, UpdateView):
    model = Project
    queryset = Project.objects.all()
    permission_required = 'projects.import_project_object'
    def get(self, request, *args, **kwargs):
        self.object = self.get_object()
        if kwargs.get('format') is None:
            return self.import_form()
        else:
            return self.get_import_plugin(self.kwargs.get('format'), self.object).render()
    def METHOD_NAME(self, request, *args, **kwargs):
        self.object = self.get_object()
        method = request.POST.get('method')
        if method in ['upload_file', 'import_file', 'import_project']:
            return getattr(self, method)()
        else:
            return self.get_import_plugin(self.kwargs.get('format'), self.object).submit() | 
| 184 | 
	test custom theme override | 
	"""Tests for embargo app views. """
from unittest.mock import patch, MagicMock
import ddt
import maxminddb
import geoip2.database
from django.urls import reverse
from django.conf import settings
from .factories import CountryAccessRuleFactory, RestrictedCourseFactory
from .. import messages
from lms.djangoapps.course_api.tests.mixins import CourseApiFactoryMixin  # lint-amnesty, pylint: disable=wrong-import-order
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms  # lint-amnesty, pylint: disable=wrong-import-order
from openedx.core.djangoapps.theming.tests.test_util import with_comprehensive_theme  # lint-amnesty, pylint: disable=wrong-import-order
from common.djangoapps.student.tests.factories import UserFactory  # lint-amnesty, pylint: disable=wrong-import-order
from common.djangoapps.util.testing import UrlResetMixin  # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.factories import CourseFactory  # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase  # lint-amnesty, pylint: disable=wrong-import-order
@skip_unless_lms
@ddt.ddt
class CourseAccessMessageViewTest(CacheIsolationTestCase, UrlResetMixin):
    """Tests for the courseware access message view.
    These end-points serve static content.
    While we *could* check the text on each page,
    this would require changes to the test every time
    the text on the page changes.
    Instead, we load each page we expect to be available
    (based on the configuration in `embargo.messages`)
    and verify that we get the correct status code.
    This will catch errors in the message configuration
    (for example, moving a template and forgetting to
    update the configuration appropriately).
    """
    ENABLED_CACHES = ['default']
    URLCONF_MODULES = ['openedx.core.djangoapps.embargo']
    @patch.dict(settings.FEATURES, {'EMBARGO': True})
    def setUp(self):
        super().setUp()
    @ddt.data(*list(messages.ENROLL_MESSAGES.keys()))
    def test_enrollment_messages(self, msg_key):
        self._load_page('enrollment', msg_key)
    @ddt.data(*list(messages.COURSEWARE_MESSAGES.keys()))
    def test_courseware_messages(self, msg_key):
        self._load_page('courseware', msg_key)
    @ddt.data('enrollment', 'courseware')
    def test_invalid_message_key(self, access_point):
        self._load_page(access_point, 'invalid', expected_status=404)
    @with_comprehensive_theme("test-theme")
    @ddt.data('enrollment', 'courseware')
    def METHOD_NAME(self, access_point):
        # Custom override specified for the "embargo" message
        # for backwards compatibility with previous versions
        # of the embargo app.
        url = reverse('embargo:blocked_message', kwargs={
            'access_point': access_point,
            'message_key': "embargo"
        })
        response = self.client.get(url)
        self.assertContains(
            response,
            "This is a test template to test embargo message override for theming."
        )
    def _load_page(self, access_point, message_key, expected_status=200):
        """Load the message page and check the status code. """
        url = reverse('embargo:blocked_message', kwargs={
            'access_point': access_point,
            'message_key': message_key
        })
        response = self.client.get(url)
        assert response.status_code ==\
               expected_status, f"Unexpected status code when loading '{url}': expected {expected_status}" \
                                f" but got {response.status_code}"
@skip_unless_lms
class CheckCourseAccessViewTest(CourseApiFactoryMixin, ModuleStoreTestCase):
    """ Tests the course access check endpoint. """
    @patch.dict(settings.FEATURES, {'EMBARGO': True})
    def setUp(self):
        super().setUp()
        self.url = reverse('api_embargo:v1_course_access')
        user = UserFactory(is_staff=True)
        self.client.login(username=user.username, password=UserFactory._DEFAULT_PASSWORD)  # lint-amnesty, pylint: disable=protected-access
        self.course_id = str(CourseFactory().id)  # lint-amnesty, pylint: disable=no-member
        self.request_data = {
            'course_ids': [self.course_id],
            'ip_address': '0.0.0.0',
            'user': self.user,
        }
    def test_course_access_endpoint_with_unrestricted_course(self):
        response = self.client.get(self.url, data=self.request_data)
        expected_response = {'access': True}
        assert response.status_code == 200
        assert response.data == expected_response
    def test_course_access_endpoint_with_restricted_course(self):
        CountryAccessRuleFactory(restricted_course=RestrictedCourseFactory(course_key=self.course_id))
        self.user.is_staff = False
        self.user.save()
        # Appear to make a request from an IP in the blocked country
        # pylint: disable=unused-argument
        def mock_country(reader, country):
            """
            :param reader:
            :param country:
            :return:
            """
            magic_mock = MagicMock()
            magic_mock.country = MagicMock()
            type(magic_mock.country).iso_code = 'US'
            return magic_mock
        patcher = patch.object(maxminddb, 'open_database')
        patcher.start()
        country_patcher = patch.object(geoip2.database.Reader, 'country', mock_country)
        country_patcher.start()
        self.addCleanup(patcher.stop)
        self.addCleanup(country_patcher.stop)
        response = self.client.get(self.url, data=self.request_data)
        expected_response = {'access': False}
        assert response.status_code == 200
        assert response.data == expected_response
    def test_course_access_endpoint_with_logged_out_user(self):
        self.client.logout()
        response = self.client.get(self.url, data=self.request_data)
        assert response.status_code == 403
    def test_course_access_endpoint_with_non_staff_user(self):
        user = UserFactory(is_staff=False)
        self.client.login(username=user.username, password=UserFactory._DEFAULT_PASSWORD)  # lint-amnesty, pylint: disable=protected-access
        response = self.client.get(self.url, data=self.request_data)
        assert response.status_code == 403
    def test_course_access_endpoint_with_invalid_data(self):
        response = self.client.get(self.url, data=None)
        assert response.status_code == 400
    def test_invalid_course_id(self):
        self.request_data['course_ids'] = ['foo']
        response = self.client.get(self.url, data=self.request_data)
        assert response.status_code == 400 | 
| 185 | 
	add party | 
	# -*- coding: utf-8 -*-
"""
Autogenerated IBEISController functions
TemplateInfo:
    autogen_time = 15:14:53 2015/03/11
    autogen_key = party
ToRegenerate:
    python -m wbia.templates.template_generator --key party --Tcfg with_api_cache=False with_web_api=False with_deleters=False --diff
    python -m wbia.templates.template_generator --key party --Tcfg with_api_cache=False with_web_api=False with_deleters=False --write
"""
import functools  # NOQA
import logging
import utool as ut
from wbia import constants as const
from wbia.control import accessor_decors  # NOQA
from wbia.control import controller_inject
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger('wbia')
# Create decorator to inject functions in this module into the IBEISController
CLASS_INJECT_KEY, register_ibs_method = controller_inject.make_ibs_register_decorator(
    __name__
)
register_api = controller_inject.get_wbia_flask_api(__name__)
def testdata_ibs(defaultdb='testdb1'):
    r"""
    Auto-docstr for 'testdata_ibs'
    """
    import wbia
    ibs = wbia.opendb(defaultdb=defaultdb)
    qreq_ = None
    return ibs, qreq_
# AUTOGENED CONSTANTS:
PARTY_ROWID = 'party_rowid'
PARTY_TAG = 'party_tag'
@register_ibs_method
def _get_all_party_rowids(ibs):
    r"""
    all_party_rowids <- party.get_all_rowids()
    Returns:
        list_ (list): unfiltered party_rowids
    TemplateInfo:
        Tider_all_rowids
        tbl = party
    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.control._autogen_party_funcs import *  # NOQA
        >>> ibs, qreq_ = testdata_ibs()
        >>> ibs._get_all_party_rowids()
    """
    all_party_rowids = ibs.db.get_all_rowids(const.PARTY_TABLE)
    return all_party_rowids
@register_ibs_method
# @register_api('/api/autogen/', methods=['POST'])
def METHOD_NAME(ibs, party_tag_list):
    r"""
    Returns:
        returns party_rowid_list of added (or already existing partys)
    TemplateInfo:
        Tadder_native
        tbl = party
    RESTful:
        Method: POST
        URL:    /api/autogen/
    """
    # WORK IN PROGRESS
    colnames = (PARTY_TAG,)
    params_iter = ((party_tag,) for (party_tag,) in zip(party_tag_list))
    get_rowid_from_superkey = ibs.get_party_rowid_from_superkey
    party_rowid_list = ibs.db.add_cleanly(
        const.PARTY_TABLE, colnames, params_iter, get_rowid_from_superkey
    )
    return party_rowid_list
@register_ibs_method
# @register_api('/api/autogen/party/rowid/superkey/', methods=['GET'])
def get_party_rowid_from_superkey(ibs, party_tag_list, eager=True, nInput=None):
    r"""
    party_rowid_list <- party[party_tag_list]
    Args:
        superkey lists: party_tag_list
    Returns:
        party_rowid_list
    TemplateInfo:
        Tgetter_native_rowid_from_superkey
        tbl = party
    RESTful:
        Method: GET
        URL:    /api/autogen/party_rowid_from_superkey/
    """
    colnames = (PARTY_ROWID,)
    # FIXME: col_rowid is not correct
    params_iter = zip(party_tag_list)
    andwhere_colnames = (PARTY_TAG,)
    party_rowid_list = ibs.db.get_where_eq(
        const.PARTY_TABLE,
        colnames,
        params_iter,
        andwhere_colnames,
        eager=eager,
        nInput=nInput,
    )
    return party_rowid_list
@register_ibs_method
# @register_api('/api/autogen/party/tag/', methods=['GET'])
def get_party_tag(ibs, party_rowid_list, eager=True, nInput=None):
    r"""
    party_tag_list <- party.party_tag[party_rowid_list]
    gets data from the "native" column "party_tag" in the "party" table
    Args:
        party_rowid_list (list):
    Returns:
        list: party_tag_list
    TemplateInfo:
        Tgetter_table_column
        col = party_tag
        tbl = party
    RESTful:
        Method: GET
        URL:    /api/autogen/party/tag/
    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.control._autogen_party_funcs import *  # NOQA
        >>> ibs, qreq_ = testdata_ibs()
        >>> party_rowid_list = ibs._get_all_party_rowids()
        >>> eager = True
        >>> party_tag_list = ibs.get_party_tag(party_rowid_list, eager=eager)
        >>> assert len(party_rowid_list) == len(party_tag_list)
    """
    id_iter = party_rowid_list
    colnames = (PARTY_TAG,)
    party_tag_list = ibs.db.get(
        const.PARTY_TABLE,
        colnames,
        id_iter,
        id_colname='rowid',
        eager=eager,
        nInput=nInput,
    )
    return party_tag_list | 
| 186 | 
	embed builder | 
	import logging
import random
import textwrap
from collections import defaultdict
from datetime import UTC, datetime
from discord import Color, Embed, Emoji
from discord.ext import commands
from bot.bot import Bot
from bot.constants import Colours, ERROR_REPLIES
from bot.utils.pagination import LinePaginator
from bot.utils.time import time_since
log = logging.getLogger(__name__)
class Emojis(commands.Cog):
    """A collection of commands related to emojis in the server."""
    def __init__(self, bot: Bot) -> None:
        self.bot = bot
    @staticmethod
    def METHOD_NAME(emoji: dict) -> tuple[Embed, list[str]]:
        """Generates an embed with the emoji names and count."""
        embed = Embed(
            color=Colours.orange,
            title="Emoji Count",
            timestamp=datetime.now(tz=UTC)
        )
        msg = []
        if len(emoji) == 1:
            for category_name, category_emojis in emoji.items():
                if len(category_emojis) == 1:
                    msg.append(f"There is **{len(category_emojis)}** emoji in the **{category_name}** category.")
                else:
                    msg.append(f"There are **{len(category_emojis)}** emojis in the **{category_name}** category.")
                embed.set_thumbnail(url=random.choice(category_emojis).url)
        else:
            for category_name, category_emojis in emoji.items():
                emoji_choice = random.choice(category_emojis)
                if len(category_emojis) > 1:
                    emoji_info = f"There are **{len(category_emojis)}** emojis in the **{category_name}** category."
                else:
                    emoji_info = f"There is **{len(category_emojis)}** emoji in the **{category_name}** category."
                if emoji_choice.animated:
                    msg.append(f"<a:{emoji_choice.name}:{emoji_choice.id}> {emoji_info}")
                else:
                    msg.append(f"<:{emoji_choice.name}:{emoji_choice.id}> {emoji_info}")
        return embed, msg
    @staticmethod
    def generate_invalid_embed(emojis: list[Emoji]) -> tuple[Embed, list[str]]:
        """Generates error embed for invalid emoji categories."""
        embed = Embed(
            color=Colours.soft_red,
            title=random.choice(ERROR_REPLIES)
        )
        msg = []
        emoji_dict = defaultdict(list)
        for emoji in emojis:
            emoji_dict[emoji.name.split("_")[0]].append(emoji)
        error_comp = ", ".join(emoji_dict)
        msg.append(f"These are the valid emoji categories:\n```\n{error_comp}\n```")
        return embed, msg
    @commands.group(name="emoji", invoke_without_command=True)
    async def emoji_group(self, ctx: commands.Context, emoji: Emoji | None) -> None:
        """A group of commands related to emojis."""
        if emoji is not None:
            await ctx.invoke(self.info_command, emoji)
        else:
            await self.bot.invoke_help_command(ctx)
    @emoji_group.command(name="count", aliases=("c",))
    async def count_command(self, ctx: commands.Context, *, category_query: str | None = None) -> None:
        """Returns embed with emoji category and info given by the user."""
        emoji_dict = defaultdict(list)
        if not ctx.guild.emojis:
            await ctx.send("No emojis found.")
            return
        log.trace(f"Emoji Category {'' if category_query else 'not '}provided by the user.")
        for emoji in ctx.guild.emojis:
            emoji_category = emoji.name.split("_")[0]
            if category_query is not None and emoji_category not in category_query:
                continue
            emoji_dict[emoji_category].append(emoji)
        if not emoji_dict:
            log.trace("Invalid name provided by the user")
            embed, msg = self.generate_invalid_embed(ctx.guild.emojis)
        else:
            embed, msg = self.METHOD_NAME(emoji_dict)
        await LinePaginator.paginate(lines=msg, ctx=ctx, embed=embed)
    @emoji_group.command(name="info", aliases=("i",))
    async def info_command(self, ctx: commands.Context, emoji: Emoji) -> None:
        """Returns relevant information about a Discord Emoji."""
        emoji_information = Embed(
            title=f"Emoji Information: {emoji.name}",
            description=textwrap.dedent(f"""
                **Name:** {emoji.name}
                **Created:** {time_since(emoji.created_at.replace(tzinfo=None), precision="hours")}
                **Date:** {datetime.strftime(emoji.created_at.replace(tzinfo=None), "%d/%m/%Y")}
                **ID:** {emoji.id}
            """),
            color=Color.og_blurple(),
            url=str(emoji.url),
        ).set_thumbnail(url=emoji.url)
        await ctx.send(embed=emoji_information)
async def setup(bot: Bot) -> None:
    """Load the Emojis cog."""
    await bot.add_cog(Emojis(bot)) | 
| 187 | 
	get fanout os | 
	import logging
from tests.common.devices.sonic import SonicHost
from tests.common.devices.onyx import OnyxHost
from tests.common.devices.ixia import IxiaHost
from tests.common.devices.eos import EosHost
from tests.common.devices.aos import AosHost
logger = logging.getLogger(__name__)
class FanoutHost(object):
    """
    @summary: Class for Fanout switch
    For running ansible module on the Fanout switch
    """
    def __init__(self, ansible_adhoc, os, hostname, device_type, user, passwd,
                 eos_shell_user=None, eos_shell_passwd=None):
        self.hostname = hostname
        self.type = device_type
        self.host_to_fanout_port_map = {}
        self.fanout_to_host_port_map = {}
        if os == 'sonic':
            self.os = os
            self.fanout_port_alias_to_name = {}
            self.host = SonicHost(ansible_adhoc, hostname,
                                  ssh_user=user,
                                  ssh_passwd=passwd)
        elif os == 'onyx':
            self.os = os
            self.host = OnyxHost(ansible_adhoc, hostname, user, passwd)
        elif os == 'ixia':
            # TODO: add ixia chassis abstraction
            self.os = os
            self.host = IxiaHost(ansible_adhoc, os, hostname, device_type)
        elif os == 'aos':
            self.os = os
            self.host = AosHost(ansible_adhoc, hostname, user, passwd)
        else:
            # Use eos host if the os type is unknown
            self.os = 'eos'
            self.host = EosHost(ansible_adhoc, hostname, user, passwd,
                                shell_user=eos_shell_user, shell_passwd=eos_shell_passwd)
    def __getattr__(self, module_name):
        return getattr(self.host, module_name)
    def METHOD_NAME(self):
        return self.os
    def get_fanout_type(self):
        return self.type
    def shutdown(self, interface_name):
        """
        Shuts down the given interface.
        If a list of interfaces is provided, checks if the host object has
        a method that can shut down multiple interfaces at once. If no
        such method is found, an AttributeError is raised
        """
        if isinstance(interface_name, list):
            shutdown_multiple = getattr(self.host, "shutdown_multiple", None)
            if callable(shutdown_multiple):
                return shutdown_multiple(interface_name)
            else:
                raise AttributeError("Host of type {} does not contain a"
                                     "'shutdown_multiple' method"
                                     .format(type(self.host)))
        if self.os == 'sonic':
            if interface_name in list(self.fanout_port_alias_to_name.keys()):
                return self.host.shutdown(self.fanout_port_alias_to_name[interface_name])
        return self.host.shutdown(interface_name)
    def no_shutdown(self, interface_name):
        """
        Starts up the given interface.
        If a list of interfaces is provided, checks if the host object has
        a method that can startup multiple interfaces at once. If no
        such method is found, an AttributeError is raised
        """
        if isinstance(interface_name, list):
            no_shutdown_multiple = getattr(self.host, "no_shutdown_multiple", None)
            if callable(no_shutdown_multiple):
                return no_shutdown_multiple(interface_name)
            else:
                raise AttributeError("Host of type {} does not contain a"
                                     "'no_shutdown_multiple' method"
                                     .format(type(self.host)))
        if self.os == 'sonic':
            if interface_name in list(self.fanout_port_alias_to_name.keys()):
                return self.host.no_shutdown(self.fanout_port_alias_to_name[interface_name])
        return self.host.no_shutdown(interface_name)
    def check_intf_link_state(self, interface_name):
        return self.host.check_intf_link_state(interface_name)
    def __str__(self):
        return "{ os: '%s', hostname: '%s', device_type: '%s' }" % (self.os, self.hostname, self.type)
    def __repr__(self):
        return self.__str__()
    def add_port_map(self, host_port, fanout_port):
        """
            The fanout switch is built from the connection graph of the
            DUT, so each fanout switch instance is tied to the DUT
            instance in the test. As a result the port mapping is
            unique from the DUT perspective. However, this function
            needs updating when multiple DUTs are supported.
            host_port is an encoded string of <host name>|<port name>,
            e.g. sample_host|Ethernet0.
        """
        self.host_to_fanout_port_map[host_port] = fanout_port
        self.fanout_to_host_port_map[fanout_port] = host_port
    def exec_template(self, ansible_root, ansible_playbook, inventory, **kwargs):
        return self.host.exec_template(ansible_root, ansible_playbook, inventory, **kwargs)
    def get_supported_speeds(self, interface_name):
        """Get supported speeds for a given interface
        Args:
            interface_name (str): Interface name
        Returns:
            list: A list of supported speed strings or None
        """
        return self.host.get_supported_speeds(interface_name)
    def set_auto_negotiation_mode(self, interface_name, mode):
        """Set auto negotiation mode for a given interface
        Args:
            interface_name (str): Interface name
            mode (boolean): True to enable auto negotiation else disable
        Returns:
            boolean: False if the operation is not supported else True
        """
        return self.host.set_auto_negotiation_mode(interface_name, mode)
    def get_auto_negotiation_mode(self, interface_name):
        """Get auto negotiation mode for a given interface
        Args:
            interface_name (str): Interface name
        Returns:
            boolean: True if auto negotiation mode is enabled else False. Return None if
            the auto negotiation mode is unknown or unsupported.
        """
        return self.host.get_auto_negotiation_mode(interface_name)
    def set_speed(self, interface_name, speed):
        """Set interface speed according to the auto negotiation mode. When auto negotiation mode
        is enabled, set the advertised speeds; otherwise, set the force speed.
        Args:
            interface_name (str): Interface name
            speed (str): SONiC style interface speed. E.g., 1G=1000, 10G=10000, 100G=100000. If the speed
            is None and auto negotiation mode is enabled, it sets the advertised speeds to all supported
            speeds.
        Returns:
            boolean: True if success. Usually, the method return False only if the operation
            is not supported or failed.
        """
        return self.host.set_speed(interface_name, speed)
    def get_speed(self, interface_name):
        """Get interface speed
        Args:
            interface_name (str): Interface name
        Returns:
            str: SONiC style interface speed value. E.g., 1G=1000, 10G=10000, 100G=100000.
        """
        return self.host.get_speed(interface_name)
    def links_status_down(self, ports):
        """Get interface status
        Args:
            ports (set): Interfaces on one fanout
        Returns:
            True: if all interfaces are down
            False: if any interface is up
        """
        return self.host.links_status_down(ports)
    def links_status_up(self, ports):
        """Get interface status
        Args:
            ports (set): Interfaces on one fanout
        Returns:
            True: if all interfaces are up
            False: if any interface is down
        """
        return self.host.links_status_up(ports)
    def set_port_fec(self, interface_name, mode):
        self.host.set_port_fec(interface_name, mode) | 
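A minimal usage sketch for the FanoutHost wrapper above. The ansible_adhoc object, hostname and credentials are placeholders (in the sonic-mgmt framework they normally come from fixtures and the connection graph), and METHOD_NAME stands for the OS getter this row is labelled with:

# Hypothetical values; not part of the original module.
fanout = FanoutHost(ansible_adhoc, os='sonic', hostname='fanout-leaf-1',
                    device_type='FanoutLeaf', user='admin', passwd='password')
fanout.add_port_map('sample_host|Ethernet0', 'Ethernet4')
print(fanout.METHOD_NAME())        # -> 'sonic'
print(fanout.get_fanout_type())    # -> 'FanoutLeaf'
# Lists are dispatched to shutdown_multiple()/no_shutdown_multiple() when the
# underlying host object provides them; otherwise an AttributeError is raised.
fanout.shutdown(['Ethernet4', 'Ethernet8'])
fanout.no_shutdown('Ethernet4')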
| 188 | 
	save history | 
	#!/usr/bin/env python
"""
Copyright (c) 2006-2023 sqlmap developers (https://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import atexit
import os
from lib.core import readlineng as readline
from lib.core.common import getSafeExString
from lib.core.data import logger
from lib.core.data import paths
from lib.core.enums import AUTOCOMPLETE_TYPE
from lib.core.enums import OS
from lib.core.settings import IS_WIN
from lib.core.settings import MAX_HISTORY_LENGTH
try:
    import rlcompleter
    class CompleterNG(rlcompleter.Completer):
        def global_matches(self, text):
            """
            Compute matches when text is a simple name.
            Return a list of all names currently defined in self.namespace
            that match.
            """
            matches = []
            n = len(text)
            for ns in (self.namespace,):
                for word in ns:
                    if word[:n] == text:
                        matches.append(word)
            return matches
except:
    readline._readline = None
def readlineAvailable():
    """
    Check if readline is available. By default it is not
    available in the default Python installation on Windows.
    """
    return readline._readline is not None
def clearHistory():
    if not readlineAvailable():
        return
    readline.clear_history()
def METHOD_NAME(completion=None):
    try:
        if not readlineAvailable():
            return
        if completion == AUTOCOMPLETE_TYPE.SQL:
            historyPath = paths.SQL_SHELL_HISTORY
        elif completion == AUTOCOMPLETE_TYPE.OS:
            historyPath = paths.OS_SHELL_HISTORY
        elif completion == AUTOCOMPLETE_TYPE.API:
            historyPath = paths.API_SHELL_HISTORY
        else:
            historyPath = paths.SQLMAP_SHELL_HISTORY
        try:
            with open(historyPath, "w+"):
                pass
        except:
            pass
        readline.set_history_length(MAX_HISTORY_LENGTH)
        try:
            readline.write_history_file(historyPath)
        except IOError as ex:
            warnMsg = "there was a problem writing the history file '%s' (%s)" % (historyPath, getSafeExString(ex))
            logger.warning(warnMsg)
    except KeyboardInterrupt:
        pass
def loadHistory(completion=None):
    if not readlineAvailable():
        return
    clearHistory()
    if completion == AUTOCOMPLETE_TYPE.SQL:
        historyPath = paths.SQL_SHELL_HISTORY
    elif completion == AUTOCOMPLETE_TYPE.OS:
        historyPath = paths.OS_SHELL_HISTORY
    elif completion == AUTOCOMPLETE_TYPE.API:
        historyPath = paths.API_SHELL_HISTORY
    else:
        historyPath = paths.SQLMAP_SHELL_HISTORY
    if os.path.exists(historyPath):
        try:
            readline.read_history_file(historyPath)
        except IOError as ex:
            warnMsg = "there was a problem loading the history file '%s' (%s)" % (historyPath, getSafeExString(ex))
            logger.warning(warnMsg)
        except UnicodeError:
            if IS_WIN:
                warnMsg = "there was a problem loading the history file '%s'. " % historyPath
                warnMsg += "More info can be found at 'https://github.com/pyreadline/pyreadline/issues/30'"
                logger.warning(warnMsg)
def autoCompletion(completion=None, os=None, commands=None):
    if not readlineAvailable():
        return
    if completion == AUTOCOMPLETE_TYPE.OS:
        if os == OS.WINDOWS:
            # Reference: http://en.wikipedia.org/wiki/List_of_DOS_commands
            completer = CompleterNG({
                "attrib": None, "copy": None, "del": None,
                "dir": None, "echo": None, "fc": None,
                "label": None, "md": None, "mem": None,
                "move": None, "net": None, "netstat -na": None,
                "tree": None, "truename": None, "type": None,
                "ver": None, "vol": None, "xcopy": None,
            })
        else:
            # Reference: http://en.wikipedia.org/wiki/List_of_Unix_commands
            completer = CompleterNG({
                "cat": None, "chmod": None, "chown": None,
                "cp": None, "cut": None, "date": None, "df": None,
                "diff": None, "du": None, "echo": None, "env": None,
                "file": None, "find": None, "free": None, "grep": None,
                "id": None, "ifconfig": None, "ls": None, "mkdir": None,
                "mv": None, "netstat": None, "pwd": None, "rm": None,
                "uname": None, "whoami": None,
            })
        readline.set_completer(completer.complete)
        readline.parse_and_bind("tab: complete")
    elif commands:
        completer = CompleterNG(dict(((_, None) for _ in commands)))
        readline.set_completer_delims(' ')
        readline.set_completer(completer.complete)
        readline.parse_and_bind("tab: complete")
    loadHistory(completion)
    atexit.register(METHOD_NAME, completion) | 
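A short sketch of how these helpers are typically wired into an interactive shell. AUTOCOMPLETE_TYPE, OS and the functions are the ones defined or imported above; the custom command tuple is illustrative:

# Enable tab-completion for an OS shell; autoCompletion() also loads the
# matching history file and registers METHOD_NAME (the history saver) with
# atexit, so history is persisted when the process exits.
autoCompletion(AUTOCOMPLETE_TYPE.OS, os=OS.WINDOWS)

# For a custom sub-shell, completion candidates can be passed explicitly:
autoCompletion(commands=("help", "exit", "quit"))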
| 189 | 
	document send | 
	# Copyright (C) 2019  Renato Lima - Akretion
# Copyright (C) 2019  KMEE INFORMATICA LTDA
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import _, api, fields, models
from odoo.exceptions import UserError, ValidationError
from ..constants.fiscal import (
    DOCUMENT_ISSUER,
    DOCUMENT_ISSUER_COMPANY,
    PROCESSADOR_NENHUM,
    SITUACAO_EDOC_AUTORIZADA,
)
def filter_processador(record):
    if record.document_electronic and record.processador_edoc == PROCESSADOR_NENHUM:
        return True
    return False
class DocumentEletronic(models.AbstractModel):
    _name = "l10n_br_fiscal.document.electronic"
    _description = "Fiscal Eletronic Document"
    _inherit = "l10n_br_fiscal.document.workflow"
    issuer = fields.Selection(
        selection=DOCUMENT_ISSUER,
        default=DOCUMENT_ISSUER_COMPANY,
    )
    status_code = fields.Char(
        copy=False,
    )
    status_name = fields.Char(
        copy=False,
    )
    status_description = fields.Char(
        compute="_compute_status_description",
        copy=False,
    )
    # Authorization Event Related Fields
    authorization_event_id = fields.Many2one(
        comodel_name="l10n_br_fiscal.event",
        string="Authorization Event",
        readonly=True,
        copy=False,
    )
    authorization_date = fields.Datetime(
        related="authorization_event_id.protocol_date",
        string="Authorization Protocol Date",
        readonly=True,
    )
    authorization_protocol = fields.Char(
        related="authorization_event_id.protocol_number",
        string="Authorization Protocol Number",
        readonly=True,
    )
    send_file_id = fields.Many2one(
        comodel_name="ir.attachment",
        related="authorization_event_id.file_request_id",
        string="Send Document File XML",
        ondelete="restrict",
        readonly=True,
    )
    authorization_file_id = fields.Many2one(
        comodel_name="ir.attachment",
        related="authorization_event_id.file_response_id",
        string="Authorization File XML",
        ondelete="restrict",
        readonly=True,
    )
    # Cancel Event Related Fields
    cancel_event_id = fields.Many2one(
        comodel_name="l10n_br_fiscal.event",
        string="Cancel Event",
        copy=False,
    )
    cancel_date = fields.Datetime(
        related="cancel_event_id.protocol_date",
        string="Cancel Protocol Date",
        readonly=True,
    )
    cancel_protocol_number = fields.Char(
        related="cancel_event_id.protocol_number",
        string="Cancel Protocol Protocol",
        readonly=True,
    )
    cancel_file_id = fields.Many2one(
        comodel_name="ir.attachment",
        related="cancel_event_id.file_response_id",
        string="Cancel File XML",
        ondelete="restrict",
        readonly=True,
    )
    # Invalidate Event Related Fields
    invalidate_event_id = fields.Many2one(
        comodel_name="l10n_br_fiscal.event",
        string="Invalidate Event",
        copy=False,
    )
    invalidate_date = fields.Datetime(
        related="invalidate_event_id.protocol_date",
        string="Invalidate Protocol Date",
        readonly=True,
    )
    invalidate_protocol_number = fields.Char(
        related="invalidate_event_id.protocol_number",
        string="Invalidate Protocol Number",
        readonly=True,
    )
    invalidate_file_id = fields.Many2one(
        comodel_name="ir.attachment",
        related="invalidate_event_id.file_response_id",
        string="Invalidate File XML",
        ondelete="restrict",
        readonly=True,
    )
    document_version = fields.Char(string="Version", default="4.00", readonly=True)
    is_edoc_printed = fields.Boolean(string="Is Printed?", readonly=True)
    file_report_id = fields.Many2one(
        comodel_name="ir.attachment",
        string="Document Report",
        ondelete="restrict",
        readonly=True,
        copy=False,
    )
    @api.depends("status_code", "status_name")
    def _compute_status_description(self):
        for record in self:
            if record.status_code:
                record.status_description = "{} - {}".format(
                    record.status_code or "",
                    record.status_name or "",
                )
            else:
                record.status_description = False
    def _eletronic_document_send(self):
        """Implement this method in your transmission module,
        to send the electronic document and use the method _change_state
        to update the state of the transmited document,
        def _eletronic_document_send(self):
            super()._document_send()
            for record in self.filtered(myfilter):
                Do your transmission stuff
                [...]
                Change the state of the document
        """
        for record in self.filtered(filter_processador):
            record._change_state(SITUACAO_EDOC_AUTORIZADA)
    def METHOD_NAME(self):
        no_electronic = self.filtered(
            lambda d: not d.document_electronic
            or not d.issuer == DOCUMENT_ISSUER_COMPANY
        )
        no_electronic._no_eletronic_document_send()
        electronic = self - no_electronic
        electronic._eletronic_document_send()
    def serialize(self):
        edocs = []
        self._serialize(edocs)
        return edocs
    def _serialize(self, edocs):
        return edocs
    def _target_new_tab(self, attachment_id):
        if attachment_id:
            return {
                "type": "ir.actions.act_url",
                "url": "/web/content/{id}/{nome}".format(
                    id=attachment_id.id, nome=attachment_id.name
                ),
                "target": "new",
            }
    def view_xml(self):
        self.ensure_one()
        xml_file = self.authorization_file_id or self.send_file_id
        if not xml_file:
            self._document_export()
            xml_file = self.authorization_file_id or self.send_file_id
        if not xml_file:
            raise UserError(_("No XML file generated!"))
        return self._target_new_tab(xml_file)
    def make_pdf(self):
        pass
    def view_pdf(self):
        self.ensure_one()
        if not self.file_report_id or not self.authorization_file_id:
            self.make_pdf()
        if not self.file_report_id:
            raise UserError(_("No PDF file generated!"))
        return self._target_new_tab(self.file_report_id)
    def _document_status(self):
        """Retorna o status do documento em texto e se necessário,
        atualiza o status do documento"""
        return
    @api.constrains("issuer")
    def _check_issuer(self):
        for record in self.filtered(lambda d: d.document_electronic):
            if not record.issuer:
                raise ValidationError(
                    _(
                        "The field 'Issuer' is required for brazilian electronic documents!"
                    )
                ) | 
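The _eletronic_document_send docstring above describes the intended extension point; a hedged sketch of what a transmission-module override might look like. The processor value and the transmission step are placeholders, not part of this module:

from odoo import models
from ..constants.fiscal import SITUACAO_EDOC_AUTORIZADA

class DocumentEletronicTransmission(models.AbstractModel):
    _inherit = "l10n_br_fiscal.document.electronic"

    def _eletronic_document_send(self):
        super()._eletronic_document_send()
        # Placeholder filter: select only documents handled by this processor.
        for record in self.filtered(
            lambda d: d.document_electronic and d.processador_edoc == "my_processor"
        ):
            # ... build and transmit the XML to the tax authority here ...
            record._change_state(SITUACAO_EDOC_AUTORIZADA)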
| 190 | 
	parse uri | 
	from _typeshed import Incomplete, Unused
from typing import Any, NoReturn
from typing_extensions import Literal, TypeAlias
from urllib.request import OpenerDirector
from xml.dom.expatbuilder import ExpatBuilder, ExpatBuilderNS
from xml.dom.minidom import Node
__all__ = ["DOMBuilder", "DOMEntityResolver", "DOMInputSource"]
# UNKNOWN TYPES:
# - `Options.errorHandler`.
#       The same as `_DOMBuilderErrorHandlerType`?
#       Maybe `xml.sax.handler.ErrorHandler`?
# - Return type of DOMBuilder.getFeature().
#       We could get rid of the `Incomplete` if we knew more
#       about `Options.errorHandler`.
# ALIASES REPRESENTING MORE UNKNOWN TYPES:
# probably the same as `Options.errorHandler`?
# Maybe `xml.sax.handler.ErrorHandler`?
_DOMBuilderErrorHandlerType: TypeAlias = Incomplete | None
# probably some kind of IO...
_DOMInputSourceCharacterStreamType: TypeAlias = Incomplete | None
# probably a string??
_DOMInputSourceStringDataType: TypeAlias = Incomplete | None
# probably a string??
_DOMInputSourceEncodingType: TypeAlias = Incomplete | None
class Options:
    namespaces: int
    namespace_declarations: bool
    validation: bool
    external_parameter_entities: bool
    external_general_entities: bool
    external_dtd_subset: bool
    validate_if_schema: bool
    validate: bool
    datatype_normalization: bool
    create_entity_ref_nodes: bool
    entities: bool
    whitespace_in_element_content: bool
    cdata_sections: bool
    comments: bool
    charset_overrides_xml_encoding: bool
    infoset: bool
    supported_mediatypes_only: bool
    errorHandler: Any | None
    filter: DOMBuilderFilter | None  # a guess, but seems likely
class DOMBuilder:
    entityResolver: DOMEntityResolver | None  # a guess, but seems likely
    errorHandler: _DOMBuilderErrorHandlerType
    filter: DOMBuilderFilter | None  # a guess, but seems likely
    ACTION_REPLACE: Literal[1]
    ACTION_APPEND_AS_CHILDREN: Literal[2]
    ACTION_INSERT_AFTER: Literal[3]
    ACTION_INSERT_BEFORE: Literal[4]
    def setFeature(self, name: str, state: int) -> None: ...
    def supportsFeature(self, name: str) -> bool: ...
    def canSetFeature(self, name: str, state: int) -> bool: ...
    # getFeature could return any attribute from an instance of `Options`
    def getFeature(self, name: str) -> Incomplete: ...
    def METHOD_NAME(self, uri: str) -> ExpatBuilder | ExpatBuilderNS: ...
    def parse(self, input: DOMInputSource) -> ExpatBuilder | ExpatBuilderNS: ...
    # `input` and `cnode` argtypes for `parseWithContext` are unknowable
    # as the function does nothing with them, and always raises an exception.
    # But `input` is *probably* `DOMInputSource`?
    def parseWithContext(self, input: Unused, cnode: Unused, action: Literal[1, 2, 3, 4]) -> NoReturn: ...
class DOMEntityResolver:
    def resolveEntity(self, publicId: str | None, systemId: str) -> DOMInputSource: ...
class DOMInputSource:
    byteStream: OpenerDirector | None
    characterStream: _DOMInputSourceCharacterStreamType
    stringData: _DOMInputSourceStringDataType
    encoding: _DOMInputSourceEncodingType
    publicId: str | None
    systemId: str | None
    baseURI: str | None
class DOMBuilderFilter:
    FILTER_ACCEPT: Literal[1]
    FILTER_REJECT: Literal[2]
    FILTER_SKIP: Literal[3]
    FILTER_INTERRUPT: Literal[4]
    whatToShow: int
    def acceptNode(self, element: Unused) -> Literal[1]: ...
    def startContainer(self, element: Unused) -> Literal[1]: ...
class DocumentLS:
    async_: bool
    def abort(self) -> NoReturn: ...
    # `load()` and `loadXML()` always raise exceptions
    # so the argtypes of `uri` and `source` are unknowable.
    # `source` is *probably* `DOMInputSource`?
    # `uri` is *probably* a str? (see DOMBuilder.parseURI())
    def load(self, uri: Unused) -> NoReturn: ...
    def loadXML(self, source: Unused) -> NoReturn: ...
    def saveXML(self, snode: Node | None) -> str: ...
class DOMImplementationLS:
    MODE_SYNCHRONOUS: Literal[1]
    MODE_ASYNCHRONOUS: Literal[2]
    def createDOMBuilder(self, mode: Literal[1], schemaType: None) -> DOMBuilder: ...
    def createDOMWriter(self) -> NoReturn: ...
    def createDOMInputSource(self) -> DOMInputSource: ... | 
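These are type stubs for xml.dom.xmlbuilder in the standard library; a small, hedged usage sketch of parseURI (METHOD_NAME above), with a placeholder path:

from xml.dom.xmlbuilder import DOMBuilder

builder = DOMBuilder()
# "namespaces" is an Options attribute, so supportsFeature() reports it;
# whether a particular state can be set is queried with canSetFeature().
print(builder.supportsFeature("namespaces"))
print(builder.canSetFeature("namespaces", 1))
# parseURI() accepts a URI string; a local file path works as well.
result = builder.parseURI("example.xml")  # placeholder path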
| 191 | 
	tags | 
	# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
    'GetIpPrefixResult',
    'AwaitableGetIpPrefixResult',
    'get_ip_prefix',
    'get_ip_prefix_output',
]
@pulumi.output_type
class GetIpPrefixResult:
    """
    The IP Prefix resource definition.
    """
    def __init__(__self__, administrative_state=None, annotation=None, configuration_state=None, id=None, ip_prefix_rules=None, location=None, name=None, provisioning_state=None, system_data=None, METHOD_NAME=None, type=None):
        if administrative_state and not isinstance(administrative_state, str):
            raise TypeError("Expected argument 'administrative_state' to be a str")
        pulumi.set(__self__, "administrative_state", administrative_state)
        if annotation and not isinstance(annotation, str):
            raise TypeError("Expected argument 'annotation' to be a str")
        pulumi.set(__self__, "annotation", annotation)
        if configuration_state and not isinstance(configuration_state, str):
            raise TypeError("Expected argument 'configuration_state' to be a str")
        pulumi.set(__self__, "configuration_state", configuration_state)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if ip_prefix_rules and not isinstance(ip_prefix_rules, list):
            raise TypeError("Expected argument 'ip_prefix_rules' to be a list")
        pulumi.set(__self__, "ip_prefix_rules", ip_prefix_rules)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if METHOD_NAME and not isinstance(METHOD_NAME, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", METHOD_NAME)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="administrativeState")
    def administrative_state(self) -> str:
        """
        Administrative state of the resource.
        """
        return pulumi.get(self, "administrative_state")
    @property
    @pulumi.getter
    def annotation(self) -> Optional[str]:
        """
        Switch configuration description.
        """
        return pulumi.get(self, "annotation")
    @property
    @pulumi.getter(name="configurationState")
    def configuration_state(self) -> str:
        """
        Configuration state of the resource.
        """
        return pulumi.get(self, "configuration_state")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. E.g. "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}"
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="ipPrefixRules")
    def ip_prefix_rules(self) -> Sequence['outputs.IpPrefixRuleResponse']:
        """
        The list of IP Prefix Rules.
        """
        return pulumi.get(self, "ip_prefix_rules")
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Provisioning state of the resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetIpPrefixResult(GetIpPrefixResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetIpPrefixResult(
            administrative_state=self.administrative_state,
            annotation=self.annotation,
            configuration_state=self.configuration_state,
            id=self.id,
            ip_prefix_rules=self.ip_prefix_rules,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            system_data=self.system_data,
            METHOD_NAME=self.METHOD_NAME,
            type=self.type)
def get_ip_prefix(ip_prefix_name: Optional[str] = None,
                  resource_group_name: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIpPrefixResult:
    """
    Implements IP Prefix GET method.
    :param str ip_prefix_name: Name of the IP Prefix.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    __args__ = dict()
    __args__['ipPrefixName'] = ip_prefix_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:managednetworkfabric/v20230615:getIpPrefix', __args__, opts=opts, typ=GetIpPrefixResult).value
    return AwaitableGetIpPrefixResult(
        administrative_state=pulumi.get(__ret__, 'administrative_state'),
        annotation=pulumi.get(__ret__, 'annotation'),
        configuration_state=pulumi.get(__ret__, 'configuration_state'),
        id=pulumi.get(__ret__, 'id'),
        ip_prefix_rules=pulumi.get(__ret__, 'ip_prefix_rules'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        system_data=pulumi.get(__ret__, 'system_data'),
        METHOD_NAME=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_ip_prefix)
def get_ip_prefix_output(ip_prefix_name: Optional[pulumi.Input[str]] = None,
                         resource_group_name: Optional[pulumi.Input[str]] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetIpPrefixResult]:
    """
    Implements IP Prefix GET method.
    :param str ip_prefix_name: Name of the IP Prefix.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    ... | 
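A hedged usage sketch for the generated invoke functions above; the resource names are placeholders, and METHOD_NAME is the 'tags' property this row is labelled with:

import pulumi

# Plain invoke form (resolves when the result is available):
prefix = get_ip_prefix(ip_prefix_name="example-ipprefix",
                       resource_group_name="example-rg")
pulumi.export("ipPrefixTags", prefix.METHOD_NAME)

# Output form, usable when the inputs are themselves pulumi Outputs:
prefix_out = get_ip_prefix_output(ip_prefix_name="example-ipprefix",
                                  resource_group_name="example-rg")
pulumi.export("ipPrefixLocation", prefix_out.location)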
| 192 | 
	test evaluation | 
	# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import typing
from typing import Dict, List, Tuple
import pytest
import mlrun
from mlrun.frameworks._common import ArtifactsLibrary
from mlrun.frameworks._ml_common import AlgorithmFunctionality, MLPlanStages
from mlrun.frameworks.sklearn import MetricsLibrary, SKLearnArtifactsLibrary
from mlrun.frameworks.xgboost import XGBoostArtifactsLibrary
from .ml_functions import MLFunctions
from .sklearn import SKLearnFunctions
from .xgboost import XGBoostFunctions
class FrameworkKeys:
    XGBOOST = "xgboost"
    SKLEARN = "sklearn"
FRAMEWORKS = {
    FrameworkKeys.XGBOOST: (
        XGBoostFunctions,
        XGBoostArtifactsLibrary,
        MetricsLibrary,
    ),
    FrameworkKeys.SKLEARN: (
        SKLearnFunctions,
        SKLearnArtifactsLibrary,
        MetricsLibrary,
    ),
}  # type: Dict[str, Tuple[MLFunctions, ArtifactsLibrary, MetricsLibrary]]
FRAMEWORKS_KEYS = [
    FrameworkKeys.XGBOOST,
    FrameworkKeys.SKLEARN,
]  # type: List[str]
ALGORITHM_FUNCTIONALITIES = [
    algorithm_functionality.value
    for algorithm_functionality in AlgorithmFunctionality
    if "Unknown" not in algorithm_functionality.value
]  # type: List[str]
FRAMEWORKS_ALGORITHM_FUNCTIONALITIES = [
    (framework, algorithm_functionality)
    for framework in FRAMEWORKS_KEYS
    for algorithm_functionality in ALGORITHM_FUNCTIONALITIES
    if (
        framework is not FrameworkKeys.XGBOOST
        or algorithm_functionality
        != AlgorithmFunctionality.MULTI_OUTPUT_MULTICLASS_CLASSIFICATION.value
    )
]  # type: List[Tuple[str, str]]
def framework_algorithm_functionality_pair_ids(
    framework_algorithm_functionality_pair: typing.Tuple[str, str]
) -> str:
    framework, algorithm_functionality = framework_algorithm_functionality_pair
    return f"{framework}-{algorithm_functionality}"
@pytest.mark.parametrize(
    "framework_algorithm_functionality_pair",
    FRAMEWORKS_ALGORITHM_FUNCTIONALITIES,
    ids=framework_algorithm_functionality_pair_ids,
)
def test_training(framework_algorithm_functionality_pair: typing.Tuple[str, str]):
    framework, algorithm_functionality = framework_algorithm_functionality_pair
    # Unpack the framework classes:
    (functions, artifacts_library, metrics_library) = FRAMEWORKS[
        framework
    ]  # type: MLFunctions, ArtifactsLibrary, MetricsLibrary
    # Run training:
    train_run = mlrun.new_function().run(
        artifact_path="./temp",
        handler=functions.train,
        params={"algorithm_functionality": algorithm_functionality},
    )
    # Print the outputs for manual validation:
    print(json.dumps(train_run.outputs, indent=4))
    # Get assertion parameters:
    algorithm_functionality = AlgorithmFunctionality(algorithm_functionality)
    dummy_model = functions.get_model(algorithm_functionality=algorithm_functionality)
    _, dummy_y = functions.get_dataset(
        algorithm_functionality=algorithm_functionality, for_training=False
    )
    expected_artifacts = artifacts_library.get_plans(model=dummy_model, y=dummy_y)
    expected_results = metrics_library.get_metrics(model=dummy_model, y=dummy_y)
    # Validate artifacts (model artifact shouldn't be counted, hence the '-1'):
    assert len(train_run.status.artifacts) - 1 == len(expected_artifacts)
    # Validate results:
    assert len(train_run.status.results) == len(expected_results)
@pytest.mark.parametrize(
    "framework_algorithm_functionality_pair",
    FRAMEWORKS_ALGORITHM_FUNCTIONALITIES,
    ids=framework_algorithm_functionality_pair_ids,
)
def METHOD_NAME(
    rundb_mock,
    framework_algorithm_functionality_pair: typing.Tuple[str, str],
):
    framework, algorithm_functionality = framework_algorithm_functionality_pair
    # Unpack the framework classes:
    (functions, artifacts_library, metrics_library) = FRAMEWORKS[
        framework
    ]  # type: MLFunctions, ArtifactsLibrary, MetricsLibrary
    # Run training:
    train_run = mlrun.new_function().run(
        artifact_path="./temp2",
        handler=functions.train,
        params={"algorithm_functionality": algorithm_functionality},
    )
    # Run evaluation (on the model that was just trained):
    evaluate_run = mlrun.new_function().run(
        artifact_path="./temp2",
        handler=functions.evaluate,
        params={
            "algorithm_functionality": algorithm_functionality,
            "model_path": train_run.outputs["model"],
        },
    )
    # Print the outputs for manual validation:
    print(json.dumps(evaluate_run.outputs, indent=4))
    # Get assertion parameters:
    algorithm_functionality = AlgorithmFunctionality(algorithm_functionality)
    dummy_model = functions.get_model(algorithm_functionality=algorithm_functionality)
    _, dummy_y = functions.get_dataset(
        algorithm_functionality=algorithm_functionality, for_training=False
    )
    expected_artifacts = [
        plan
        for plan in artifacts_library.get_plans(model=dummy_model, y=dummy_y)
        if not (
            # Count only pre and post prediction artifacts (evaluation artifacts).
            plan.is_ready(stage=MLPlanStages.POST_FIT, is_probabilities=False)
            or plan.is_ready(stage=MLPlanStages.PRE_FIT, is_probabilities=False)
        )
    ]
    expected_results = metrics_library.get_metrics(model=dummy_model, y=dummy_y)
    # Validate artifacts:
    assert len(evaluate_run.status.artifacts) == len(expected_artifacts)
    # Validate results:
    assert len(evaluate_run.status.results) == len(expected_results) | 
| 193 | 
	play | 
	import pychromecast
from t_modules.t_extra import shooter
import time
import socket
def get_ip():
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.settimeout(0)
    try:
        # doesn't even have to be reachable
        s.connect(('10.255.255.255', 1))
        IP = s.getsockname()[0]
    except Exception:
        IP = '127.0.0.1'
    finally:
        s.close()
    return IP
class Chrome:
    def __init__(self, tauon):
        self.tauon = tauon
        self.services = []
        self.active = False
        self.cast = None
        self.target_playlist = None
        self.target_id = None
        self.save_vol = 100
    def rescan(self):
        print("Scanning for chromecasts...")
        if True: #not self.services:
            try:
                #self.tauon.gui.show_message(self.tauon.strings.scan_chrome)
                services, browser = pychromecast.discovery.discover_chromecasts()
                pychromecast.discovery.stop_discovery(browser)
                menu = self.tauon.chrome_menu
                MenuItem = self.tauon.MenuItem
                #menu.items.clear()
                for item in services:
                    self.services.append([str(item.uuid), str(item.friendly_name)])
                    menu.add_to_sub(1, MenuItem(self.tauon.strings.cast_to % str(item.friendly_name), self.three, pass_ref=True, args=[str(item.uuid), str(item.friendly_name)]))
                menu.add_to_sub(1, MenuItem(self.tauon.strings.stop_cast, self.end, show_test=lambda x: self.active))
            except:
                print("Failed to get chromecasts")
                raise
    def three(self, _, item):
        shooter(self.four, [item])
    def four(self, item):
        if self.active:
            self.end()
        self.tauon.start_remote()
        ccs, browser = pychromecast.get_listed_chromecasts(friendly_names=[item[1]], discovery_timeout=3.0)
        self.browser = browser
        self.cast = ccs[0]
        self.cast.wait()
        self.save_vol = self.tauon.pctl.player_volume
        self.tauon.pctl.player_volume = min(self.cast.status.volume_level * 100, 100)
        self.ip = get_ip()
        mc = self.cast.media_controller
        mc.app_id = "2F76715B"
        self.tauon.chrome_mode = True
        self.active = True
        self.tauon.gui.update += 1
        self.tauon.pctl.playerCommand = "startchrome"
        self.tauon.pctl.playerCommandReady = True
        self.tauon.tm.ready_playback()
    def update(self):
        self.cast.media_controller.update_status()
        return self.cast.media_controller.status.current_time, \
            self.cast.media_controller.status.media_custom_data.get("id"), \
            self.cast.media_controller.status.player_state, \
            self.cast.media_controller.status.duration
    def start(self, track_id, enqueue=False, t=0, url=None):
        self.cast.wait()
        tr = self.tauon.pctl.g(track_id)
        n = 0
        try:
            n = int(tr.track_number)
        except:
            pass
        d = {
            "metadataType": 3,
            "albumName": tr.album,
            "title": tr.title,
            "albumArtist": tr.album_artist,
            "artist": tr.artist,
            "trackNumber": n,
            "images": [{"url": f"http://{self.ip}:7814/api1/pic/medium/{track_id}"}],
            "releaseDate": tr.date
        }
        m = {
            "duration": round(float(tr.length), 1),
            "customData": {"id": str(tr.index)}
        }
        if url is None:
            url = f"http://{self.ip}:7814/api1/file/{track_id}"
        else:
            url = url.replace("localhost", self.ip)
            url = url.replace("127.0.0.1", self.ip)
        self.cast.media_controller.play_media(url, 'audio/mpeg', media_info=m, metadata=d, current_time=t, enqueue=enqueue)
    def stop(self):
        self.cast.media_controller.stop()
    def METHOD_NAME(self):
        self.cast.media_controller.METHOD_NAME()
    def pause(self):
        self.cast.media_controller.pause()
    def seek(self, t):
        self.cast.media_controller.seek(t)
    def volume(self, decimal):
        self.cast.set_volume(decimal)
    def end(self):
        self.tauon.pctl.playerCommand = "endchrome"
        self.tauon.pctl.playerCommandReady = True
        if self.active:
            if self.cast:
                mc = self.cast.media_controller
                mc.stop()
            self.active = False
        self.tauon.chrome_mode = False
        self.tauon.pctl.player_volume = self.save_vol
 | 
| 194 | 
	process response | 
	"""
An extension to retry failed requests that are potentially caused by temporary
problems such as a connection timeout or HTTP 500 error.
You can change the behaviour of this middleware by modifying the scraping settings:
RETRY_TIMES - how many times to retry a failed page
RETRY_HTTP_CODES - which HTTP response codes to retry
Failed pages are collected during the scraping process and rescheduled at the end,
once the spider has finished crawling all regular (non-failed) pages.
"""
import warnings
from logging import Logger, getLogger
from typing import Optional, Union
from scrapy.exceptions import NotConfigured, ScrapyDeprecationWarning
from scrapy.http.request import Request
from scrapy.settings import Settings
from scrapy.spiders import Spider
from scrapy.utils.misc import load_object
from scrapy.utils.python import global_object_name
from scrapy.utils.response import response_status_message
retry_logger = getLogger(__name__)
class BackwardsCompatibilityMetaclass(type):
    @property
    def EXCEPTIONS_TO_RETRY(cls):
        warnings.warn(
            "Attribute RetryMiddleware.EXCEPTIONS_TO_RETRY is deprecated. "
            "Use the RETRY_EXCEPTIONS setting instead.",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return tuple(
            load_object(x) if isinstance(x, str) else x
            for x in Settings().getlist("RETRY_EXCEPTIONS")
        )
def get_retry_request(
    request: Request,
    *,
    spider: Spider,
    reason: Union[str, Exception] = "unspecified",
    max_retry_times: Optional[int] = None,
    priority_adjust: Optional[int] = None,
    logger: Logger = retry_logger,
    stats_base_key: str = "retry",
):
    """
    Returns a new :class:`~scrapy.Request` object to retry the specified
    request, or ``None`` if retries of the specified request have been
    exhausted.
    For example, in a :class:`~scrapy.Spider` callback, you could use it as
    follows::
        def parse(self, response):
            if not response.text:
                new_request_or_none = get_retry_request(
                    response.request,
                    spider=self,
                    reason='empty',
                )
                return new_request_or_none
    *spider* is the :class:`~scrapy.Spider` instance which is asking for the
    retry request. It is used to access the :ref:`settings <topics-settings>`
    and :ref:`stats <topics-stats>`, and to provide extra logging context (see
    :func:`logging.debug`).
    *reason* is a string or an :class:`Exception` object that indicates the
    reason why the request needs to be retried. It is used to name retry stats.
    *max_retry_times* is a number that determines the maximum number of times
    that *request* can be retried. If not specified or ``None``, the number is
    read from the :reqmeta:`max_retry_times` meta key of the request. If the
    :reqmeta:`max_retry_times` meta key is not defined or ``None``, the number
    is read from the :setting:`RETRY_TIMES` setting.
    *priority_adjust* is a number that determines how the priority of the new
    request changes in relation to *request*. If not specified, the number is
    read from the :setting:`RETRY_PRIORITY_ADJUST` setting.
    *logger* is the logging.Logger object to be used when logging messages
    *stats_base_key* is a string to be used as the base key for the
    retry-related job stats
    """
    settings = spider.crawler.settings
    stats = spider.crawler.stats
    retry_times = request.meta.get("retry_times", 0) + 1
    if max_retry_times is None:
        max_retry_times = request.meta.get("max_retry_times")
        if max_retry_times is None:
            max_retry_times = settings.getint("RETRY_TIMES")
    if retry_times <= max_retry_times:
        logger.debug(
            "Retrying %(request)s (failed %(retry_times)d times): %(reason)s",
            {"request": request, "retry_times": retry_times, "reason": reason},
            extra={"spider": spider},
        )
        new_request: Request = request.copy()
        new_request.meta["retry_times"] = retry_times
        new_request.dont_filter = True
        if priority_adjust is None:
            priority_adjust = settings.getint("RETRY_PRIORITY_ADJUST")
        new_request.priority = request.priority + priority_adjust
        if callable(reason):
            reason = reason()
        if isinstance(reason, Exception):
            reason = global_object_name(reason.__class__)
        stats.inc_value(f"{stats_base_key}/count")
        stats.inc_value(f"{stats_base_key}/reason_count/{reason}")
        return new_request
    stats.inc_value(f"{stats_base_key}/max_reached")
    logger.error(
        "Gave up retrying %(request)s (failed %(retry_times)d times): " "%(reason)s",
        {"request": request, "retry_times": retry_times, "reason": reason},
        extra={"spider": spider},
    )
    return None
class RetryMiddleware(metaclass=BackwardsCompatibilityMetaclass):
    def __init__(self, settings):
        if not settings.getbool("RETRY_ENABLED"):
            raise NotConfigured
        self.max_retry_times = settings.getint("RETRY_TIMES")
        self.retry_http_codes = set(
            int(x) for x in settings.getlist("RETRY_HTTP_CODES")
        )
        self.priority_adjust = settings.getint("RETRY_PRIORITY_ADJUST")
        if not hasattr(
            self, "EXCEPTIONS_TO_RETRY"
        ):  # If EXCEPTIONS_TO_RETRY is not "overridden"
            self.exceptions_to_retry = tuple(
                load_object(x) if isinstance(x, str) else x
                for x in settings.getlist("RETRY_EXCEPTIONS")
            )
        else:
            self.exceptions_to_retry = self.EXCEPTIONS_TO_RETRY
    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings)
    def METHOD_NAME(self, request, response, spider):
        if request.meta.get("dont_retry", False):
            return response
        if response.status in self.retry_http_codes:
            reason = response_status_message(response.status)
            return self._retry(request, reason, spider) or response
        return response
    def process_exception(self, request, exception, spider):
        if isinstance(exception, self.exceptions_to_retry) and not request.meta.get(
            "dont_retry", False
        ):
            return self._retry(request, exception, spider)
    def _retry(self, request, reason, spider):
        max_retry_times = request.meta.get("max_retry_times", self.max_retry_times)
        priority_adjust = request.meta.get("priority_adjust", self.priority_adjust)
        return get_retry_request(
            request,
            reason=reason,
            spider=spider,
            max_retry_times=max_retry_times,
            priority_adjust=priority_adjust,
        ) | 
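For reference, a minimal settings sketch covering the options this middleware reads; the concrete values are illustrative, not the project defaults:

# settings.py (illustrative values)
RETRY_ENABLED = True
RETRY_TIMES = 2                       # retries per failed page
RETRY_HTTP_CODES = [500, 502, 503, 504, 408, 429]
RETRY_PRIORITY_ADJUST = -1
# Dotted paths are resolved with load_object(); exception classes also work.
RETRY_EXCEPTIONS = ["twisted.internet.error.TimeoutError", OSError]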
| 195 | 
	list all | 
	#!/usr/bin/python3
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2019-2021  Patryk Obara <patryk.obara@gmail.com>
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
"""
Count all compiler warnings and print a summary.
It returns success to the shell if the number of warnings encountered
is less than or equal to the desired maximum warnings (default: 0).
You can override the default limit with MAX_WARNINGS environment variable or
using --max-warnings option (see the description of argument in --help for
details).
Note: newer compilers include an additional flag -fdiagnostics-format=[text|json],
which could be used instead of regex parsing, but we want to preserve
human-readable output in the standard log.
"""
import argparse
import os
import re
import sys
# For recognizing warnings in GCC format in stderr:
#
GCC_WARN_PATTERN = re.compile(r"([^:]+):(\d+):\d+: warning: .* \[-W(.+?)\](.*)")
#                                ~~~~~   ~~~  ~~~           ~~      ~~~    ~~
#                                ↑       ↑    ↑             ↑       ↑      ↑
#                                file    line column  message    type  extra
# For recognizing warnings in MSVC format:
#
MSVC_WARN_PATTERN = re.compile(r".+>([^\(]+)\((\d+),\d+\): warning ([^:]+): .*")
#                                ~~  ~~~~~~    ~~~  ~~~             ~~~~~   ~~
#                                ↑   ↑         ↑    ↑               ↑        ↑
#                          project   file      line column       code  message
# For removing color when GCC is invoked with -fdiagnostics-color=always
#
ANSI_COLOR_PATTERN = re.compile(r"\x1b\[[0-9;]*[mGKH]")
# For recognizing warnings from usr/* or subprojects files
USR_OR_SUBPROJECTS_PATTERN = re.compile(r"^/usr/.*|.*/subprojects/.*")
class warning_summaries:
    def __init__(self):
        self.types = {}
        self.files = {}
        self.lines = set()
    def count_type(self, name):
        self.types[name] = self.types.get(name, 0) + 1
    def count_file(self, name):
        self.files[name] = self.files.get(name, 0) + 1
    def METHOD_NAME(self):
        for line in sorted(self.lines):
            print(line)
        print()
    def print_files(self):
        print("Warnings grouped by file:\n")
        print_summary(self.files)
    def print_types(self):
        print("Warnings grouped by type:\n")
        print_summary(self.types)
def remove_colors(line):
    return re.sub(ANSI_COLOR_PATTERN, "", line)
def count_warning(gcc_format, line_no, line, warnings):
    line = remove_colors(line)
    pattern = GCC_WARN_PATTERN if gcc_format else MSVC_WARN_PATTERN
    match = pattern.match(line)
    if not match:
        return 0
    # Ignore out-of-scope warnings from system and subprojects.
    file = match.group(1)
    if USR_OR_SUBPROJECTS_PATTERN.match(file):
        return 0
    # Some warnings (e.g. effc++) are reported multiple times, once
    # for every usage; ignore duplicates.
    line = line.strip()
    if line in warnings.lines:
        return 0
    warnings.lines.add(line)
    # wline = match.group(2)
    wtype = match.group(3)
    if pattern == GCC_WARN_PATTERN and match.group(4):
        print(
            "Log file is corrupted: extra characters in line", line_no, file=sys.stderr
        )
    _, fname = os.path.split(file)
    warnings.count_type(wtype)
    warnings.count_file(fname)
    return 1
def get_input_lines(name):
    if name == "-":
        return sys.stdin.readlines()
    if not os.path.isfile(name):
        print("{}: no such file.".format(name))
        sys.exit(2)
    with open(name, "r", encoding="utf-8") as logs:
        return logs.readlines()
def find_longest_name_length(names):
    return max(len(x) for x in names)
def print_summary(issues):
    size = find_longest_name_length(issues.keys()) + 1
    items = list(issues.items())
    for name, count in sorted(items, key=lambda x: (x[1], x[0]), reverse=True):
        print(
            "  {text:{field_size}s}: {count}".format(
                text=name, count=count, field_size=size
            )
        )
    print()
def parse_args():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter, description=__doc__
    )
    parser.add_argument(
        "logfile",
        metavar="LOGFILE",
        help="Path to the logfile, or use - to read from stdin",
    )
    max_warnings = int(os.getenv("MAX_WARNINGS", "0"))
    parser.add_argument(
        "-m",
        "--max-warnings",
        type=int,
        default=max_warnings,
        help="Override the maximum number of warnings.\n"
        "Use value -1 to disable the check.",
    )
    parser.add_argument(
        "-f", "--files", action="store_true", help="Group warnings by filename."
    )
    parser.add_argument(
        "-l", "--list", action="store_true", help="Display sorted list of all warnings."
    )
    parser.add_argument(
        "--msvc", action="store_true", help="Look for warnings using MSVC format."
    )
    return parser.parse_args()
def main():
    rcode = 0
    total = 0
    warnings = warning_summaries()
    args = parse_args()
    use_gcc_format = not args.msvc
    line_no = 1
    for line in get_input_lines(args.logfile):
        total += count_warning(use_gcc_format, line_no, line, warnings)
        line_no += 1
    if args.list:
        warnings.METHOD_NAME()
    if args.files and warnings.files:
        warnings.print_files()
    if warnings.types:
        warnings.print_types()
    print("Total: {} warnings".format(total), end="")
    if args.max_warnings >= 0:
        print(" (out of {} allowed)\n".format(args.max_warnings))
        if total > args.max_warnings:
            print("Error: upper limit of allowed warnings is", args.max_warnings)
            rcode = 1
    else:
        print("\n")
    return rcode
if __name__ == "__main__":
    sys.exit(main()) | 
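An illustrative invocation, driving main() through sys.argv the same way the command line would; the log filename and flag values are placeholders:

# Equivalent to: <script> build.log --max-warnings 10 --files --list
import sys
sys.argv = ["count-warnings", "build.log", "--max-warnings", "10", "--files", "--list"]
sys.exit(main())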
| 196 | 
	check chain | 
	""" X509Request is a class for managing X509 requests with their Pkeys.
Its main use is for proxy delegation.
"""
import M2Crypto
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Security.m2crypto import DEFAULT_PROXY_STRENGTH
from DIRAC.Core.Utilities import DErrno
# from DIRAC.Core.Security.m2crypto.X509Chain import X509Chain  # pylint: disable=import-error
# pylint: disable=broad-except
class X509Request:
    """
    Class representing an X509 Certificate Request. It is used for delegation.
    Please see :ref:`about_proxies` for detailed explanations on delegation,
    and :py:class:`DIRAC.Core.Security.m2crypto.X509Chain` for code examples.
    """
    def __init__(self, reqObj=None, pkeyObj=None):
        """C'tor
        :param reqObj: M2Crypto.X509.Request object. Never used. Shall be removed
        :param pkeyObj: M2Crypto.EVP.PKey() object. Never used. Shall be removed
        """
        self.__valid = False
        self.__reqObj = reqObj
        self.__pkeyObj = pkeyObj
        if reqObj and pkeyObj:  # isn't it a bit too liberal?
            self.__valid = True
    def generateProxyRequest(self, bitStrength=DEFAULT_PROXY_STRENGTH, limited=False):
        """
        Initialize the Request object as well as the PKey.
        :param bitStrength: (default 2048) length of the key generated
        :param limited: (default False) If True, request is done for a limited proxy
        """
        # self.__pkeyObj is both the public and private key
        self.__pkeyObj = M2Crypto.EVP.PKey()
        self.__pkeyObj.assign_rsa(
            M2Crypto.RSA.gen_key(bitStrength, 65537, callback=M2Crypto.util.quiet_genparam_callback)
        )
        self.__reqObj = M2Crypto.X509.Request()
        self.__reqObj.set_pubkey(self.__pkeyObj)
        if limited:
            self.__reqObj.get_subject().add_entry_by_txt(
                field="CN", type=M2Crypto.ASN1.MBSTRING_ASC, entry="limited proxy", len=-1, loc=-1, set=0
            )
        else:
            self.__reqObj.get_subject().add_entry_by_txt(
                field="CN", type=M2Crypto.ASN1.MBSTRING_ASC, entry="proxy", len=-1, loc=-1, set=0
            )
        self.__reqObj.sign(self.__pkeyObj, "sha256")
        self.__valid = True
    def dumpRequest(self):
        """
        Get the request as a string
        :returns: S_OK(pem encoded request)
        """
        if not self.__valid:
            return S_ERROR(DErrno.ENOCERT)
        try:
            reqStr = self.__reqObj.as_pem().decode("ascii")
        except Exception as e:
            return S_ERROR(DErrno.EX509, f"Can't serialize request: {e}")
        return S_OK(reqStr)
    # def getRequestObject(self):
    #   """
    #   Get internal X509Request object
    #   Not used
    #   """
    #   return S_OK(self.__reqObj)
    def getPKey(self):
        """
        Get PKey Internal
        :returns: M2Crypto.EVP.PKEY object
        """
        return self.__pkeyObj
    def dumpPKey(self):
        """
        Get the private key as a string
        :returns: S_OK(PEM encoded PKey)
        """
        if not self.__valid:
            return S_ERROR(DErrno.ENOCERT)
        try:
            pkeyStr = self.__pkeyObj.as_pem(cipher=None, callback=M2Crypto.util.no_passphrase_callback).decode("ascii")
        except Exception as e:
            return S_ERROR(DErrno.EX509, f"Can't serialize pkey: {e}")
        return S_OK(pkeyStr)
    def dumpAll(self):
        """
        Dump the Request and the PKey into a string
        :returns: S_OK(PEM encoded req + key), S_ERROR
        """
        if not self.__valid:
            return S_ERROR(DErrno.ENOCERT)
        req = self.dumpRequest()
        pkey = self.dumpPKey()
        if not req["OK"]:
            return S_ERROR(DErrno.EX509, f"Can't serialize request: {req['Message']}")
        if not pkey["OK"]:
            return S_ERROR(DErrno.EX509, f"Can't serialize pkey: {pkey['Message']}")
        return S_OK(f"{req['Value']}{pkey['Value']}")
    def loadAllFromString(self, pemData):
        """load the Request and key argument from a PEM encoded string.
        :param pemData: PEN encoded string containing Request and PKey
        :returns: S_OK()
        """
        if not isinstance(pemData, bytes):
            pemData = pemData.encode("ascii")
        try:
            self.__reqObj = M2Crypto.X509.load_request_string(pemData)
        except Exception as e:
            return S_ERROR(DErrno.ENOCERT, str(e))
        try:
            self.__pkeyObj = M2Crypto.EVP.load_key_string(pemData)
        except Exception as e:
            return S_ERROR(DErrno.ENOPKEY, str(e))
        self.__valid = True
        return S_OK()
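    # Round-trip sketch (assumption, for illustration): dumpAll() concatenates
    # the PEM request and PEM key, so loadAllFromString() should be able to
    # parse that same concatenation back into an equivalent object.
    #
    #   pem = req.dumpAll()["Value"]
    #   other = X509Request()
    #   other.loadAllFromString(pem)   # expected to return S_OK()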
    # def generateChainFromResponse(self, pemData):
    #   """
    #   Generate a X509 Chain from the pkey and the pem data passed as the argument
    #   Return : S_OK( X509Chain ) / S_ERROR
    #   """
    #   if not self.__valid:
    #     return S_ERROR(DErrno.ENOCERT)
    #   chain = X509Chain()
    #   ret = chain.loadChainFromString(pemData)
    #   if not ret['OK']:
    #     return ret
    #   ret = chain.setPKey(self.__pkeyObj)
    #   if not ret['OK']:
    #     return ret
    #   return chain
    def getSubjectDN(self):
        """
        Get subject DN of the request as a string
        :return: S_OK( string )/S_ERROR
        """
        if not self.__valid:
            return S_ERROR(DErrno.ENOCERT)
        return S_OK(str(self.__reqObj.get_subject()))
    def METHOD_NAME(self, chain):
        """
        Check that the public keys of the chain and the request match.
        :param chain: :py:class:`X509Chain` object
        """
        if not self.__valid:
            return S_ERROR(DErrno.ENOCERT)
        retVal = chain.getCertInChain()
        if not retVal["OK"]:
            return retVal
        lastCert = retVal["Value"]
        chainPubKey = lastCert.getPublicKey()
        if not chainPubKey["OK"]:
            return chainPubKey
        # as_der will dump public key info, while as_pem
        # dumps private key.
        chainPubKey = chainPubKey["Value"].as_der()
        reqPubKey = self.__reqObj.get_pubkey().as_der()
        if chainPubKey != reqPubKey:
            return S_ERROR(DErrno.EX509, "Public keys do not match")
        return S_OK(True)
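    # Illustrative check of a delegated chain against this request (hedged;
    # 'delegatedChain' is a hypothetical X509Chain received back from the
    # delegation peer):
    #
    #   res = req.METHOD_NAME(delegatedChain)
    #   if not res["OK"]:
    #       pass  # the chain was not built on this request's key pair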
    def getStrength(self):
        """
        Get the length of the request key in bits
        :returns: S_OK( size )/S_ERROR
        """
        try:
            return S_OK(self.__pkeyObj.size() * 8)
        except Exception as e:
            return S_ERROR(f"Cannot get request strength: {e}") | 
| 197 | 
	test regularization | 
	#  ___________________________________________________________________________
#
#  Pyomo: Python Optimization Modeling Objects
#  Copyright (c) 2008-2022
#  National Technology and Engineering Solutions of Sandia, LLC
#  Under the terms of Contract DE-NA0003525 with National Technology and
#  Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
#  rights in this software.
#  This software is distributed under the 3-clause BSD License.
#  ___________________________________________________________________________
import pyomo.common.unittest as unittest
import pyomo.environ as pyo
from pyomo.core.base import ConcreteModel, Var, Constraint, Objective
from pyomo.common.dependencies import attempt_import
np, numpy_available = attempt_import(
    'numpy', 'Interior point requires numpy', minimum_version='1.13.0'
)
scipy, scipy_available = attempt_import('scipy', 'Interior point requires scipy')
mumps, mumps_available = attempt_import('mumps', 'Interior point requires mumps')
if not (numpy_available and scipy_available):
    raise unittest.SkipTest('Interior point tests require numpy and scipy')
if scipy_available:
    from pyomo.contrib.interior_point.linalg.scipy_interface import ScipyInterface
if mumps_available:
    from pyomo.contrib.interior_point.linalg.mumps_interface import MumpsInterface
from pyomo.contrib.pynumero.asl import AmplInterface
asl_available = AmplInterface.available()
if not asl_available:
    raise unittest.SkipTest('Regularization tests require ASL')
from pyomo.contrib.interior_point.interior_point import (
    InteriorPointSolver,
    InteriorPointStatus,
)
from pyomo.contrib.interior_point.interface import InteriorPointInterface
from pyomo.contrib.pynumero.linalg.ma27 import MA27Interface
ma27_available = MA27Interface.available()
if ma27_available:
    from pyomo.contrib.interior_point.linalg.ma27_interface import (
        InteriorPointMA27Interface,
    )
def make_model():
    m = ConcreteModel()
    m.x = Var([1, 2, 3], initialize=0)
    m.f = Var([1, 2, 3], initialize=0)
    m.F = Var(initialize=0)
    m.f[1].fix(1)
    m.f[2].fix(2)
    m.sum_con = Constraint(expr=(1 == m.x[1] + m.x[2] + m.x[3]))
    def bilin_rule(m, i):
        return m.F * m.x[i] == m.f[i]
    m.bilin_con = Constraint([1, 2, 3], rule=bilin_rule)
    m.obj = Objective(expr=m.F**2)
    return m
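# Editorial note (hedged): the bilinear constraints F*x[i] == f[i] are
# degenerate at the initial point (x = F = 0), so the first KKT factorization
# should have the wrong inertia and trigger regularization; the helper below
# expects a regularization coefficient of about 1e-4.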
def make_model_2():
    m = ConcreteModel()
    m.x = Var(initialize=0.1, bounds=(0, 1))
    m.y = Var(initialize=0.1, bounds=(0, 1))
    m.obj = Objective(expr=-m.x**2 - m.y**2)
    m.c = Constraint(expr=m.y <= pyo.exp(-m.x))
    return m
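# Editorial note (hedged): the objective -x**2 - y**2 pushes x and y as high
# as the bounds and y <= exp(-x) allow; comparing the two candidate corners,
# x = 1 with y = exp(-1) beats x = 0 with y = 1, which is exactly what
# _test_regularization_2 asserts after solving.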
class TestRegularization(unittest.TestCase):
    def METHOD_NAME(self, linear_solver):
        m = make_model()
        interface = InteriorPointInterface(m)
        ip_solver = InteriorPointSolver(linear_solver)
        ip_solver.set_interface(interface)
        interface.set_barrier_parameter(1e-1)
        # Evaluate KKT matrix before any iterations
        kkt = interface.evaluate_primal_dual_kkt_matrix()
        reg_coef = ip_solver.factorize(kkt)
        # Expected regularization coefficient:
        self.assertAlmostEqual(reg_coef, 1e-4)
        desired_n_neg_evals = (
            ip_solver.interface.n_eq_constraints()
            + ip_solver.interface.n_ineq_constraints()
        )
        # Expected inertia:
        n_pos_evals, n_neg_evals, n_null_evals = linear_solver.get_inertia()
        self.assertEqual(n_null_evals, 0)
        self.assertEqual(n_neg_evals, desired_n_neg_evals)
    @unittest.skipIf(not mumps_available, 'Mumps is not available')
    def test_mumps(self):
        solver = MumpsInterface()
        self.METHOD_NAME(solver)
    @unittest.skipIf(not scipy_available, "Scipy is not available")
    def test_scipy(self):
        solver = ScipyInterface(compute_inertia=True)
        self.METHOD_NAME(solver)
    @unittest.skipIf(not ma27_available, 'MA27 is not available')
    def test_ma27(self):
        solver = InteriorPointMA27Interface(icntl_options={1: 0, 2: 0})
        self.METHOD_NAME(solver)
    def _test_regularization_2(self, linear_solver):
        m = make_model_2()
        interface = InteriorPointInterface(m)
        ip_solver = InteriorPointSolver(linear_solver)
        status = ip_solver.solve(interface)
        self.assertEqual(status, InteriorPointStatus.optimal)
        interface.load_primals_into_pyomo_model()
        self.assertAlmostEqual(m.x.value, 1)
        self.assertAlmostEqual(m.y.value, pyo.exp(-1))
    @unittest.skipIf(not mumps_available, 'Mumps is not available')
    def test_mumps_2(self):
        solver = MumpsInterface()
        self._test_regularization_2(solver)
    @unittest.skipIf(not scipy_available, "Scipy is not available")
    def test_scipy_2(self):
        solver = ScipyInterface(compute_inertia=True)
        self._test_regularization_2(solver)
    @unittest.skipIf(not ma27_available, 'MA27 is not available')
    def test_ma27_2(self):
        solver = InteriorPointMA27Interface(icntl_options={1: 0, 2: 0})
        self._test_regularization_2(solver)
if __name__ == '__main__':
    #
    unittest.main()
    # test_reg = TestRegularization()
    # test_reg.test_regularize_mumps()
    # test_reg.test_regularize_scipy() | 
| 198 | 
	print info | 
	#!/usr/bin/python
# Monitor the write syscall
#
# Print processes and PIDs that call the write syscall, ordered by count
#
# Based on opensnoop (bcc)
#
# version 2.0
from __future__ import print_function
from bcc import BPF
from bcc.utils import printb
from ctypes import c_int
from time import sleep, strftime
import pwd
import os
import argparse
# arguments
examples = """examples:
    ./write          # trace file write info
    ./write -c       # trace TOP10 info
"""
parser = argparse.ArgumentParser(
    description="Trace file write info",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-c", "--count", action="store_true",
    help="show TOP10 info ordered by counts")
args = parser.parse_args()
# Get pid
self = os.getpid()
print('Pid: ', self)
sleep(1)
# Print interval
interval = 1
def title():
    print("Print file write info" )
    print("'-c' to show TOP10 info every %ds ordered by counts." % interval)
    print("---------------------------------------------------")
    sleep(1)
# BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <uapi/linux/limits.h>
#include <linux/sched.h>
#include <linux/fs.h>
# define FNAME_MAX 64
//for hash table
struct val_t {
    u32 pid;
    u32 uid;
    char comm[TASK_COMM_LEN];
    int pr;
    int fd;
    ssize_t ret; // number of bytes written on success, else -1
    char fstype[FNAME_MAX];
};
struct tmp_t
{
    int fd;
    ssize_t ret; 
};
BPF_HASH(write_info, u64, struct val_t);
BPF_HASH(rettmp, u64, struct tmp_t);
BPF_HASH(fdtmp, u64, struct tmp_t);
int entry_vfs_write(struct pt_regs *ctx, struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
    struct val_t val = {};
    struct tmp_t *fdp, *retp;
    struct task_struct *tp;
    struct file *fp;
    u64 id = bpf_get_current_pid_tgid();
    u32 self = id >> 32;
    if (self == %d) return 0;
 
    u64 ts = bpf_ktime_get_ns();
    if (bpf_get_current_comm(&val.comm, sizeof(val.comm)) == 0)
    {
        val.pid = id >> 32;
        val.uid = bpf_get_current_uid_gid();
        tp = (struct task_struct*)bpf_get_current_task();
        val.pr = tp->prio;
        //fp = (struct file *)PT_REGS_PARM1(ctx);
        bpf_probe_read_kernel_str(val.fstype, sizeof(val.fstype), file->f_inode->i_sb->s_type->name);
        fdp = fdtmp.lookup(&id);
        if (fdp == 0)
            return 0;
        else
        {
            val.fd = fdp->fd;
        }
        retp = rettmp.lookup(&id);
        if (retp == 0)
            return 0;
        else
        {
            val.ret = retp->ret;
        }
        write_info.update(&ts, &val);
    }
    return 0;
}
int entry_ksys_write(struct pt_regs *ctx, unsigned int fd, const char __user *buf, size_t count)
{
    struct tmp_t tmp= {};
    u64 id = bpf_get_current_pid_tgid(); 
    u32 self = id >> 32;
    if (self == %d) return 0;
    int fdt = fd;
    if (fdt >= 0)
        tmp.fd = fdt;
    else
        tmp.fd= -1;
    
    fdtmp.update(&id, &tmp);
    return 0;
}
int exit_vfs_write(struct pt_regs *ctx)
{
    struct tmp_t tmp= {};
    u64 id = bpf_get_current_pid_tgid(); 
    u32 self = id >> 32;
    if (self == %d) return 0;
    ssize_t ret = PT_REGS_RC(ctx);
    tmp.ret = ret;
    
    rettmp.update(&id, &tmp);
    return 0;
}
""" % (self,self,self)
b = BPF(text=bpf_text)
b.attach_kprobe(event="vfs_write", fn_name="entry_vfs_write")
b.attach_kprobe(event="ksys_write", fn_name="entry_ksys_write")
b.attach_kretprobe(event="vfs_write",fn_name="exit_vfs_write")
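# How the three hooks cooperate (descriptive note): entry_ksys_write stores the
# file descriptor in fdtmp and the vfs_write kretprobe stores the return value
# in rettmp, both keyed by pid_tgid; entry_vfs_write then combines them with
# comm/pid/uid/priority/filesystem type into write_info, keyed by a nanosecond
# timestamp.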
write_info = b.get_table("write_info")
def METHOD_NAME():
    title()
    while True:
        try:
            sleep(interval)
            for k, v in sorted(write_info.items(), key=lambda item: item[0].value):
                print("%-16d" % k.value ,end="")
                print("pid=%-8d" % v.pid, end="")
                print("comm=%-8s" % v.comm, end="")
                print("pr=%-6d" % v.pr, end="")
                print("user=%-6s" % pwd.getpwuid(v.uid)[0], end="")
                print("fd=%-5d" % v.fd, end="")
                if(len(v.fstype)==0):
                    print("NULL", end="")
                else:
                    print("fs=%-8s " % v.fstype.decode(), end="")
                print("ret=%-5d" % v.ret, end="")
                print()
            print()
            b['write_info'].clear()
            b['fdtmp'].clear()
            b['rettmp'].clear()
            
        except KeyboardInterrupt:
            exit()
    
def print_count():
    title()
    dic = {}
    while True:
        sleep(interval)
        for k, v in sorted(write_info.items(), key=lambda item: item[0].value):
            str = "pid=%-8d comm=%-8s pr=%-6d user=%-6s fd=%-5d fs=%-8s ret=%-5d" % \
            (v.pid, v.comm, v.pr, pwd.getpwuid(v.uid)[0], v.fd, v.fstype, v.ret)
            if dic.get(str,-1) == -1:
                dic[str]=1
            else:
                dic[str]+=1
        i = 0
        print("TIME:%-10s" % strftime("%H:%M:%S"))
        for k, v in sorted(dic.items(), key=lambda item:item[1], reverse=True):
            i += 1
            print("NO.%-4d" % (i), end="")
            print("%-4d%s" % (v, k))
            if i==10:
                break
        dic = {}
        b['write_info'].clear()
        b['fdtmp'].clear()
        b['rettmp'].clear()
if args.count:
    print_count()
else:
    METHOD_NAME()
       | 
| 199 | 
	test demux package | 
	# Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import pathlib
import tempfile
import pytest
from tcr_misc import get_sample
from lib.cuckoo.common import demux
@pytest.fixture
def grab_sample():
    def _grab_sample(sample_hash):
        sample_location = pathlib.Path(__file__).absolute().parent.as_posix() + "/test_objects/" + sample_hash
        get_sample(hash=sample_hash, download_location=sample_location)
        return sample_location
    return _grab_sample
class TestDemux:
    """ToDo reenable
    @pytest.mark.skip("Takes minutes to run, skipping!")
    def test_demux_sample_microsoft_docx(self, grab_sample):
        # .docx file
        sample_hash = "c0c1c1c852a045eb3eb3b26dad2124aea866ea008449e0d7a84925c2ded7fddb"
        sample_location = grab_sample(sample_hash)
        assert demux.demux_sample(filename=sample_location, package=None, options="foobar") == [
            pathlib.Path(__file__).absolute().parent.as_posix() + "/test_objects/" + sample_hash
        ]
    def test_demux_sample_microsoft_no_sflock_docx(self, grab_sample):
        # .docx file
        sample_hash = "c0c1c1c852a045eb3eb3b26dad2124aea866ea008449e0d7a84925c2ded7fddb"
        sample_location = grab_sample(sample_hash)
        assert demux.demux_sample(filename=sample_location, package=None, options="foobar", use_sflock=False) == [
            pathlib.Path(__file__).absolute().parent.as_posix() + "/test_objects/" + sample_hash
        ]
    def test_demux_sample_microsoft_password_no_sflock_doc(self, grab_sample):
        # password protected .doc file
        sample_hash = "d211ce5c36f630aa1e85d4f36291fee2a600216d823d23805fe41bb68ea99dbb"
        sample_location = grab_sample(sample_hash)
        assert demux.demux_sample(filename=sample_location, package=None, options="password=infected", use_sflock=False) == [
            pathlib.Path(__file__).absolute().parent.as_posix() + "/test_objects/" + sample_hash
        ]
    def test_demux_sample_microsoft_no_password_no_sflock_doc(self, grab_sample):
        # no password .doc file
        sample_hash = "d211ce5c36f630aa1e85d4f36291fee2a600216d823d23805fe41bb68ea99dbb"
        sample_location = grab_sample(sample_hash)
        assert demux.demux_sample(filename=sample_location, package=None, options="foo", use_sflock=False) == [
            pathlib.Path(__file__).absolute().parent.as_posix() + "/test_objects/" + sample_hash
        ]
    def test_demux_sample_java(self, grab_sample):
        # java class file for a simple hello world
        sample_hash = "27c428570256f0e5f8229d053f352aea4276e5c9c5a601c20e04535a8ba1e41d"
        sample_location = grab_sample(sample_hash)
        assert demux.demux_sample(filename=sample_location, package=None, options="foo", use_sflock=False) == [
            pathlib.Path(__file__).absolute().parent.as_posix() + "/test_objects/" + sample_hash
        ]
    def test_demux_sample_microsoft_outlook(self, grab_sample):
        # outlook message from https://github.com/HamiltonInsurance/outlook_msg/blob/e6c0293f098e8aee9cd4124aa6a5d409c798bc49/test_data/No%20attachment.msg
        sample_hash = "0e16568cc1e8ddda0f0856b27857d1d043d7b18909a566ae5fa2460fc8fd3614"
        sample_location = grab_sample(sample_hash)
        assert demux.demux_sample(filename=sample_location, package=None, options="foo", use_sflock=False) == [
            pathlib.Path(__file__).absolute().parent.as_posix() + "/test_objects/" + sample_hash
        ]
    def test_demux_sample_pe32(self, grab_sample):
        # pe32 from https://github.com/bootandy/dust/releases/download/v0.5.4/dust-v0.5.4-i686-pc-windows-msvc.zip
        sample_hash = "5dd87d3d6b9d8b4016e3c36b189234772661e690c21371f1eb8e018f0f0dec2b"
        sample_location = grab_sample(sample_hash)
        assert demux.demux_sample(filename=sample_location, package=None, options="foo", use_sflock=False) == [
            pathlib.Path(__file__).absolute().parent.as_posix() + "/test_objects/" + sample_hash
        ]
    """
    def METHOD_NAME(self):
        empty_file = tempfile.NamedTemporaryFile()
        assert demux.demux_sample(filename=empty_file.name, package="Emotet", options="foo", use_sflock=False) == [
            (empty_file.name, "")
        ]
        empty_file.close()
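    # Note (hedged): test_options2passwd below only exercises the call path;
    # judging from the "password=foobar" option string, options2passwd
    # presumably extracts the password value, but the test makes no assertion
    # on the result.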
    def test_options2passwd(self):
        options = "password=foobar"
        demux.options2passwd(options) | 