| title (stringclasses 1) | value (text, stringlengths 30–426k) | id (stringlengths 27–30) |
|---|---|---|
| 
	pandas/io/sql.py/read_sql_table
def read_sql_table(
    table_name: str,
    con,
    schema: str | None = None,
    index_col: str | list[str] | None = None,
    coerce_float: bool = True,
    parse_dates: list[str] | dict[str, str] | None = None,
    columns: list[str] | None = None,
    chunksize: int | None = None,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
) -> DataFrame | Iterator[DataFrame]:
    """
    Read SQL database table into a DataFrame.
    Given a table name and a SQLAlchemy connectable, returns a DataFrame.
    This function does not support DBAPI connections.
    Parameters
    ----------
    table_name : str
        Name of SQL table in database.
    con : SQLAlchemy connectable or str
        A database URI can be provided as a str.
        SQLite DBAPI connection mode is not supported.
    schema : str, default None
        Name of SQL schema in database to query (if database flavor
        supports this). Uses default schema if None (default).
    index_col : str or list of str, optional, default: None
        Column(s) to set as index (MultiIndex).
    coerce_float : bool, default True
        Attempts to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
    parse_dates : list or dict, default None
        - List of column names to parse as dates.
        - Dict of ``{column_name: format string}`` where format string is
          strftime compatible in case of parsing string times or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps.
        - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
          to the keyword arguments of :func:`pandas.to_datetime`.
          Especially useful with databases without native Datetime support,
          such as SQLite.
    columns : list, default None
        List of column names to select from SQL table.
    chunksize : int, default None
        If specified, returns an iterator where `chunksize` is the number of
        rows to include in each chunk.
    dtype_backend : {'numpy_nullable', 'pyarrow'}
        Back-end data type applied to the resultant :class:`DataFrame`
        (still experimental). If not specified, the default behavior
        is to not use nullable data types. If specified, the behavior
        is as follows:
        * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
        * ``"pyarrow"``: returns pyarrow-backed nullable
          :class:`ArrowDtype` :class:`DataFrame`
        .. versionadded:: 2.0
    Returns
    -------
    DataFrame or Iterator[DataFrame]
        A SQL table is returned as a two-dimensional data structure with labeled
        axes.
    See Also
    --------
    read_sql_query : Read SQL query into a DataFrame.
    read_sql : Read SQL query or database table into a DataFrame.
    Notes
    -----
    Any datetime values with time zone information will be converted to UTC.
    Examples
    --------
    >>> pd.read_sql_table("table_name", "postgres:///db_name")  # doctest:+SKIP
    """
    check_dtype_backend(dtype_backend)
    if dtype_backend is lib.no_default:
        dtype_backend = "numpy"  # type: ignore[assignment]
    assert dtype_backend is not lib.no_default
    with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql:
        if not pandas_sql.has_table(table_name):
            raise ValueError(f"Table {table_name} not found")
        table = pandas_sql.read_table(
            table_name,
            index_col=index_col,
            coerce_float=coerce_float,
            parse_dates=parse_dates,
            columns=columns,
            chunksize=chunksize,
            dtype_backend=dtype_backend,
        )
    if table is not None:
        return table
    else:
        raise ValueError(f"Table {table_name} not found", con) | 
	apositive_train_query0_00000 | |
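For orientation, a minimal usage sketch (not from the pandas docs; the table and column names here are illustrative, and SQLAlchemy is assumed to be installed). It round-trips a small table through an in-memory SQLite database, since read_sql_table requires a SQLAlchemy connectable:

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("sqlite://")  # in-memory SQLite via SQLAlchemy
pd.DataFrame(
    {"a": [1, 2], "ts": pd.to_datetime(["2023-01-01", "2023-01-02"])}
).to_sql("demo", engine, index=False)

# SQLite has no native datetime type; parse_dates ensures "ts" comes back
# as datetime64[ns] regardless of how the driver reports the column
df = pd.read_sql_table("demo", engine, parse_dates=["ts"])
print(df.dtypes)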
| 
	pandas/io/sql.py/read_sql_query
def read_sql_query(
    sql,
    con,
    index_col: str | list[str] | None = None,
    coerce_float: bool = True,
    params: list[Any] | Mapping[str, Any] | None = None,
    parse_dates: list[str] | dict[str, str] | None = None,
    chunksize: int | None = None,
    dtype: DtypeArg | None = None,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
) -> DataFrame | Iterator[DataFrame]:
    """
    Read SQL query into a DataFrame.
    Returns a DataFrame corresponding to the result set of the query
    string. Optionally provide an `index_col` parameter to use one of the
    columns as the index; otherwise the default integer index will be used.
    Parameters
    ----------
    sql : str SQL query or SQLAlchemy Selectable (select or text object)
        SQL query to be executed.
    con : SQLAlchemy connectable, str, or sqlite3 connection
        Using SQLAlchemy makes it possible to use any DB supported by that
        library. If a DBAPI2 object, only sqlite3 is supported.
    index_col : str or list of str, optional, default: None
        Column(s) to set as index (MultiIndex).
    coerce_float : bool, default True
        Attempts to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Useful for SQL result sets.
    params : list, tuple or mapping, optional, default: None
        List of parameters to pass to the execute method. The syntax used
        to pass parameters is database-driver dependent. Check your
        database driver documentation for which of the five syntax styles,
        described in PEP 249's paramstyle, is supported.
        E.g. for psycopg2, use ``%(name)s`` and ``params={'name': 'value'}``.
    parse_dates : list or dict, default: None
        - List of column names to parse as dates.
        - Dict of ``{column_name: format string}`` where format string is
          strftime compatible in case of parsing string times, or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps.
        - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
          to the keyword arguments of :func:`pandas.to_datetime`.
          Especially useful with databases without native Datetime support,
          such as SQLite.
    chunksize : int, default None
        If specified, return an iterator where `chunksize` is the number of
        rows to include in each chunk.
    dtype : Type name or dict of columns
        Data type for data or columns. E.g. np.float64 or
        {'a': np.float64, 'b': np.int32, 'c': 'Int64'}.
        .. versionadded:: 1.3.0
    dtype_backend : {'numpy_nullable', 'pyarrow'}
        Back-end data type applied to the resultant :class:`DataFrame`
        (still experimental). If not specified, the default behavior
        is to not use nullable data types. If specified, the behavior
        is as follows:
        * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
        * ``"pyarrow"``: returns pyarrow-backed nullable
          :class:`ArrowDtype` :class:`DataFrame`
        .. versionadded:: 2.0
    Returns
    -------
    DataFrame or Iterator[DataFrame]
        Returns a DataFrame object that contains the result set of the
        executed SQL query, in relation to the specified database connection.
    See Also
    --------
    read_sql_table : Read SQL database table into a DataFrame.
    read_sql : Read SQL query or database table into a DataFrame.
    Notes
    -----
    Any datetime values with time zone information parsed via the `parse_dates`
    parameter will be converted to UTC.
    Examples
    --------
    >>> from sqlalchemy import create_engine  # doctest: +SKIP
    >>> engine = create_engine("sqlite:///database.db")  # doctest: +SKIP
    >>> sql_query = "SELECT int_column FROM test_data"  # doctest: +SKIP
    >>> with engine.connect() as conn, conn.begin():  # doctest: +SKIP
    ...     data = pd.read_sql_query(sql_query, conn)  # doctest: +SKIP
    """
    check_dtype_backend(dtype_backend)
    if dtype_backend is lib.no_default:
        dtype_backend = "numpy"  # type: ignore[assignment]
    assert dtype_backend is not lib.no_default
    with pandasSQL_builder(con) as pandas_sql:
        return pandas_sql.read_query(
            sql,
            index_col=index_col,
            params=params,
            coerce_float=coerce_float,
            parse_dates=parse_dates,
            chunksize=chunksize,
            dtype=dtype,
            dtype_backend=dtype_backend,
        ) | 
	apositive_train_query0_00001 | |
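A minimal sketch of the params mechanics using only the stdlib sqlite3 driver (the table and values are illustrative). sqlite3 uses the qmark paramstyle, so parameters are passed positionally:

import sqlite3

import pandas as pd

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE test_data (int_column INTEGER)")
con.executemany("INSERT INTO test_data VALUES (?)", [(1,), (2,), (3,)])

# each "?" placeholder is filled from params, left to right
df = pd.read_sql_query(
    "SELECT int_column FROM test_data WHERE int_column > ?", con, params=(2,)
)
print(df)  # a single row with int_column == 3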
| 
	setup.py/is_platform_windows
def is_platform_windows():
    return sys.platform in ("win32", "cygwin") | 
	negative_train_query0_00000 | |
| 
	setup.py/is_platform_mac
def is_platform_mac():
    return sys.platform == "darwin" | 
	negative_train_query0_00001 | |
| 
	setup.py/build_ext/render_templates
class build_ext:
def render_templates(cls, pxifiles) -> None:
        for pxifile in pxifiles:
            # build pxifiles first, template extension must be .pxi.in
            assert pxifile.endswith(".pxi.in")
            outfile = pxifile[:-3]
            if (
                os.path.exists(outfile)
                and os.stat(pxifile).st_mtime < os.stat(outfile).st_mtime
            ):
                # if .pxi.in is not updated, no need to output .pxi
                continue
            with open(pxifile, encoding="utf-8") as f:
                tmpl = f.read()
            pyxcontent = Tempita.sub(tmpl)
            with open(outfile, "w", encoding="utf-8") as f:
                f.write(pyxcontent) | 
	negative_train_query0_00002 | |
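To illustrate the templating step, here is a tiny expansion in the style these .pxi.in files use (a sketch; the import assumes the Tempita copy bundled with Cython):

from Cython import Tempita  # Tempita ships bundled with Cython

tmpl = """\
{{py: dtypes = ["int64", "float64"]}}
{{for dt in dtypes}}
# generated block for {{dt}}
{{endfor}}"""

print(Tempita.sub(tmpl))  # emits one "# generated block ..." line per dtype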
| 
	setup.py/build_ext/build_extensions
class build_ext:
def build_extensions(self) -> None:
        # if building from c files, don't need to
        # generate template output
        if _CYTHON_INSTALLED:
            self.render_templates(_pxifiles)
        super().build_extensions() | 
	negative_train_query0_00003 | |
| 
	setup.py/CleanCommand/initialize_options
class CleanCommand:
def initialize_options(self) -> None:
        self.all = True
        self._clean_me = []
        self._clean_trees = []
        base = pjoin("pandas", "_libs", "src")
        parser = pjoin(base, "parser")
        vendored = pjoin(base, "vendored")
        dt = pjoin(base, "datetime")
        ujson_python = pjoin(vendored, "ujson", "python")
        ujson_lib = pjoin(vendored, "ujson", "lib")
        self._clean_exclude = [
            pjoin(vendored, "numpy", "datetime", "np_datetime.c"),
            pjoin(vendored, "numpy", "datetime", "np_datetime_strings.c"),
            pjoin(dt, "date_conversions.c"),
            pjoin(parser, "tokenizer.c"),
            pjoin(parser, "io.c"),
            pjoin(ujson_python, "ujson.c"),
            pjoin(ujson_python, "objToJSON.c"),
            pjoin(ujson_python, "JSONtoObj.c"),
            pjoin(ujson_lib, "ultrajsonenc.c"),
            pjoin(ujson_lib, "ultrajsondec.c"),
            pjoin(dt, "pd_datetime.c"),
            pjoin(parser, "pd_parser.c"),
        ]
        for root, dirs, files in os.walk("pandas"):
            for f in files:
                filepath = pjoin(root, f)
                if filepath in self._clean_exclude:
                    continue
                if os.path.splitext(f)[-1] in (
                    ".pyc",
                    ".so",
                    ".o",
                    ".pyo",
                    ".pyd",
                    ".c",
                    ".cpp",
                    ".orig",
                ):
                    self._clean_me.append(filepath)
            self._clean_trees.extend(pjoin(root, d) for d in dirs if d == "__pycache__")
        # clean the generated pxi files
        for pxifile in _pxifiles:
            pxifile_replaced = pxifile.replace(".pxi.in", ".pxi")
            self._clean_me.append(pxifile_replaced)
        self._clean_trees.extend(d for d in ("build", "dist") if os.path.exists(d)) | 
	negative_train_query0_00004 | |
| 
	setup.py/CleanCommand/finalize_options
class CleanCommand:
def finalize_options(self) -> None:
        pass | 
	negative_train_query0_00005 | |
| 
	setup.py/CleanCommand/run
class CleanCommand:
def run(self) -> None:
        for clean_me in self._clean_me:
            try:
                os.unlink(clean_me)
            except OSError:
                pass
        for clean_tree in self._clean_trees:
            try:
                shutil.rmtree(clean_tree)
            except OSError:
                pass | 
	negative_train_query0_00006 | |
| 
	setup.py/CheckSDist/initialize_options
class CheckSDist:
def initialize_options(self) -> None:
        sdist_class.initialize_options(self) | 
	negative_train_query0_00007 | |
| 
	setup.py/CheckSDist/run
class CheckSDist:
def run(self) -> None:
        if "cython" in cmdclass:
            self.run_command("cython")
        else:
            # If we are not running cython then
            # compile the extensions correctly
            pyx_files = [(self._pyxfiles, "c"), (self._cpp_pyxfiles, "cpp")]
            for pyxfiles, extension in pyx_files:
                for pyxfile in pyxfiles:
                    sourcefile = pyxfile[:-3] + extension
                    msg = (
                        f"{extension}-source file '{sourcefile}' not found.\n"
                        "Run 'setup.py cython' before sdist."
                    )
                    assert os.path.isfile(sourcefile), msg
        sdist_class.run(self) | 
	negative_train_query0_00008 | |
| 
	setup.py/CheckingBuildExt/check_cython_extensions
class CheckingBuildExt:
def check_cython_extensions(self, extensions) -> None:
        for ext in extensions:
            for src in ext.sources:
                if not os.path.exists(src):
                    print(f"{ext.name}: -> [{ext.sources}]")
                    raise Exception(
                        f"""Cython-generated file '{src}' not found.
                Cython is required to compile pandas from a development branch.
                Please install Cython or download a release package of pandas.
                """
                    ) | 
	negative_train_query0_00009 | |
| 
	setup.py/CheckingBuildExt/build_extensions
class CheckingBuildExt:
def build_extensions(self) -> None:
        self.check_cython_extensions(self.extensions)
        build_ext.build_extensions(self) | 
	negative_train_query0_00010 | |
| 
	setup.py/CythonCommand/build_extension
class CythonCommand:
def build_extension(self, ext) -> None:
        pass | 
	negative_train_query0_00011 | |
| 
	setup.py/DummyBuildSrc/initialize_options
class DummyBuildSrc:
def initialize_options(self) -> None:
        self.py_modules_dict = {} | 
	negative_train_query0_00012 | |
| 
	setup.py/DummyBuildSrc/finalize_options
class DummyBuildSrc:
def finalize_options(self) -> None:
        pass | 
	negative_train_query0_00013 | |
| 
	setup.py/DummyBuildSrc/run
class DummyBuildSrc:
def run(self) -> None:
        pass | 
	negative_train_query0_00014 | |
| 
	setup.py/maybe_cythonize
def maybe_cythonize(extensions, *args, **kwargs):
    """
    Render tempita templates before calling cythonize. This is skipped for
    * clean
    * sdist
    """
    if "clean" in sys.argv or "sdist" in sys.argv:
        # See https://github.com/cython/cython/issues/1495
        return extensions
    elif not _CYTHON_INSTALLED:
    # GH#28836 raise a helpful error message
        if _CYTHON_VERSION:
            raise RuntimeError(
                f"Cannot cythonize with old Cython version ({_CYTHON_VERSION} "
                f"installed, needs {min_cython_ver})"
            )
        raise RuntimeError("Cannot cythonize without Cython installed.")
    # reuse any parallel arguments provided for compilation to cythonize
    parser = argparse.ArgumentParser()
    parser.add_argument("--parallel", "-j", type=int, default=1)
    parsed, _ = parser.parse_known_args()
    kwargs["nthreads"] = parsed.parallel
    build_ext.render_templates(_pxifiles)
    if debugging_symbols_requested:
        kwargs["gdb_debug"] = True
    return cythonize(extensions, *args, **kwargs) | 
	negative_train_query0_00015 | |
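The parse_known_args call above is what lets maybe_cythonize piggyback on the -j/--parallel build flag without tripping over the rest of the command line; a self-contained check (the argv list is illustrative):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--parallel", "-j", type=int, default=1)
# parse_known_args returns unrecognized tokens instead of erroring on them
parsed, rest = parser.parse_known_args(["build_ext", "-j", "4"])
assert parsed.parallel == 4 and rest == ["build_ext"]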
| 
	setup.py/srcpath
def srcpath(name=None, suffix=".pyx", subdir="src"):
    return pjoin("pandas", subdir, name + suffix) | 
	negative_train_query0_00016 | |
| 
	generate_pxi.py/process_tempita
def process_tempita(pxifile, outfile) -> None:
    with open(pxifile, encoding="utf-8") as f:
        tmpl = f.read()
    pyxcontent = Tempita.sub(tmpl)
    with open(outfile, "w", encoding="utf-8") as f:
        f.write(pyxcontent) | 
	negative_train_query0_00017 | |
| 
	generate_pxi.py/main
def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("infile", type=str, help="Path to the input file")
    parser.add_argument("-o", "--outdir", type=str, help="Path to the output directory")
    args = parser.parse_args()
    if not args.infile.endswith(".in"):
        raise ValueError(f"Unexpected extension: {args.infile}")
    outdir_abs = os.path.join(os.getcwd(), args.outdir)
    outfile = os.path.join(
        outdir_abs, os.path.splitext(os.path.split(args.infile)[1])[0]
    )
    process_tempita(args.infile, outfile) | 
	negative_train_query0_00018 | |
| 
	generate_version.py/write_version_info
def write_version_info(path) -> None:
    version = None
    git_version = None
    try:
        import _version_meson
        version = _version_meson.__version__
        git_version = _version_meson.__git_version__
    except ImportError:
        version = versioneer.get_version()
        git_version = versioneer.get_versions()["full-revisionid"]
    if os.environ.get("MESON_DIST_ROOT"):
        path = os.path.join(os.environ.get("MESON_DIST_ROOT"), path)
    with open(path, "w", encoding="utf-8") as file:
        file.write(f'__version__="{version}"\n')
        file.write(f'__git_version__="{git_version}"\n') | 
	negative_train_query0_00019 | |
| 
	generate_version.py/main
def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-o",
        "--outfile",
        type=str,
        help="Path to write version info to",
        required=False,
    )
    parser.add_argument(
        "--print",
        default=False,
        action="store_true",
        help="Whether to print out the version",
        required=False,
    )
    args = parser.parse_args()
    if args.outfile:
        if not args.outfile.endswith(".py"):
            raise ValueError(
                f"Output file must be a Python file. "
                f"Got: {args.outfile} as filename instead"
            )
        write_version_info(args.outfile)
    if args.print:
        try:
            import _version_meson
            version = _version_meson.__version__
        except ImportError:
            version = versioneer.get_version()
        print(version) | 
	negative_train_query0_00020 | |
| 
	asv_bench/benchmarks/indexing_engines.py/_get_numeric_engines
def _get_numeric_engines():
    engine_names = [
        ("Int64Engine", np.int64),
        ("Int32Engine", np.int32),
        ("Int16Engine", np.int16),
        ("Int8Engine", np.int8),
        ("UInt64Engine", np.uint64),
        ("UInt32Engine", np.uint32),
        ("UInt16engine", np.uint16),
        ("UInt8Engine", np.uint8),
        ("Float64Engine", np.float64),
        ("Float32Engine", np.float32),
    ]
    return [
        (getattr(libindex, engine_name), dtype)
        for engine_name, dtype in engine_names
        if hasattr(libindex, engine_name)
    ] | 
	negative_train_query0_00021 | |
| 
	asv_bench/benchmarks/indexing_engines.py/_get_masked_engines
def _get_masked_engines():
    engine_names = [
        ("MaskedInt64Engine", "Int64"),
        ("MaskedInt32Engine", "Int32"),
        ("MaskedInt16Engine", "Int16"),
        ("MaskedInt8Engine", "Int8"),
        ("MaskedUInt64Engine", "UInt64"),
        ("MaskedUInt32Engine", "UInt32"),
        ("MaskedUInt16engine", "UInt16"),
        ("MaskedUInt8Engine", "UInt8"),
        ("MaskedFloat64Engine", "Float64"),
        ("MaskedFloat32Engine", "Float32"),
    ]
    return [
        (getattr(libindex, engine_name), dtype)
        for engine_name, dtype in engine_names
        if hasattr(libindex, engine_name)
    ] | 
	negative_train_query0_00022 | |
| 
	asv_bench/benchmarks/indexing_engines.py/NumericEngineIndexing/setup
class NumericEngineIndexing:
def setup(self, engine_and_dtype, index_type, unique, N):
        engine, dtype = engine_and_dtype
        if index_type == "monotonic_incr":
            if unique:
                arr = np.arange(N * 3, dtype=dtype)
            else:
                arr = np.array([1, 2, 3], dtype=dtype).repeat(N)
        elif index_type == "monotonic_decr":
            if unique:
                arr = np.arange(N * 3, dtype=dtype)[::-1]
            else:
                arr = np.array([3, 2, 1], dtype=dtype).repeat(N)
        else:
            assert index_type == "non_monotonic"
            if unique:
                arr = np.empty(N * 3, dtype=dtype)
                arr[:N] = np.arange(N * 2, N * 3, dtype=dtype)
                arr[N:] = np.arange(N * 2, dtype=dtype)
            else:
                arr = np.array([1, 2, 3], dtype=dtype).repeat(N)
        self.data = engine(arr)
        # code below avoids populating the mapping etc. while timing.
        self.data.get_loc(2)
        self.key_middle = arr[len(arr) // 2]
        self.key_early = arr[2] | 
	negative_train_query0_00023 | |
| 
	asv_bench/benchmarks/indexing_engines.py/NumericEngineIndexing/time_get_loc
class NumericEngineIndexing:
def time_get_loc(self, engine_and_dtype, index_type, unique, N):
        self.data.get_loc(self.key_early) | 
	negative_train_query0_00024 | |
| 
	asv_bench/benchmarks/indexing_engines.py/NumericEngineIndexing/time_get_loc_near_middle
class NumericEngineIndexing:
def time_get_loc_near_middle(self, engine_and_dtype, index_type, unique, N):
        # searchsorted performance may be different near the middle of a range
        #  vs near an endpoint
        self.data.get_loc(self.key_middle) | 
	negative_train_query0_00025 | |
| 
	asv_bench/benchmarks/indexing_engines.py/MaskedNumericEngineIndexing/setup
class MaskedNumericEngineIndexing:
def setup(self, engine_and_dtype, index_type, unique, N):
        engine, dtype = engine_and_dtype
        dtype = dtype.lower()
        if index_type == "monotonic_incr":
            if unique:
                arr = np.arange(N * 3, dtype=dtype)
            else:
                arr = np.array([1, 2, 3], dtype=dtype).repeat(N)
            mask = np.zeros(N * 3, dtype=np.bool_)
        elif index_type == "monotonic_decr":
            if unique:
                arr = np.arange(N * 3, dtype=dtype)[::-1]
            else:
                arr = np.array([3, 2, 1], dtype=dtype).repeat(N)
            mask = np.zeros(N * 3, dtype=np.bool_)
        else:
            assert index_type == "non_monotonic"
            if unique:
                arr = np.zeros(N * 3, dtype=dtype)
                arr[:N] = np.arange(N * 2, N * 3, dtype=dtype)
                arr[N:] = np.arange(N * 2, dtype=dtype)
            else:
                arr = np.array([1, 2, 3], dtype=dtype).repeat(N)
            mask = np.zeros(N * 3, dtype=np.bool_)
            mask[-1] = True
        self.data = engine(BaseMaskedArray(arr, mask))
        # code below avoids populating the mapping etc. while timing.
        self.data.get_loc(2)
        self.key_middle = arr[len(arr) // 2]
        self.key_early = arr[2] | 
	negative_train_query0_00026 | |
| 
	asv_bench/benchmarks/indexing_engines.py/MaskedNumericEngineIndexing/time_get_loc
class MaskedNumericEngineIndexing:
def time_get_loc(self, engine_and_dtype, index_type, unique, N):
        self.data.get_loc(self.key_early) | 
	negative_train_query0_00027 | |
| 
	asv_bench/benchmarks/indexing_engines.py/MaskedNumericEngineIndexing/time_get_loc_near_middle
class MaskedNumericEngineIndexing:
def time_get_loc_near_middle(self, engine_and_dtype, index_type, unique, N):
        # searchsorted performance may be different near the middle of a range
        #  vs near an endpoint
        self.data.get_loc(self.key_middle) | 
	negative_train_query0_00028 | |
| 
	asv_bench/benchmarks/indexing_engines.py/ObjectEngineIndexing/setup
class ObjectEngineIndexing:
def setup(self, index_type):
        N = 10**5
        values = list("a" * N + "b" * N + "c" * N)
        arr = {
            "monotonic_incr": np.array(values, dtype=object),
            "monotonic_decr": np.array(list(reversed(values)), dtype=object),
            "non_monotonic": np.array(list("abc") * N, dtype=object),
        }[index_type]
        self.data = libindex.ObjectEngine(arr)
        # code below avoids populating the mapping etc. while timing.
        self.data.get_loc("b") | 
	negative_train_query0_00029 | |
| 
	asv_bench/benchmarks/indexing_engines.py/ObjectEngineIndexing/time_get_loc
class ObjectEngineIndexing:
def time_get_loc(self, index_type):
        self.data.get_loc("b") | 
	negative_train_query0_00030 | |
| 
	asv_bench/benchmarks/timedelta.py/DatetimeAccessor/setup_cache
class DatetimeAccessor:
def setup_cache(self):
        N = 100000
        series = Series(timedelta_range("1 days", periods=N, freq="h"))
        return series | 
	negative_train_query0_00031 | |
| 
	asv_bench/benchmarks/timedelta.py/DatetimeAccessor/time_dt_accessor
class DatetimeAccessor:
def time_dt_accessor(self, series):
        series.dt | 
	negative_train_query0_00032 | |
| 
	asv_bench/benchmarks/timedelta.py/DatetimeAccessor/time_timedelta_days
class DatetimeAccessor:
def time_timedelta_days(self, series):
        series.dt.days | 
	negative_train_query0_00033 | |
| 
	asv_bench/benchmarks/timedelta.py/DatetimeAccessor/time_timedelta_seconds
class DatetimeAccessor:
def time_timedelta_seconds(self, series):
        series.dt.seconds | 
	negative_train_query0_00034 | |
| 
	asv_bench/benchmarks/timedelta.py/DatetimeAccessor/time_timedelta_microseconds
class DatetimeAccessor:
def time_timedelta_microseconds(self, series):
        series.dt.microseconds | 
	negative_train_query0_00035 | |
| 
	asv_bench/benchmarks/timedelta.py/DatetimeAccessor/time_timedelta_nanoseconds
class DatetimeAccessor:
def time_timedelta_nanoseconds(self, series):
        series.dt.nanoseconds | 
	negative_train_query0_00036 | |
| 
	asv_bench/benchmarks/timedelta.py/TimedeltaIndexing/setup
class TimedeltaIndexing:
def setup(self):
        self.index = timedelta_range(start="1985", periods=1000, freq="D")
        self.index2 = timedelta_range(start="1986", periods=1000, freq="D")
        self.series = Series(range(1000), index=self.index)
        self.timedelta = self.index[500] | 
	negative_train_query0_00037 | |
| 
	asv_bench/benchmarks/timedelta.py/TimedeltaIndexing/time_get_loc
class TimedeltaIndexing:
def time_get_loc(self):
        self.index.get_loc(self.timedelta) | 
	negative_train_query0_00038 | |
| 
	asv_bench/benchmarks/timedelta.py/TimedeltaIndexing/time_shallow_copy
class TimedeltaIndexing:
def time_shallow_copy(self):
        self.index._view() | 
	negative_train_query0_00039 | |
| 
	asv_bench/benchmarks/timedelta.py/TimedeltaIndexing/time_series_loc
class TimedeltaIndexing:
def time_series_loc(self):
        self.series.loc[self.timedelta] | 
	negative_train_query0_00040 | |
| 
	asv_bench/benchmarks/timedelta.py/TimedeltaIndexing/time_align
class TimedeltaIndexing:
def time_align(self):
        DataFrame({"a": self.series, "b": self.series[:500]}) | 
	negative_train_query0_00041 | |
| 
	asv_bench/benchmarks/timedelta.py/TimedeltaIndexing/time_intersection
class TimedeltaIndexing:
def time_intersection(self):
        self.index.intersection(self.index2) | 
	negative_train_query0_00042 | |
| 
	asv_bench/benchmarks/timedelta.py/TimedeltaIndexing/time_union
class TimedeltaIndexing:
def time_union(self):
        self.index.union(self.index2) | 
	negative_train_query0_00043 | |
| 
	asv_bench/benchmarks/timedelta.py/TimedeltaIndexing/time_unique
class TimedeltaIndexing:
def time_unique(self):
        self.index.unique() | 
	negative_train_query0_00044 | |
| 
	asv_bench/benchmarks/strings.py/Dtypes/setup
class Dtypes:
def setup(self, dtype):
        try:
            self.s = Series(
                Index([f"i-{i}" for i in range(10000)], dtype=object)._values,
                dtype=dtype,
            )
        except ImportError as err:
            raise NotImplementedError from err | 
	negative_train_query0_00045 | |
| 
	asv_bench/benchmarks/strings.py/Construction/setup
class Construction:
def setup(self, pd_type, dtype):
        series_arr = np.array(
            [str(i) * 10 for i in range(100_000)], dtype=self.dtype_mapping[dtype]
        )
        if pd_type == "series":
            self.arr = series_arr
        elif pd_type == "frame":
            self.arr = series_arr.reshape((50_000, 2)).copy()
        elif pd_type == "categorical_series":
            # GH37371. Testing construction of string series/frames from ExtensionArrays
            self.arr = Categorical(series_arr) | 
	negative_train_query0_00046 | |
| 
	asv_bench/benchmarks/strings.py/Construction/time_construction
class Construction:
def time_construction(self, pd_type, dtype):
        self.pd_mapping[pd_type](self.arr, dtype=dtype) | 
	negative_train_query0_00047 | |
| 
	asv_bench/benchmarks/strings.py/Construction/peakmem_construction
class Construction:
def peakmem_construction(self, pd_type, dtype):
        self.pd_mapping[pd_type](self.arr, dtype=dtype) | 
	negative_train_query0_00048 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_center
class Methods:
def time_center(self, dtype):
        self.s.str.center(100) | 
	negative_train_query0_00049 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_count
class Methods:
def time_count(self, dtype):
        self.s.str.count("A") | 
	negative_train_query0_00050 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_endswith
class Methods:
def time_endswith(self, dtype):
        self.s.str.endswith("A") | 
	negative_train_query0_00051 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_extract
class Methods:
def time_extract(self, dtype):
        with warnings.catch_warnings(record=True):
            self.s.str.extract("(\\w*)A(\\w*)") | 
	negative_train_query0_00052 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_findall
class Methods:
def time_findall(self, dtype):
        self.s.str.findall("[A-Z]+") | 
	negative_train_query0_00053 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_find
class Methods:
def time_find(self, dtype):
        self.s.str.find("[A-Z]+") | 
	negative_train_query0_00054 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_rfind
class Methods:
def time_rfind(self, dtype):
        self.s.str.rfind("[A-Z]+") | 
	negative_train_query0_00055 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_fullmatch
class Methods:
def time_fullmatch(self, dtype):
        self.s.str.fullmatch("A") | 
	negative_train_query0_00056 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_get
class Methods:
def time_get(self, dtype):
        self.s.str.get(0) | 
	negative_train_query0_00057 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_len
class Methods:
def time_len(self, dtype):
        self.s.str.len() | 
	negative_train_query0_00058 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_join
class Methods:
def time_join(self, dtype):
        self.s.str.join(" ") | 
	negative_train_query0_00059 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_match
class Methods:
def time_match(self, dtype):
        self.s.str.match("A") | 
	negative_train_query0_00060 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_normalize
class Methods:
def time_normalize(self, dtype):
        self.s.str.normalize("NFC") | 
	negative_train_query0_00061 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_pad
class Methods:
def time_pad(self, dtype):
        self.s.str.pad(100, side="both") | 
	negative_train_query0_00062 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_partition
class Methods:
def time_partition(self, dtype):
        self.s.str.partition("A") | 
	negative_train_query0_00063 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_rpartition
class Methods:
def time_rpartition(self, dtype):
        self.s.str.rpartition("A") | 
	negative_train_query0_00064 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_replace
class Methods:
def time_replace(self, dtype):
        self.s.str.replace("A", "\x01\x01") | 
	negative_train_query0_00065 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_translate
class Methods:
def time_translate(self, dtype):
        self.s.str.translate({"A": "\x01\x01"}) | 
	negative_train_query0_00066 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_slice
class Methods:
def time_slice(self, dtype):
        self.s.str.slice(5, 15, 2) | 
	negative_train_query0_00067 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_startswith
class Methods:
def time_startswith(self, dtype):
        self.s.str.startswith("A") | 
	negative_train_query0_00068 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_strip
class Methods:
def time_strip(self, dtype):
        self.s.str.strip("A") | 
	negative_train_query0_00069 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_rstrip
class Methods:
def time_rstrip(self, dtype):
        self.s.str.rstrip("A") | 
	negative_train_query0_00070 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_lstrip
class Methods:
def time_lstrip(self, dtype):
        self.s.str.lstrip("A") | 
	negative_train_query0_00071 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_title
class Methods:
def time_title(self, dtype):
        self.s.str.title() | 
	negative_train_query0_00072 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_upper
class Methods:
def time_upper(self, dtype):
        self.s.str.upper() | 
	negative_train_query0_00073 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_lower
class Methods:
def time_lower(self, dtype):
        self.s.str.lower() | 
	negative_train_query0_00074 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_wrap
class Methods:
def time_wrap(self, dtype):
        self.s.str.wrap(10) | 
	negative_train_query0_00075 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_zfill
class Methods:
def time_zfill(self, dtype):
        self.s.str.zfill(10) | 
	negative_train_query0_00076 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_isalnum
class Methods:
def time_isalnum(self, dtype):
        self.s.str.isalnum() | 
	negative_train_query0_00077 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_isalpha
class Methods:
def time_isalpha(self, dtype):
        self.s.str.isalpha() | 
	negative_train_query0_00078 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_isdecimal
class Methods:
def time_isdecimal(self, dtype):
        self.s.str.isdecimal() | 
	negative_train_query0_00079 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_isdigit
class Methods:
def time_isdigit(self, dtype):
        self.s.str.isdigit() | 
	negative_train_query0_00080 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_islower
class Methods:
def time_islower(self, dtype):
        self.s.str.islower() | 
	negative_train_query0_00081 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_isnumeric
class Methods:
def time_isnumeric(self, dtype):
        self.s.str.isnumeric() | 
	negative_train_query0_00082 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_isspace
class Methods:
def time_isspace(self, dtype):
        self.s.str.isspace() | 
	negative_train_query0_00083 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_istitle
class Methods:
def time_istitle(self, dtype):
        self.s.str.istitle() | 
	negative_train_query0_00084 | |
| 
	asv_bench/benchmarks/strings.py/Methods/time_isupper
class Methods:
def time_isupper(self, dtype):
        self.s.str.isupper() | 
	negative_train_query0_00085 | |
| 
	asv_bench/benchmarks/strings.py/Repeat/setup
class Repeat:
def setup(self, repeats):
        N = 10**5
        self.s = Series(Index([f"i-{i}" for i in range(N)], dtype=object))
        repeat = {"int": 1, "array": np.random.randint(1, 3, N)}
        self.values = repeat[repeats] | 
	negative_train_query0_00086 | |
| 
	asv_bench/benchmarks/strings.py/Repeat/time_repeat
class Repeat:
def time_repeat(self, repeats):
        self.s.str.repeat(self.values) | 
	negative_train_query0_00087 | |
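For reference, the two repeats flavors being parameterized behave as follows (a quick sketch, not part of the benchmark file):

import numpy as np

import pandas as pd

s = pd.Series(["a", "b", "c"])
print(s.str.repeat(2).tolist())                    # ['aa', 'bb', 'cc']
print(s.str.repeat(np.array([1, 2, 3])).tolist())  # ['a', 'bb', 'ccc']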
| 
	asv_bench/benchmarks/strings.py/Cat/setup
class Cat:
def setup(self, other_cols, sep, na_rep, na_frac):
        N = 10**5
        mask_gen = lambda: np.random.choice([True, False], N, p=[1 - na_frac, na_frac])
        self.s = Series(Index([f"i-{i}" for i in range(N)], dtype=object)).where(
            mask_gen()
        )
        if other_cols == 0:
            # str.cat self-concatenates only for others=None
            self.others = None
        else:
            self.others = DataFrame(
                {
                    i: Index([f"i-{i}" for i in range(N)], dtype=object).where(
                        mask_gen()
                    )
                    for i in range(other_cols)
                }
            ) | 
	negative_train_query0_00088 | |
| 
	asv_bench/benchmarks/strings.py/Cat/time_cat
class Cat:
def time_cat(self, other_cols, sep, na_rep, na_frac):
        # before the concatenation (one caller + other_cols columns), the total
        # expected fraction of rows containing any NaN is:
        # reduce(lambda t, _: t + (1 - t) * na_frac, range(other_cols + 1), 0)
        # for other_cols=3 and na_frac=0.15, this works out to ~48%
        self.s.str.cat(others=self.others, sep=sep, na_rep=na_rep) | 
	negative_train_query0_00089 | |
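The ~48% figure in that comment can be verified directly; a standalone check of the arithmetic:

from functools import reduce

other_cols, na_frac = 3, 0.15
frac = reduce(lambda t, _: t + (1 - t) * na_frac, range(other_cols + 1), 0)
print(f"{frac:.1%}")  # 47.8%, i.e. roughly 48% of rows contain at least one NaN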
| 
	asv_bench/benchmarks/strings.py/Contains/setup
class Contains:
def setup(self, dtype, regex):
        super().setup(dtype) | 
	negative_train_query0_00090 | |
| 
	asv_bench/benchmarks/strings.py/Contains/time_contains
class Contains:
def time_contains(self, dtype, regex):
        self.s.str.contains("A", regex=regex) | 
	negative_train_query0_00091 | |
| 
	asv_bench/benchmarks/strings.py/Split/setup
class Split:
def setup(self, dtype, expand):
        super().setup(dtype)
        self.s = self.s.str.join("--") | 
	negative_train_query0_00092 | |
| 
	asv_bench/benchmarks/strings.py/Split/time_split
class Split:
def time_split(self, dtype, expand):
        self.s.str.split("--", expand=expand) | 
	negative_train_query0_00093 | |
| 
	asv_bench/benchmarks/strings.py/Split/time_rsplit
class Split:
def time_rsplit(self, dtype, expand):
        self.s.str.rsplit("--", expand=expand) | 
	negative_train_query0_00094 | |
| 
	asv_bench/benchmarks/strings.py/Extract/setup
class Extract:
def setup(self, dtype, expand):
        super().setup(dtype) | 
	negative_train_query0_00095 | |
| 
	asv_bench/benchmarks/strings.py/Extract/time_extract_single_group
class Extract:
def time_extract_single_group(self, dtype, expand):
        with warnings.catch_warnings(record=True):
            self.s.str.extract("(\\w*)A", expand=expand) | 
	negative_train_query0_00096 | |
| 
	asv_bench/benchmarks/strings.py/Dummies/setup
class Dummies:
def setup(self, dtype):
        super().setup(dtype)
        N = len(self.s) // 5
        self.s = self.s[:N].str.join("|") | 
	negative_train_query0_00097 | |