Columns: title (string, 1 distinct value) | text (string, lengths 30 to 426k) | id (string, lengths 27 to 30)

asv_bench/benchmarks/strings.py/Dummies/time_get_dummies
class Dummies:
    def time_get_dummies(self, dtype):
        self.s.str.get_dummies("|")
id: negative_train_query0_00098

asv_bench/benchmarks/strings.py/Encode/setup
class Encode:
    def setup(self):
        self.ser = Series(Index([f"i-{i}" for i in range(10_000)], dtype=object))
id: negative_train_query0_00099

asv_bench/benchmarks/strings.py/Encode/time_encode_decode
class Encode:
    def time_encode_decode(self):
        self.ser.str.encode("utf-8").str.decode("utf-8")
id: negative_train_query0_00100
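
The rows above store one method per record, so a benchmark class such as Encode is split across several entries. As context only, a minimal sketch of how the setup and time_encode_decode fragments fit back together; the imports and the __main__ driver are assumptions added for illustration (asv itself would normally call these methods), not part of the dataset.

    from pandas import Index, Series

    class Encode:
        # asv runs setup() before timing the benchmark
        def setup(self):
            self.ser = Series(Index([f"i-{i}" for i in range(10_000)], dtype=object))

        # asv times the body of every method whose name starts with "time_"
        def time_encode_decode(self):
            self.ser.str.encode("utf-8").str.decode("utf-8")

    if __name__ == "__main__":
        bench = Encode()
        bench.setup()
        bench.time_encode_decode()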

asv_bench/benchmarks/strings.py/Slice/setup
class Slice:
    def setup(self):
        self.s = Series(["abcdefg", np.nan] * 500000)
id: negative_train_query0_00101

asv_bench/benchmarks/strings.py/Slice/time_vector_slice
class Slice:
    def time_vector_slice(self):
        # GH 2602
        self.s.str[:5]
id: negative_train_query0_00102

asv_bench/benchmarks/strings.py/Iter/time_iter
class Iter:
    def time_iter(self, dtype):
        for i in self.s:
            pass
id: negative_train_query0_00103

asv_bench/benchmarks/strings.py/StringArrayConstruction/setup
class StringArrayConstruction:
    def setup(self):
        self.series_arr = np.array([str(i) * 10 for i in range(10**5)], dtype=object)
        self.series_arr_nan = np.concatenate([self.series_arr, np.array([NA] * 1000)])
id: negative_train_query0_00104

asv_bench/benchmarks/strings.py/StringArrayConstruction/time_string_array_construction
class StringArrayConstruction:
    def time_string_array_construction(self):
        StringArray(self.series_arr)
id: negative_train_query0_00105

asv_bench/benchmarks/strings.py/StringArrayConstruction/time_string_array_with_nan_construction
class StringArrayConstruction:
    def time_string_array_with_nan_construction(self):
        StringArray(self.series_arr_nan)
id: negative_train_query0_00106

asv_bench/benchmarks/strings.py/StringArrayConstruction/peakmem_stringarray_construction
class StringArrayConstruction:
    def peakmem_stringarray_construction(self):
        StringArray(self.series_arr)
id: negative_train_query0_00107
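
For readers unfamiliar with the API being timed here: StringArray wraps an object-dtype ndarray of str (or pd.NA) values. A hedged sketch with toy sizes chosen for illustration rather than the 10**5-element arrays used above:

    import numpy as np
    import pandas as pd
    from pandas.arrays import StringArray

    # StringArray expects an object-dtype ndarray containing str or pd.NA
    arr = np.array(["a", "bb", "ccc"], dtype=object)
    arr_with_na = np.concatenate([arr, np.array([pd.NA], dtype=object)])

    StringArray(arr)          # what time_string_array_construction measures
    StringArray(arr_with_na)  # the variant with missing values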

asv_bench/benchmarks/frame_ctor.py/FromDicts/setup
class FromDicts:
    def setup(self):
        N, K = 5000, 50
        self.index = pd.Index([f"i-{i}" for i in range(N)], dtype=object)
        self.columns = pd.Index([f"i-{i}" for i in range(K)], dtype=object)
        frame = DataFrame(np.random.randn(N, K), index=self.index, columns=self.columns)
        self.data = frame.to_dict()
        self.dict_list = frame.to_dict(orient="records")
        self.data2 = {i: {j: float(j) for j in range(100)} for i in range(2000)}
        # arrays which we won't consolidate
        self.dict_of_categoricals = {i: Categorical(np.arange(N)) for i in range(K)}
id: negative_train_query0_00108

asv_bench/benchmarks/frame_ctor.py/FromDicts/time_list_of_dict
class FromDicts:
    def time_list_of_dict(self):
        DataFrame(self.dict_list)
id: negative_train_query0_00109

asv_bench/benchmarks/frame_ctor.py/FromDicts/time_nested_dict
class FromDicts:
    def time_nested_dict(self):
        DataFrame(self.data)
id: negative_train_query0_00110

asv_bench/benchmarks/frame_ctor.py/FromDicts/time_nested_dict_index
class FromDicts:
    def time_nested_dict_index(self):
        DataFrame(self.data, index=self.index)
id: negative_train_query0_00111

asv_bench/benchmarks/frame_ctor.py/FromDicts/time_nested_dict_columns
class FromDicts:
    def time_nested_dict_columns(self):
        DataFrame(self.data, columns=self.columns)
id: negative_train_query0_00112

asv_bench/benchmarks/frame_ctor.py/FromDicts/time_nested_dict_index_columns
class FromDicts:
    def time_nested_dict_index_columns(self):
        DataFrame(self.data, index=self.index, columns=self.columns)
id: negative_train_query0_00113

asv_bench/benchmarks/frame_ctor.py/FromDicts/time_nested_dict_int64
class FromDicts:
    def time_nested_dict_int64(self):
        # nested dict, integer indexes, regression described in #621
        DataFrame(self.data2)
id: negative_train_query0_00114

asv_bench/benchmarks/frame_ctor.py/FromDicts/time_dict_of_categoricals
class FromDicts:
    def time_dict_of_categoricals(self):
        # dict of arrays that we won't consolidate
        DataFrame(self.dict_of_categoricals)
id: negative_train_query0_00115
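
As a reminder of what the FromDicts cases feed back into the constructor, a small hedged example of the two dict orientations produced in setup(); the toy frame stands in for the 5000x50 benchmark frame.

    import numpy as np
    import pandas as pd

    frame = pd.DataFrame(np.random.randn(3, 2), index=["r0", "r1", "r2"], columns=["a", "b"])

    nested = frame.to_dict()                   # column -> {index -> value} nested dict
    records = frame.to_dict(orient="records")  # list with one {column -> value} dict per row

    # Round-tripping through the constructor is what the time_* methods measure
    pd.DataFrame(nested)
    pd.DataFrame(records)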

asv_bench/benchmarks/frame_ctor.py/FromSeries/setup
class FromSeries:
    def setup(self):
        mi = MultiIndex.from_product([range(100), range(100)])
        self.s = Series(np.random.randn(10000), index=mi)
id: negative_train_query0_00116

asv_bench/benchmarks/frame_ctor.py/FromSeries/time_mi_series
class FromSeries:
    def time_mi_series(self):
        DataFrame(self.s)
id: negative_train_query0_00117

asv_bench/benchmarks/frame_ctor.py/FromDictwithTimestamp/setup
class FromDictwithTimestamp:
    def setup(self, offset):
        N = 10**3
        idx = date_range(Timestamp("1/1/1900"), freq=offset, periods=N)
        df = DataFrame(np.random.randn(N, 10), index=idx)
        self.d = df.to_dict()
id: negative_train_query0_00118

asv_bench/benchmarks/frame_ctor.py/FromDictwithTimestamp/time_dict_with_timestamp_offsets
class FromDictwithTimestamp:
    def time_dict_with_timestamp_offsets(self, offset):
        DataFrame(self.d)
id: negative_train_query0_00119

asv_bench/benchmarks/frame_ctor.py/FromRecords/setup
class FromRecords:
    def setup(self, nrows):
        N = 100000
        self.gen = ((x, (x * 20), (x * 100)) for x in range(N))
id: negative_train_query0_00120

asv_bench/benchmarks/frame_ctor.py/FromRecords/time_frame_from_records_generator
class FromRecords:
    def time_frame_from_records_generator(self, nrows):
        # issue-6700
        self.df = DataFrame.from_records(self.gen, nrows=nrows)
id: negative_train_query0_00121

asv_bench/benchmarks/frame_ctor.py/FromNDArray/setup
class FromNDArray:
    def setup(self):
        N = 100000
        self.data = np.random.randn(N)
id: negative_train_query0_00122

asv_bench/benchmarks/frame_ctor.py/FromNDArray/time_frame_from_ndarray
class FromNDArray:
    def time_frame_from_ndarray(self):
        self.df = DataFrame(self.data)
id: negative_train_query0_00123

asv_bench/benchmarks/frame_ctor.py/FromLists/setup
class FromLists:
    def setup(self):
        N = 1000
        M = 100
        self.data = [list(range(M)) for i in range(N)]
id: negative_train_query0_00124

asv_bench/benchmarks/frame_ctor.py/FromLists/time_frame_from_lists
class FromLists:
    def time_frame_from_lists(self):
        self.df = DataFrame(self.data)
id: negative_train_query0_00125

asv_bench/benchmarks/frame_ctor.py/FromRange/setup
class FromRange:
    def setup(self):
        N = 1_000_000
        self.data = range(N)
id: negative_train_query0_00126

asv_bench/benchmarks/frame_ctor.py/FromRange/time_frame_from_range
class FromRange:
    def time_frame_from_range(self):
        self.df = DataFrame(self.data)
id: negative_train_query0_00127

asv_bench/benchmarks/frame_ctor.py/FromScalar/setup
class FromScalar:
    def setup(self):
        self.nrows = 100_000
id: negative_train_query0_00128

asv_bench/benchmarks/frame_ctor.py/FromScalar/time_frame_from_scalar_ea_float64
class FromScalar:
    def time_frame_from_scalar_ea_float64(self):
        DataFrame(
            1.0,
            index=range(self.nrows),
            columns=list("abc"),
            dtype=Float64Dtype(),
        )
id: negative_train_query0_00129

asv_bench/benchmarks/frame_ctor.py/FromScalar/time_frame_from_scalar_ea_float64_na
class FromScalar:
    def time_frame_from_scalar_ea_float64_na(self):
        DataFrame(
            NA,
            index=range(self.nrows),
            columns=list("abc"),
            dtype=Float64Dtype(),
        )
id: negative_train_query0_00130
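
A hedged, toy-sized illustration of the scalar-broadcasting constructor these two cases time, using the public pd.Float64Dtype; the 4-row size is illustrative only.

    import pandas as pd

    # Broadcast a scalar (or pd.NA) into every cell of a nullable-float frame
    pd.DataFrame(1.0, index=range(4), columns=list("abc"), dtype=pd.Float64Dtype())
    pd.DataFrame(pd.NA, index=range(4), columns=list("abc"), dtype=pd.Float64Dtype())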

asv_bench/benchmarks/frame_ctor.py/FromArrays/setup
class FromArrays:
    def setup(self):
        N_rows = 1000
        N_cols = 1000
        self.float_arrays = [np.random.randn(N_rows) for _ in range(N_cols)]
        self.sparse_arrays = [
            pd.arrays.SparseArray(np.random.randint(0, 2, N_rows), dtype="float64")
            for _ in range(N_cols)
        ]
        self.int_arrays = [
            pd.array(np.random.randint(1000, size=N_rows), dtype="Int64")
            for _ in range(N_cols)
        ]
        self.index = pd.Index(range(N_rows))
        self.columns = pd.Index(range(N_cols))
id: negative_train_query0_00131

asv_bench/benchmarks/frame_ctor.py/FromArrays/time_frame_from_arrays_float
class FromArrays:
    def time_frame_from_arrays_float(self):
        self.df = DataFrame._from_arrays(
            self.float_arrays,
            index=self.index,
            columns=self.columns,
            verify_integrity=False,
        )
id: negative_train_query0_00132

asv_bench/benchmarks/frame_ctor.py/FromArrays/time_frame_from_arrays_int
class FromArrays:
    def time_frame_from_arrays_int(self):
        self.df = DataFrame._from_arrays(
            self.int_arrays,
            index=self.index,
            columns=self.columns,
            verify_integrity=False,
        )
id: negative_train_query0_00133

asv_bench/benchmarks/frame_ctor.py/FromArrays/time_frame_from_arrays_sparse
class FromArrays:
    def time_frame_from_arrays_sparse(self):
        self.df = DataFrame._from_arrays(
            self.sparse_arrays,
            index=self.index,
            columns=self.columns,
            verify_integrity=False,
        )
id: negative_train_query0_00134

asv_bench/benchmarks/join_merge.py/Concat/setup
class Concat:
    def setup(self, axis):
        N = 1000
        s = Series(N, index=Index([f"i-{i}" for i in range(N)], dtype=object))
        self.series = [s[i:-i] for i in range(1, 10)] * 50
        self.small_frames = [DataFrame(np.random.randn(5, 4))] * 1000
        df = DataFrame(
            {"A": range(N)}, index=date_range("20130101", periods=N, freq="s")
        )
        self.empty_left = [DataFrame(), df]
        self.empty_right = [df, DataFrame()]
        self.mixed_ndims = [df, df.head(N // 2)]
id: negative_train_query0_00135

asv_bench/benchmarks/join_merge.py/Concat/time_concat_series
class Concat:
    def time_concat_series(self, axis):
        concat(self.series, axis=axis, sort=False)
id: negative_train_query0_00136

asv_bench/benchmarks/join_merge.py/Concat/time_concat_small_frames
class Concat:
    def time_concat_small_frames(self, axis):
        concat(self.small_frames, axis=axis)
id: negative_train_query0_00137

asv_bench/benchmarks/join_merge.py/Concat/time_concat_empty_right
class Concat:
    def time_concat_empty_right(self, axis):
        concat(self.empty_right, axis=axis)
id: negative_train_query0_00138

asv_bench/benchmarks/join_merge.py/Concat/time_concat_empty_left
class Concat:
    def time_concat_empty_left(self, axis):
        concat(self.empty_left, axis=axis)
id: negative_train_query0_00139

asv_bench/benchmarks/join_merge.py/Concat/time_concat_mixed_ndims
class Concat:
    def time_concat_mixed_ndims(self, axis):
        concat(self.mixed_ndims, axis=axis)
id: negative_train_query0_00140
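
For orientation, a small hedged example of the pd.concat calls exercised above, parametrised here by hand instead of by asv's axis parameter:

    import numpy as np
    import pandas as pd

    pieces = [pd.DataFrame(np.random.randn(5, 4)) for _ in range(3)]

    pd.concat(pieces, axis=0)                     # stack rows
    pd.concat(pieces, axis=1)                     # align indexes and place side by side
    pd.concat(pieces, axis=0, ignore_index=True)  # drop the repeated 0..4 row labels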

asv_bench/benchmarks/join_merge.py/ConcatDataFrames/setup
class ConcatDataFrames:
    def setup(self, axis, ignore_index):
        frame_c = DataFrame(np.zeros((10000, 200), dtype=np.float32, order="C"))
        self.frame_c = [frame_c] * 20
        frame_f = DataFrame(np.zeros((10000, 200), dtype=np.float32, order="F"))
        self.frame_f = [frame_f] * 20
id: negative_train_query0_00141

asv_bench/benchmarks/join_merge.py/ConcatDataFrames/time_c_ordered
class ConcatDataFrames:
    def time_c_ordered(self, axis, ignore_index):
        concat(self.frame_c, axis=axis, ignore_index=ignore_index)
id: negative_train_query0_00142

asv_bench/benchmarks/join_merge.py/ConcatDataFrames/time_f_ordered
class ConcatDataFrames:
    def time_f_ordered(self, axis, ignore_index):
        concat(self.frame_f, axis=axis, ignore_index=ignore_index)
id: negative_train_query0_00143

asv_bench/benchmarks/join_merge.py/ConcatIndexDtype/setup
class ConcatIndexDtype:
    def setup(self, dtype, structure, axis, sort):
        N = 10_000
        if dtype == "datetime64[ns]":
            vals = date_range("1970-01-01", periods=N)
        elif dtype in ("int64", "Int64", "int64[pyarrow]"):
            vals = np.arange(N, dtype=np.int64)
        elif dtype in ("string[python]", "string[pyarrow]"):
            vals = Index([f"i-{i}" for i in range(N)], dtype=object)
        else:
            raise NotImplementedError
        idx = Index(vals, dtype=dtype)
        if structure == "monotonic":
            idx = idx.sort_values()
        elif structure == "non_monotonic":
            idx = idx[::-1]
        elif structure == "has_na":
            if not idx._can_hold_na:
                raise NotImplementedError
            idx = Index([None], dtype=dtype).append(idx)
        else:
            raise NotImplementedError
        self.series = [Series(i, idx[:-i]) for i in range(1, 6)]
id: negative_train_query0_00144

asv_bench/benchmarks/join_merge.py/ConcatIndexDtype/time_concat_series
class ConcatIndexDtype:
    def time_concat_series(self, dtype, structure, axis, sort):
        concat(self.series, axis=axis, sort=sort)
id: negative_train_query0_00145

asv_bench/benchmarks/join_merge.py/Join/setup
class Join:
    def setup(self, sort):
        level1 = Index([f"i-{i}" for i in range(10)], dtype=object).values
        level2 = Index([f"i-{i}" for i in range(1000)], dtype=object).values
        codes1 = np.arange(10).repeat(1000)
        codes2 = np.tile(np.arange(1000), 10)
        index2 = MultiIndex(levels=[level1, level2], codes=[codes1, codes2])
        self.df_multi = DataFrame(
            np.random.randn(len(index2), 4), index=index2, columns=["A", "B", "C", "D"]
        )
        self.key1 = np.tile(level1.take(codes1), 10)
        self.key2 = np.tile(level2.take(codes2), 10)
        self.df = DataFrame(
            {
                "data1": np.random.randn(100000),
                "data2": np.random.randn(100000),
                "key1": self.key1,
                "key2": self.key2,
            }
        )
        self.df_key1 = DataFrame(
            np.random.randn(len(level1), 4), index=level1, columns=["A", "B", "C", "D"]
        )
        self.df_key2 = DataFrame(
            np.random.randn(len(level2), 4), index=level2, columns=["A", "B", "C", "D"]
        )
        shuf = np.arange(100000)
        np.random.shuffle(shuf)
        self.df_shuf = self.df.reindex(self.df.index[shuf])
id: negative_train_query0_00146

asv_bench/benchmarks/join_merge.py/Join/time_join_dataframe_index_multi
class Join:
    def time_join_dataframe_index_multi(self, sort):
        self.df.join(self.df_multi, on=["key1", "key2"], sort=sort)
id: negative_train_query0_00147

asv_bench/benchmarks/join_merge.py/Join/time_join_dataframe_index_single_key_bigger
class Join:
    def time_join_dataframe_index_single_key_bigger(self, sort):
        self.df.join(self.df_key2, on="key2", sort=sort)
id: negative_train_query0_00148

asv_bench/benchmarks/join_merge.py/Join/time_join_dataframe_index_single_key_small
class Join:
    def time_join_dataframe_index_single_key_small(self, sort):
        self.df.join(self.df_key1, on="key1", sort=sort)
id: negative_train_query0_00149

asv_bench/benchmarks/join_merge.py/Join/time_join_dataframe_index_shuffle_key_bigger_sort
class Join:
    def time_join_dataframe_index_shuffle_key_bigger_sort(self, sort):
        self.df_shuf.join(self.df_key2, on="key2", sort=sort)
id: negative_train_query0_00150

asv_bench/benchmarks/join_merge.py/Join/time_join_dataframes_cross
class Join:
    def time_join_dataframes_cross(self, sort):
        self.df.loc[:2000].join(self.df_key1, how="cross", sort=sort)
id: negative_train_query0_00151
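
A hedged, minimal illustration of the DataFrame.join(on=...) pattern used throughout the Join cases, where a column in the left frame is looked up in the right frame's index:

    import numpy as np
    import pandas as pd

    left = pd.DataFrame({"key1": ["a", "b", "a", "c"], "data1": np.arange(4.0)})
    right = pd.DataFrame({"A": np.arange(3.0)}, index=["a", "b", "c"])

    # Left join: rows of `right` are matched by looking left["key1"] up in right.index
    left.join(right, on="key1", sort=False)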

asv_bench/benchmarks/join_merge.py/JoinIndex/setup
class JoinIndex:
    def setup(self):
        N = 5000
        self.left = DataFrame(
            np.random.randint(1, N / 50, (N, 2)), columns=["jim", "joe"]
        )
        self.right = DataFrame(
            np.random.randint(1, N / 50, (N, 2)), columns=["jolie", "jolia"]
        ).set_index("jolie")
id: negative_train_query0_00152

asv_bench/benchmarks/join_merge.py/JoinIndex/time_left_outer_join_index
class JoinIndex:
    def time_left_outer_join_index(self):
        self.left.join(self.right, on="jim")
id: negative_train_query0_00153

asv_bench/benchmarks/join_merge.py/JoinMultiindexSubset/setup
class JoinMultiindexSubset:
    def setup(self):
        N = 100_000
        mi1 = MultiIndex.from_arrays([np.arange(N)] * 4, names=["a", "b", "c", "d"])
        mi2 = MultiIndex.from_arrays([np.arange(N)] * 2, names=["a", "b"])
        self.left = DataFrame({"col1": 1}, index=mi1)
        self.right = DataFrame({"col2": 2}, index=mi2)
id: negative_train_query0_00154

asv_bench/benchmarks/join_merge.py/JoinMultiindexSubset/time_join_multiindex_subset
class JoinMultiindexSubset:
    def time_join_multiindex_subset(self):
        self.left.join(self.right)
id: negative_train_query0_00155

asv_bench/benchmarks/join_merge.py/JoinEmpty/setup
class JoinEmpty:
    def setup(self):
        N = 100_000
        self.df = DataFrame({"A": np.arange(N)})
        self.df_empty = DataFrame(columns=["B", "C"], dtype="int64")
id: negative_train_query0_00156

asv_bench/benchmarks/join_merge.py/JoinEmpty/time_inner_join_left_empty
class JoinEmpty:
    def time_inner_join_left_empty(self):
        self.df_empty.join(self.df, how="inner")
id: negative_train_query0_00157

asv_bench/benchmarks/join_merge.py/JoinEmpty/time_inner_join_right_empty
class JoinEmpty:
    def time_inner_join_right_empty(self):
        self.df.join(self.df_empty, how="inner")
id: negative_train_query0_00158

asv_bench/benchmarks/join_merge.py/JoinNonUnique/setup
class JoinNonUnique:
    def setup(self):
        date_index = date_range("01-Jan-2013", "23-Jan-2013", freq="min")
        daily_dates = date_index.to_period("D").to_timestamp("s", "s")
        self.fracofday = date_index.values - daily_dates.values
        self.fracofday = self.fracofday.astype("timedelta64[ns]")
        self.fracofday = self.fracofday.astype(np.float64) / 86_400_000_000_000
        self.fracofday = Series(self.fracofday, daily_dates)
        index = date_range(date_index.min(), date_index.max(), freq="D")
        self.temp = Series(1.0, index)[self.fracofday.index]
id: negative_train_query0_00159

asv_bench/benchmarks/join_merge.py/JoinNonUnique/time_join_non_unique_equal
class JoinNonUnique:
    def time_join_non_unique_equal(self):
        self.fracofday * self.temp
id: negative_train_query0_00160

asv_bench/benchmarks/join_merge.py/Merge/setup
class Merge:
    def setup(self, sort):
        N = 10000
        indices = Index([f"i-{i}" for i in range(N)], dtype=object).values
        indices2 = Index([f"i-{i}" for i in range(N)], dtype=object).values
        key = np.tile(indices[:8000], 10)
        key2 = np.tile(indices2[:8000], 10)
        self.left = DataFrame(
            {"key": key, "key2": key2, "value": np.random.randn(80000)}
        )
        self.right = DataFrame(
            {
                "key": indices[2000:],
                "key2": indices2[2000:],
                "value2": np.random.randn(8000),
            }
        )
        self.df = DataFrame(
            {
                "key1": np.tile(np.arange(500).repeat(10), 2),
                "key2": np.tile(np.arange(250).repeat(10), 4),
                "value": np.random.randn(10000),
            }
        )
        self.df2 = DataFrame({"key1": np.arange(500), "value2": np.random.randn(500)})
        self.df3 = self.df[:5000]
id: negative_train_query0_00161

asv_bench/benchmarks/join_merge.py/Merge/time_merge_2intkey
class Merge:
    def time_merge_2intkey(self, sort):
        merge(self.left, self.right, sort=sort)
id: negative_train_query0_00162

asv_bench/benchmarks/join_merge.py/Merge/time_merge_dataframe_integer_2key
class Merge:
    def time_merge_dataframe_integer_2key(self, sort):
        merge(self.df, self.df3, sort=sort)
id: negative_train_query0_00163

asv_bench/benchmarks/join_merge.py/Merge/time_merge_dataframe_integer_key
class Merge:
    def time_merge_dataframe_integer_key(self, sort):
        merge(self.df, self.df2, on="key1", sort=sort)
id: negative_train_query0_00164

asv_bench/benchmarks/join_merge.py/Merge/time_merge_dataframe_empty_right
class Merge:
    def time_merge_dataframe_empty_right(self, sort):
        merge(self.left, self.right.iloc[:0], sort=sort)
id: negative_train_query0_00165

asv_bench/benchmarks/join_merge.py/Merge/time_merge_dataframe_empty_left
class Merge:
    def time_merge_dataframe_empty_left(self, sort):
        merge(self.left.iloc[:0], self.right, sort=sort)
id: negative_train_query0_00166

asv_bench/benchmarks/join_merge.py/Merge/time_merge_dataframes_cross
class Merge:
    def time_merge_dataframes_cross(self, sort):
        merge(self.left.loc[:2000], self.right.loc[:2000], how="cross", sort=sort)
id: negative_train_query0_00167
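
A hedged, toy-sized version of the pd.merge calls above; with no on= argument, merge joins on the shared column names and defaults to an inner join:

    import numpy as np
    import pandas as pd

    left = pd.DataFrame({"key": ["k1", "k2", "k3"], "value": np.random.randn(3)})
    right = pd.DataFrame({"key": ["k2", "k3", "k4"], "value2": np.random.randn(3)})

    pd.merge(left, right, sort=False)           # inner join on the shared "key" column
    pd.merge(left, right, on="key", sort=True)  # same join, result sorted by key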

asv_bench/benchmarks/join_merge.py/MergeEA/setup
class MergeEA:
    def setup(self, dtype, monotonic):
        N = 10_000
        indices = np.arange(1, N)
        key = np.tile(indices[:8000], 10)
        self.left = DataFrame(
            {"key": Series(key, dtype=dtype), "value": np.random.randn(80000)}
        )
        self.right = DataFrame(
            {
                "key": Series(indices[2000:], dtype=dtype),
                "value2": np.random.randn(7999),
            }
        )
        if monotonic:
            self.left = self.left.sort_values("key")
            self.right = self.right.sort_values("key")
id: negative_train_query0_00168

asv_bench/benchmarks/join_merge.py/MergeEA/time_merge
class MergeEA:
    def time_merge(self, dtype, monotonic):
        merge(self.left, self.right)
id: negative_train_query0_00169

asv_bench/benchmarks/join_merge.py/I8Merge/setup
class I8Merge:
    def setup(self, how):
        low, high, n = -1000, 1000, 10**6
        self.left = DataFrame(
            np.random.randint(low, high, (n, 7)), columns=list("ABCDEFG")
        )
        self.left["left"] = self.left.sum(axis=1)
        self.right = self.left.sample(frac=1).rename({"left": "right"}, axis=1)
        self.right = self.right.reset_index(drop=True)
        self.right["right"] *= -1
id: negative_train_query0_00170

asv_bench/benchmarks/join_merge.py/I8Merge/time_i8merge
class I8Merge:
    def time_i8merge(self, how):
        merge(self.left, self.right, how=how)
id: negative_train_query0_00171

asv_bench/benchmarks/join_merge.py/UniqueMerge/setup
class UniqueMerge:
    def setup(self, unique_elements):
        N = 1_000_000
        self.left = DataFrame({"a": np.random.randint(1, unique_elements, (N,))})
        self.right = DataFrame({"a": np.random.randint(1, unique_elements, (N,))})
        uniques = self.right.a.drop_duplicates()
        self.right["a"] = concat(
            [uniques, Series(np.arange(0, -(N - len(uniques)), -1))], ignore_index=True
        )
id: negative_train_query0_00172

asv_bench/benchmarks/join_merge.py/UniqueMerge/time_unique_merge
class UniqueMerge:
    def time_unique_merge(self, unique_elements):
        merge(self.left, self.right, how="inner")
id: negative_train_query0_00173

asv_bench/benchmarks/join_merge.py/MergeDatetime/setup
class MergeDatetime:
    def setup(self, units, tz, monotonic):
        unit_left, unit_right = units
        N = 10_000
        keys = Series(date_range("2012-01-01", freq="min", periods=N, tz=tz))
        self.left = DataFrame(
            {
                "key": keys.sample(N * 10, replace=True).dt.as_unit(unit_left),
                "value1": np.random.randn(N * 10),
            }
        )
        self.right = DataFrame(
            {
                "key": keys[:8000].dt.as_unit(unit_right),
                "value2": np.random.randn(8000),
            }
        )
        if monotonic:
            self.left = self.left.sort_values("key")
            self.right = self.right.sort_values("key")
id: negative_train_query0_00174

asv_bench/benchmarks/join_merge.py/MergeDatetime/time_merge
class MergeDatetime:
    def time_merge(self, units, tz, monotonic):
        merge(self.left, self.right)
id: negative_train_query0_00175

asv_bench/benchmarks/join_merge.py/MergeCategoricals/setup
class MergeCategoricals:
    def setup(self):
        self.left_object = DataFrame(
            {
                "X": np.random.choice(range(10), size=(10000,)),
                "Y": np.random.choice(["one", "two", "three"], size=(10000,)),
            }
        )
        self.right_object = DataFrame(
            {
                "X": np.random.choice(range(10), size=(10000,)),
                "Z": np.random.choice(["jjj", "kkk", "sss"], size=(10000,)),
            }
        )
        self.left_cat = self.left_object.assign(
            Y=self.left_object["Y"].astype("category")
        )
        self.right_cat = self.right_object.assign(
            Z=self.right_object["Z"].astype("category")
        )
        self.left_cat_col = self.left_object.astype({"X": "category"})
        self.right_cat_col = self.right_object.astype({"X": "category"})
        self.left_cat_idx = self.left_cat_col.set_index("X")
        self.right_cat_idx = self.right_cat_col.set_index("X")
id: negative_train_query0_00176

asv_bench/benchmarks/join_merge.py/MergeCategoricals/time_merge_object
class MergeCategoricals:
    def time_merge_object(self):
        merge(self.left_object, self.right_object, on="X")
id: negative_train_query0_00177

asv_bench/benchmarks/join_merge.py/MergeCategoricals/time_merge_cat
class MergeCategoricals:
    def time_merge_cat(self):
        merge(self.left_cat, self.right_cat, on="X")
id: negative_train_query0_00178

asv_bench/benchmarks/join_merge.py/MergeCategoricals/time_merge_on_cat_col
class MergeCategoricals:
    def time_merge_on_cat_col(self):
        merge(self.left_cat_col, self.right_cat_col, on="X")
id: negative_train_query0_00179

asv_bench/benchmarks/join_merge.py/MergeCategoricals/time_merge_on_cat_idx
class MergeCategoricals:
    def time_merge_on_cat_idx(self):
        merge(self.left_cat_idx, self.right_cat_idx, on="X")
id: negative_train_query0_00180
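
The four cases above differ only in which part of the data is categorical. A hedged toy example of the distinction between merging frames with categorical payload columns and merging on a categorical key column:

    import pandas as pd

    left = pd.DataFrame({"X": [1, 2, 3], "Y": pd.Categorical(["one", "two", "one"])})
    right = pd.DataFrame({"X": [1, 2, 4], "Z": pd.Categorical(["jjj", "kkk", "jjj"])})

    # Categorical payload columns (as in time_merge_cat): the join key "X" is still integer
    pd.merge(left, right, on="X")

    # Categorical join key (as in time_merge_on_cat_col): cast the key itself to category
    pd.merge(left.astype({"X": "category"}), right.astype({"X": "category"}), on="X")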

asv_bench/benchmarks/join_merge.py/MergeOrdered/setup
class MergeOrdered:
    def setup(self):
        groups = Index([f"i-{i}" for i in range(10)], dtype=object).values
        self.left = DataFrame(
            {
                "group": groups.repeat(5000),
                "key": np.tile(np.arange(0, 10000, 2), 10),
                "lvalue": np.random.randn(50000),
            }
        )
        self.right = DataFrame(
            {"key": np.arange(10000), "rvalue": np.random.randn(10000)}
        )
id: negative_train_query0_00181

asv_bench/benchmarks/join_merge.py/MergeOrdered/time_merge_ordered
class MergeOrdered:
    def time_merge_ordered(self):
        merge_ordered(self.left, self.right, on="key", left_by="group")
id: negative_train_query0_00182
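
A hedged miniature of pd.merge_ordered as used above: an ordered join on "key", repeated for each group named by left_by; the tiny frames stand in for the 50000-row benchmark inputs.

    import pandas as pd

    left = pd.DataFrame({"group": ["g1", "g1", "g2"], "key": [0, 2, 0], "lvalue": [1.0, 2.0, 3.0]})
    right = pd.DataFrame({"key": [0, 1, 2], "rvalue": [0.1, 0.2, 0.3]})

    # Ordered join on "key", performed separately for each value of left["group"]
    pd.merge_ordered(left, right, on="key", left_by="group")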

asv_bench/benchmarks/join_merge.py/MergeAsof/setup
class MergeAsof:
    def setup(self, direction, tolerance):
        one_count = 200000
        two_count = 1000000
        df1 = DataFrame(
            {
                "time": np.random.randint(0, one_count / 20, one_count),
                "key": np.random.choice(list(string.ascii_uppercase), one_count),
                "key2": np.random.randint(0, 25, one_count),
                "value1": np.random.randn(one_count),
            }
        )
        df2 = DataFrame(
            {
                "time": np.random.randint(0, two_count / 20, two_count),
                "key": np.random.choice(list(string.ascii_uppercase), two_count),
                "key2": np.random.randint(0, 25, two_count),
                "value2": np.random.randn(two_count),
            }
        )
        df1 = df1.sort_values("time")
        df2 = df2.sort_values("time")
        df1["time32"] = np.int32(df1.time)
        df2["time32"] = np.int32(df2.time)
        df1["timeu64"] = np.uint64(df1.time)
        df2["timeu64"] = np.uint64(df2.time)
        self.df1a = df1[["time", "value1"]]
        self.df2a = df2[["time", "value2"]]
        self.df1b = df1[["time", "key", "value1"]]
        self.df2b = df2[["time", "key", "value2"]]
        self.df1c = df1[["time", "key2", "value1"]]
        self.df2c = df2[["time", "key2", "value2"]]
        self.df1d = df1[["time32", "value1"]]
        self.df2d = df2[["time32", "value2"]]
        self.df1e = df1[["time", "key", "key2", "value1"]]
        self.df2e = df2[["time", "key", "key2", "value2"]]
        self.df1f = df1[["timeu64", "value1"]]
        self.df2f = df2[["timeu64", "value2"]]
id: negative_train_query0_00183

asv_bench/benchmarks/join_merge.py/MergeAsof/time_on_int
class MergeAsof:
    def time_on_int(self, direction, tolerance):
        merge_asof(
            self.df1a, self.df2a, on="time", direction=direction, tolerance=tolerance
        )
id: negative_train_query0_00184

asv_bench/benchmarks/join_merge.py/MergeAsof/time_on_int32
class MergeAsof:
    def time_on_int32(self, direction, tolerance):
        merge_asof(
            self.df1d, self.df2d, on="time32", direction=direction, tolerance=tolerance
        )
id: negative_train_query0_00185

asv_bench/benchmarks/join_merge.py/MergeAsof/time_on_uint64
class MergeAsof:
    def time_on_uint64(self, direction, tolerance):
        merge_asof(
            self.df1f, self.df2f, on="timeu64", direction=direction, tolerance=tolerance
        )
id: negative_train_query0_00186

asv_bench/benchmarks/join_merge.py/MergeAsof/time_by_object
class MergeAsof:
    def time_by_object(self, direction, tolerance):
        merge_asof(
            self.df1b,
            self.df2b,
            on="time",
            by="key",
            direction=direction,
            tolerance=tolerance,
        )
id: negative_train_query0_00187

asv_bench/benchmarks/join_merge.py/MergeAsof/time_by_int
class MergeAsof:
    def time_by_int(self, direction, tolerance):
        merge_asof(
            self.df1c,
            self.df2c,
            on="time",
            by="key2",
            direction=direction,
            tolerance=tolerance,
        )
id: negative_train_query0_00188

asv_bench/benchmarks/join_merge.py/MergeAsof/time_multiby
class MergeAsof:
    def time_multiby(self, direction, tolerance):
        merge_asof(
            self.df1e,
            self.df2e,
            on="time",
            by=["key", "key2"],
            direction=direction,
            tolerance=tolerance,
        )
id: negative_train_query0_00189
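
As context for the MergeAsof cases, a hedged toy example: both inputs must already be sorted on the on key (the benchmark's setup sorts by "time" for the same reason), and the small integer tolerance here is purely illustrative.

    import pandas as pd

    trades = pd.DataFrame({"time": [1, 3, 7, 10], "key": ["A", "A", "B", "B"], "value1": [1.0, 2.0, 3.0, 4.0]})
    quotes = pd.DataFrame({"time": [2, 5, 8], "key": ["A", "B", "B"], "value2": [10.0, 20.0, 30.0]})

    # For each left row, take the nearest earlier (direction="backward") right row
    # with the same "key", as long as it lies within `tolerance` of the left time.
    pd.merge_asof(trades, quotes, on="time", by="key", direction="backward", tolerance=2)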

asv_bench/benchmarks/join_merge.py/MergeMultiIndex/setup
class MergeMultiIndex:
    def setup(self, dtypes, how):
        n = 100_000
        offset = 50_000
        mi1 = MultiIndex.from_arrays(
            [
                array(np.arange(n), dtype=dtypes[0]),
                array(np.arange(n), dtype=dtypes[1]),
            ]
        )
        mi2 = MultiIndex.from_arrays(
            [
                array(np.arange(offset, n + offset), dtype=dtypes[0]),
                array(np.arange(offset, n + offset), dtype=dtypes[1]),
            ]
        )
        self.df1 = DataFrame({"col1": 1}, index=mi1)
        self.df2 = DataFrame({"col2": 2}, index=mi2)
id: negative_train_query0_00190

asv_bench/benchmarks/join_merge.py/MergeMultiIndex/time_merge_sorted_multiindex
class MergeMultiIndex:
    def time_merge_sorted_multiindex(self, dtypes, how):
        # copy to avoid MultiIndex._values caching
        df1 = self.df1.copy()
        df2 = self.df2.copy()
        merge(df1, df2, how=how, left_index=True, right_index=True)
id: negative_train_query0_00191

asv_bench/benchmarks/join_merge.py/Align/setup
class Align:
    def setup(self):
        size = 5 * 10**5
        rng = np.arange(0, 10**13, 10**7)
        stamps = np.datetime64("now").view("i8") + rng
        idx1 = np.sort(np.random.choice(stamps, size, replace=False))
        idx2 = np.sort(np.random.choice(stamps, size, replace=False))
        self.ts1 = Series(np.random.randn(size), idx1)
        self.ts2 = Series(np.random.randn(size), idx2)
id: negative_train_query0_00192

asv_bench/benchmarks/join_merge.py/Align/time_series_align_int64_index
class Align:
    def time_series_align_int64_index(self):
        self.ts1 + self.ts2
id: negative_train_query0_00193

asv_bench/benchmarks/join_merge.py/Align/time_series_align_left_monotonic
class Align:
    def time_series_align_left_monotonic(self):
        self.ts1.align(self.ts2, join="left")
id: negative_train_query0_00194
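
A hedged miniature of the two alignment paths timed above: arithmetic aligns on the union of the two indexes, while Series.align makes the alignment explicit.

    import numpy as np
    import pandas as pd

    a = pd.Series(np.random.randn(4), index=[1, 2, 4, 8])
    b = pd.Series(np.random.randn(4), index=[2, 3, 4, 9])

    a + b                    # implicit alignment on the union of the indexes
    a.align(b, join="left")  # explicit alignment keeping only a's labels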

asv_bench/benchmarks/boolean.py/TimeLogicalOps/setup
class TimeLogicalOps:
    def setup(self):
        N = 10_000
        left, right, lmask, rmask = np.random.randint(0, 2, size=(4, N)).astype("bool")
        self.left = pd.arrays.BooleanArray(left, lmask)
        self.right = pd.arrays.BooleanArray(right, rmask)
id: negative_train_query0_00195

asv_bench/benchmarks/boolean.py/TimeLogicalOps/time_or_scalar
class TimeLogicalOps:
    def time_or_scalar(self):
        self.left | True
        self.left | False
id: negative_train_query0_00196

asv_bench/benchmarks/boolean.py/TimeLogicalOps/time_or_array
class TimeLogicalOps:
    def time_or_array(self):
        self.left | self.right
id: negative_train_query0_00197
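
A hedged toy version of the nullable-boolean arrays these logical-op benchmarks build: BooleanArray takes a bool ndarray of values plus a bool ndarray marking which positions are missing, and | follows Kleene (three-valued) logic.

    import numpy as np
    import pandas as pd

    values = np.array([True, False, True])
    mask = np.array([False, False, True])  # last element is missing (pd.NA)
    left = pd.arrays.BooleanArray(values, mask)
    right = pd.arrays.BooleanArray(np.array([False, True, False]), np.array([False, True, False]))

    left | True    # scalar: anything | True is True, even NA
    left | right   # elementwise: NA propagates unless the result is already decided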