Dataset schema: title (string, 1 distinct value), text (string, 30 to 426k characters), id (string, 27 to 30 characters).
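Each row below pairs a source path and its benchmark code (the text field) with a query identifier (the id field). As a minimal sketch of how rows with this schema could be consumed, assuming the Hugging Face datasets library; the dataset path used here is a placeholder, not the real one:

# Minimal sketch, assuming the title/text/id schema above.
# "your-org/pandas-asv-snippets" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("your-org/pandas-asv-snippets", split="train")
for row in ds.select(range(3)):
    # "text" holds the benchmark source, "id" the query identifier.
    print(row["id"], row["text"][:80])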
asv_bench/benchmarks/rolling.py/TableMethod/time_ewm_mean class TableMethod: def time_ewm_mean(self, method): self.df.ewm(1, method=method).mean(engine="numba")
negative_train_query0_00298
asv_bench/benchmarks/series_methods.py/SeriesConstructor/setup
class SeriesConstructor:
    def setup(self):
        self.idx = date_range(
            start=datetime(2015, 10, 26), end=datetime(2016, 1, 1), freq="50s"
        )
        self.data = dict(zip(self.idx, range(len(self.idx))))
        self.array = np.array([1, 2, 3])
        self.idx2 = Index(["a", "b", "c"])
negative_train_query0_00299
asv_bench/benchmarks/series_methods.py/SeriesConstructor/time_constructor_dict class SeriesConstructor: def time_constructor_dict(self): Series(data=self.data, index=self.idx)
negative_train_query0_00300
asv_bench/benchmarks/series_methods.py/SeriesConstructor/time_constructor_no_data class SeriesConstructor: def time_constructor_no_data(self): Series(data=None, index=self.idx)
negative_train_query0_00301
asv_bench/benchmarks/series_methods.py/ToFrame/setup
class ToFrame:
    def setup(self, dtype, name):
        arr = np.arange(10**5)
        ser = Series(arr, dtype=dtype)
        self.ser = ser
negative_train_query0_00302
asv_bench/benchmarks/series_methods.py/ToFrame/time_to_frame class ToFrame: def time_to_frame(self, dtype, name): self.ser.to_frame(name)
negative_train_query0_00303
asv_bench/benchmarks/series_methods.py/NSort/setup class NSort: def setup(self, keep): self.s = Series(np.random.randint(1, 10, 100000))
negative_train_query0_00304
asv_bench/benchmarks/series_methods.py/NSort/time_nlargest class NSort: def time_nlargest(self, keep): self.s.nlargest(3, keep=keep)
negative_train_query0_00305
asv_bench/benchmarks/series_methods.py/NSort/time_nsmallest class NSort: def time_nsmallest(self, keep): self.s.nsmallest(3, keep=keep)
negative_train_query0_00306
asv_bench/benchmarks/series_methods.py/Dropna/setup
class Dropna:
    def setup(self, dtype):
        N = 10**6
        data = {
            "int": np.random.randint(1, 10, N),
            "datetime": date_range("2000-01-01", freq="s", periods=N),
        }
        self.s = Series(data[dtype])
        if dtype == "datetime":
            self.s[np.random.randint(1, N, 100)] = NaT
negative_train_query0_00307
asv_bench/benchmarks/series_methods.py/Dropna/time_dropna class Dropna: def time_dropna(self, dtype): self.s.dropna()
negative_train_query0_00308
asv_bench/benchmarks/series_methods.py/Fillna/setup
class Fillna:
    def setup(self, dtype):
        N = 10**6
        if dtype == "datetime64[ns]":
            data = date_range("2000-01-01", freq="s", periods=N)
            na_value = NaT
        elif dtype in ("float64", "Float64"):
            data = np.random.randn(N)
            na_value = np.nan
        elif dtype in ("Int64", "int64[pyarrow]"):
            data = np.arange(N)
            na_value = NA
        elif dtype in ("string", "string[pyarrow]"):
            data = np.array([str(i) * 5 for i in range(N)], dtype=object)
            na_value = NA
        else:
            raise NotImplementedError
        fill_value = data[0]
        ser = Series(data, dtype=dtype)
        ser[::2] = na_value
        self.ser = ser
        self.fill_value = fill_value
negative_train_query0_00309
asv_bench/benchmarks/series_methods.py/Fillna/time_fillna class Fillna: def time_fillna(self, dtype): self.ser.fillna(value=self.fill_value)
negative_train_query0_00310
asv_bench/benchmarks/series_methods.py/Fillna/time_ffill class Fillna: def time_ffill(self, dtype): self.ser.ffill()
negative_train_query0_00311
asv_bench/benchmarks/series_methods.py/Fillna/time_bfill class Fillna: def time_bfill(self, dtype): self.ser.bfill()
negative_train_query0_00312
asv_bench/benchmarks/series_methods.py/SearchSorted/setup
class SearchSorted:
    def setup(self, dtype):
        N = 10**5
        data = np.array([1] * N + [2] * N + [3] * N).astype(dtype)
        self.s = Series(data)
negative_train_query0_00313
asv_bench/benchmarks/series_methods.py/SearchSorted/time_searchsorted
class SearchSorted:
    def time_searchsorted(self, dtype):
        key = "2" if dtype == "str" else 2
        self.s.searchsorted(key)
negative_train_query0_00314
asv_bench/benchmarks/series_methods.py/Map/setup
class Map:
    def setup(self, mapper, dtype, na_action):
        map_size = 1000
        map_data = Series(map_size - np.arange(map_size), dtype=dtype)

        # construct mapper
        if mapper == "Series":
            self.map_data = map_data
        elif mapper == "dict":
            self.map_data = map_data.to_dict()
        elif mapper == "lambda":
            map_dict = map_data.to_dict()
            self.map_data = lambda x: map_dict[x]
        else:
            raise NotImplementedError

        self.s = Series(np.random.randint(0, map_size, 10000), dtype=dtype)
negative_train_query0_00315
asv_bench/benchmarks/series_methods.py/Map/time_map class Map: def time_map(self, mapper, dtype, na_action): self.s.map(self.map_data, na_action=na_action)
negative_train_query0_00316
asv_bench/benchmarks/series_methods.py/Clip/setup class Clip: def setup(self, n): self.s = Series(np.random.randn(n))
negative_train_query0_00317
asv_bench/benchmarks/series_methods.py/Clip/time_clip class Clip: def time_clip(self, n): self.s.clip(0, 1)
negative_train_query0_00318
asv_bench/benchmarks/series_methods.py/ClipDt/setup
class ClipDt:
    def setup(self):
        dr = date_range("20220101", periods=100_000, freq="s", tz="UTC")
        self.clipper_dt = dr[0:1_000].repeat(100)
        self.s = Series(dr)
negative_train_query0_00319
asv_bench/benchmarks/series_methods.py/ClipDt/time_clip class ClipDt: def time_clip(self): self.s.clip(upper=self.clipper_dt)
negative_train_query0_00320
asv_bench/benchmarks/series_methods.py/ValueCounts/setup class ValueCounts: def setup(self, N, dtype): self.s = Series(np.random.randint(0, N, size=10 * N)).astype(dtype)
negative_train_query0_00321
asv_bench/benchmarks/series_methods.py/ValueCounts/time_value_counts class ValueCounts: def time_value_counts(self, N, dtype): self.s.value_counts()
negative_train_query0_00322
asv_bench/benchmarks/series_methods.py/ValueCountsEA/setup
class ValueCountsEA:
    def setup(self, N, dropna):
        self.s = Series(np.random.randint(0, N, size=10 * N), dtype="Int64")
        self.s.loc[1] = NA
negative_train_query0_00323
asv_bench/benchmarks/series_methods.py/ValueCountsEA/time_value_counts class ValueCountsEA: def time_value_counts(self, N, dropna): self.s.value_counts(dropna=dropna)
negative_train_query0_00324
asv_bench/benchmarks/series_methods.py/ValueCountsObjectDropNAFalse/setup class ValueCountsObjectDropNAFalse: def setup(self, N): self.s = Series(np.random.randint(0, N, size=10 * N)).astype("object")
negative_train_query0_00325
asv_bench/benchmarks/series_methods.py/ValueCountsObjectDropNAFalse/time_value_counts class ValueCountsObjectDropNAFalse: def time_value_counts(self, N): self.s.value_counts(dropna=False)
negative_train_query0_00326
asv_bench/benchmarks/series_methods.py/Mode/setup class Mode: def setup(self, N, dtype): self.s = Series(np.random.randint(0, N, size=10 * N)).astype(dtype)
negative_train_query0_00327
asv_bench/benchmarks/series_methods.py/Mode/time_mode class Mode: def time_mode(self, N, dtype): self.s.mode()
negative_train_query0_00328
asv_bench/benchmarks/series_methods.py/ModeObjectDropNAFalse/setup class ModeObjectDropNAFalse: def setup(self, N): self.s = Series(np.random.randint(0, N, size=10 * N)).astype("object")
negative_train_query0_00329
asv_bench/benchmarks/series_methods.py/ModeObjectDropNAFalse/time_mode class ModeObjectDropNAFalse: def time_mode(self, N): self.s.mode(dropna=False)
negative_train_query0_00330
asv_bench/benchmarks/series_methods.py/Dir/setup class Dir: def setup(self): self.s = Series(index=Index([f"i-{i}" for i in range(10000)], dtype=object))
negative_train_query0_00331
asv_bench/benchmarks/series_methods.py/Dir/time_dir_strings class Dir: def time_dir_strings(self): dir(self.s)
negative_train_query0_00332
asv_bench/benchmarks/series_methods.py/SeriesGetattr/setup class SeriesGetattr: def setup(self): self.s = Series(1, index=date_range("2012-01-01", freq="s", periods=10**6))
negative_train_query0_00333
asv_bench/benchmarks/series_methods.py/SeriesGetattr/time_series_datetimeindex_repr class SeriesGetattr: def time_series_datetimeindex_repr(self): getattr(self.s, "a", None)
negative_train_query0_00334
asv_bench/benchmarks/series_methods.py/All/setup
class All:
    def setup(self, N, case, dtype):
        val = case != "fast"
        self.s = Series([val] * N, dtype=dtype)
negative_train_query0_00335
asv_bench/benchmarks/series_methods.py/All/time_all class All: def time_all(self, N, case, dtype): self.s.all()
negative_train_query0_00336
asv_bench/benchmarks/series_methods.py/Any/setup
class Any:
    def setup(self, N, case, dtype):
        val = case == "fast"
        self.s = Series([val] * N, dtype=dtype)
negative_train_query0_00337
asv_bench/benchmarks/series_methods.py/Any/time_any class Any: def time_any(self, N, case, dtype): self.s.any()
negative_train_query0_00338
asv_bench/benchmarks/series_methods.py/NanOps/setup
class NanOps:
    def setup(self, func, N, dtype):
        if func == "argmax" and dtype in {"Int64", "boolean"}:
            # Skip argmax for nullable int since this doesn't work yet (GH-24382)
            raise NotImplementedError
        self.s = Series(np.ones(N), dtype=dtype)
        self.func = getattr(self.s, func)
negative_train_query0_00339
asv_bench/benchmarks/series_methods.py/NanOps/time_func class NanOps: def time_func(self, func, N, dtype): self.func()
negative_train_query0_00340
asv_bench/benchmarks/series_methods.py/Rank/setup class Rank: def setup(self, dtype): self.s = Series(np.random.randint(0, 1000, size=100000), dtype=dtype)
negative_train_query0_00341
asv_bench/benchmarks/series_methods.py/Rank/time_rank class Rank: def time_rank(self, dtype): self.s.rank()
negative_train_query0_00342
asv_bench/benchmarks/series_methods.py/Iter/setup
class Iter:
    def setup(self, dtype):
        N = 10**5
        if dtype in ["bool", "boolean"]:
            data = np.repeat([True, False], N // 2)
        elif dtype in ["int64", "Int64"]:
            data = np.arange(N)
        elif dtype in ["float64", "Float64"]:
            data = np.random.randn(N)
        elif dtype == "datetime64[ns]":
            data = date_range("2000-01-01", freq="s", periods=N)
        else:
            raise NotImplementedError
        self.s = Series(data, dtype=dtype)
negative_train_query0_00343
asv_bench/benchmarks/series_methods.py/Iter/time_iter
class Iter:
    def time_iter(self, dtype):
        for v in self.s:
            pass
negative_train_query0_00344
asv_bench/benchmarks/series_methods.py/ToNumpy/setup
class ToNumpy:
    def setup(self):
        N = 1_000_000
        self.ser = Series(np.random.randn(N))
negative_train_query0_00345
asv_bench/benchmarks/series_methods.py/ToNumpy/time_to_numpy class ToNumpy: def time_to_numpy(self): self.ser.to_numpy()
negative_train_query0_00346
asv_bench/benchmarks/series_methods.py/ToNumpy/time_to_numpy_double_copy class ToNumpy: def time_to_numpy_double_copy(self): self.ser.to_numpy(dtype="float64", copy=True)
negative_train_query0_00347
asv_bench/benchmarks/series_methods.py/ToNumpy/time_to_numpy_copy class ToNumpy: def time_to_numpy_copy(self): self.ser.to_numpy(copy=True)
negative_train_query0_00348
asv_bench/benchmarks/series_methods.py/ToNumpy/time_to_numpy_float_with_nan class ToNumpy: def time_to_numpy_float_with_nan(self): self.ser.to_numpy(dtype="float64", na_value=np.nan)
negative_train_query0_00349
asv_bench/benchmarks/series_methods.py/Replace/setup
class Replace:
    def setup(self, num_to_replace):
        N = 1_000_000
        self.arr = np.random.randn(N)
        self.arr1 = self.arr.copy()
        np.random.shuffle(self.arr1)
        self.ser = Series(self.arr)

        self.to_replace_list = np.random.choice(self.arr, num_to_replace)
        self.values_list = np.random.choice(self.arr1, num_to_replace)

        self.replace_dict = dict(zip(self.to_replace_list, self.values_list))
negative_train_query0_00350
asv_bench/benchmarks/series_methods.py/Replace/time_replace_dict class Replace: def time_replace_dict(self, num_to_replace): self.ser.replace(self.replace_dict)
negative_train_query0_00351
asv_bench/benchmarks/series_methods.py/Replace/peakmem_replace_dict class Replace: def peakmem_replace_dict(self, num_to_replace): self.ser.replace(self.replace_dict)
negative_train_query0_00352
asv_bench/benchmarks/series_methods.py/Replace/time_replace_list class Replace: def time_replace_list(self, num_to_replace): self.ser.replace(self.to_replace_list, self.values_list)
negative_train_query0_00353
asv_bench/benchmarks/series_methods.py/Replace/peakmem_replace_list class Replace: def peakmem_replace_list(self, num_to_replace): self.ser.replace(self.to_replace_list, self.values_list)
negative_train_query0_00354
asv_bench/benchmarks/groupby.py/ApplyDictReturn/setup
class ApplyDictReturn:
    def setup(self):
        self.labels = np.arange(1000).repeat(10)
        self.data = Series(np.random.randn(len(self.labels)))
negative_train_query0_00355
asv_bench/benchmarks/groupby.py/ApplyDictReturn/time_groupby_apply_dict_return
class ApplyDictReturn:
    def time_groupby_apply_dict_return(self):
        self.data.groupby(self.labels).apply(
            lambda x: {"first": x.values[0], "last": x.values[-1]}
        )
negative_train_query0_00356
asv_bench/benchmarks/groupby.py/Apply/setup
class Apply:
    def setup(self, factor):
        N = 10**factor
        # two cases:
        # - small groups: small data (N**4) + many labels (2000) -> average group
        #   size of 5 (-> larger overhead of slicing method)
        # - larger groups: larger data (N**5) + fewer labels (20) -> average group
        #   size of 5000
        labels = np.random.randint(0, 2000 if factor == 4 else 20, size=N)
        labels2 = np.random.randint(0, 3, size=N)
        df = DataFrame(
            {
                "key": labels,
                "key2": labels2,
                "value1": np.random.randn(N),
                "value2": ["foo", "bar", "baz", "qux"] * (N // 4),
            }
        )
        self.df = df
negative_train_query0_00357
asv_bench/benchmarks/groupby.py/Apply/time_scalar_function_multi_col class Apply: def time_scalar_function_multi_col(self, factor): self.df.groupby(["key", "key2"]).apply(lambda x: 1)
negative_train_query0_00358
asv_bench/benchmarks/groupby.py/Apply/time_scalar_function_single_col class Apply: def time_scalar_function_single_col(self, factor): self.df.groupby("key").apply(lambda x: 1)
negative_train_query0_00359
asv_bench/benchmarks/groupby.py/Apply/df_copy_function
class Apply:
    @staticmethod
    def df_copy_function(g):
        # ensure that the group name is available (see GH #15062)
        g.name
        return g.copy()
negative_train_query0_00360
asv_bench/benchmarks/groupby.py/Apply/time_copy_function_multi_col class Apply: def time_copy_function_multi_col(self, factor): self.df.groupby(["key", "key2"]).apply(self.df_copy_function)
negative_train_query0_00361
asv_bench/benchmarks/groupby.py/Apply/time_copy_overhead_single_col class Apply: def time_copy_overhead_single_col(self, factor): self.df.groupby("key").apply(self.df_copy_function)
negative_train_query0_00362
asv_bench/benchmarks/groupby.py/ApplyNonUniqueUnsortedIndex/setup
class ApplyNonUniqueUnsortedIndex:
    def setup(self):
        # GH 46527
        # unsorted and non-unique index
        idx = np.arange(100)[::-1]
        idx = Index(np.repeat(idx, 200), name="key")
        self.df = DataFrame(np.random.randn(len(idx), 10), index=idx)
negative_train_query0_00363
asv_bench/benchmarks/groupby.py/ApplyNonUniqueUnsortedIndex/time_groupby_apply_non_unique_unsorted_index class ApplyNonUniqueUnsortedIndex: def time_groupby_apply_non_unique_unsorted_index(self): self.df.groupby("key", group_keys=False).apply(lambda x: x)
negative_train_query0_00364
asv_bench/benchmarks/groupby.py/Groups/setup_cache
class Groups:
    def setup_cache(self):
        size = 10**6
        data = {
            "int64_small": Series(np.random.randint(0, 100, size=size)),
            "int64_large": Series(np.random.randint(0, 10000, size=size)),
            "object_small": Series(
                Index([f"i-{i}" for i in range(100)], dtype=object).take(
                    np.random.randint(0, 100, size=size)
                )
            ),
            "object_large": Series(
                Index([f"i-{i}" for i in range(10000)], dtype=object).take(
                    np.random.randint(0, 10000, size=size)
                )
            ),
        }
        return data
negative_train_query0_00365
asv_bench/benchmarks/groupby.py/Groups/setup class Groups: def setup(self, data, key): self.ser = data[key]
negative_train_query0_00366
asv_bench/benchmarks/groupby.py/Groups/time_series_groups class Groups: def time_series_groups(self, data, key): self.ser.groupby(self.ser).groups
negative_train_query0_00367
asv_bench/benchmarks/groupby.py/Groups/time_series_indices class Groups: def time_series_indices(self, data, key): self.ser.groupby(self.ser).indices
negative_train_query0_00368
asv_bench/benchmarks/groupby.py/GroupManyLabels/setup
class GroupManyLabels:
    def setup(self, ncols):
        N = 1000
        data = np.random.randn(N, ncols)
        self.labels = np.random.randint(0, 100, size=N)
        self.df = DataFrame(data)
negative_train_query0_00369
asv_bench/benchmarks/groupby.py/GroupManyLabels/time_sum class GroupManyLabels: def time_sum(self, ncols): self.df.groupby(self.labels).sum()
negative_train_query0_00370
asv_bench/benchmarks/groupby.py/Nth/setup
class Nth:
    def setup(self, dtype):
        N = 10**5
        # with datetimes (GH7555)
        if dtype == "datetime":
            values = date_range("1/1/2011", periods=N, freq="s")
        elif dtype == "object":
            values = ["foo"] * N
        else:
            values = np.arange(N).astype(dtype)
        key = np.arange(N)
        self.df = DataFrame({"key": key, "values": values})
        self.df.iloc[1, 1] = np.nan
negative_train_query0_00371
asv_bench/benchmarks/groupby.py/Nth/time_frame_nth_any class Nth: def time_frame_nth_any(self, dtype): self.df.groupby("key").nth(0, dropna="any")
negative_train_query0_00372
asv_bench/benchmarks/groupby.py/Nth/time_groupby_nth_all class Nth: def time_groupby_nth_all(self, dtype): self.df.groupby("key").nth(0, dropna="all")
negative_train_query0_00373
asv_bench/benchmarks/groupby.py/Nth/time_frame_nth class Nth: def time_frame_nth(self, dtype): self.df.groupby("key").nth(0)
negative_train_query0_00374
asv_bench/benchmarks/groupby.py/Nth/time_series_nth_any class Nth: def time_series_nth_any(self, dtype): self.df["values"].groupby(self.df["key"]).nth(0, dropna="any")
negative_train_query0_00375
asv_bench/benchmarks/groupby.py/Nth/time_series_nth_all class Nth: def time_series_nth_all(self, dtype): self.df["values"].groupby(self.df["key"]).nth(0, dropna="all")
negative_train_query0_00376
asv_bench/benchmarks/groupby.py/Nth/time_series_nth class Nth: def time_series_nth(self, dtype): self.df["values"].groupby(self.df["key"]).nth(0)
negative_train_query0_00377
asv_bench/benchmarks/groupby.py/DateAttributes/setup
class DateAttributes:
    def setup(self):
        rng = date_range("1/1/2000", "12/31/2005", freq="h")
        self.year, self.month, self.day = rng.year, rng.month, rng.day
        self.ts = Series(np.random.randn(len(rng)), index=rng)
negative_train_query0_00378
asv_bench/benchmarks/groupby.py/DateAttributes/time_len_groupby_object class DateAttributes: def time_len_groupby_object(self): len(self.ts.groupby([self.year, self.month, self.day]))
negative_train_query0_00379
asv_bench/benchmarks/groupby.py/Int64/setup
class Int64:
    def setup(self):
        arr = np.random.randint(-1 << 12, 1 << 12, (1 << 17, 5))
        i = np.random.choice(len(arr), len(arr) * 5)
        arr = np.vstack((arr, arr[i]))
        i = np.random.permutation(len(arr))
        arr = arr[i]
        self.cols = list("abcde")
        self.df = DataFrame(arr, columns=self.cols)
        self.df["jim"], self.df["joe"] = np.random.randn(2, len(self.df)) * 10
negative_train_query0_00380
asv_bench/benchmarks/groupby.py/Int64/time_overflow class Int64: def time_overflow(self): self.df.groupby(self.cols).max()
negative_train_query0_00381
asv_bench/benchmarks/groupby.py/CountMultiDtype/setup_cache
class CountMultiDtype:
    def setup_cache(self):
        n = 10000
        offsets = np.random.randint(n, size=n).astype("timedelta64[ns]")
        dates = np.datetime64("now") + offsets
        dates[np.random.rand(n) > 0.5] = np.datetime64("nat")
        offsets[np.random.rand(n) > 0.5] = np.timedelta64("nat")
        value2 = np.random.randn(n)
        value2[np.random.rand(n) > 0.5] = np.nan
        obj = np.random.choice(list("ab"), size=n).astype(object)
        obj[np.random.randn(n) > 0.5] = np.nan
        df = DataFrame(
            {
                "key1": np.random.randint(0, 500, size=n),
                "key2": np.random.randint(0, 100, size=n),
                "dates": dates,
                "value2": value2,
                "value3": np.random.randn(n),
                "ints": np.random.randint(0, 1000, size=n),
                "obj": obj,
                "offsets": offsets,
            }
        )
        return df
negative_train_query0_00382
asv_bench/benchmarks/groupby.py/CountMultiDtype/time_multi_count class CountMultiDtype: def time_multi_count(self, df): df.groupby(["key1", "key2"]).count()
negative_train_query0_00383
asv_bench/benchmarks/groupby.py/CountMultiInt/setup_cache
class CountMultiInt:
    def setup_cache(self):
        n = 10000
        df = DataFrame(
            {
                "key1": np.random.randint(0, 500, size=n),
                "key2": np.random.randint(0, 100, size=n),
                "ints": np.random.randint(0, 1000, size=n),
                "ints2": np.random.randint(0, 1000, size=n),
            }
        )
        return df
negative_train_query0_00384
asv_bench/benchmarks/groupby.py/CountMultiInt/time_multi_int_count class CountMultiInt: def time_multi_int_count(self, df): df.groupby(["key1", "key2"]).count()
negative_train_query0_00385
asv_bench/benchmarks/groupby.py/CountMultiInt/time_multi_int_nunique class CountMultiInt: def time_multi_int_nunique(self, df): df.groupby(["key1", "key2"]).nunique()
negative_train_query0_00386
asv_bench/benchmarks/groupby.py/AggFunctions/setup_cache
class AggFunctions:
    def setup_cache(self):
        N = 10**5
        fac1 = np.array(["A", "B", "C"], dtype="O")
        fac2 = np.array(["one", "two"], dtype="O")
        df = DataFrame(
            {
                "key1": fac1.take(np.random.randint(0, 3, size=N)),
                "key2": fac2.take(np.random.randint(0, 2, size=N)),
                "value1": np.random.randn(N),
                "value2": np.random.randn(N),
                "value3": np.random.randn(N),
            }
        )
        return df
negative_train_query0_00387
asv_bench/benchmarks/groupby.py/AggFunctions/time_different_str_functions
class AggFunctions:
    def time_different_str_functions(self, df):
        df.groupby(["key1", "key2"]).agg(
            {"value1": "mean", "value2": "var", "value3": "sum"}
        )
negative_train_query0_00388
asv_bench/benchmarks/groupby.py/AggFunctions/time_different_str_functions_multicol class AggFunctions: def time_different_str_functions_multicol(self, df): df.groupby(["key1", "key2"]).agg(["sum", "min", "max"])
negative_train_query0_00389
asv_bench/benchmarks/groupby.py/AggFunctions/time_different_str_functions_singlecol class AggFunctions: def time_different_str_functions_singlecol(self, df): df.groupby("key1").agg({"value1": "mean", "value2": "var", "value3": "sum"})
negative_train_query0_00390
asv_bench/benchmarks/groupby.py/GroupStrings/setup
class GroupStrings:
    def setup(self):
        n = 2 * 10**5
        alpha = list(map("".join, product(ascii_letters, repeat=4)))
        data = np.random.choice(alpha, (n // 5, 4), replace=False)
        data = np.repeat(data, 5, axis=0)
        self.df = DataFrame(data, columns=list("abcd"))
        self.df["joe"] = (np.random.randn(len(self.df)) * 10).round(3)
        self.df = self.df.sample(frac=1).reset_index(drop=True)
negative_train_query0_00391
asv_bench/benchmarks/groupby.py/GroupStrings/time_multi_columns class GroupStrings: def time_multi_columns(self): self.df.groupby(list("abcd")).max()
negative_train_query0_00392
asv_bench/benchmarks/groupby.py/MultiColumn/setup_cache
class MultiColumn:
    def setup_cache(self):
        N = 10**5
        key1 = np.tile(np.arange(100, dtype=object), 1000)
        key2 = key1.copy()
        np.random.shuffle(key1)
        np.random.shuffle(key2)
        df = DataFrame(
            {
                "key1": key1,
                "key2": key2,
                "data1": np.random.randn(N),
                "data2": np.random.randn(N),
            }
        )
        return df
negative_train_query0_00393
asv_bench/benchmarks/groupby.py/MultiColumn/time_lambda_sum class MultiColumn: def time_lambda_sum(self, df): df.groupby(["key1", "key2"]).agg(lambda x: x.values.sum())
negative_train_query0_00394
asv_bench/benchmarks/groupby.py/MultiColumn/time_cython_sum class MultiColumn: def time_cython_sum(self, df): df.groupby(["key1", "key2"]).sum()
negative_train_query0_00395
asv_bench/benchmarks/groupby.py/MultiColumn/time_col_select_lambda_sum class MultiColumn: def time_col_select_lambda_sum(self, df): df.groupby(["key1", "key2"])["data1"].agg(lambda x: x.values.sum())
negative_train_query0_00396
asv_bench/benchmarks/groupby.py/MultiColumn/time_col_select_str_sum class MultiColumn: def time_col_select_str_sum(self, df): df.groupby(["key1", "key2"])["data1"].agg("sum")
negative_train_query0_00397