Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add Vaex #243

Open
wants to merge 6 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion _control/solutions.csv
Original file line number Diff line number Diff line change
Expand Up @@ -25,4 +25,6 @@ polars,join
arrow,groupby
arrow,join
duckdb,groupby
duckdb,join
duckdb,join
vaex,groupby
vaex,join
2 changes: 1 addition & 1 deletion _launcher/launcher.R
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ readret = function(x) {
file.ext = function(x) {
ans = switch(
x,
"data.table"=, "dplyr"=, "h2o"=, "arrow"=, "duckdb"="R",
"data.table"=, "dplyr"=, "h2o"=, "arrow"=, "duckdb"="R", "vaex"=,
"pandas"=, "cudf"=, "spark"=, "pydatatable"=, "modin"=, "dask"=, "polars"="py",
"clickhouse"="sql",
"juliadf"="jl"
Expand Down
2 changes: 1 addition & 1 deletion _launcher/solution.R
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ if ("quiet" %in% names(args)) {
file.ext = function(x) {
ans = switch(
x,
"data.table"=, "dplyr"=, "h2o"=, "arrow"=, "duckdb"="R",
"data.table"=, "dplyr"=, "h2o"=, "arrow"=, "duckdb"="R", "vaex"=,
"pandas"=, "cudf"=, "spark"=, "pydatatable"=, "modin"=, "dask"=, "polars"="py",
"clickhouse"="sql",
"juliadf"="jl"
Expand Down
2 changes: 1 addition & 1 deletion run.conf
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# task, used in init-setup-iteration.R
export RUN_TASKS="groupby join groupby2014"
# solution, used in init-setup-iteration.R
export RUN_SOLUTIONS="data.table dplyr pandas pydatatable spark dask juliadf cudf clickhouse polars arrow duckdb"
export RUN_SOLUTIONS="vaex data.table dplyr pandas pydatatable spark dask juliadf cudf clickhouse polars arrow duckdb"

# flag to upgrade tools, used in run.sh on init
export DO_UPGRADE=true
Expand Down
2 changes: 2 additions & 0 deletions run.sh
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,8 @@ if [[ "$DO_UPGRADE" == true && "$RUN_SOLUTIONS" =~ "dplyr" ]]; then ./dplyr/upg-
if [[ "$RUN_SOLUTIONS" =~ "dplyr" ]]; then ./dplyr/ver-dplyr.sh; fi;
if [[ "$DO_UPGRADE" == true && "$RUN_SOLUTIONS" =~ "juliadf" ]]; then ./juliadf/upg-juliadf.sh; fi;
if [[ "$RUN_SOLUTIONS" =~ "juliadf" ]]; then ./juliadf/ver-juliadf.sh; fi;
if [[ "$DO_UPGRADE" == true && "$RUN_SOLUTIONS" =~ "vaex" ]]; then ./vaex/upg-vaex.sh; fi;
if [[ "$RUN_SOLUTIONS" =~ "vaex" ]]; then ./vaex/ver-vaex.sh; fi;
if [[ "$DO_UPGRADE" == true && "$RUN_SOLUTIONS" =~ "modin" ]]; then ./modin/upg-modin.sh; fi;
if [[ "$RUN_SOLUTIONS" =~ "modin" ]]; then ./modin/ver-modin.sh; fi;
if [[ "$DO_UPGRADE" == true && "$RUN_SOLUTIONS" =~ "pandas" ]]; then ./pandas/upg-pandas.sh; fi;
Expand Down
302 changes: 302 additions & 0 deletions vaex/groupby-vaex.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,302 @@
#!/usr/bin/env python

print("# groupby-vaex.py", flush=True)

import gc
import os
import sys
import timeit

import vaex

# Load the shared benchmark helpers; presumably defines write_log, make_chk
# and memory_usage used below — see ./_helpers/helpers.py.
exec(open("./_helpers/helpers.py").read())

# Metadata recorded with every timing row via write_log().
ver = vaex.__version__['vaex-core']  # vaex reports versions as a dict per sub-package
git = '-'
task = "groupby"
solution = "vaex"
fun = ".groupby"
cache = "TRUE"     # string flags, written verbatim into the results log
on_disk = "TRUE"

# Dataset name is injected by the launcher through the environment.
data_name = os.environ['SRC_DATANAME']
src_grp = os.path.join("data", data_name+".csv")
print("loading dataset %s" % data_name, flush=True)

# convert=True makes vaex materialize the CSV to an on-disk (hdf5) file so
# subsequent opens are memory-mapped; explicit integer dtypes keep memory low.
x = vaex.open(src_grp, convert=True, dtype={"id4":"Int8", "id5":"Int8", "id6":"Int32", "v1":"Int8", "v2":"Int8"})
print("loaded dataset")
# Dictionary-encode the string id columns in place; grouping on encoded
# columns avoids repeated string hashing. NOTE(review): done outside the
# timed sections, so this preprocessing is not counted in any question time.
x.ordinal_encode('id1', inplace=True)
x.ordinal_encode('id2', inplace=True)
x.ordinal_encode('id3', inplace=True)

in_rows = x.shape[0]
print(in_rows, flush=True)

# Wall-clock anchor for the whole-task duration printed at the end.
task_init = timeit.default_timer()
print("grouping...", flush=True)

question = "sum v1 by id1" # q1
gc.collect()
# Two timed runs of the same query: run 1 is the cold measurement, run 2 the
# repeat; each run logs its own timing, memory and checksum via write_log.
for run in (1, 2):
    t0 = timeit.default_timer()
    ans = x.groupby('id1').agg({'v1': 'sum'})
    print(ans.shape, flush=True)
    time_sec = timeit.default_timer() - t0
    mem_gb = memory_usage()
    # checksum of the result, timed separately from the query itself
    t0 = timeit.default_timer()
    chk = [ans.v1.sum()]
    chk_time_sec = timeit.default_timer() - t0
    write_log(task=task, data=data_name, in_rows=in_rows, question=question, out_rows=ans.shape[0], out_cols=ans.shape[1], solution=solution, version=ver, git=git, fun=fun, run=run, time_sec=time_sec, mem_gb=mem_gb, cache=cache, chk=make_chk(chk), chk_time_sec=chk_time_sec, on_disk=on_disk)
    if run == 1:
        del ans
        gc.collect()
print(ans.head(3), flush=True)
print(ans.tail(3), flush=True)
del ans

question = "sum v1 by id1:id2" # q2
gc.collect()
# Two timed runs (cold + repeat); run 1's result is discarded and memory
# reclaimed before run 2 starts, matching the benchmark protocol.
for run in (1, 2):
    t0 = timeit.default_timer()
    ans = x.groupby(['id1', 'id2']).agg({'v1': 'sum'})
    print(ans.shape, flush=True)
    time_sec = timeit.default_timer() - t0
    mem_gb = memory_usage()
    t0 = timeit.default_timer()
    chk = [ans.v1.sum()]
    chk_time_sec = timeit.default_timer() - t0
    write_log(task=task, data=data_name, in_rows=in_rows, question=question, out_rows=ans.shape[0], out_cols=ans.shape[1], solution=solution, version=ver, git=git, fun=fun, run=run, time_sec=time_sec, mem_gb=mem_gb, cache=cache, chk=make_chk(chk), chk_time_sec=chk_time_sec, on_disk=on_disk)
    if run == 1:
        del ans
        gc.collect()
print(ans.head(3), flush=True)
print(ans.tail(3), flush=True)
del ans

question = "sum v1 mean v3 by id3" # q3
gc.collect()
# Two timed runs (cold + repeat) of a mixed sum/mean aggregation.
for run in (1, 2):
    t0 = timeit.default_timer()
    ans = x.groupby('id3').agg({'v1': 'sum', 'v3': 'mean'})
    print(ans.shape, flush=True)
    time_sec = timeit.default_timer() - t0
    mem_gb = memory_usage()
    # checksum sums both output columns (v3 holds the per-group means)
    t0 = timeit.default_timer()
    chk = [ans.v1.sum(), ans.v3.sum()]
    chk_time_sec = timeit.default_timer() - t0
    write_log(task=task, data=data_name, in_rows=in_rows, question=question, out_rows=ans.shape[0], out_cols=ans.shape[1], solution=solution, version=ver, git=git, fun=fun, run=run, time_sec=time_sec, mem_gb=mem_gb, cache=cache, chk=make_chk(chk), chk_time_sec=chk_time_sec, on_disk=on_disk)
    if run == 1:
        del ans
        gc.collect()
print(ans.head(3), flush=True)
print(ans.tail(3), flush=True)
del ans

question = "mean v1:v3 by id4" # q4
gc.collect()
# Two timed runs (cold + repeat): mean of the three value columns per id4.
for run in (1, 2):
    t0 = timeit.default_timer()
    ans = x.groupby('id4').agg({'v1': 'mean', 'v2': 'mean', 'v3': 'mean'})
    print(ans.shape, flush=True)
    time_sec = timeit.default_timer() - t0
    mem_gb = memory_usage()
    t0 = timeit.default_timer()
    chk = [ans.v1.sum(), ans.v2.sum(), ans.v3.sum()]
    chk_time_sec = timeit.default_timer() - t0
    write_log(task=task, data=data_name, in_rows=in_rows, question=question, out_rows=ans.shape[0], out_cols=ans.shape[1], solution=solution, version=ver, git=git, fun=fun, run=run, time_sec=time_sec, mem_gb=mem_gb, cache=cache, chk=make_chk(chk), chk_time_sec=chk_time_sec, on_disk=on_disk)
    if run == 1:
        del ans
        gc.collect()
print(ans.head(3), flush=True)
print(ans.tail(3), flush=True)
del ans

question = "sum v1:v3 by id6" # q5
gc.collect()
# Two timed runs (cold + repeat): sum of the three value columns per id6.
for run in (1, 2):
    t0 = timeit.default_timer()
    ans = x.groupby('id6').agg({'v1': 'sum', 'v2': 'sum', 'v3': 'sum'})
    print(ans.shape, flush=True)
    time_sec = timeit.default_timer() - t0
    mem_gb = memory_usage()
    t0 = timeit.default_timer()
    chk = [ans.v1.sum(), ans.v2.sum(), ans.v3.sum()]
    chk_time_sec = timeit.default_timer() - t0
    write_log(task=task, data=data_name, in_rows=in_rows, question=question, out_rows=ans.shape[0], out_cols=ans.shape[1], solution=solution, version=ver, git=git, fun=fun, run=run, time_sec=time_sec, mem_gb=mem_gb, cache=cache, chk=make_chk(chk), chk_time_sec=chk_time_sec, on_disk=on_disk)
    if run == 1:
        del ans
        gc.collect()
print(ans.head(3), flush=True)
print(ans.tail(3), flush=True)
del ans

# question = "median v3 sd v3 by id4 id5" # q6 # skipped: median aggregation not implemented in vaex (NOTE: the linked issue https://github.com/dask/dask/issues/4362 is a dask issue, carried over from the dask solution this script was adapted from)
# gc.collect()
# t_start = timeit.default_timer()
# ans = x.groupby(['id4','id5']).agg({'v3': ['median','std']})
# print(ans.shape, flush=True)
# t = timeit.default_timer() - t_start
# m = memory_usage()
# t_start = timeit.default_timer()
# chk = [ans['v3']['median'].sum(), ans['v3']['std'].sum()]
# chkt = timeit.default_timer() - t_start
# write_log(task=task, data=data_name, in_rows=in_rows, question=question, out_rows=ans.shape[0], out_cols=ans.shape[1], solution=solution, version=ver, git=git, fun=fun, run=1, time_sec=t, mem_gb=m, cache=cache, chk=make_chk(chk), chk_time_sec=chkt, on_disk=on_disk)
# del ans
# gc.collect()
# t_start = timeit.default_timer()
# ans = x.groupby(['id4','id5']).agg({'v3': ['median','std']})
# print(ans.shape, flush=True)
# t = timeit.default_timer() - t_start
# m = memory_usage()
# t_start = timeit.default_timer()
# chk = [ans['v3']['median'].sum(), ans['v3']['std'].sum()]
# chkt = timeit.default_timer() - t_start
# write_log(task=task, data=data_name, in_rows=in_rows, question=question, out_rows=ans.shape[0], out_cols=ans.shape[1], solution=solution, version=ver, git=git, fun=fun, run=2, time_sec=t, mem_gb=m, cache=cache, chk=make_chk(chk), chk_time_sec=chkt, on_disk=on_disk)
# print(ans.head(3), flush=True)
# print(ans.tail(3), flush=True)
# del ans

# question = "max v1 - min v2 by id3" # q7
# gc.collect()
# t_start = timeit.default_timer()
# ans = x.groupby('id3').agg({'v1':'max', 'v2':'min'}).assign(range_v1_v2=lambda x: x['v1']-x['v2'])[['range_v1_v2']]
# print(ans.shape, flush=True)
# t = timeit.default_timer() - t_start
# m = memory_usage()
# t_start = timeit.default_timer()
# chk = [ans['range_v1_v2'].sum()]
# chkt = timeit.default_timer() - t_start
# write_log(task=task, data=data_name, in_rows=in_rows, question=question, out_rows=ans.shape[0], out_cols=ans.shape[1], solution=solution, version=ver, git=git, fun=fun, run=1, time_sec=t, mem_gb=m, cache=cache, chk=make_chk(chk), chk_time_sec=chkt, on_disk=on_disk)
# del ans
# gc.collect()
# t_start = timeit.default_timer()
# ans = x.groupby('id3').agg({'v1':'max', 'v2':'min'}).assign(range_v1_v2=lambda x: x['v1']-x['v2'])[['range_v1_v2']]
# print(ans.shape, flush=True)
# t = timeit.default_timer() - t_start
# m = memory_usage()
# t_start = timeit.default_timer()
# chk = [ans['range_v1_v2'].sum()]
# chkt = timeit.default_timer() - t_start
# write_log(task=task, data=data_name, in_rows=in_rows, question=question, out_rows=ans.shape[0], out_cols=ans.shape[1], solution=solution, version=ver, git=git, fun=fun, run=2, time_sec=t, mem_gb=m, cache=cache, chk=make_chk(chk), chk_time_sec=chkt, on_disk=on_disk)
# print(ans.head(3), flush=True)
# print(ans.tail(3), flush=True)
# del ans

# question = "largest two v3 by id6" # q8
# gc.collect()
# t_start = timeit.default_timer()
# ans = x[~x['v3'].isna()][['id6','v3']].groupby('id6').apply(lambda x: x.nlargest(2, columns='v3'), meta={'id6':'Int64', 'v3':'float64'})[['v3']]
# ans.reset_index(level='id6', inplace=True)
# ans.reset_index(drop=True, inplace=True) # drop because nlargest creates some extra new index field
# print(ans.shape, flush=True)
# t = timeit.default_timer() - t_start
# m = memory_usage()
# t_start = timeit.default_timer()
# chk = [ans['v3'].sum()]
# chkt = timeit.default_timer() - t_start
# write_log(task=task, data=data_name, in_rows=in_rows, question=question, out_rows=ans.shape[0], out_cols=ans.shape[1], solution=solution, version=ver, git=git, fun=fun, run=1, time_sec=t, mem_gb=m, cache=cache, chk=make_chk(chk), chk_time_sec=chkt, on_disk=on_disk)
# del ans
# gc.collect()
# t_start = timeit.default_timer()
# ans = x[~x['v3'].isna()][['id6','v3']].groupby('id6').apply(lambda x: x.nlargest(2, columns='v3'), meta={'id6':'Int64', 'v3':'float64'})[['v3']]
# ans.reset_index(level='id6', inplace=True)
# ans.reset_index(drop=True, inplace=True)
# print(ans.shape, flush=True)
# t = timeit.default_timer() - t_start
# m = memory_usage()
# t_start = timeit.default_timer()
# chk = [ans['v3'].sum()]
# chkt = timeit.default_timer() - t_start
# write_log(task=task, data=data_name, in_rows=in_rows, question=question, out_rows=ans.shape[0], out_cols=ans.shape[1], solution=solution, version=ver, git=git, fun=fun, run=2, time_sec=t, mem_gb=m, cache=cache, chk=make_chk(chk), chk_time_sec=chkt, on_disk=on_disk)
# print(ans.head(3), flush=True)
# print(ans.tail(3), flush=True)
# del ans

# question = "regression v1 v2 by id2 id4" # q9
# gc.collect()
# t_start = timeit.default_timer()
# ans = x[['id2','id4','v1','v2']].groupby(['id2','id4']).apply(lambda x: pd.Series({'r2': x.corr()['v1']['v2']**2}), meta={'r2':'float64'})
# print(ans.shape, flush=True)
# t = timeit.default_timer() - t_start
# m = memory_usage()
# t_start = timeit.default_timer()
# chk = [ans['r2'].sum()]
# chkt = timeit.default_timer() - t_start
# write_log(task=task, data=data_name, in_rows=in_rows, question=question, out_rows=ans.shape[0], out_cols=ans.shape[1], solution=solution, version=ver, git=git, fun=fun, run=1, time_sec=t, mem_gb=m, cache=cache, chk=make_chk(chk), chk_time_sec=chkt, on_disk=on_disk)
# del ans
# gc.collect()
# t_start = timeit.default_timer()
# ans = x[['id2','id4','v1','v2']].groupby(['id2','id4']).apply(lambda x: pd.Series({'r2': x.corr()['v1']['v2']**2}), meta={'r2':'float64'})
# print(ans.shape, flush=True)
# t = timeit.default_timer() - t_start
# m = memory_usage()
# t_start = timeit.default_timer()
# chk = [ans['r2'].sum()]
# chkt = timeit.default_timer() - t_start
# write_log(task=task, data=data_name, in_rows=in_rows, question=question, out_rows=ans.shape[0], out_cols=ans.shape[1], solution=solution, version=ver, git=git, fun=fun, run=2, time_sec=t, mem_gb=m, cache=cache, chk=make_chk(chk), chk_time_sec=chkt, on_disk=on_disk)
# print(ans.head(3), flush=True)
# print(ans.tail(3), flush=True)
# del ans

# question = "sum v3 count by id1:id6" # q10
# gc.collect()
# t_start = timeit.default_timer()
# ans = x.groupby(['id1','id2','id3','id4','id5','id6']).agg({'v3':'sum', 'v1':'size'}) # column name differs from the expected output; the ValueError quoted here ("Metadata inference failed in `rename`: ... Level values must be unique: [nan, nan] on level 0") is a dask error carried over from the dask solution this script was adapted from
# print(ans.shape, flush=True)
# t = timeit.default_timer() - t_start
# m = memory_usage()
# t_start = timeit.default_timer()
# chk = [ans.v3.sum(), ans.v1.sum()]
# chkt = timeit.default_timer() - t_start
# write_log(task=task, data=data_name, in_rows=in_rows, question=question, out_rows=ans.shape[0], out_cols=ans.shape[1], solution=solution, version=ver, git=git, fun=fun, run=1, time_sec=t, mem_gb=m, cache=cache, chk=make_chk(chk), chk_time_sec=chkt, on_disk=on_disk)
# del ans
# gc.collect()
# t_start = timeit.default_timer()
# ans = x.groupby(['id1','id2','id3','id4','id5','id6']).agg({'v3':'sum', 'v1':'size'})
# print(ans.shape, flush=True)
# t = timeit.default_timer() - t_start
# m = memory_usage()
# t_start = timeit.default_timer()
# chk = [ans.v3.sum(), ans.v1.sum()]
# chkt = timeit.default_timer() - t_start
# write_log(task=task, data=data_name, in_rows=in_rows, question=question, out_rows=ans.shape[0], out_cols=ans.shape[1], solution=solution, version=ver, git=git, fun=fun, run=2, time_sec=t, mem_gb=m, cache=cache, chk=make_chk(chk), chk_time_sec=chkt, on_disk=on_disk)
# print(ans.head(3), flush=True)
# print(ans.tail(3), flush=True)
# del ans

# Report total wall-clock time for the whole grouping task (anchored at task_init).
print("grouping finished, took %0.3fs" % (timeit.default_timer()-task_init), flush=True)

# Use sys.exit rather than the bare exit() builtin: exit is a convenience
# injected by the site module and is not guaranteed to exist (e.g. python -S);
# sys.exit(0) terminates with an explicit success status in all cases.
sys.exit(0)
Loading