
Commit f8241d7 (1 parent: 460dbdc)

fix: handle empty selections inside unpivot and melt layout arrays

2 files changed: +78 −20 lines changed

bigframes/core/blocks.py (53 additions, 20 deletions)
```diff
@@ -1822,9 +1822,9 @@ def melt(
         Arguments correspond to pandas.melt arguments.
         """
         # TODO: Implement col_level and ignore_index
-        value_labels: pd.Index = pd.Index(
-            [self.col_id_to_label[col_id] for col_id in value_vars]
-        )
+        value_labels: pd.Index = self.column_labels[
+            [self.value_columns.index(col_id) for col_id in value_vars]
+        ]
         id_labels = [self.col_id_to_label[col_id] for col_id in id_vars]
 
         unpivot_expr, (var_col_ids, unpivot_out, passthrough_cols) = unpivot(
```
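The old construction rebuilt the labels with `pd.Index(...)`, which loses the index structure when `value_vars` is empty: `pd.Index([])` is a flat, single-level object Index even if the source columns form a `MultiIndex`. Positional selection on `self.column_labels` preserves the index type and its levels. A minimal plain-pandas sketch of the difference (the tuple labels are illustrative, mirroring the new test below):

```python
import pandas as pd

columns = pd.MultiIndex.from_tuples(
    [("Group1", "A"), ("Group2", "B"), ("Group1", "C")]
)

# Rebuilding from the label values: with an empty selection there is
# nothing to infer the structure from, so the result is a flat Index.
rebuilt = pd.Index([columns[i] for i in []])
print(rebuilt.nlevels)  # 1

# Positional selection on the existing index keeps the MultiIndex,
# including both of its levels, even when nothing is selected.
selected = columns[[]]
print(selected.nlevels)  # 2
```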
```diff
@@ -3417,6 +3417,7 @@ def unpivot(
     joined_array, (labels_mapping, column_mapping) = labels_array.relational_join(
         array_value, type="cross"
     )
+
     new_passthrough_cols = [column_mapping[col] for col in passthrough_columns]
     # Last column is offsets
     index_col_ids = [labels_mapping[col] for col in labels_array.column_ids[:-1]]
@@ -3426,20 +3427,24 @@ def unpivot(
     unpivot_exprs: List[ex.Expression] = []
     # Supports producing multiple stacked ouput columns for stacking only part of hierarchical index
     for input_ids in unpivot_columns:
-        # row explode offset used to choose the input column
-        # we use offset instead of label as labels are not necessarily unique
-        cases = itertools.chain(
-            *(
-                (
-                    ops.eq_op.as_expr(explode_offsets_id, ex.const(i)),
-                    ex.deref(column_mapping[id_or_null])
-                    if (id_or_null is not None)
-                    else ex.const(None),
+        col_expr: ex.Expression
+        if not input_ids:
+            col_expr = ex.const(None)
+        else:
+            # row explode offset used to choose the input column
+            # we use offset instead of label as labels are not necessarily unique
+            cases = itertools.chain(
+                *(
+                    (
+                        ops.eq_op.as_expr(explode_offsets_id, ex.const(i)),
+                        ex.deref(column_mapping[id_or_null])
+                        if (id_or_null is not None)
+                        else ex.const(None),
+                    )
+                    for i, id_or_null in enumerate(input_ids)
                 )
-                for i, id_or_null in enumerate(input_ids)
             )
-        )
-        col_expr = ops.case_when_op.as_expr(*cases)
+            col_expr = ops.case_when_op.as_expr(*cases)
         unpivot_exprs.append(col_expr)
 
     joined_array, unpivot_col_ids = joined_array.compute_values(unpivot_exprs)
```
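When `input_ids` is empty, the generator produces no (predicate, value) pairs, so `ops.case_when_op.as_expr(*cases)` would be invoked with zero arguments, i.e. a CASE expression with no WHEN branches. The new guard short-circuits to a NULL literal for that output column instead. A toy schematic of the control flow, using SQL-ish strings as stand-ins rather than the bigframes expression API:

```python
from typing import Optional, Sequence

def unpivot_column_expr(input_ids: Sequence[Optional[str]], offset_col: str) -> str:
    """Toy stand-in for the expression builder; returns SQL-ish text."""
    if not input_ids:
        # An empty selection has no branches to dispatch on, and a CASE
        # expression with zero WHEN clauses is invalid, so emit NULL.
        return "NULL"
    whens = [
        f"WHEN {offset_col} = {i} THEN "
        f"{id_or_null if id_or_null is not None else 'NULL'}"
        for i, id_or_null in enumerate(input_ids)
    ]
    return "CASE " + " ".join(whens) + " END"

print(unpivot_column_expr([], "offset"))          # NULL
print(unpivot_column_expr(["a", None], "offset"))  # CASE WHEN ... END
```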
```diff
@@ -3457,19 +3462,47 @@ def _pd_index_to_array_value(
     Create an ArrayValue from a list of label tuples.
     The last column will be row offsets.
     """
+    id_gen = bigframes.core.identifiers.standard_id_strings()
+    col_ids = [next(id_gen) for _ in range(index.nlevels)]
+    offset_id = next(id_gen)
+
     rows = []
     labels_as_tuples = utils.index_as_tuples(index)
     for row_offset in range(len(index)):
-        id_gen = bigframes.core.identifiers.standard_id_strings()
         row_label = labels_as_tuples[row_offset]
         row_label = (row_label,) if not isinstance(row_label, tuple) else row_label
         row = {}
-        for label_part, id in zip(row_label, id_gen):
-            row[id] = label_part if pd.notnull(label_part) else None
-        row[next(id_gen)] = row_offset
+        for label_part, col_id in zip(row_label, col_ids):
+            row[col_id] = label_part if pd.notnull(label_part) else None
+        row[offset_id] = row_offset
         rows.append(row)
 
-    return core.ArrayValue.from_pyarrow(pa.Table.from_pylist(rows), session=session)
+    import pyarrow as pa
+
+    if not rows:
+        from bigframes.dtypes import bigframes_dtype_to_arrow_dtype
+
+        dtypes_list = getattr(index, "dtypes", None)
+        if dtypes_list is None:
+            dtypes_list = (
+                [index.dtype] if hasattr(index, "dtype") else [pd.Float64Dtype()]
+            )
+
+        fields = []
+        for col_id, dtype in zip(col_ids, dtypes_list):
+            try:
+                pa_type = bigframes_dtype_to_arrow_dtype(dtype)
+            except Exception:
+                pa_type = pa.string()
+            fields.append(pa.field(col_id, pa_type))
+        fields.append(pa.field(offset_id, pa.int64()))
+        schema = pa.schema(fields)
+        pt = pa.Table.from_pylist([], schema=schema)
+    else:
+        pt = pa.Table.from_pylist(rows)
+        pt = pt.rename_columns([*col_ids, offset_id])
+
+    return core.ArrayValue.from_pyarrow(pt, session=session)
 
 
 def _resolve_index_col(
```
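Hoisting `id_gen` out of the loop gives every row the same stable `col_ids` and `offset_id`, which the empty-index branch then reuses to build an explicit `pa.schema`. That matters because `pa.Table.from_pylist` infers column names and types from the row dicts, so an empty `rows` list yields a table with no columns at all, and the label and offset columns the caller expects disappear. A self-contained sketch of that failure mode and the fix (the column names here are illustrative):

```python
import pyarrow as pa

# With no rows there is nothing to infer a schema from: the table
# comes back with zero columns, not zero rows of the expected columns.
inferred = pa.Table.from_pylist([])
print(inferred.num_columns)  # 0

# An explicit schema pins both the column ids and their types, so an
# empty index still round-trips with the expected shape.
schema = pa.schema(
    [pa.field("level_0", pa.string()), pa.field("offset", pa.int64())]
)
pinned = pa.Table.from_pylist([], schema=schema)
print(pinned.num_columns)  # 2
print(pinned.num_rows)     # 0
```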

tests/system/small/test_dataframe.py (25 additions, 0 deletions)
```diff
@@ -5902,6 +5902,31 @@ def test_to_gbq_table_labels(scalars_df_index):
     assert table.labels["test"] == "labels"
 
 
+def test_to_gbq_obj_ref_persists(session):
+    # Test that saving and loading an Object Reference retains its dtype
+    bdf = session.from_glob_path(
+        "gs://cloud-samples-data/vision/ocr/*.jpg", name="uris"
+    ).head(1)
+
+    destination_table = "bigframes-dev.bigframes_tests_sys.test_obj_ref_persistence"
+    bdf.to_gbq(destination_table, if_exists="replace")
+
+    loaded_df = session.read_gbq(destination_table)
+    assert loaded_df["uris"].dtype == dtypes.OBJ_REF_DTYPE
+
+
+def test_dataframe_melt_multiindex(session):
+    # Tests that `melt` operations via count do not cause MultiIndex drops in Arrow
+    df = pd.DataFrame({"A": [1], "B": ["string"], "C": [3]})
+    df.columns = pd.MultiIndex.from_tuples(
+        [("Group1", "A"), ("Group2", "B"), ("Group1", "C")]
+    )
+    bdf = session.read_pandas(df)
+
+    count_df = bdf.count().to_pandas()
+    assert count_df.shape[0] == 3
+
+
 @pytest.mark.parametrize(
     ("col_names", "ignore_index"),
     [
```
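For reference, the pandas behavior that `test_dataframe_melt_multiindex` pins down: `DataFrame.count()` returns one value per column, indexed by the full column labels, so with two-level columns the result should keep all three entries and both index levels. The equivalent check in plain pandas:

```python
import pandas as pd

df = pd.DataFrame({"A": [1], "B": ["string"], "C": [3]})
df.columns = pd.MultiIndex.from_tuples(
    [("Group1", "A"), ("Group2", "B"), ("Group1", "C")]
)

counts = df.count()
print(counts.shape[0])       # 3, one count per (group, name) column
print(counts.index.nlevels)  # 2, the column MultiIndex survives
```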
