val rdd = sc.parallelize(Seq(("vskp", Array(2.0, 1.0, 2.1, 5.4)),("hyd",Array(1.5, 0.5, 0.9, 3.7)),("hyd", Array(1.5, 0.5, 0.9, 3.2)),("tvm", Array(8.0, 2.9,
For Java developers, try calling this method:
import java.util.ArrayList;
import java.util.List;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.types.StructField;
import scala.collection.JavaConversions;

private static Dataset<Row> cloneDataset(Dataset<Row> ds) {
    List<Column> filterColumns = new ArrayList<>();
    List<String> filterColumnsNames = new ArrayList<>();
    // Walk the schema and collect every column reference together with its name.
    scala.collection.Iterator<StructField> it = ds.exprEnc().schema().toIterator();
    while (it.hasNext()) {
        String columnName = it.next().name();
        filterColumns.add(ds.col(columnName));
        filterColumnsNames.add(columnName);
    }
    // Re-select all columns and reapply their names; toDF aliases each column,
    // so the returned dataset carries fresh attribute ids.
    ds = ds.select(JavaConversions.asScalaBuffer(filterColumns).seq())
           .toDF(scala.collection.JavaConverters.asScalaIteratorConverter(filterColumnsNames.iterator()).asScala().toSeq());
    return ds;
}
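If you would rather avoid exprEnc(), which is a Spark-internal accessor, the same re-selection can be written against only public Dataset APIs (schema(), select(), toDF()). This is just a sketch of an equivalent helper; the name cloneDatasetWithPublicApi is made up here, and it assumes the same imports as above minus scala.collection.JavaConversions:

private static Dataset<Row> cloneDatasetWithPublicApi(Dataset<Row> ds) {
    String[] names = ds.schema().fieldNames();   // column names in schema order
    Column[] cols = new Column[names.length];
    for (int i = 0; i < names.length; i++) {
        cols[i] = ds.col(names[i]);              // rebuild each column reference
    }
    // toDF(names) aliases every column, which gives the projection fresh attribute ids
    return ds.select(cols).toDF(names);
}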
Call it on both datasets just before the join; it clones each dataset into a new one:
df1 = cloneDataset(df1);
df2 = cloneDataset(df2);
Dataset<Row> join = df1.join(df2, col("column_name"));
// If that didn't work, try cloning the join result instead
// (columns_seq being the Seq of column names to join on):
final Dataset<Row> join = cloneDataset(df1.join(df2, columns_seq));
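For context, here is a minimal, self-contained sketch of how the workaround is typically wired up when both sides of the join derive from the same source dataset. The class name, the sample data, and the left_val/right_val column names are purely illustrative, and it assumes the cloneDataset helper above is defined on the same class:

import static org.apache.spark.sql.functions.col;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class CloneBeforeJoinExample {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
                .master("local[*]")
                .appName("clone-before-join")
                .getOrCreate();

        // Both sides are derived from the same source dataset, which is the
        // situation where the ambiguous self-join reference shows up.
        Dataset<Row> source = spark.range(5).toDF("id");
        Dataset<Row> df1 = source.withColumn("left_val", col("id").multiply(2));
        Dataset<Row> df2 = source.withColumn("right_val", col("id").plus(10));

        // Clone both sides so their column references no longer share attribute ids.
        df1 = cloneDataset(df1);
        df2 = cloneDataset(df2);

        Dataset<Row> joined = df1.join(df2, df1.col("id").equalTo(df2.col("id")));
        joined.show();

        spark.stop();
    }
}

As I understand it, the point of cloning both sides first is that the re-selection forces Spark to assign new attribute ids, so the join condition no longer compares a column with itself.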