from pyspark.sql import functions as F

df = spark.createDataFrame([["v1", [1, 2, 3]], ["v2", [4, 5]], ["v3", [1, 7]]], ["id", "arr"])

# Map each array element to the set of ids whose arrays contain it.
df1 = df.select("*", F.explode("arr").alias("explode_arr")).groupBy("explode_arr").agg(F.collect_set("id").alias("ids"))

# Join back on the exploded element, group by each id set, and merge the member arrays into one de-duplicated array.
df2 = (df.select("*", F.explode("arr").alias("explode_arr")).join(df1, ["explode_arr"], "inner")
       .groupBy("ids").agg(F.collect_set("arr").alias("array_set"))
       .select("ids", F.array_distinct(F.flatten("array_set")).alias("intersection_arrays")))

# Ids that already belong to a multi-id group, each wrapped as a single-element array for the join below.
df3 = df2.where(F.size("ids") > 1).select(F.explode("ids").alias("ids")).select(F.array("ids").alias("ids"))

# Drop the singleton groups matched in df3 (left_outer plus a null-flag filter acts as an anti-join).
df4 = df2.join(df3.withColumn("flag", F.lit(1)), ["ids"], "left_outer").where(F.col("flag").isNull()).drop("flag")

df4.show()
+--------+-------------------+
| ids|intersection_arrays|
+--------+-------------------+
| [v2]| [4, 5]|
|[v3, v1]| [1, 7, 2, 3]|
+--------+-------------------+
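Side note: the flag/isNull filter in the last step is a hand-rolled anti-join. If your Spark version supports the left_anti join type (it does in recent releases), the same step can be written more directly; a minimal equivalent sketch, with df4_anti as a hypothetical name for the result:

# Keep only the df2 rows whose id set has no match in df3, i.e. the groups
# not subsumed by a larger group. Should give the same result as df4 above.
df4_anti = df2.join(df3, ["ids"], "left_anti")
df4_anti.show()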