PySpark Create Relationship between DataFrame Columns

I am trying to implement some logic to get a relationship between ID and Link based on the logic below.
Logic -
If id 1 is linked with 2 and 2 is linked with 3, then the relations are 1 -> 2, 1 -> 3, 2 -> 1, 2 -> 3, 3 -> 1, 3 -> 2.
Similarly, if 1 is linked with 4, 4 with 7 and 7 with 5, then the relations are 1 -> 4, 1 -> 5, 1 -> 7, 4 -> 1, 4 -> 5, 4 -> 7, 5 -> 1, 5 -> 4, 5 -> 7.
Input DataFrame -
+---+----+
| id|link|
+---+----+
| 1| 2|
| 3| 1|
| 4| 2|
| 6| 5|
| 9| 7|
| 9| 10|
+---+----+
I am trying to achieve the output below:
+---+----+
| Id|Link|
+---+----+
| 1| 2|
| 1| 3|
| 1| 4|
| 2| 1|
| 2| 3|
| 2| 4|
| 3| 1|
| 3| 2|
| 3| 4|
| 4| 1|
| 4| 2|
| 4| 3|
| 5| 6|
| 6| 5|
| 7| 9|
| 7| 10|
| 9| 7|
| 9| 10|
| 10| 7|
| 10| 9|
+---+----+
I have tried many ways, but none of them work. I have tried the following code as well:
from pyspark.sql.functions import asc
df = spark.createDataFrame([(1, 2), (3, 1), (4, 2), (6, 5), (9, 7), (9, 10)], ["id", "link"])
ids = df.select("Id").distinct().rdd.flatMap(lambda x: x).collect()
links = df.select("Link").distinct().rdd.flatMap(lambda x: x).collect()
combinations = [(id, link) for id in ids for link in links]
df_combinations = spark.createDataFrame(combinations, ["Id", "Link"])
result = df_combinations.join(df, ["Id", "Link"], "left_anti").union(df).dropDuplicates()
result = result.sort(asc("Id"), asc("Link"))
and
from pyspark.sql import functions as F
from pyspark.sql.functions import col
from pyspark.sql.window import Window
df = spark.createDataFrame([(1, 2), (3, 1), (4, 2), (6, 5), (9, 7), (9, 10)], ["id", "link"])
combinations = df.alias("a").crossJoin(df.alias("b")) \
    .filter(F.col("a.id") != F.col("b.id")) \
    .select(col("a.id").alias("a_id"), col("b.id").alias("b_id"), col("a.link").alias("a_link"), col("b.link").alias("b_link"))
window = Window.partitionBy("a_id").orderBy("a_id", "b_link")
paths = combinations.groupBy("a_id", "b_link") \
    .agg(F.first("b_id").over(window).alias("id")) \
    .groupBy("id").agg(F.collect_list("b_link").alias("links"))
result = paths.select("id", F.explode("links").alias("link"))
result = result.union(df.selectExpr("id as id_", "link as link_"))
Any help would be much appreciated.

This is not a general approach, but you can use the graphframes package. It can be a struggle to set up, but once it is available the solution is simple.
import os
# Ship the graphframes jar (it also bundles the Python package) to the executors.
sc.addPyFile(os.path.expanduser('graphframes-0.8.1-spark3.0-s_2.12.jar'))
from graphframes import *
# Build the edge and vertex DataFrames that GraphFrame expects.
e = df.select('id', 'link').toDF('src', 'dst')
v = e.select('src').toDF('id') \
    .union(e.select('dst')) \
    .distinct()
g = GraphFrame(v, e)
# connectedComponents needs a checkpoint directory.
sc.setCheckpointDir("/tmp/graphframes")
df = g.connectedComponents()
# Pair every vertex with every other vertex in the same component.
df.join(df.withColumnRenamed('id', 'link'), ['component'], 'inner') \
    .drop('component') \
    .filter('id != link') \
    .show()
+---+----+
| id|link|
+---+----+
| 7| 10|
| 7| 9|
| 3| 2|
| 3| 4|
| 3| 1|
| 5| 6|
| 6| 5|
| 9| 10|
| 9| 7|
| 1| 2|
| 1| 4|
| 1| 3|
| 10| 9|
| 10| 7|
| 4| 2|
| 4| 1|
| 4| 3|
| 2| 4|
| 2| 1|
| 2| 3|
+---+----+
The connectedComponents method returns a component id for each vertex. The id is the same for every vertex in a connected group (vertices reachable from one another through edges) and different across groups, since there are no edges between components. So you can take the Cartesian product of each component with itself and drop the pairs where a vertex is paired with itself.
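For the sample input the grouping works out as follows (illustrative labels only; the real component ids are arbitrary long values assigned by GraphFrames):
# component A: {1, 2, 3, 4}   -- connected through 1-2, 3-1, 4-2
# component B: {5, 6}         -- connected through 6-5
# component C: {7, 9, 10}     -- connected through 9-7, 9-10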
Added answer
Inspired by the approach above, I looked around and found the networkx package.
import networkx as nx
from pyspark.sql import functions as f
# Build the graph on the driver and compute its connected components.
df = df.toPandas()
G = nx.from_pandas_edgelist(df, 'id', 'link')
components = [[list(c)] for c in nx.connected_components(G)]
# One row per component, then explode back to one row per vertex.
df2 = spark.createDataFrame(components, ['array']) \
    .withColumn('component', f.monotonically_increasing_id()) \
    .select('component', f.explode('array').alias('id'))
df2.join(df2.withColumnRenamed('id', 'link'), ['component'], 'inner') \
    .drop('component') \
    .filter('id != link') \
    .show()
+---+----+
| id|link|
+---+----+
| 1| 2|
| 1| 3|
| 1| 4|
| 2| 1|
| 2| 3|
| 2| 4|
| 3| 1|
| 3| 2|
| 3| 4|
| 4| 1|
| 4| 2|
| 4| 3|
| 5| 6|
| 6| 5|
| 9| 10|
| 9| 7|
| 10| 9|
| 10| 7|
| 7| 9|
| 7| 10|
+---+----+

Related

SQL grouped running sum

I have some data like this
data = [("1","1"), ("1","1"), ("1","1"), ("2","1"), ("2","1"), ("3","1"), ("3","1"), ("4","1"),]
df = spark.createDataFrame(data=data, schema=["id", "imp"])
df.createOrReplaceTempView("df")
+---+---+
| id|imp|
+---+---+
| 1| 1|
| 1| 1|
| 1| 1|
| 2| 1|
| 2| 1|
| 3| 1|
| 3| 1|
| 4| 1|
+---+---+
I want the count of IDs grouped by ID, its running sum, and the total sum. This is the code I'm using:
query = """
select id,
count(id) as count,
sum(count(id)) over (order by count(id) desc) as running_sum,
sum(count(id)) over () as total_sum
from df
group by id
order by count desc
"""
spark.sql(query).show()
+---+-----+-----------+---------+
| id|count|running_sum|total_sum|
+---+-----+-----------+---------+
| 1| 3| 3| 8|
| 2| 2| 7| 8|
| 3| 2| 7| 8|
| 4| 1| 8| 8|
+---+-----+-----------+---------+
The problem is with the running_sum column. For some reason it groups the two rows that both have count 2 while summing, and shows 7 for both ID 2 and ID 3.
This is the result I'm expecting
+---+-----+-----------+---------+
| id|count|running_sum|total_sum|
+---+-----+-----------+---------+
| 1| 3| 3| 8|
| 2| 2| 5| 8|
| 3| 2| 7| 8|
| 4| 1| 8| 8|
+---+-----+-----------+---------+
You should do the running sum in an outer query. The original query behaves this way because a window with an ORDER BY but no explicit frame defaults to RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW, and under RANGE all rows that tie on the ORDER BY value (here the two ids with count 2) are peers, so they get the same running total. An explicit ROWS frame avoids that.
spark.sql('''
    select *,
        sum(cnt) over (order by id rows between unbounded preceding and current row) as run_sum,
        sum(cnt) over (partition by '1') as tot_sum
    from (
        select id, count(id) as cnt
        from data_tbl
        group by id)
'''). \
    show()
# +---+---+-------+-------+
# | id|cnt|run_sum|tot_sum|
# +---+---+-------+-------+
# | 1| 3| 3| 8|
# | 2| 2| 5| 8|
# | 3| 2| 7| 8|
# | 4| 1| 8| 8|
# +---+---+-------+-------+
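If you prefer to keep it in one query, the same fix can also be applied to the original window directly: give it an explicit ROWS frame plus a tie-breaking sort key so rows with the same count no longer share a running total. A sketch, using the df temp view registered in the question:
spark.sql("""
select id,
    count(id) as count,
    sum(count(id)) over (order by count(id) desc, id rows between unbounded preceding and current row) as running_sum,
    sum(count(id)) over () as total_sum
from df
group by id
order by count desc
""").show()
This should give the expected running sums 3, 5, 7, 8 for ids 1 through 4.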
Using the DataFrame API (data_sdf is the input dataframe from the question):
import sys
from pyspark.sql import functions as func
from pyspark.sql.window import Window as wd
data_sdf. \
    groupBy('id'). \
    agg(func.count('id').alias('cnt')). \
    withColumn('run_sum',
               func.sum('cnt').over(wd.partitionBy().orderBy('id').rowsBetween(-sys.maxsize, 0))
               ). \
    withColumn('tot_sum', func.sum('cnt').over(wd.partitionBy())). \
    show()
# +---+---+-------+-------+
# | id|cnt|run_sum|tot_sum|
# +---+---+-------+-------+
# | 1| 3| 3| 8|
# | 2| 2| 5| 8|
# | 3| 2| 7| 8|
# | 4| 1| 8| 8|
# +---+---+-------+-------+

spark sql: how to understand ('*, 'id % 3 as "bucket") in spark.range(7).select('*, 'id % 3 as "bucket").show

spark.range(7).select('*,'id % 3 as "bucket").show
// result:
+---+------+
| id|bucket|
+---+------+
| 0| 0|
| 1| 1|
| 2| 2|
| 3| 0|
| 4| 1|
| 5| 2|
| 6| 0|
+---+------+
spark.range(7).withColumn("bucket", $"id" % 3).show
// result:
+---+------+
| id|bucket|
+---+------+
| 0| 0|
| 1| 1|
| 2| 2|
| 3| 0|
| 4| 1|
| 5| 2|
| 6| 0|
+---+------+
I want to know what to make of '* and how to read the whole select statement.
Are the two ways above equivalent?
spark.range(7).select('*,'id % 3 as "bucket").show
spark.range(7).select($"*",$"id" % 3 as "bucket").show
spark.range(7).select(col("*"),col("id") % 3 as "bucket").show
val df = spark.range(7)
df.select(df("*"),df("id") % 3 as "bucket").show
These four ways are equivalent. In Scala, 'id is a Symbol that Spark implicitly converts to a Column (through import spark.implicits._, which the spark-shell provides automatically), so '* selects all columns just like $"*" or col("*"), and as "bucket" aliases the computed column. select('*, 'id % 3 as "bucket") therefore means "select all existing columns plus id % 3 aliased as bucket", which is exactly what withColumn("bucket", $"id" % 3) does.
// https://spark.apache.org/docs/2.4.4/api/scala/index.html#org.apache.spark.sql.Column

Pyspark dataframes group by

I have a dataframe like below
+---+---+---+
|123|124|125|
+---+---+---+
|  1|  2|  3|
|  9|  9|  4|
|  4| 12|  1|
|  2|  4|  8|
|  7|  6|  3|
| 19| 11|  2|
| 21| 10| 10|
+---+---+---+
I need the data to be in the form
1: [123, 125]
2: [123, 124, 125]
3: [125]
The order does not need to be sorted. I am new to DataFrames in pyspark; any help would be appreciated.
There are no melt or pivot APIs in pyspark that will accomplish this directly. Instead, flatmap from the RDD into a new dataframe and aggregate:
df.show()
+---+---+---+
|123|124|125|
+---+---+---+
| 1| 2| 3|
| 9| 9| 4|
| 4| 12| 1|
| 2| 4| 8|
| 7| 6| 3|
| 19| 11| 2|
| 21| 10| 10|
+---+---+---+
For each column of each row in the RDD, output a row with two columns: the value and the column name:
cols = df.columns
(df.rdd
    .flatMap(lambda row: [(row[c], c) for c in cols]).toDF(["value", "column_name"])
    .show())
+-----+-----------+
|value|column_name|
+-----+-----------+
| 1| 123|
| 2| 124|
| 3| 125|
| 9| 123|
| 9| 124|
| 4| 125|
| 4| 123|
| 12| 124|
| 1| 125|
| 2| 123|
| 4| 124|
| 8| 125|
| 7| 123|
| 6| 124|
| 3| 125|
| 19| 123|
| 11| 124|
| 2| 125|
| 21| 123|
| 10| 124|
+-----+-----------+
Then, group by the value and aggregate the column names into a list:
from pyspark.sql import functions as f
(df.rdd
    .flatMap(lambda row: [(row[c], c) for c in cols]).toDF(["value", "column_name"])
    .groupby("value").agg(f.collect_list("column_name"))
    .show())
+-----+-------------------------+
|value|collect_list(column_name)|
+-----+-------------------------+
| 19| [123]|
| 7| [123]|
| 6| [124]|
| 9| [123, 124]|
| 1| [123, 125]|
| 10| [124, 125]|
| 3| [125, 125]|
| 12| [124]|
| 8| [125]|
| 11| [124]|
| 2| [124, 123, 125]|
| 4| [125, 123, 124]|
| 21| [123]|
+-----+-------------------------+
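The output keeps duplicates (value 3 maps to [125, 125]) because collect_list records every occurrence; use collect_set if, as in the expected output, each column name should appear only once. The same melt-and-aggregate idea can also be expressed without the RDD round trip; a sketch, assuming all columns share the same data type as in the sample df:
from pyspark.sql import functions as F
# One struct per column, exploded into (value, column_name) rows.
melted = df.select(
    F.explode(F.array(*[
        F.struct(F.col(c).alias("value"), F.lit(c).alias("column_name"))
        for c in df.columns
    ])).alias("kv")
).select("kv.value", "kv.column_name")
# collect_set keeps each column name only once per value.
melted.groupBy("value").agg(F.collect_set("column_name").alias("columns")).show(truncate=False)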

need to perform multi-column join on a dataframe with a look-up dataframe

I have two dataframes like so
+---+---+---+---+---+
| c1| c2| c3| c4| c5|
+---+---+---+---+---+
| 0| 1| 2| 3| 4|
| 5| 6| 7| 8| 9|
+---+---+---+---+---+
+---+---+
|key|val|
+---+---+
| 0| A|
| 1| B|
| 2| C|
| 3| D|
| 4| E|
| 5| F|
| 6| G|
| 7| H|
| 8| I|
| 9| J|
+---+---+
I want to look up each column of df1 against the key column in df2 and return the corresponding val from df2 for each.
Here is the code to produce the two input dataframes
df1 = sc.parallelize([('0','1','2','3','4',), ('5','6','7','8','9',)]).toDF(['c1','c2','c3','c4','c5'])
df1.show()
df2 = sc.parallelize([('0','A',), ('1','B', ),('2','C', ),('3','D', ),('4','E',),\
('5','F',), ('6','G', ),('7','H', ),('8','I', ),('9','J',)]).toDF(['key','val'])
df2.show()
I want to join the above to produce the following
+---+---+---+---+---+---+---+---+---+---+
| c1| c2| c3| c4| c5|lu1|lu2|lu3|lu4|lu5|
+---+---+---+---+---+---+---+---+---+---+
|  0|  1|  2|  3|  4|  A|  B|  C|  D|  E|
|  5|  6|  7|  8|  9|  F|  G|  H|  I|  J|
+---+---+---+---+---+---+---+---+---+---+
I can get it to work for a single column like so, but I'm not sure how to extend it to all columns:
df1.join(df2, df1.c1 == df2.key).select('c1','val').show()
+---+---+
| c1|val|
+---+---+
| 0| A|
| 5| F|
+---+---+
You can just chain the joins:
df1 \
    .join(df2, on=df1.c1 == df2.key, how='left') \
    .withColumnRenamed('val', 'lu1') \
    .join(df2, on=df1.c2 == df2.key, how='left') \
    .withColumnRenamed('val', 'lu2')
    # ...and so on for c3, c4 and c5
You can even do it in a loop, but don't do it with too many columns:
from pyspark.sql import functions as f
df = df1
for i in range(1, 6):
    df = df \
        .join(df2.alias(str(i)), on=f.col('c{}'.format(i)) == f.col("{}.key".format(i)), how='left') \
        .withColumnRenamed('val', 'lu{}'.format(i))
df \
    .select('c1', 'c2', 'c3', 'c4', 'c5', 'lu1', 'lu2', 'lu3', 'lu4', 'lu5') \
    .show()
output
+---+---+---+---+---+---+---+---+---+---+
| c1| c2| c3| c4| c5|lu1|lu2|lu3|lu4|lu5|
+---+---+---+---+---+---+---+---+---+---+
| 5| 6| 7| 8| 9| F| G| H| I| J|
| 0| 1| 2| 3| 4| A| B| C| D| E|
+---+---+---+---+---+---+---+---+---+---+
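A different option, not part of the answer above: if df2 is just a small lookup table, you can collect it to the driver, build a literal map column with create_map, and look every column up in one projection instead of chaining five joins. A sketch under that assumption:
from itertools import chain
from pyspark.sql import functions as f
# Assumes df2 comfortably fits on the driver.
mapping = f.create_map(*list(chain.from_iterable(
    (f.lit(r['key']), f.lit(r['val'])) for r in df2.collect()
)))
# Keep the original columns and add one lookup column per c1..c5.
df1.select(
    '*',
    *[mapping[f.col(c)].alias('lu{}'.format(i + 1)) for i, c in enumerate(df1.columns)]
).show()
This produces the same lu1 to lu5 columns with a single pass over df1.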

How to flatten a pyspark dataframe? (spark 1.6)

I'm working with Spark 1.6.
Here is my data:
from pyspark.sql import Row
eDF = sqlsc.createDataFrame([Row(v=1, eng_1=10, eng_2=20),
                             Row(v=2, eng_1=15, eng_2=30),
                             Row(v=3, eng_1=8, eng_2=12)])
eDF.select('v', 'eng_1', 'eng_2').show()
+---+-----+-----+
| v|eng_1|eng_2|
+---+-----+-----+
| 1| 10| 20|
| 2| 15| 30|
| 3| 8| 12|
+---+-----+-----+
I would like to 'flatten' this table.
That is to say :
+---+-----+---+
| v| key|val|
+---+-----+---+
| 1|eng_1| 10|
| 1|eng_2| 20|
| 2|eng_1| 15|
| 2|eng_2| 30|
| 3|eng_1| 8|
| 3|eng_2| 12|
+---+-----+---+
Note that since I'm working with Spark 1.6, I can't use pyspark.sql.functions.create_map or pyspark.sql.functions.posexplode.
Use rdd.flatMap to flatten it:
df = sqlsc.createDataFrame(
    eDF.rdd.flatMap(
        lambda r: [Row(v=r.v, key=col, val=r[col]) for col in ['eng_1', 'eng_2']]
    )
)
df.show()
+-----+---+---+
| key| v|val|
+-----+---+---+
|eng_1| 1| 10|
|eng_2| 1| 20|
|eng_1| 2| 15|
|eng_2| 2| 30|
|eng_1| 3| 8|
|eng_2| 3| 12|
+-----+---+---+
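If you would rather stay in the DataFrame API on Spark 1.6, a union of one select per column gives the same result and keeps the v, key, val column order. A sketch, assuming the eDF from the question:
from functools import reduce
from pyspark.sql import functions as F
# One small DataFrame per engine column: (v, constant column name, that column's value).
parts = [eDF.select('v', F.lit(c).alias('key'), F.col(c).alias('val')) for c in ['eng_1', 'eng_2']]
# unionAll is the Spark 1.6 name (it was renamed to union in Spark 2.x).
flat = reduce(lambda a, b: a.unionAll(b), parts)
flat.orderBy('v', 'key').show()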