Greeshma Pogula

Swiggy Problem

PySpark solutions for a guided project on a Swiggy food-delivery dataset: Task-1 reads and cleans the raw JSON, and Tasks 2-4 answer analysis questions on the cleaned data. Each snippet below is the body of one of the project's task functions, which is why it ends with a return statement.

Task-1

Read the raw JSON, keep only the columns needed later, drop fully empty rows, fill missing values, and derive two helper columns: a delivery_rating flag and a deliverymin bucket.

from pyspark.sql.functions import col, when, lit

# Read the raw Swiggy data (a JSON document that spans multiple lines)
df = spark.read.json(input_path, multiLine=True)

# Keep only the columns used in the later tasks
df = df.select('cust_name', 'id', 'name', 'city', 'avgrating', 'cuisines', 'costfortwos',
               'deliverytime', 'mindeliverytime', 'maxdeliverytime', 'locality', 'date', 'amount')

# Drop rows where every column is null, then fill remaining nulls in the string columns
df = df.na.drop(how="all")
df = df.na.fill('N/A')

# Flag fast deliveries: 4 if delivered within 40 minutes, otherwise 3
df = df.withColumn("delivery_rating", when(col("deliverytime") <= 40, 4).otherwise(3))

# Bucket the delivery time into 10-minute bands (<=19 -> 10, 20-29 -> 20, ..., 50-59 -> 50)
df = df.withColumn("deliverymin",
                   when(df.deliverytime <= 19, lit(10))
                   .when((df.deliverytime >= 20) & (df.deliverytime <= 29), lit(20))
                   .when((df.deliverytime >= 30) & (df.deliverytime <= 39), lit(30))
                   .when((df.deliverytime >= 40) & (df.deliverytime <= 49), lit(40))
                   .when((df.deliverytime >= 50) & (df.deliverytime <= 59), lit(50)))
return df
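
Because the buckets follow a regular 10-minute pattern, the same deliverymin column can also be derived arithmetically. The sketch below is optional and assumes deliverytime is numeric; it produces the same bucket values as the when chain above, leaving anything of 60 minutes or more (or null) unbucketed.

from pyspark.sql.functions import when, floor, greatest, lit

# Optional, equivalent bucketing: round deliverytime down to its 10-minute band
# and clamp the lowest band to 10; rows with deliverytime >= 60 (or null) stay null.
df = df.withColumn(
    "deliverymin",
    when(df.deliverytime <= 59,
         greatest(lit(10), floor(df.deliverytime / 10) * 10))
)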

Task-2

Top 10 restaurants by number of orders, computed on the cleaned DataFrame from Task-1.

from pyspark.sql.functions import count, desc

# Count orders per restaurant name and keep the 10 busiest
df_10 = clean_df.groupBy("name").agg(count("name").alias("no_of_orders"))
df_10 = df_10.sort(desc("no_of_orders")).limit(10)
return df_10
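
The same top-10 query can also be written in Spark SQL over a temporary view. This is just an illustrative alternative (the view name orders is made up for the example); the DataFrame version above is the task solution.

# Optional: the same aggregation expressed in Spark SQL
clean_df.createOrReplaceTempView("orders")
top10 = spark.sql("""
    SELECT name, COUNT(name) AS no_of_orders
    FROM orders
    GROUP BY name
    ORDER BY no_of_orders DESC
    LIMIT 10
""")
top10.show(truncate=False)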

Task-3

Average, maximum, and minimum rating per city, keeping only cities whose average rating is above 3.5.

from pyspark.sql.functions import avg, max, min  # note: shadows Python's built-in max/min in this snippet

# Aggregate ratings per city, then keep cities with an average rating above 3.5
df_avg = clean_df.groupBy("city").agg(avg("avgrating").alias("avg_rating"),
                                      max("avgrating").alias("max_rating"),
                                      min("avgrating").alias("min_rating"))
df_avg = df_avg.filter(df_avg.avg_rating > 3.5)
return df_avg
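
One caveat, depending on how the source JSON is typed: if avgrating is read in as a string column, max and min above would compare ratings lexicographically. Casting it to double first (an optional safeguard, not part of the original task) keeps all three aggregates numeric.

from pyspark.sql.functions import col

# Optional safeguard: make avgrating numeric before aggregating, in case the
# JSON reader inferred it as a string (non-numeric values such as 'N/A' become null)
clean_df = clean_df.withColumn("avgrating", col("avgrating").cast("double"))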

Task-4

Number of orders in each deliverymin bucket from Task-1, sorted by count.

from pyspark.sql.functions import count

# Count how many orders fall into each deliverymin bucket, smallest count first
df_del = clean_df.groupBy("deliverymin").agg(count("deliverymin").alias("delivertime_count"))
df_del = df_del.sort("delivertime_count")
return df_del
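
To run everything end to end, the four snippets can be wrapped as functions and chained from a small driver. The names below (clean_data, top_10_restaurants, ratings_by_city, delivery_time_counts) and the input path are hypothetical, assumed only for this sketch; the guided project supplies its own stubs.

from pyspark.sql import SparkSession

# Hypothetical driver: assumes the four task bodies above have been wrapped
# into functions with these (made-up) names.
def main():
    spark = SparkSession.builder.appName("swiggy-analysis").getOrCreate()
    input_path = "swiggy.json"  # assumed location of the raw data

    clean_df = clean_data(spark, input_path)   # Task-1
    top_10_restaurants(clean_df).show()        # Task-2
    ratings_by_city(clean_df).show()           # Task-3
    delivery_time_counts(clean_df).show()      # Task-4

    spark.stop()

if __name__ == "__main__":
    main()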
