import pandas as pd
import pyspark.sql.functions as F
def value_counts(spark_df, colm, order=1, n=10):
    """
    Count the top n values in the given column and return them in the given order.

    Parameters
    ----------
    spark_df : pyspark.sql.dataframe.DataFrame
        Data
    colm : string
        Name of the column to count values in
    order : int, default=1
        1: sort descending by value counts
        2: sort ascending by column values
        3: sort descending by column values
        4: combine 2 and 3 (bottom n and top n after sorting by column values)
    n : int, default=10
        Number of top values to display

    Returns
    -------
    Value counts as a pandas DataFrame with columns ["value", "count"]
    """
    # Compute the grouped counts once; only the sort order differs per option.
    counts = spark_df.groupBy(colm).count()
    if order == 1:
        # Most frequent values first. "count" is never null after
        # groupBy().count(), so desc_nulls_first acts as a plain descending sort.
        rows = counts.orderBy(F.desc_nulls_first("count")).head(n)
    elif order == 2:
        # Smallest column values first.
        rows = counts.orderBy(F.asc(colm)).head(n)
    elif order == 3:
        # Largest column values first.
        rows = counts.orderBy(F.desc(colm)).head(n)
    elif order == 4:
        # Bottom n and top n by column value, concatenated.
        rows = counts.orderBy(F.asc(colm)).head(n) + counts.orderBy(F.desc(colm)).head(n)
    else:
        raise ValueError("order must be 1, 2, 3, or 4")
    return pd.DataFrame(rows, columns=["value", "count"])
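For instance, calling it on a small DataFrame (a minimal usage sketch; the SparkSession setup and the sample data are assumptions for illustration):

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [("CA",), ("NY",), ("CA",), ("TX",), (None,)],
    ["state"],
)

value_counts(df, "state")                # 10 most frequent values
value_counts(df, "state", order=4, n=2)  # 2 lowest and 2 highest values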
I think you want the DataFrame idioms groupBy and count. For example, given a DataFrame with one state per row, grouping by the state column and counting the rows in each group gives the frequency of each state, as shown below.
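A minimal sketch of that approach (the sample states and the SparkSession setup are assumptions for illustration):

from pyspark.sql import SparkSession
import pyspark.sql.functions as F

spark = SparkSession.builder.getOrCreate()

# One state per row.
states = spark.createDataFrame(
    [("TX",), ("NJ",), ("TX",), ("CA",), ("CA",), ("TX",)],
    ["state"],
)

# Group by state, count the rows in each group, most frequent first.
states.groupBy("state").count().orderBy(F.desc("count")).show()
# +-----+-----+
# |state|count|
# +-----+-----+
# |   TX|    3|
# |   CA|    2|
# |   NJ|    1|
# +-----+-----+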