Events sequence funnel - pandas

Is there any other way to build an event-sequence funnel?
Maybe using groupby with .size(), or anything simpler and more readable?
import pandas as pd

df = pd.DataFrame({
    'user_id': [1, 2, 3, 3, 3, 3, 1, 1],
    'time': ['2022-01-01 00:01:00', '2022-01-01 00:02:00', '2022-01-01 00:02:00', '2022-01-01 00:03:00',
             '2022-01-01 00:04:00', '2022-01-01 00:05:00', '2022-01-01 00:06:00', '2022-01-01 00:07:00'],
    'action': ['pageview', 'pageview', 'pageview', 'add_to_cart', 'checkout', 'payment', 'add_to_cart', 'checkout']})
users = df.pivot_table(
    index='user_id',
    columns='action',
    values='time',
    aggfunc='min')
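(For reference, users now holds each user's first timestamp per action, with NaN where the action never happened; it should look roughly like this:)
action           add_to_cart             checkout             pageview              payment
user_id
1        2022-01-01 00:06:00  2022-01-01 00:07:00  2022-01-01 00:01:00                  NaN
2                        NaN                  NaN  2022-01-01 00:02:00                  NaN
3        2022-01-01 00:03:00  2022-01-01 00:04:00  2022-01-01 00:02:00  2022-01-01 00:05:00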
pageview = users['pageview'].notna()                # step 0
add_to_cart = users.eval('add_to_cart > pageview')  # step 1
checkout = users.eval('checkout > add_to_cart')     # step 2
payment = users.eval('payment > checkout')          # step 3
n_pageview = users[pageview].shape[0]
n_add_to_cart = users[add_to_cart].shape[0]
n_checkout = users[checkout].shape[0]
n_payment = users[payment].shape[0]
print(f'pageview {n_pageview}')
print(f'add_to_cart {n_add_to_cart}')
print(f'checkout {n_checkout}')
print(f'payment {n_payment}')
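For what it's worth, a more compact way to express the same funnel (a sketch of the groupby idea mentioned above; it reproduces the same per-step comparisons) is to take each user's first timestamp per action and compare adjacent steps in a loop. Converting time to datetime first makes missing values (NaT) compare cleanly:
# first occurrence of each action per user (same shape as the pivot_table above)
first = (df.assign(time=pd.to_datetime(df['time']))
           .groupby(['user_id', 'action'])['time'].min().unstack())

steps = ['pageview', 'add_to_cart', 'checkout', 'payment']
print(steps[0], first[steps[0]].notna().sum())
for prev, step in zip(steps, steps[1:]):
    # users whose first `step` came after their first `prev`
    print(step, (first[step] > first[prev]).sum())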

Related

Extract words from a column and count frequency

Does anyone know if there's an efficient way to extract all the words from a single column and count the frequency of each word in SQL Server? I only have read-only access to my database, so I can't create a user-defined function to do this.
Here's a reproducible example:
CREATE TABLE words
(
    id INT PRIMARY KEY,
    text_column VARCHAR(1000)
);
INSERT INTO words (id, text_column)
VALUES
    (1, 'SQL Server is a popular database management system'),
    (2, 'It is widely used for data storage and retrieval'),
    (3, 'SQL Server is a powerful tool for data analysis');
I have found this code but it's not working correctly, and I think it's too complicated to understand:
WITH E1(N) AS
(
SELECT 1
FROM (VALUES
(1),(1),(1),(1),(1),(1),(1),(1),(1),(1)
) t(N)
),
E2(N) AS (SELECT 1 FROM E1 a CROSS JOIN E1 b),
E4(N) AS (SELECT 1 FROM E2 a CROSS JOIN E2 b)
SELECT
LOWER(x.Item) AS [Word],
COUNT(*) AS [Counts]
FROM
(SELECT * FROM words) a
CROSS APPLY
(SELECT
ItemNumber = ROW_NUMBER() OVER(ORDER BY l.N1),
Item = LTRIM(RTRIM(SUBSTRING(a.text_column, l.N1, l.L1)))
FROM
(SELECT
s.N1,
L1 = ISNULL(NULLIF(CHARINDEX(' ',a.text_column,s.N1),0)-s.N1,4000)
FROM
(SELECT 1
UNION ALL
SELECT t.N+1
FROM
(SELECT TOP (ISNULL(DATALENGTH(a.text_column)/2,0))
ROW_NUMBER() OVER (ORDER BY (SELECT NULL))
FROM E4) t(N)
WHERE SUBSTRING(a.text_column ,t.N,1) = ' '
) s(N1)
) l(N1, L1)
) x
WHERE
x.item <> ''
AND x.Item NOT IN ('0o', '0s', '3a', '3b', '3d', '6b', '6o', 'a', 'a1', 'a2', 'a3', 'a4', 'ab', 'able', 'about', 'above', 'abst', 'ac', 'accordance', 'according', 'accordingly', 'across', 'act', 'actually', 'ad', 'added', 'adj', 'ae', 'af', 'affected', 'affecting', 'affects', 'after', 'afterwards', 'ag', 'again', 'against', 'ah', 'ain', 'ain''t', 'aj', 'al', 'all', 'allow', 'allows', 'almost', 'alone', 'along', 'already', 'also', 'although', 'always', 'am', 'among', 'amongst', 'amoungst', 'amount', 'an', 'and', 'announce', 'another', 'any', 'anybody', 'anyhow', 'anymore', 'anyone', 'anything', 'anyway', 'anyways', 'anywhere', 'ao', 'ap', 'apart', 'apparently', 'appear', 'appreciate', 'appropriate', 'approximately', 'ar', 'are', 'aren', 'arent', 'aren''t', 'arise', 'around', 'as', 'a''s', 'aside', 'ask', 'asking', 'associated', 'at', 'au', 'auth', 'av', 'available', 'aw', 'away', 'awfully', 'ax', 'ay', 'az', 'b', 'b1', 'b2', 'b3', 'ba', 'back', 'bc', 'bd', 'be', 'became', 'because', 'become', 'becomes', 'becoming', 'been', 'before', 'beforehand', 'begin', 'beginning', 'beginnings', 'begins', 'behind', 'being', 'believe', 'below', 'beside', 'besides', 'best', 'better', 'between', 'beyond', 'bi', 'bill', 'biol', 'bj', 'bk', 'bl', 'bn', 'both', 'bottom', 'bp', 'br', 'brief', 'briefly', 'bs', 'bt', 'bu', 'but', 'bx', 'by', 'c', 'c1', 'c2', 'c3', 'ca', 'call', 'came', 'can', 'cannot', 'cant', 'can''t', 'cause', 'causes', 'cc', 'cd', 'ce', 'certain', 'certainly', 'cf', 'cg', 'ch', 'changes', 'ci', 'cit', 'cj', 'cl', 'clearly', 'cm', 'c''mon', 'cn', 'co', 'com', 'come', 'comes', 'con', 'concerning', 'consequently', 'consider', 'considering', 'contain', 'containing', 'contains', 'corresponding', 'could', 'couldn', 'couldnt', 'couldn''t', 'course', 'cp', 'cq', 'cr', 'cry', 'cs', 'c''s', 'ct', 'cu', 'currently', 'cv', 'cx', 'cy', 'cz', 'd', 'd2', 'da', 'date', 'dc', 'dd', 'de', 'definitely', 'describe', 'described', 'despite', 'detail', 'df', 'di', 'did', 'didn', 'didn''t', 'different', 'dj', 'dk', 'dl', 'do', 'does', 'doesn', 'doesn''t', 'doing', 'don', 'done', 'don''t', 'down', 'downwards', 'dp', 'dr', 'ds', 'dt', 'du', 'due', 'during', 'dx', 'dy', 'e', 'e2', 'e3', 'ea', 'each', 'ec', 'ed', 'edu', 'ee', 'ef', 'effect', 'eg', 'ei', 'eight', 'eighty', 'either', 'ej', 'el', 'eleven', 'else', 'elsewhere', 'em', 'empty', 'en', 'end', 'ending', 'enough', 'entirely', 'eo', 'ep', 'eq', 'er', 'es', 'especially', 'est', 'et', 'et-al', 'etc', 'eu', 'ev', 'even', 'ever', 'every', 'everybody', 'everyone', 'everything', 'everywhere', 'ex', 'exactly', 'example', 'except', 'ey', 'f', 'f2', 'fa', 'far', 'fc', 'few', 'ff', 'fi', 'fifteen', 'fifth', 'fify', 'fill', 'find', 'fire', 'first', 'five', 'fix', 'fj', 'fl', 'fn', 'fo', 'followed', 'following', 'follows', 'for', 'former', 'formerly', 'forth', 'forty', 'found', 'four', 'fr', 'from', 'front', 'fs', 'ft', 'fu', 'full', 'further', 'furthermore', 'fy', 'g', 'ga', 'gave', 'ge', 'get', 'gets', 'getting', 'gi', 'give', 'given', 'gives', 'giving', 'gj', 'gl', 'go', 'goes', 'going', 'gone', 'got', 'gotten', 'gr', 'greetings', 'gs', 'gy', 'h', 'h2', 'h3', 'had', 'hadn', 'hadn''t', 'happens', 'hardly', 'has', 'hasn', 'hasnt', 'hasn''t', 'have', 'haven', 'haven''t', 'having', 'he', 'hed', 'he''d', 'he''ll', 'hello', 'help', 'hence', 'her', 'here', 'hereafter', 'hereby', 'herein', 'heres', 'here''s', 'hereupon', 'hers', 'herself', 'hes', 'he''s', 'hh', 'hi', 'hid', 'him', 'himself', 'his', 'hither', 'hj', 'ho', 'home', 'hopefully', 'how', 'howbeit', 'however', 'how''s', 
'hr', 'hs', 'http', 'hu', 'hundred', 'hy', 'i', 'i2', 'i3', 'i4', 'i6', 'i7', 'i8', 'ia', 'ib', 'ibid', 'ic', 'id', 'i''d', 'ie', 'if', 'ig', 'ignored', 'ih', 'ii', 'ij', 'il', 'i''ll', 'im', 'i''m', 'immediate', 'immediately', 'importance', 'important', 'in', 'inasmuch', 'inc', 'indeed', 'index', 'indicate', 'indicated', 'indicates', 'information', 'inner', 'insofar', 'instead', 'interest', 'into', 'invention', 'inward', 'io', 'ip', 'iq', 'ir', 'is', 'isn', 'isn''t', 'it', 'itd', 'it''d', 'it''ll', 'its', 'it''s', 'itself', 'iv', 'i''ve', 'ix', 'iy', 'iz', 'j', 'jj', 'jr', 'js', 'jt', 'ju', 'just', 'k', 'ke', 'keep', 'keeps', 'kept', 'kg', 'kj', 'km', 'know', 'known', 'knows', 'ko', 'l', 'l2', 'la', 'largely', 'last', 'lately', 'later', 'latter', 'latterly', 'lb', 'lc', 'le', 'least', 'les', 'less', 'lest', 'let', 'lets', 'let''s', 'lf', 'like', 'liked', 'likely', 'line', 'little', 'lj', 'll', 'll', 'ln', 'lo', 'look', 'looking', 'looks', 'los', 'lr', 'ls', 'lt', 'ltd', 'm', 'm2', 'ma', 'made', 'mainly', 'make', 'makes', 'many', 'may', 'maybe', 'me', 'mean', 'means', 'meantime', 'meanwhile', 'merely', 'mg', 'might', 'mightn', 'mightn''t', 'mill', 'million', 'mine', 'miss', 'ml', 'mn', 'mo', 'more', 'moreover', 'most', 'mostly', 'move', 'mr', 'mrs', 'ms', 'mt', 'mu', 'much', 'mug', 'must', 'mustn', 'mustn''t', 'my', 'myself', 'n', 'n2', 'na', 'name', 'namely', 'nay', 'nc', 'nd', 'ne', 'near', 'nearly', 'necessarily', 'necessary', 'need', 'needn', 'needn''t', 'needs', 'neither', 'never', 'nevertheless', 'new', 'next', 'ng', 'ni', 'nine', 'ninety', 'nj', 'nl', 'nn', 'no', 'nobody', 'non', 'none', 'nonetheless', 'noone', 'nor', 'normally', 'nos', 'not', 'noted', 'nothing', 'novel', 'now', 'nowhere', 'nr', 'ns', 'nt', 'ny', 'o', 'oa', 'ob', 'obtain', 'obtained', 'obviously', 'oc', 'od', 'of', 'off', 'often', 'og', 'oh', 'oi', 'oj', 'ok', 'okay', 'ol', 'old', 'om', 'omitted', 'on', 'once', 'one', 'ones', 'only', 'onto', 'oo', 'op', 'oq', 'or', 'ord', 'os', 'ot', 'other', 'others', 'otherwise', 'ou', 'ought', 'our', 'ours', 'ourselves', 'out', 'outside', 'over', 'overall', 'ow', 'owing', 'own', 'ox', 'oz', 'p', 'p1', 'p2', 'p3', 'page', 'pagecount', 'pages', 'par', 'part', 'particular', 'particularly', 'pas', 'past', 'pc', 'pd', 'pe', 'per', 'perhaps', 'pf', 'ph', 'pi', 'pj', 'pk', 'pl', 'placed', 'please', 'plus', 'pm', 'pn', 'po', 'poorly', 'possible', 'possibly', 'potentially', 'pp', 'pq', 'pr', 'predominantly', 'present', 'presumably', 'previously', 'primarily', 'probably', 'promptly', 'proud', 'provides', 'ps', 'pt', 'pu', 'put', 'py', 'q', 'qj', 'qu', 'que', 'quickly', 'quite', 'qv', 'r', 'r2', 'ra', 'ran', 'rather', 'rc', 'rd', 're', 'readily', 'really', 'reasonably', 'recent', 'recently', 'ref', 'refs', 'regarding', 'regardless', 'regards', 'related', 'relatively', 'research', 'research-articl', 'respectively', 'resulted', 'resulting', 'results', 'rf', 'rh', 'ri', 'right', 'rj', 'rl', 'rm', 'rn', 'ro', 'rq', 'rr', 'rs', 'rt', 'ru', 'run', 'rv', 'ry', 's', 's2', 'sa', 'said', 'same', 'saw', 'say', 'saying', 'says', 'sc', 'sd', 'se', 'sec', 'second', 'secondly', 'section', 'see', 'seeing', 'seem', 'seemed', 'seeming', 'seems', 'seen', 'self', 'selves', 'sensible', 'sent', 'serious', 'seriously', 'seven', 'several', 'sf', 'shall', 'shan', 'shan''t', 'she', 'shed', 'she''d', 'she''ll', 'shes', 'she''s', 'should', 'shouldn', 'shouldn''t', 'should''ve', 'show', 'showed', 'shown', 'showns', 'shows', 'si', 'side', 'significant', 'significantly', 'similar', 'similarly', 'since', 'sincere', 'six', 
'sixty', 'sj', 'sl', 'slightly', 'sm', 'sn', 'so', 'some', 'somebody', 'somehow', 'someone', 'somethan', 'something', 'sometime', 'sometimes', 'somewhat', 'somewhere', 'soon', 'sorry', 'sp', 'specifically', 'specified', 'specify', 'specifying', 'sq', 'sr', 'ss', 'st', 'still', 'stop', 'strongly', 'sub', 'substantially', 'successfully', 'such', 'sufficiently', 'suggest', 'sup', 'sure', 'sy', 'system', 'sz', 't', 't1', 't2', 't3', 'take', 'taken', 'taking', 'tb', 'tc', 'td', 'te', 'tell', 'ten', 'tends', 'tf', 'th', 'than', 'thank', 'thanks', 'thanx', 'that', 'that''ll', 'thats', 'that''s', 'that''ve', 'the', 'their', 'theirs', 'them', 'themselves', 'then', 'thence', 'there', 'thereafter', 'thereby', 'thered', 'therefore', 'therein', 'there''ll', 'thereof', 'therere', 'theres', 'there''s', 'thereto', 'thereupon', 'there''ve', 'these', 'they', 'theyd', 'they''d', 'they''ll', 'theyre', 'they''re', 'they''ve', 'thickv', 'thin', 'think', 'third', 'this', 'thorough', 'thoroughly', 'those', 'thou', 'though', 'thoughh', 'thousand', 'three', 'throug', 'through', 'throughout', 'thru', 'thus', 'ti', 'til', 'tip', 'tj', 'tl', 'tm', 'tn', 'to', 'together', 'too', 'took', 'top', 'toward', 'towards', 'tp', 'tq', 'tr', 'tried', 'tries', 'truly', 'try', 'trying', 'ts', 't''s', 'tt', 'tv', 'twelve', 'twenty', 'twice', 'two', 'tx', 'u', 'u201d', 'ue', 'ui', 'uj', 'uk', 'um', 'un', 'under', 'unfortunately', 'unless', 'unlike', 'unlikely', 'until', 'unto', 'uo', 'up', 'upon', 'ups', 'ur', 'us', 'use', 'used', 'useful', 'usefully', 'usefulness', 'uses', 'using', 'usually', 'ut', 'v', 'va', 'value', 'various', 'vd', 've', 've', 'very', 'via', 'viz', 'vj', 'vo', 'vol', 'vols', 'volumtype', 'vq', 'vs', 'vt', 'vu', 'w', 'wa', 'want', 'wants', 'was', 'wasn', 'wasnt', 'wasn''t', 'way', 'we', 'wed', 'we''d', 'welcome', 'well', 'we''ll', 'well-b', 'went', 'were', 'we''re', 'weren', 'werent', 'weren''t', 'we''ve', 'what', 'whatever', 'what''ll', 'whats', 'what''s', 'when', 'whence', 'whenever', 'when''s', 'where', 'whereafter', 'whereas', 'whereby', 'wherein', 'wheres', 'where''s', 'whereupon', 'wherever', 'whether', 'which', 'while', 'whim', 'whither', 'who', 'whod', 'whoever', 'whole', 'who''ll', 'whom', 'whomever', 'whos', 'who''s', 'whose', 'why', 'why''s', 'wi', 'widely', 'will', 'willing', 'wish', 'with', 'within', 'without', 'wo', 'won', 'wonder', 'wont', 'won''t', 'words', 'world', 'would', 'wouldn', 'wouldnt', 'wouldn''t', 'www', 'x', 'x1', 'x2', 'x3', 'xf', 'xi', 'xj', 'xk', 'xl', 'xn', 'xo', 'xs', 'xt', 'xv', 'xx', 'y', 'y2', 'yes', 'yet', 'yj', 'yl', 'you', 'youd', 'you''d', 'you''ll', 'your', 'youre', 'you''re', 'yours', 'yourself', 'yourselves', 'you''ve', 'yr', 'ys', 'yt', 'z', 'zero', 'zi', 'zz')
GROUP BY x.Item
ORDER BY COUNT(*) DESC
Here's the result of the above code; as you can see, it's not counting correctly:
Word Counts
server 2
sql 2
data 1
database 1
popular 1
powerful 1
Can anyone help on this? Would be really appreciated!
You can make use of STRING_SPLIT here, such as:
select value Word, Count(*) Counts
from words
cross apply String_Split(text_column, ' ')
where value not in (exclude list)
group by value
order by Counts desc;
You should use the STRING_SPLIT function, like this:
SELECT id, value as aword
FROM words
CROSS APPLY STRING_SPLIT(text_column, ' ');
This will create a table with all the words by id. To get the count, do this:
SELECT aword, count(*) as counts
FROM (
    SELECT id, value as aword
    FROM words
    CROSS APPLY STRING_SPLIT(text_column, ' ')
) x
GROUP BY aword
You may need to lower-case the column with LOWER(text_column) if you want the count to be case-insensitive.
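For example, a sketch of the same split with case folded (same table as above):
SELECT value AS aword, COUNT(*) AS counts
FROM words
CROSS APPLY STRING_SPLIT(LOWER(text_column), ' ')
GROUP BY value
ORDER BY counts DESC;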
If you don't have access to the STRING_SPLIT function, you can use a weird XML trick to turn each space into a word-node boundary and then shred it with the nodes function:
select word, COUNT(*)
from (
    select n.value('.', 'nvarchar(50)') AS word
    from (
        VALUES
            (1, 'SQL Server is a popular database management system'),
            (2, 'It is widely used for data storage and retrieval'),
            (3, 'SQL Server is a powerful tool for data analysis')
    ) AS t (id, txt)
    CROSS APPLY (
        SELECT CAST('<x>' + REPLACE(txt, ' ', '</x><x>') + '</x>' AS XML) x
    ) x
    CROSS APPLY x.nodes('x') z(n)
) w
GROUP BY word
Of course, this will fail on "bad" words and invalid XML characters, but that can be worked around. Text processing has never been SQL Server's strong point, though, so it's probably better to use an NLP library for this kind of task.

Google Analytics data api dictionary to pandas data frame

I exported Google Analytics data in the dictionary format below, with 3 dimensions and 2 metrics. How can I change this format to a pandas DataFrame? I don't need the columns rowCount, minimums, maximums, nextPageToken. Thank you
{'reports': [{'columnHeader': {'dimensions': ['ga:date', 'ga:eventCategory', 'ga:eventAction'],
                               'metricHeader': {'metricHeaderEntries': [{'name': 'ga:totalEvents', 'type': 'INTEGER'},
                                                                        {'name': 'ga:UniqueEvents', 'type': 'INTEGER'},
                                                                        {'name': 'ga:eventvalue', 'type': 'INTEGER'}]}},
              'data': {'rows': [{'dimensions': ['20220820', 'accordion ', 'accordion'], 'metrics': [{'values': ['547', '528', '0']}]},
                                {'dimensions': ['20220817', 'accordion click', 'benefits'], 'metrics': [{'values': ['26', '26', '0']}]},
                                {'dimensions': ['20220818', 'accordion click', 'for-your-dog '], 'metrics': [{'values': ['1', '1', '0']}]},
                                {'dimensions': ['20220819', 'account', 'register'], 'metrics': [{'values': ['1465', '1345', '0']}]},
                                {'dimensions': ['20220820', 'account', 'reminders'], 'metrics': [{'values': ['59', '54', '0']}]},
                                # ... remaining rows omitted ...
                                ],
                       'rowCount': 17,
                       'minimums': [{'values': ['1', '1', '0']}],
                       'maximums': [{'values': ['40676', '37725', '5001337']}]},
              'nextPageToken': '1000'}]}
final dataframe format below
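A minimal sketch of one way to flatten the response into a DataFrame, assuming the structure shown above (response is a name for the dict; the columns are just the dimension and metric headers):
import pandas as pd

report = response['reports'][0]
dims = report['columnHeader']['dimensions']
metrics = [m['name'] for m in report['columnHeader']['metricHeader']['metricHeaderEntries']]

# one record per row: dimension values followed by that row's metric values
records = [row['dimensions'] + row['metrics'][0]['values']
           for row in report['data']['rows']]

df = pd.DataFrame(records, columns=dims + metrics)
df[metrics] = df[metrics].astype(int)  # metric values arrive as strings
This ignores rowCount, minimums, maximums and nextPageToken entirely, as requested.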

How to apply str.split() on pandas column?

Using Simple Data:
df = pd.DataFrame({'ids': [0,1,2], 'value': ['2 4 10 0 14', '5 91 19 20 0', '1 1 1 2 44']})
I need to convert the column to array, so I use:
df.iloc[:,-1] = df.iloc[:,-1].apply(lambda x: str(x).split())
X = df.iloc[:, 1:]
X = np.array(X.values)
but the problem is that the data ends up nested, and I just need a (3, 5) matrix. How can I do this properly and fast for large data (avoiding looping)?
As said in the comments by @anky and @ScottBoston, you can use the string method split along with the expand parameter and finally convert to NumPy:
df.iloc[:, 1].str.split(expand=True).values

array([['2', '4', '10', '0', '14'],
       ['5', '91', '19', '20', '0'],
       ['1', '1', '1', '2', '44']], dtype=object)
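If you need numbers rather than strings, an extra cast does it (an addition, not part of the original answer):
X = df.iloc[:, 1].str.split(expand=True).astype(int).to_numpy()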

Conditional grouping in pandas and transpose

With an input dataframe built from a given CSV, I need to transpose the data based on certain conditions. The groupby should be applied based on the Key value.
For any value in the same 'Key' group, if the 'Type' is "T", these values should be written to "T" columns labelled T1, T2, T3... and so on.
For any value in the same 'Key' group, if the 'Type' is "P" and 'Code' ends with "00", these values should be written to "U" columns labelled U1, U2, U3... and so on.
For any value in the same 'Key' group, if the 'Type' is "P" and 'Code' doesn't end with "00", these values should be written to "P" columns labelled P1, P2, P3... and so on.
There may be any number of values of type T and P for a given Key, and the output columns for T and P should expand accordingly.
Input Dataframe:
df = pd.DataFrame({'Key': ['1', '1', '1', '1', '1', '2', '2', '2', '2', '2'],
                   'Value': ['T101', 'T102', 'P101', 'P102', 'P103', 'T201', 'T202', 'P201', 'P202', 'P203'],
                   'Type': ['T', 'T', 'P', 'P', 'P', 'T', 'T', 'P', 'P', 'P'],
                   'Code': ['0', '0', 'ABC00', 'TWY01', 'JTH02', '0', '0', 'OUJ00', 'LKE00', 'WDF45']})
Expected Dataframe:
Can anyone suggest an effective solution for this case?
Here's a possible solution using pivot.
import pandas as pd

df = pd.DataFrame({'Key': ['1', '1', '1', '1', '1', '2', '2', '2', '2', '2'],
                   'Value': ['T101', 'T102', 'P101', 'P102', 'P103', 'T201', 'T202', 'P201', 'P202', 'P203'],
                   'Type': ['T', 'T', 'P', 'P', 'P', 'T', 'T', 'P', 'P', 'P'],
                   'Code': ['0', '0', 'ABC00', 'TWY01', 'JTH02', '0', '0', 'OUJ00', 'LKE00', 'WDF45']})

# Relabel type P rows whose Code ends with '00' as type U
df.loc[df['Code'].str.endswith('00') & (df['Type'] == 'P'), 'Type'] = 'U'

# Number the occurrences within each (Key, Type) group: T1, T2, U1, ...
df = df.join(df.groupby(['Key', 'Type']).cumcount().rename('Tcount').to_frame() + 1)
df['Type'] = df['Type'] + df['Tcount'].astype('str')

# Pivot the table
pv = df.loc[:, ['Key', 'Type', 'Value']].pivot(index='Key', columns='Type', values='Value')
>>> pv
Type    P1    P2    T1    T2    U1    U2
Key
1     P102  P103  T101  T102  P101   NaN
2     P203   NaN  T201  T202  P201  P202
cdf = df.loc[df['Code'] != '0', ['Key', 'Code']].groupby('Key')['Code'].apply(lambda x: ','.join(x))

>>> cdf
Key
1    ABC00,TWY01,JTH02
2    OUJ00,LKE00,WDF45
Name: Code, dtype: object

>>> pv.join(cdf)
       P1    P2    T1    T2    U1    U2               Code
Key
1    P102  P103  T101  T102  P101  None  ABC00,TWY01,JTH02
2    P203  None  T201  T202  P201  P202  OUJ00,LKE00,WDF45

Converting formula from Crystal Reports to SSRS

I'll try and keep this as short as possible, but I'm trying to convert a formula cell from Crystal Reports to SSRS.
Here is the query:
SELECT
(SELECT START_DATE
FROM APPS.GL_PERIODS
WHERE PERIOD_TYPE = 'Month'
AND TRUNC(SYSDATE-:Days) BETWEEN START_DATE AND END_DATE) STR_DATE,
(SELECT END_DATE
FROM APPS.GL_PERIODS
WHERE PERIOD_TYPE = 'Month'
AND TRUNC(SYSDATE-:Days) BETWEEN START_DATE AND END_DATE) END_DATE,
DECODE(RT.ORGANIZATION_ID, 104, 'LPD',RT.ORGANIZATION_ID) ORG,
SUBSTR(POV.VENDOR_NAME, 1, 24) VENDOR_NAME,
DECODE(SUBSTR(PHA.SEGMENT1, 2,1), 'E', 'EXPENSE', 'e', 'EXPENSE', 'P', 'PRODUCT', 'p', 'PRODUCT', ' OTHER') PO_TYPE,
DECODE(SIGN(TRUNC(RT.TRANSACTION_DATE) - TRUNC(NVL(PLL.PROMISED_DATE - 3, PLL.NEED_BY_DATE - 3))), -1, 'LATE', 'ON TIME') PERFORMANCE,
COUNT(*) LINE_COUNT
FROM
APPS.RCV_TRANSACTIONS RT,
APPS.PO_HEADERS_ALL PHA,
APPS.PO_LINES_ALL PLA,
APPS.PO_LINE_LOCATIONS_ALL PLL,
APPS.PO_VENDORS POV
WHERE
RT.ORGANIZATION_ID = 104
AND RT.TRANSACTION_DATE >= (SELECT START_DATE
FROM APPS.GL_PERIODS
WHERE PERIOD_TYPE = 'Month'
AND TRUNC(SYSDATE-:Days) BETWEEN START_DATE AND END_DATE)
AND RT.TRANSACTION_DATE < (SELECT END_DATE + 1
FROM APPS.GL_PERIODS
WHERE PERIOD_TYPE = 'Month'
AND TRUNC(SYSDATE-:Days) BETWEEN START_DATE AND END_DATE)
AND RT.TRANSACTION_TYPE = 'RECEIVE'
AND RT.PO_HEADER_ID = PLL.PO_HEADER_ID
AND RT.PO_LINE_LOCATION_ID = PLL.LINE_LOCATION_ID
AND RT.PO_LINE_ID = PLL.PO_LINE_ID
AND RT.ORGANIZATION_ID = PLL.SHIP_TO_ORGANIZATION_ID
AND PLA.PO_LINE_ID = PLL.PO_LINE_ID
AND PLA.PO_HEADER_ID = PLL.PO_HEADER_ID
AND PHA.PO_HEADER_ID = PLA.PO_HEADER_ID
AND PHA.VENDOR_ID = POV.VENDOR_ID
GROUP BY
DECODE(RT.ORGANIZATION_ID, 104, 'LPD', RT.ORGANIZATION_ID),
SUBSTR(POV.VENDOR_NAME, 1, 24),
DECODE(SUBSTR(PHA.SEGMENT1, 2, 1), 'E', 'EXPENSE', 'e', 'EXPENSE', 'P', 'PRODUCT', 'p', 'PRODUCT', ' OTHER'),
DECODE(SIGN(TRUNC(RT.TRANSACTION_DATE) - TRUNC(NVL(PLL.PROMISED_DATE - 3, PLL.NEED_BY_DATE - 3))), -1, 'LATE', 'ON TIME')
ORDER BY
ORG, VENDOR_NAME, PO_TYPE, PERFORMANCE
In Crystal, the formula is
SUM({query.LINE_COUNT},{query.PERFORMANCE}) % SUM({query.LINE_COUNT}, {query.PO_TYPE})
This cell is basically just calculating the percentage of on-time versus late deliveries.
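For reference, a sketch of the SSRS side: SSRS aggregate functions accept an optional scope argument, so Crystal's group-scoped SUMs map onto Sum(expression, scope). Assuming a tablix with row groups named "PERFORMANCE" and "PO_TYPE" (the group names here are assumptions and must match whatever your tablix groups are actually called), and multiplying by 100 because Crystal's % operator returns a percentage:
=Sum(Fields!LINE_COUNT.Value, "PERFORMANCE") / Sum(Fields!LINE_COUNT.Value, "PO_TYPE") * 100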