The Independent Marketing Sciences package is a Python library that processes incoming data into a format tailored to analytical projects, particularly those utilising weekly time series data. The package offers a suite of functions for efficient data collection, manipulation, visualisation and analysis.
# Import the four toolkits and create an instance of each
from imsciences import dataprocessing, geoprocessing, datapull, datavis

ims_proc = dataprocessing()  # data cleaning and reshaping utilities
ims_geo = geoprocessing()    # geo-experiment analysis helpers
ims_pull = datapull()        # connectors for external data sources
ims_vis = datavis()          # charting helpers
get_wd_levels
get_wd_levels(levels)
get_wd_levels(0)
aggregate_daily_to_wc_long
aggregate_daily_to_wc_long(df, date_column, group_columns, sum_columns, wc, aggregation='sum')
aggregate_daily_to_wc_long(df, 'date', ['platform'], ['cost', 'impressions', 'clicks'], 'mon', 'average')
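A minimal runnable sketch of the call above, assuming aggregate_daily_to_wc_long is a method of the dataprocessing class instantiated earlier and returns one row per week-commencing date and platform (the sample data is purely illustrative):
import pandas as pd
from imsciences import dataprocessing

ims_proc = dataprocessing()

# Two weeks of illustrative daily spend data for one platform
daily = pd.DataFrame({
    'date': pd.date_range('2023-01-02', periods=14, freq='D'),
    'platform': 'facebook',
    'cost': [10.0] * 14,
    'impressions': [1000] * 14,
    'clicks': [50] * 14,
})

# Average the daily values within each week commencing Monday
weekly = ims_proc.aggregate_daily_to_wc_long(
    daily, 'date', ['platform'], ['cost', 'impressions', 'clicks'], 'mon', 'average'
)
print(weekly)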
convert_monthly_to_daily
convert_monthly_to_daily(df, date_column, divide=True)
convert_monthly_to_daily(df, 'date')
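A sketch of the call above, assuming divide=True (the default) spreads each monthly value evenly across the days of that month:
import pandas as pd
from imsciences import dataprocessing

ims_proc = dataprocessing()

# Illustrative monthly totals
monthly = pd.DataFrame({
    'date': pd.to_datetime(['2023-01-01', '2023-02-01']),
    'spend': [3100.0, 2800.0],
})

# Each month's total is presumably divided evenly across its days
daily = ims_proc.convert_monthly_to_daily(monthly, 'date')
print(daily.head())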
week_of_year_mapping
week_of_year_mapping(df, week_col, start_day_str)
week_of_year_mapping(df, 'week', 'mon')
rename_cols
rename_cols(df, name='ame_')
rename_cols(df, 'ame_facebook')
merge_new_and_old
merge_new_and_old(old_df, old_col, new_df, new_col, cutoff_date, date_col_name='OBS')
merge_new_and_old(df1, 'old_col', df2, 'new_col', '2023-01-15')
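A hedged sketch of splicing an old and a new series at a cutoff date, assuming rows before the cutoff are taken from the old column and rows on or after it from the new one, keyed on the default 'OBS' date column:
import pandas as pd
from imsciences import dataprocessing

ims_proc = dataprocessing()

old = pd.DataFrame({
    'OBS': pd.to_datetime(['2023-01-02', '2023-01-09', '2023-01-16']),
    'old_col': [1.0, 2.0, 3.0],
})
new = pd.DataFrame({
    'OBS': pd.to_datetime(['2023-01-09', '2023-01-16', '2023-01-23']),
    'new_col': [20.0, 30.0, 40.0],
})

# Values before 2023-01-15 presumably come from old_col, the rest from new_col
merged = ims_proc.merge_new_and_old(old, 'old_col', new, 'new_col', '2023-01-15')
print(merged)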
merge_dataframes_on_column
merge_dataframes_on_column(dataframes, common_column='OBS', merge_how='outer')
merge_dataframes_on_column([df1, df2, df3], common_column='OBS', merge_how='outer')
merge_and_update_dfs
merge_and_update_dfs(df1, df2, key_column)
merge_and_update_dfs(processed_facebook, finalised_meta, 'OBS')
convert_us_to_uk_dates
convert_us_to_uk_dates(df, date_col)
convert_us_to_uk_dates(df, 'date')
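For example, assuming the function reparses US month-first strings as day-first UK dates:
import pandas as pd
from imsciences import dataprocessing

ims_proc = dataprocessing()

df = pd.DataFrame({'date': ['01/31/2023', '02/14/2023'], 'value': [1, 2]})

# '01/31/2023' (MM/DD/YYYY) presumably becomes 31 January 2023 in day-first form
uk = ims_proc.convert_us_to_uk_dates(df, 'date')
print(uk)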
combine_sheets
combine_sheets(all_sheets)
combine_sheets({'Sheet1': df1, 'Sheet2': df2})
pivot_table
pivot_table(df, index_col, columns, values_col, filters_dict=None, fill_value=0, aggfunc='sum', margins=False, margins_name='Total', datetime_trans_needed=True, reverse_header_order=False, fill_missing_weekly_dates=False, week_commencing='W-MON')
pivot_table(df, 'OBS', 'Channel Short Names', 'Value', filters_dict={'Master Include': ' == 1'}, fill_value=0)
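A sketch of the filtered pivot above; the filters_dict values appear to be condition strings applied per column, so this assumes ' == 1' keeps only rows flagged for inclusion (the sample data is illustrative):
import pandas as pd
from imsciences import dataprocessing

ims_proc = dataprocessing()

long_df = pd.DataFrame({
    'OBS': pd.to_datetime(['2023-01-02', '2023-01-02', '2023-01-09', '2023-01-09']),
    'Channel Short Names': ['tv', 'ppc', 'tv', 'ppc'],
    'Value': [100, 40, 90, 50],
    'Master Include': [1, 1, 1, 0],
})

# Pivot to one column per channel, keeping only rows where Master Include == 1
wide = ims_proc.pivot_table(
    long_df, 'OBS', 'Channel Short Names', 'Value',
    filters_dict={'Master Include': ' == 1'}, fill_value=0
)
print(wide)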
apply_lookup_table_for_columns
apply_lookup_table_for_columns(df, col_names, to_find_dict, if_not_in_dict='Other', new_column_name='Mapping')
apply_lookup_table_for_columns(df, col_names, {'spend': 'spd'}, if_not_in_dict='Other', new_column_name='Metrics Short')
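A sketch, assuming the lookup scans the named columns for each dictionary key as a substring and writes the mapped value (falling back to if_not_in_dict) into the new column:
import pandas as pd
from imsciences import dataprocessing

ims_proc = dataprocessing()

df = pd.DataFrame({'metric': ['media spend', 'impressions', 'clicks']})

# Rows containing 'spend' presumably map to 'spd'; all others to 'Other'
out = ims_proc.apply_lookup_table_for_columns(
    df, ['metric'], {'spend': 'spd'},
    if_not_in_dict='Other', new_column_name='Metrics Short'
)
print(out)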
aggregate_daily_to_wc_wide
aggregate_daily_to_wc_wide(df, date_column, group_columns, sum_columns, wc='sun', aggregation='sum', include_totals=False)
aggregate_daily_to_wc_wide(df, 'date', ['platform'], ['cost', 'impressions'], 'mon', 'average', True)
merge_cols_with_seperator
merge_cols_with_seperator(df, col_names, separator='_', output_column_name='Merged')
merge_cols_with_seperator(df, ['Campaign', 'Product'], separator='|', output_column_name='Merged Columns')
check_sum_of_df_cols_are_equal
check_sum_of_df_cols_are_equal(df_1, df_2, cols_1, cols_2)
check_sum_of_df_cols_are_equal(df_1, df_2, 'Media Cost', 'Spend')
convert_2_df_cols_to_dict
convert_2_df_cols_to_dict(df, key_col, value_col)
convert_2_df_cols_to_dict(df, 'Campaign', 'Channel')
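For instance, assuming the two columns become the keys and values of the returned dictionary:
import pandas as pd
from imsciences import dataprocessing

ims_proc = dataprocessing()

df = pd.DataFrame({
    'Campaign': ['brand_search', 'retargeting'],
    'Channel': ['ppc', 'social'],
})

# Presumably returns {'brand_search': 'ppc', 'retargeting': 'social'}
mapping = ims_proc.convert_2_df_cols_to_dict(df, 'Campaign', 'Channel')
print(mapping)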
create_FY_and_H_columns
create_FY_and_H_columns(df, index_col, start_date, starting_FY, short_format='No', half_years='No', combined_FY_and_H='No')
create_FY_and_H_columns(df, 'Week', '2022-10-03', 'FY2023', short_format='Yes')
keyword_lookup_replacement
keyword_lookup_replacement(df, col, replacement_rows, cols_to_merge, replacement_lookup_dict, output_column_name='Updated Column')
keyword_lookup_replacement(df, 'channel', 'Paid Search Generic', ['channel', 'segment'], lookup_dict, output_column_name='Channel New')
create_new_version_of_col_using_LUT
create_new_version_of_col_using_LUT(df, keys_col, value_col, dict_for_specific_changes, new_col_name='New Version of Old Col')
create_new_version_of_col_using_LUT(df, 'Campaign Name', 'Campaign Type', lookup_dict)
convert_df_wide_2_long
convert_df_wide_2_long(df, value_cols, variable_col_name='Stacked', value_col_name='Value')
convert_df_wide_2_long(df, ['col1', 'col2'], variable_col_name='Var', value_col_name='Val')
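A sketch of stacking two value columns into long format, assuming behaviour analogous to a pandas melt:
import pandas as pd
from imsciences import dataprocessing

ims_proc = dataprocessing()

wide = pd.DataFrame({
    'OBS': pd.to_datetime(['2023-01-02', '2023-01-09']),
    'col1': [1, 2],
    'col2': [3, 4],
})

# One row per original cell, with column names stacked under 'Var'
long_df = ims_proc.convert_df_wide_2_long(
    wide, ['col1', 'col2'], variable_col_name='Var', value_col_name='Val'
)
print(long_df)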
manually_edit_data
manually_edit_data(df, filters_dict, col_to_change, new_value, change_in_existing_df_col='No', new_col_to_change_name='New', manual_edit_col_name=None, add_notes='No', existing_note_col_name=None, note=None)
manually_edit_data(df, {'col1': '== 1'}, 'col2', 'new_val', add_notes='Yes', note='Manual Update')
format_numbers_with_commas
format_numbers_with_commas(df, decimal_length_chosen=2)
format_numbers_with_commas(df, decimal_length_chosen=1)
filter_df_on_multiple_conditions
filter_df_on_multiple_conditions(df, filters_dict)
filter_df_on_multiple_conditions(df, {'col1': '>= 5', 'col2': "== 'val'"})
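A runnable sketch, assuming each dictionary entry is a condition string applied to the named column and the conditions are combined with AND:
import pandas as pd
from imsciences import dataprocessing

ims_proc = dataprocessing()

df = pd.DataFrame({'col1': [3, 6, 9], 'col2': ['val', 'val', 'other']})

# Presumably keeps only rows where col1 >= 5 and col2 == 'val'
filtered = ims_proc.filter_df_on_multiple_conditions(
    df, {'col1': '>= 5', 'col2': "== 'val'"}
)
print(filtered)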
read_and_concatenate_files
read_and_concatenate_files(folder_path, file_type='csv')
read_and_concatenate_files('/path/to/files', file_type='xlsx')
upgrade_outdated_packages
upgrade_outdated_packages(exclude_packages=['twine'])
upgrade_outdated_packages(exclude_packages=['pip', 'setuptools'])
convert_mixed_formats_dates
convert_mixed_formats_dates(df, column_name)
convert_mixed_formats_dates(df, 'date_col')
fill_weekly_date_range
fill_weekly_date_range(df, date_column, freq='W-MON')
fill_weekly_date_range(df, 'date_col')
add_prefix_and_suffix
add_prefix_and_suffix(df, prefix='', suffix='', date_col=None)
add_prefix_and_suffix(df, prefix='pre_', suffix='_suf', date_col='date_col')
create_dummies
create_dummies(df, date_col=None, dummy_threshold=0, add_total_dummy_col='No', total_col_name='total')
create_dummies(df, date_col='date_col', dummy_threshold=1)
replace_substrings
replace_substrings(df, column, replacements, to_lower=False, new_column=None)
replace_substrings(df, 'text_col', {'old': 'new'}, to_lower=True, new_column='updated_text')
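For example, assuming to_lower is applied before the replacements and new_column leaves the original column untouched:
import pandas as pd
from imsciences import dataprocessing

ims_proc = dataprocessing()

df = pd.DataFrame({'text_col': ['OLD campaign', 'New creative']})

# Lower-case the text, swap 'old' for 'new', and write to a separate column
out = ims_proc.replace_substrings(
    df, 'text_col', {'old': 'new'}, to_lower=True, new_column='updated_text'
)
print(out)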
add_total_column
add_total_column(df, exclude_col=None, total_col_name='Total')
add_total_column(df, exclude_col='date_col')
apply_lookup_table_based_on_substring
apply_lookup_table_based_on_substring(df, column_name, category_dict, new_col_name='Category', other_label='Other')
apply_lookup_table_based_on_substring(df, 'text_col', {'sub1': 'cat1', 'sub2': 'cat2'})
compare_overlap
compare_overlap(df1, df2, date_col)
compare_overlap(df1, df2, 'date_col')
week_commencing_2_week_commencing_conversion_isoweekday
week_commencing_2_week_commencing_conversion_isoweekday(df, date_col, week_commencing='mon')
week_commencing_2_week_commencing_conversion_isoweekday(df, 'date_col', week_commencing='fri')
seasonality_feature_extraction
seasonality_feature_extraction(df, kpi_var, n_features=10, test_size=0.1, random_state=42, shuffle=False)
seasonality_feature_extraction(df, 'kpi_total_sales', n_features=5, test_size=0.2, random_state=123, shuffle=True)
pull_ga
pull_ga(credentials_file, property_id, start_date, country, metrics)
pull_ga('GeoExperiment-31c5f5db2c39.json', '111111111', '2023-10-15', 'United Kingdom', ['totalUsers', 'newUsers'])
process_itv_analysis
process_itv_analysis(raw_df, itv_path, cities_path, media_spend_path, output_path, test_group, control_group, columns_to_aggregate, aggregator_list)
process_itv_analysis(df, 'itv regional mapping.csv', 'Geo_Mappings_with_Coordinates.xlsx', 'IMS.xlsx', 'itv_for_test_analysis_itvx.csv', ['West', 'Westcountry', 'Tyne Tees'], ['Central Scotland', 'North Scotland'], ['newUsers', 'transactions'], ['sum', 'sum'])
process_city_analysis
process_city_analysis(raw_df, spend_df, output_path, test_group, control_group, columns_to_aggregate, aggregator_list)
process_city_analysis(df, spend, output, ['Barnsley'], ['Aberdeen'], ['newUsers', 'transactions'], ['sum', 'sum'])
plot_one
plot_one(df1, col1, date_column)
plot_one(df, 'sales', 'date')
plot_two
plot_two(df1, col1, df2, col2, date_column, same_axis=True)
plot_two(df1, 'sales', df2, 'revenue', 'date', same_axis=False)
plot_chart
plot_chart(df, date_col, value_cols, chart_type='line', title='Chart', x_title='Date', y_title='Values')
plot_chart(df, 'date', ['sales', 'revenue'], chart_type='line', title='Sales and Revenue')
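A sketch for the plotting helpers, assuming plot_chart is a method of the datavis class instantiated earlier and renders the figure directly:
import pandas as pd
from imsciences import datavis

ims_vis = datavis()

df = pd.DataFrame({
    'date': pd.date_range('2023-01-02', periods=8, freq='W-MON'),
    'sales': [10, 12, 9, 14, 13, 15, 11, 16],
    'revenue': [100, 120, 90, 140, 130, 150, 110, 160],
})

# Line chart with both series plotted against the weekly date axis
ims_vis.plot_chart(df, 'date', ['sales', 'revenue'],
                   chart_type='line', title='Sales and Revenue')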
pull_fred_data
pull_fred_data(week_commencing, series_id_list)
pull_fred_data('mon', ['GPDIC1', 'Y057RX1Q020SBEA', 'GCEC1', 'ND000333Q', 'Y006RX1Q020SBEA'])
pull_boe_data
pull_boe_data(week_commencing)
pull_boe_data('mon')
pull_oecd
pull_oecd(country='GBR', week_commencing='mon', start_date='2020-01-01')
pull_oecd('GBR', 'mon', '2000-01-01')
get_google_mobility_data
get_google_mobility_data(country, wc)
get_google_mobility_data('United Kingdom', 'mon')
pull_seasonality
pull_seasonality(week_commencing, start_date, countries)
pull_seasonality('mon', '2020-01-01', ['US', 'GB'])
pull_weather
pull_weather(week_commencing, start_date, country)
pull_weather('mon', '2020-01-01', 'GBR')
pull_macro_ons_uk
pull_macro_ons_uk(additional_list, week_commencing, sector)
pull_macro_ons_uk(['HBOI'], 'mon', 'fast_food')
pull_yfinance
pull_yfinance(tickers, week_start_day)
pull_yfinance(['^FTMC', '^IXIC'], 'mon')
pull_sports_events
pull_sports_events(start_date, week_commencing)
pull_sports_events('2020-01-01', 'mon')
Install the IMS package via pip:
pip install imsciences
This project is licensed under the MIT License.