#Required libraries
import requests
import pandas as pd
import numpy as np
import json
import urllib.request
import re
#Get token
page = urllib.request.urlopen('<insert link school account>')
content = str(page.read())
#Extract the token from between single quotes with a regular expression
token = re.findall("(?<=')[^']+(?=')", content)
print(token)
#Base url, Bearer Authorization, and headers
base = '<insert base link for api>'
end_url = ' -H "Authorization: Bearer ' + token[0]  #curl-style header fragment, kept for reference
bearer = 'Bearer ' + str(token[0])
headers = {"Authorization": bearer}
['<token value printed here>']
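The regex grabs every single-quoted substring on the page, so it is brittle if the response layout changes. If the token endpoint actually returns JSON, a sketch of a sturdier parse (the 'access_token' field name is an assumption, not confirmed against the 12-Twenty docs):
#Hedged alternative: parse the auth response as JSON instead of regex-scraping.
#The 'access_token' key is an assumption; adjust to the actual field name.
with urllib.request.urlopen('<insert link school account>') as resp:
    token = [json.loads(resp.read().decode('utf-8')).get('access_token', '')]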
#Function to get the appointments report from 12-Twenty
"""
Create function that change the NumberOfPages items and iterate with a while loop adding the rows to the existing sheet.
"""
def get_appointments(page):
    r = requests.get('<insert link>', headers=headers, params={'PageSize': 500, 'PageNumber': page})
    json_file = r.json()
    global number_pages_appointments
    number_pages_appointments = json_file['NumberOfPages']
    appointments = pd.DataFrame(json_file["Items"])
    return appointments
x = 2
appointments = get_appointments(1)
while x <= number_pages_appointments:
    appointments = pd.concat([appointments, get_appointments(x)])
    print(appointments.shape)
    x += 1
#appointments.to_csv('appointments.csv')
#!cp appointments.csv "drive/My Drive/CDL Api Files"
(1000, 21) (1500, 21) (2000, 21) (2500, 21) (3000, 21) (3500, 21) (4000, 21) (4500, 21) (5000, 21) (5500, 21) (6000, 21) (6122, 21)
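Since every paged endpoint below repeats this fetch-and-concat pattern, a generic helper could replace the per-endpoint globals. A minimal sketch (fetch_all_pages is a name introduced here, not part of the original notebook, and it assumes the same 'NumberOfPages'/'Items' response shape):
def fetch_all_pages(url, page_size=500):
    #Fetch page 1 to learn the page count, then loop over the remaining pages
    first = requests.get(url, headers=headers, params={'PageSize': page_size, 'PageNumber': 1}).json()
    frames = [pd.DataFrame(first['Items'])]
    for page in range(2, first['NumberOfPages'] + 1):
        r = requests.get(url, headers=headers, params={'PageSize': page_size, 'PageNumber': page})
        frames.append(pd.DataFrame(r.json()['Items']))
    return pd.concat(frames, ignore_index=True)
#Example (hypothetical): appointments = fetch_all_pages('<insert link>')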
#Function to get a specific appointment details
"""
*args:
- page: don't edit (it's defined by the while loop)
- id: insert appointment id
"""
def get_single_appointment(page, id):
    r = requests.get('<insert link>/{}'.format(id), headers=headers, params={'PageSize': 500, 'PageNumber': page})
    single_appointment = r.json()
    #global number_pages_tables
    #number_pages_tables = json_file['NumberOfPages']
    #single_appointment = pd.DataFrame(json_file)
    return single_appointment
single_appointment = get_single_appointment(1, <insert id>)
single_appointment
#single_appointment.to_csv('single_appointment.csv')
{'Id': 220007404009129, 'AppointmentBlockId': 0, 'StartTime': '2022-11-10T19:00:00.000', 'EndTime': '2022-11-10T19:15:00.000', 'StudentId': 540016052574275, 'StudentName': 'Cristian Leo', 'Topic': None, 'PreferredLocationId': 10007603100255, 'PreferredLocationName': 'Drop-In', 'Location': None, 'VirtualMeetingUrl': None, 'PreferredAppointmentTypeId': 10004401013497, 'PreferredAppointmentTypeName': 'Job & Internship Search', 'ContactNumber': None, 'TargetEmployer': None, 'ReasonForVisitIds': [], 'ReasonForVisitNames': '', 'DidAttend': True, 'IsNoShow': None, 'Title': None, 'AdviserId': 540016052278632}
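To line this record up with the tabular report above, one option is pd.json_normalize, which turns a single JSON object into a one-row DataFrame:
#Flatten the single-appointment dict into a one-row DataFrame
single_appointment_df = pd.json_normalize(single_appointment)
single_appointment_df.shape  #(1, 21) for the record above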
#Function to get appointments details by appointment blocks
"""
This call is useful to get merging columns to the final dataset.
"""
def get_appointment_blocks(page):
    r = requests.get('<insert link>', headers=headers, params={"PageSize": 500, "PageNumber": page})
    json_file = r.json()
    global number_pages_appointment_blocks
    number_pages_appointment_blocks = json_file['NumberOfPages']
    appointment_blocks = pd.DataFrame(json_file['Items'])
    return appointment_blocks
x = 2
appointment_blocks = get_appointment_blocks(1)
while x <= number_pages_appointment_blocks:
    appointment_blocks = pd.concat([appointment_blocks, get_appointment_blocks(x)])
    print(appointment_blocks.shape)
    x += 1
#appointment_blocks.to_csv('appointment_blocks.csv')
(1000, 11) (1500, 11) (2000, 11) (2500, 11) (3000, 11) (3500, 11) (4000, 11) (4500, 11) (5000, 11) (5500, 11) (6000, 11) (6500, 11) (7000, 11)
#Function to get student details
def get_students(page):
    r = requests.get('<insert link>', headers=headers, params={'PageSize': 500, 'PageNumber': page})
    json_file = r.json()
    global number_pages_students
    number_pages_students = json_file['NumberOfPages']
    students = pd.DataFrame(json_file["Items"])
    return students
x = 2
students = get_students(1)
while x <= number_pages_students:
    students = pd.concat([students, get_students(x)])
    print(students.shape)
    x += 1
#students.to_csv('students.csv')
(1000, 102) (1500, 102) (2000, 102) (2500, 103) (3000, 103) (3500, 103) (4000, 103) (4500, 103) (5000, 103) (5500, 103) (6000, 103) (6500, 103) (7000, 103) (7500, 103) (8000, 103) (8500, 103) (9000, 103) (9500, 103) (10000, 103) (10500, 103) (11000, 103) (11500, 103) (12000, 103) (12500, 103) (13000, 103) (13500, 103) (13588, 103) (13588, 103)
#Function to get student details by student id
"""
*args:
- page: don't edit (it's defined by the while loop)
- id: insert student id (it's not the UNI)
"""
def get_single_student(page, id):
    r = requests.get('<insert link>/{}'.format(id), headers=headers, params={'PageSize': 500, 'PageNumber': page})
    single_student = r.json()
    return single_student
single_student = get_single_student(1, <insert student id>)
single_student
#single_student.to_csv('single_student.csv')
#Function to get documents uploaded in 12-Twenty
"""
It's for future reference. The section is now empty.
"""
def get_documents(page):
    r = requests.get('<insert link>', headers=headers, params={'PageSize': 500, 'PageNumber': page})
    documents = r.json()
    return documents
documents = get_documents(1)
documents
[]
#Function to get attributes
def get_attributes(page):
    r = requests.get('<insert link>', headers=headers, params={'PageSize': 500, 'PageNumber': page})
    json_file = r.json()
    #global number_pages_documents
    #number_pages_documents = json_file['NumberOfPages']
    attributes = pd.DataFrame(json_file)
    return attributes
attributes = get_attributes(1)
attributes
#attributes.to_csv('attributes.csv')
| | Id | ShortDisplayName | LongDisplayName | EntityTypeId | EntityTypeName | FieldTypeId | FieldTypeName | IsCore | LookupId | LookupName | GroupId | GroupName | IsEnabled | DisplayIndex | HelpText |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1000700 | 12twenty ID | 12twenty ID | 1001 | None | 30 | Numeric ID | True | NaN | None | 100 | General | True | NaN | None |
| 1 | 1000702 | Auto Approve Cover Letters | Auto Approve Cover Letters | 1001 | None | 8 | Checkbox | True | NaN | None | 100 | General | False | NaN | None |
| 2 | 1000707 | Auto Approve Other Documents | Auto Approve Other Documents | 1001 | None | 8 | Checkbox | True | NaN | None | 100 | General | False | NaN | None |
| 3 | 1000705 | Auto Approve Recommendation Letters | Auto Approve Recommendation Letters | 1001 | None | 8 | Checkbox | True | NaN | None | 100 | General | False | NaN | None |
| 4 | 1000703 | Auto Approve Transcripts | Auto Approve Transcripts | 1001 | None | 8 | Checkbox | True | NaN | None | 100 | General | False | NaN | None |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 726 | 1000733 | Registration End Date | Registration End Date | 5800 | None | 19 | Date/Time | True | NaN | None | 100 | General | False | NaN | None |
| 727 | 1000732 | Registration Start Date | Registration Start Date | 5800 | None | 19 | Date/Time | True | NaN | None | 100 | General | False | NaN | None |
| 728 | 1000730 | Start Date | Start Date | 5800 | None | 19 | Date/Time | True | NaN | None | 100 | General | False | NaN | None |
| 729 | 1000729 | Time Zone | Time Zone | 5800 | None | 1 | Dropdown (Single Select) | True | 276.0 | Time Zone | 100 | General | False | NaN | None |
| 730 | 1000758 | Work Authorization Requirement | Work Authorization Requirement | 5800 | None | 9 | Dropdown (Multiple Select) | True | 284.0 | Work Auth Requirement | 100 | General | False | NaN | None |
731 rows × 15 columns
#Function to get single attribute details by ID
"""
*args:
- page: don't edit (it's defined by the while loop)
- id: insert attribute id
"""
def get_single_attribute(page, id):
    r = requests.get('<insert link>/{}'.format(id), headers=headers, params={'PageSize': 500, 'PageNumber': page})
    json_file = r.json()
    #global number_pages_documents
    #number_pages_documents = json_file['NumberOfPages']
    single_attribute = pd.DataFrame(json_file)
    return single_attribute
single_attribute = get_single_attribute(1, <insert attribute id>)
single_attribute
#Function to get contact meetings
def get_contact_meetings(page):
    r = requests.get('<insert link>', headers=headers, params={'PageSize': 500, 'PageNumber': page})
    json_file = r.json()
    global number_pages_meetings
    number_pages_meetings = json_file['NumberOfPages']
    contact_meetings = pd.DataFrame(json_file["Items"])
    return contact_meetings
x = 2
contact_meetings = get_contact_meetings(1)
while x <= number_pages_meetings:
    contact_meetings = pd.concat([contact_meetings, get_contact_meetings(x)])
    print(contact_meetings.shape)
    x += 1
#contact_meetings.to_csv('contact_meetings.csv')
#!cp contact_meetings.csv "drive/My Drive/CDL Api Files"
#Function to get contacts
def get_contacts(page):
    r = requests.get('<insert link>', headers=headers, params={'PageSize': 500, 'PageNumber': page})
    json_file = r.json()
    global number_pages_contacts
    number_pages_contacts = json_file['NumberOfPages']
    contacts = pd.DataFrame(json_file["Items"])
    return contacts
x = 2
contacts = get_contacts(1)
while x <= number_pages_contacts:
    contacts = pd.concat([contacts, get_contacts(x)])
    print(contacts.shape)
    x += 1
contacts.to_csv('contacts.csv')
#!cp contacts.csv "drive/My Drive/CDL Api Files"
#Function to get events
def get_events(page):
    r = requests.get('<insert link>', headers=headers, params={'PageSize': 500, 'PageNumber': page})
    json_file = r.json()
    global number_pages_events
    number_pages_events = json_file['NumberOfPages']
    events = pd.DataFrame(json_file["Items"])
    return events
x = 2
events = get_events(1)
while x <= number_pages_events:
    events = pd.concat([events, get_events(x)])
    print(events.shape)
    x += 1
events.to_csv('events.csv')
#Function to get student event registrations
def get_event_student_registration(page):
    r = requests.get('<insert link>', headers=headers, params={'PageSize': 500, 'PageNumber': page})
    json_file = r.json()
    global number_pages_student_registration
    number_pages_student_registration = json_file['NumberOfPages']
    event_student_registration = pd.DataFrame(json_file["Items"])
    return event_student_registration
x = 2
event_student_registration = get_event_student_registration(1)
while x <= number_pages_student_registration:
    event_student_registration = pd.concat([event_student_registration, get_event_student_registration(x)])
    print(event_student_registration.shape)
    x += 1
event_student_registration.to_csv('event_student_registration.csv')
#Function to get experiences
def get_experiences(page):
    r = requests.get('<insert link>', headers=headers, params={'PageSize': 500, 'PageNumber': page})
    json_file = r.json()
    global number_pages_experiences
    number_pages_experiences = json_file['NumberOfPages']
    experiences = pd.DataFrame(json_file["Items"])
    return experiences
x = 2
experiences = get_experiences(1)
while x <= number_pages_experiences:
    experiences = pd.concat([experiences, get_experiences(x)])
    print(experiences.shape)
    x += 1
experiences.to_csv('experiences.csv')
(16, 24)
#Used only to post files
def get_files(page):
    r = requests.get('<insert link>', headers=headers, params={'PageSize': 500, 'PageNumber': page})
    files = r.json()
    return files
#Function to get jobs
def get_jobs(page):
    r = requests.get('<insert link>', headers=headers, params={'PageSize': 500, 'PageNumber': page})
    json_file = r.json()
    jobs = pd.DataFrame(json_file["Items"])
    return jobs
#Function to get lookups (the response isn't paged, so no NumberOfPages global is needed)
def get_lookups(page):
    r = requests.get('<insert link>', headers=headers, params={'PageSize': 500, 'PageNumber': page})
    json_file = r.json()
    lookups = pd.DataFrame(json_file)
    return lookups
lookups = get_lookups(1)
"""
Kept for reference in case the lookups endpoint becomes paged:
while x <= number_pages_lookups:
    lookups = pd.concat([lookups, get_lookups(x)])
    print(lookups.shape)
    x += 1

lookups.to_csv('lookups.csv')
!cp lookups.csv "drive/My Drive/CDL Api Files"
"""
lookups.tail()
| | Name | DisplayName | IsCore | Id |
|---|---|---|---|---|
| 112 | Work Auth Requirement | Work Auth Requirement | True | 284 |
| 113 | Work Authorization Status | Work Authorization Status | False | 10008603101237 |
| 114 | World Region | World Region | True | 202 |
| 115 | Year | Year | True | 286 |
| 116 | Years of Experience | Years of Experience | False | 10008603101690 |
def get_lookup_options(page, lookup_id):
    r = requests.get('<insert link>/{}/options'.format(lookup_id), headers=headers, params={'PageSize': 500, 'PageNumber': page})
    json_file = r.json()
    lookup_option = pd.DataFrame(json_file)
    return lookup_option
lookup_option = get_lookup_options(1, 10008603101690)
lookup_option
| | Id | Name | DisplayIndex | Group |
|---|---|---|---|---|
| 0 | 15086031038282 | 0 - 3 Years | 1 | None |
| 1 | 15086031038283 | 3 - 5 Years | 2 | None |
| 2 | 15086031038284 | 5+ Years | 3 | None |
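These option tables can decode raw option Ids elsewhere in the data; for example, a small lookup dict built from the frame above:
#Map option Ids to display names, e.g. 15086031038282 -> '0 - 3 Years'
yoe_names = dict(zip(lookup_option['Id'], lookup_option['Name']))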
#Function to get notes
def get_notes(page):
    r = requests.get('<insert link>', headers=headers, params={'PageSize': 500, 'PageNumber': page})
    notes = r.json()
    #notes = pd.DataFrame(notes)
    return notes
#Function to get tasks
def get_tasks(page):
    r = requests.get('<insert link>', headers=headers, params={'PageSize': 500, 'PageNumber': page})
    json_file = r.json()
    tasks = pd.DataFrame(json_file["Items"])
    return tasks
#Get career advisers based on id
coaches_df = appointment_blocks[['AdviserId', 'AdviserName']].drop_duplicates(subset='AdviserId')
data_appointments = appointments.merge(coaches_df, on='AdviserId', how='left')
#Generate the list of unique coaches and map each to staff or temporary
data_appointments['AdviserName'].unique()
#Adviser names redacted as 'X'; in practice each key is a distinct adviser name
mapping_coaches = {'X': 'Staff', 'X': 'Temporary', 'X': 'Temporary',
                   'X': 'Staff', 'X': 'Staff', 'X': 'Staff',
                   'X': 'Staff', 'X': 'Staff', 'X': 'Staff', 'X': 'Staff',
                   'X': 'Staff', 'X': 'Staff', 'X': 'Temporary',
                   'X': 'Staff', 'X': 'Staff', 'X': 'Staff',
                   'X': 'Staff', 'X': 'Staff'}
def add_coach(coach_name, coach_type):
    mapping_coaches[coach_name] = coach_type
data_appointments['Coach Type'] = data_appointments['AdviserName'].replace(mapping_coaches)
#Get email, program, graduation term, and graduation year (merge with the students data frame)
data_appointments = data_appointments.merge(students[['Id','StudentId', 'EmailAddress', 'Program', 'GraduationTerm', 'GraduationClass']], left_on='StudentId', right_on='Id', how='left')
data_appointments['Program'] = data_appointments['Program'].map(lambda x: x['Name'])
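One caveat: the lambda above assumes every Program cell is a dict, but rows unmatched by the left merge hold NaN instead. A guarded sketch of the same extraction (an alternative, not the original code):
#Guarded variant: leave non-dict cells (e.g. NaN from the left merge) untouched
data_appointments['Program'] = data_appointments['Program'].map(
    lambda x: x['Name'] if isinstance(x, dict) else x)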
#Extract the date from StartTime
data_appointments['Date'] = pd.to_datetime(data_appointments['StartTime']).dt.date
#Flag morning vs. afternoon based on the StartTime hour
data_appointments['Time Of Day'] = np.where(pd.to_datetime(data_appointments['StartTime']).dt.hour>=12, 'Afternoon', 'Morning')
#Assign fiscal year: dates on or after 05-15 count toward the next calendar year's FY
data_appointments['FY'] = data_appointments['Date'].map(lambda x: x.year+1 if x.strftime('%m-%d') >= '05-15' else x.year)
#Get term based on dates
data_appointments['M-D'] = data_appointments['Date'].map(lambda x: x.strftime('%m-%d'))
data_appointments['Term'] = np.where(data_appointments['M-D'] <= '05-14', "Spring", np.where(data_appointments['M-D'] >= '09-01', 'Fall', 'Summer'))
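For example, the 2022-11-10 appointment shown earlier has month-day '11-10': it falls on or after '05-15', so FY = 2023, and since '11-10' >= '09-01', Term = 'Fall'.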
#Get program shortcut (map with unique list of program names)
mapping_programs = {'Applied Analytics': 'APAN', 'Sustainability Management':'SUMA',
'Human Capital Management':'HCM', 'Nonprofit Management':'NOPM',
'Negotiation and Conflict Resolution':'NECR',
'Enterprise Risk Management':'ERM',
'Technology Management':'TEMT', 'Construction Administration':'CNAD',
'Bioethics':'BIET', 'Sports Management':'SPORT', 'Strategic Communication': 'STRAT COMM',
'Information and Knowledge Strategy':'IKNS', 'Non-degree':'NON-DEGREE','Technology Management Exec':'TEMT EXEC',
'Wealth Management':'WEALTH MGMT', 'Narrative Medicine':'NARRATIVE MEDICINE',
'Strategic Communication Exec':'STRAT COMM EXEC', 'Sustainability Science':'SUSC',
'Narrative Medicine CPA':'NARRATIVE MEDICINE CPA', 'Actuarial Science':'ACTUARIAL SCIENCE',
'Sustainability Finance CPA':'SUSTAINABILITY FINANCE CPA', 'Advanced Business Certificate':'ADVANCED BUSINESS CERTIFICATE',
'Insurance Management':'INSURANCE MGMT', 'Bioethics CPA':'BIOETHICS CPA', 'Centralized Univ':'CENTRALIZED UNIV'}
data_appointments['Program Shortcut'] = data_appointments['Program'].replace(mapping_programs)
#Get YOE and related fields (merge with the appointment report export)
appointments_report = pd.read_excel('/Users/cristianleo/Desktop/Data_Science/Job CDL/Files/appointment_report.xlsx')
appointments_report = appointments_report[['Student Id', 'YOE', 'Employment Status at Beginning of Program', 'Desired Industry', 'Graduation Term']].drop_duplicates(subset='Student Id')
data_appointments = data_appointments.merge(appointments_report, left_on='StudentId_y', right_on='Student Id', how='left').drop('GraduationTerm', axis=1)
#DidAttend (map unique values to true and false)
data_appointments['DidAttend'].unique()
attended_mapping = {None:'No', False: 'No', True: 'Yes'}
data_appointments['Attendance'] = data_appointments['DidAttend'].replace(attended_mapping)
data_appointments['Attendance'].unique()
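If DidAttend's None values have already been coerced to NaN by pandas, replace with a None key can miss them; a map-plus-fillna sketch gives the same three-way split more defensively (an alternative, not the original code):
#Defensive variant: map the booleans, then treat anything missing as 'No'
data_appointments['Attendance'] = data_appointments['DidAttend'].map({True: 'Yes', False: 'No'}).fillna('No')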
#Rename columns for clarity
data_appointments['AppointmentId'] = data_appointments['Id_x']
data_appointments['StudentId'] = data_appointments['StudentId_x']
data_appointments['UNI'] = data_appointments['StudentId_y']
#Drop duplicated columns
data_appointments = data_appointments.drop(['Id_y', 'Student Id'], axis=1)
#Store a csv
#data_appointments.to_csv('data_appointments.csv')
/Users/cristianleo/opt/anaconda3/lib/python3.9/site-packages/openpyxl/styles/stylesheet.py:226: UserWarning: Workbook contains no default style, apply openpyxl's default
#Authenticate
from google.cloud import api_keys_v2
from google.cloud.api_keys_v2 import Key
def create_api_key(project_id: str) -> Key:
    """
    Creates and restricts an API key.
    TODO(Developer):
    1. Before running this sample,
       set up ADC as described in https://cloud.google.com/docs/authentication/external/set-up-adc
    2. Make sure you have the necessary permission to create API keys.
    Args:
        project_id: Google Cloud project id.
    Returns:
        response: Returns the created API Key.
    """
    # Create the API Keys client.
    client = api_keys_v2.ApiKeysClient()
    key = api_keys_v2.Key()
    key.display_name = "My first API key"
    # Initialize request and set arguments.
    request = api_keys_v2.CreateKeyRequest()
    request.parent = f"projects/{project_id}/locations/global"
    request.key = key
    # Make the request and wait for the operation to complete.
    response = client.create_key(request=request).result()
    print(f"Successfully created an API key: {response.name}")
    # For authenticating with the API key, use the value in "response.key_string".
    # To restrict the usage of this API key, use the value in "response.name".
    return response
#Last preprocessing into a format the Google Sheets API accepts (the JSON body can't carry NaN or date objects)
data_appointments = data_appointments.fillna('N/A')
data_appointments['Date'] = data_appointments['Date'].apply(str)
data_appointments_array = np.array(data_appointments).tolist()
#To overwrite the Google Sheet
"""
BEFORE RUNNING:
---------------
1. If not already done, enable the Google Sheets API
and check the quota for your project at
https://console.developers.google.com/apis/api/sheets
2. Install the Python client library for Google APIs by running
`pip install --upgrade google-api-python-client`
"""
from pprint import pprint
from googleapiclient import discovery
# TODO: Change placeholder below to generate authentication credentials. See
# https://developers.google.com/sheets/quickstart/python#step_3_set_up_the_sample
#
# Authorize using one of the following scopes:
# 'https://www.googleapis.com/auth/drive'
# 'https://www.googleapis.com/auth/drive.file'
# 'https://www.googleapis.com/auth/spreadsheets'
import google.auth
credentials, _ = google.auth.default()
service = discovery.build('sheets', 'v4', credentials=credentials)
# The ID of the spreadsheet to update.
spreadsheet_id = '<insert spreadsheet id>' # TODO: Update placeholder value.
# The A1 notation of the values to update.
range_ = 'A1:AO' # TODO: Update placeholder value.
# How the input data should be interpreted.
value_input_option = 'USER_ENTERED' # TODO: Update placeholder value.
value_range_body = {
# TODO: Add desired entries to the request body. All existing entries
# will be replaced.
"values": [data_appointments.columns.to_list()]
}
request = service.spreadsheets().values().update(spreadsheetId=spreadsheet_id, range=range_, valueInputOption=value_input_option, body=value_range_body)
response = request.execute()
# TODO: Change code below to process the `response` dict:
pprint(response)
#Create a while loop to write the rows in blocks of 1000 (one update call per block)
import math
x = 0
y = 1000
start_cell = 2
n_rounds = math.ceil(len(data_appointments_array) / 1000)
n = 1
while n <= n_rounds:
    range_ = 'A{}:AO'.format(start_cell)
    value_range_body = {
        "values": data_appointments_array[x:y]
    }
    request = service.spreadsheets().values().update(spreadsheetId=spreadsheet_id, range=range_, valueInputOption=value_input_option, body=value_range_body)
    response = request.execute()
    pprint(response)
    n += 1
    x += 1000
    y += 1000
    start_cell += 1000
{'spreadsheetId': '1JyGw5xEDFZffEgjsAsXTsAoThKWWlYPJQzvyp2HKCxc', 'updatedCells': 41, 'updatedColumns': 41, 'updatedRange': 'Sheet1!A1:AO1', 'updatedRows': 1}
{'spreadsheetId': '1JyGw5xEDFZffEgjsAsXTsAoThKWWlYPJQzvyp2HKCxc', 'updatedCells': 41000, 'updatedColumns': 41, 'updatedRange': 'Sheet1!A2:AO1001', 'updatedRows': 1000}
{'spreadsheetId': '1JyGw5xEDFZffEgjsAsXTsAoThKWWlYPJQzvyp2HKCxc', 'updatedCells': 41000, 'updatedColumns': 41, 'updatedRange': 'Sheet1!A1002:AO2001', 'updatedRows': 1000}
{'spreadsheetId': '1JyGw5xEDFZffEgjsAsXTsAoThKWWlYPJQzvyp2HKCxc', 'updatedCells': 41000, 'updatedColumns': 41, 'updatedRange': 'Sheet1!A2002:AO3001', 'updatedRows': 1000}
{'spreadsheetId': '1JyGw5xEDFZffEgjsAsXTsAoThKWWlYPJQzvyp2HKCxc', 'updatedCells': 41000, 'updatedColumns': 41, 'updatedRange': 'Sheet1!A3002:AO4001', 'updatedRows': 1000}
{'spreadsheetId': '1JyGw5xEDFZffEgjsAsXTsAoThKWWlYPJQzvyp2HKCxc', 'updatedCells': 41000, 'updatedColumns': 41, 'updatedRange': 'Sheet1!A4002:AO5001', 'updatedRows': 1000}
{'spreadsheetId': '1JyGw5xEDFZffEgjsAsXTsAoThKWWlYPJQzvyp2HKCxc', 'updatedCells': 41000, 'updatedColumns': 41, 'updatedRange': 'Sheet1!A5002:AO6001', 'updatedRows': 1000}
{'spreadsheetId': '1JyGw5xEDFZffEgjsAsXTsAoThKWWlYPJQzvyp2HKCxc', 'updatedCells': 5002, 'updatedColumns': 41, 'updatedRange': 'Sheet1!A6002:AO6123', 'updatedRows': 122}
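As an alternative to one update call per block, values.batchUpdate can ship every chunk in a single request; a sketch reusing the variables above:
#Build one ValueRange per 1000-row chunk and send them all in one request
data = [{'range': 'A{}:AO'.format(2 + i), 'values': data_appointments_array[i:i + 1000]}
        for i in range(0, len(data_appointments_array), 1000)]
batch_body = {'valueInputOption': value_input_option, 'data': data}
pprint(service.spreadsheets().values().batchUpdate(
    spreadsheetId=spreadsheet_id, body=batch_body).execute())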
The cells below are kept for future reference; don't run them.
from __future__ import print_function
import os.path
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
# The ID and range of a sample spreadsheet.
SAMPLE_SPREADSHEET_ID = '<insert spreadsheet id>'
SAMPLE_RANGE_NAME = 'Sheet1!A1:B'
def main():
    """Shows basic usage of the Sheets API.
    Prints values from a sample spreadsheet.
    """
    creds = None
    # The file tokenservice.json stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('tokenservice.json'):
        creds = Credentials.from_authorized_user_file('tokenservice.json', SCOPES)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.json', 'w') as token:
            token.write(creds.to_json())
    try:
        service = build('sheets', 'v4', credentials=creds)
        # Call the Sheets API
        sheet = service.spreadsheets()
        result = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
                                    range=SAMPLE_RANGE_NAME).execute()
        values = result.get('values', [])
        if not values:
            print('No data found.')
            return
        print('Name, Major:')
        for row in values:
            # Print column A (index 0) of each row
            print('%s' % (row[0]))
    except HttpError as err:
        print(err)

if __name__ == '__main__':
    main()
Please visit this URL to authorize this application: https://accounts.google.com/o/oauth2/auth?response_type=code&client_id=709540880767-crqgo47huh702ecq5d6nlo1vsatpt7qb.apps.googleusercontent.com&redirect_uri=http%3A%2F%2Flocalhost%3A63844%2F&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fspreadsheets&state=wd4XLeQyFLzVXDI5msxhGUE8piOKYA&access_type=offline
Name, Major:
ciao
#Create Spreadsheet
from __future__ import print_function
import google.auth
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
def create(title):
    """
    Creates the Sheet the user has access to.
    Load pre-authorized user credentials from the environment.
    TODO(developer) - See https://developers.google.com/identity
    for guides on implementing OAuth2 for the application.
    """
    creds, _ = google.auth.default()
    # pylint: disable=maybe-no-member
    try:
        service = build('sheets', 'v4', credentials=creds)
        spreadsheet = {
            'properties': {
                'title': title
            }
        }
        spreadsheet = service.spreadsheets().create(body=spreadsheet,
                                                    fields='spreadsheetId') \
            .execute()
        print(f"Spreadsheet ID: {(spreadsheet.get('spreadsheetId'))}")
        return spreadsheet.get('spreadsheetId')
    except HttpError as error:
        print(f"An error occurred: {error}")
        return error

if __name__ == '__main__':
    # Pass: title
    create("appointments_api")
#Append values to sheet
from __future__ import print_function
import google.auth
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
import os
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="/Users/cristianleo/tokenservice.json"
def append_values(spreadsheet_id, range_name, value_input_option,
                  _values):
    """
    Appends rows to a sheet the user has access to.
    Load pre-authorized user credentials from the environment.
    TODO(developer) - See https://developers.google.com/identity
    for guides on implementing OAuth2 for the application.
    """
    creds, _ = google.auth.default()
    # pylint: disable=maybe-no-member
    try:
        service = build('sheets', 'v4', credentials=creds)
        body = {
            # Use the rows passed in; the original sample left this as an
            # empty placeholder list, which is why 'None cells appended.'
            # shows in the output below.
            'values': _values
        }
        result = service.spreadsheets().values().append(
            spreadsheetId=spreadsheet_id, range=range_name,
            valueInputOption=value_input_option, body=body).execute()
        print(f"{(result.get('updates').get('updatedCells'))} cells appended.")
        return result
    except HttpError as error:
        print(f"An error occurred: {error}")
        return error

if __name__ == '__main__':
    # Pass: spreadsheet_id, range_name, value_input_option, and _values
    append_values("<insert spreadsheet id>",
                  "Sheet1!A1:B", "USER_ENTERED",
                  [
                      ['F', 'B'],
                      ['C', 'D']
                  ])
None cells appended.
#Update spreadsheet
from __future__ import print_function
import google.auth
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
def sheets_batch_update(spreadsheet_id, title, find, replacement):
    """
    Updates the spreadsheet's details in batch.
    Load pre-authorized user credentials from the environment.
    TODO(developer) - See https://developers.google.com/identity
    for guides on implementing OAuth2 for the application.
    """
    creds, _ = google.auth.default()
    # pylint: disable=maybe-no-member
    try:
        service = build('sheets', 'v4', credentials=creds)
        requests = []
        # Change the spreadsheet's title.
        requests.append({
            'updateSpreadsheetProperties': {
                'properties': {
                    'title': title
                },
                'fields': 'title'
            }
        })
        # Find and replace text
        requests.append({
            'findReplace': {
                'find': find,
                'replacement': replacement,
                'allSheets': True
            }
        })
        # Add additional requests (operations) ...
        body = {
            'requests': requests
        }
        response = service.spreadsheets().batchUpdate(
            spreadsheetId=spreadsheet_id,
            body=body).execute()
        find_replace_response = response.get('replies')[1].get('findReplace')
        print('{0} replacements made.'.format(
            find_replace_response.get('occurrencesChanged')))
        return response
    except HttpError as error:
        print(f"An error occurred: {error}")
        return error

if __name__ == '__main__':
    sheets_batch_update('<insert spreadsheet id>', 'title', 'find', 'replacement')
None replacements made.
from pprint import pprint
from googleapiclient import discovery
import os
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="/Users/tokenservice.json"
# TODO: Change placeholder below to generate authentication credentials. See
# https://developers.google.com/sheets/quickstart/python#step_3_set_up_the_sample
#
# Authorize using one of the following scopes:
# 'https://www.googleapis.com/auth/drive'
# 'https://www.googleapis.com/auth/drive.file'
# 'https://www.googleapis.com/auth/spreadsheets'
credentials = None
service = discovery.build('sheets', 'v4', credentials=credentials)
# The spreadsheet to apply the updates to.
spreadsheet_id = '<insert spreadsheet id>' # TODO: Update placeholder value.
batch_update_spreadsheet_request_body = {
# A list of updates to apply to the spreadsheet.
# Requests will be applied in the order they are specified.
# If any request is not valid, no requests will be applied.
'requests': ['a'], # TODO: Update placeholder value.
# TODO: Add desired entries to the request body.
}
request = service.spreadsheets().batchUpdate(spreadsheetId=spreadsheet_id, body=batch_update_spreadsheet_request_body)
response = request.execute()
# TODO: Change code below to process the `response` dict:
pprint(response)
from google.colab import auth
import gspread
from google.auth import default
#Authenticating to Google
auth.authenticate_user()
creds, _ = default()
gc = gspread.authorize(creds)
def flattenjson(b, delim):
    #Recursively flatten nested dicts, joining parent and child keys with delim
    val = {}
    for i in b.keys():
        if isinstance(b[i], dict):
            get = flattenjson(b[i], delim)
            for j in get.keys():
                val[i + delim + j] = get[j]
        else:
            val[i] = b[i]
    return val
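A quick usage example: nested keys get joined with the delimiter, which is handy before building DataFrames from records like the Program dicts above (the sample record here is hypothetical).
#Example: flatten a nested record with '_' as the delimiter
nested = {'Id': 1, 'Program': {'Name': 'Applied Analytics', 'Id': 7}}
flattenjson(nested, '_')
#-> {'Id': 1, 'Program_Name': 'Applied Analytics', 'Program_Id': 7}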