1616programmatically scale a Google Cloud Bigtable cluster."""
1717
1818import argparse
19+ import logging
1920import os
2021import time
2122
2627
# The target project is taken from the standard Google Cloud environment
# variable. Indexing (rather than .get) makes a missing variable fail fast
# with a KeyError at import time instead of propagating None downstream.
PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
2829
# Module-level logger for the metric scaler. A StreamHandler is attached so
# scaling decisions are visible on the console, and the level is set to INFO
# so logger.info(...) calls elsewhere in this module are emitted.
logger = logging.getLogger('bigtable.metricscaler')
_console_handler = logging.StreamHandler()
logger.addHandler(_console_handler)
logger.setLevel(logging.INFO)
2933
30- def get_cpu_load ():
34+
35+ def get_cpu_load (bigtable_instance , bigtable_cluster ):
3136 """Returns the most recent Cloud Bigtable CPU load measurement.
3237
3338 Returns:
@@ -40,12 +45,13 @@ def get_cpu_load():
4045 metric_type = 'bigtable.googleapis.com/'
4146 'cluster/cpu_load' ,
4247 minutes = 5 )
48+ cpu_query = cpu_query .select_resources (instance = bigtable_instance , cluster = bigtable_cluster )
4349 cpu = next (cpu_query .iter ())
4450 return cpu .points [0 ].value .double_value
4551 # [END bigtable_cpu]
4652
4753
48- def get_storage_utilization ():
54+ def get_storage_utilization (bigtable_instance , bigtable_cluster ):
4955 """Returns the most recent Cloud Bigtable storage utilization measurement.
5056
5157 Returns:
@@ -58,6 +64,7 @@ def get_storage_utilization():
5864 metric_type = 'bigtable.googleapis.com/'
5965 'cluster/storage_utilization' ,
6066 minutes = 5 )
67+ utilization_query = utilization_query .select_resources (instance = bigtable_instance , cluster = bigtable_cluster )
6168 utilization = next (utilization_query .iter ())
6269 return utilization .points [0 ].value .double_value
6370 # [END bigtable_metric_scaler_storage_utilization]
@@ -111,15 +118,15 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
111118 current_node_count + size_change_step , max_node_count )
112119 cluster .serve_nodes = new_node_count
113120 cluster .update ()
114- print ('Scaled up from {} to {} nodes.' .format (
121+ logger . info ('Scaled up from {} to {} nodes.' .format (
115122 current_node_count , new_node_count ))
116123 else :
117124 if current_node_count > min_node_count :
118125 new_node_count = max (
119126 current_node_count - size_change_step , min_node_count )
120127 cluster .serve_nodes = new_node_count
121128 cluster .update ()
122- print ('Scaled down from {} to {} nodes.' .format (
129+ logger . info ('Scaled down from {} to {} nodes.' .format (
123130 current_node_count , new_node_count ))
124131 # [END bigtable_scale]
125132
@@ -145,10 +152,10 @@ def main(
145152 long_sleep (int): How long to sleep after the number of nodes is
146153 changed
147154 """
148- cluster_cpu = get_cpu_load ()
149- cluster_storage = get_storage_utilization ()
150- print ('Detected cpu of {}' .format (cluster_cpu ))
151- print ('Detected storage utilization of {}' .format (cluster_storage ))
155+ cluster_cpu = get_cpu_load (bigtable_instance , bigtable_cluster )
156+ cluster_storage = get_storage_utilization (bigtable_instance , bigtable_cluster )
157+ logger . info ('Detected cpu of {}' .format (cluster_cpu ))
158+ logger . info ('Detected storage utilization of {}' .format (cluster_storage ))
152159 try :
153160 if cluster_cpu > high_cpu_threshold or cluster_storage > high_storage_threshold :
154161 scale_bigtable (bigtable_instance , bigtable_cluster , True )
@@ -158,10 +165,10 @@ def main(
158165 scale_bigtable (bigtable_instance , bigtable_cluster , False )
159166 time .sleep (long_sleep )
160167 else :
161- print ('CPU within threshold, sleeping.' )
168+ logger . info ('CPU within threshold, sleeping.' )
162169 time .sleep (short_sleep )
163170 except Exception as e :
164- print ("Error during scaling: %s" , e )
171+ logger . error ("Error during scaling: %s" , e )
165172
166173
167174if __name__ == '__main__' :
0 commit comments