Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Updating dataproc container cost to be multiplied by number of cores #648

Merged
merged 9 commits into from
Nov 3, 2023
6 changes: 3 additions & 3 deletions user_tools/src/spark_rapids_pytools/cloud_api/dataproc.py
Original file line number Diff line number Diff line change
Expand Up @@ -534,16 +534,16 @@ def _calculate_group_cost(self, cluster_inst: ClusterGetAccessor, node_type: Spa
mem_gb = float(mem_mb) / 1024
cores_cost = self.price_provider.get_cpu_price(node_mc_type) * int(cores_count)
memory_cost = self.price_provider.get_ram_price(node_mc_type) * mem_gb
dataproc_cost = self.price_provider.get_container_cost() * int(cores_count)
# calculate the GPU cost
gpu_per_machine, gpu_type = cluster_inst.get_gpu_per_node(node_type)
gpu_cost = 0.0
if gpu_per_machine > 0:
gpu_unit_price = self.price_provider.get_gpu_price(gpu_type)
gpu_cost = gpu_unit_price * gpu_per_machine
return nodes_cnt * (cores_cost + memory_cost + gpu_cost)
return nodes_cnt * (cores_cost + memory_cost + dataproc_cost + gpu_cost)

def _get_cost_per_cluster(self, cluster: ClusterGetAccessor):
    """
    Estimate the total cost of the given cluster.

    Sums the per-group costs of the master and worker node groups.
    The Dataproc container fee is charged per core inside
    _calculate_group_cost (see the `dataproc_cost` term there), so it
    must NOT be added again at the cluster level — doing so was the
    double-count this PR removes.

    :param cluster: accessor exposing the cluster's node groups.
    :return: combined cost of the master and worker groups.
    """
    master_cost = self._calculate_group_cost(cluster, SparkNodeType.MASTER)
    workers_cost = self._calculate_group_cost(cluster, SparkNodeType.WORKER)
    return master_cost + workers_cost
Original file line number Diff line number Diff line change
Expand Up @@ -196,5 +196,4 @@ class DataprocGkeSavingsEstimator(DataprocSavingsEstimator):
def _get_cost_per_cluster(self, cluster: ClusterGetAccessor):
    """
    Estimate the total cost of the given Dataproc-on-GKE cluster.

    Sums the per-group costs of the master and worker node groups.
    The container fee is already priced per core inside the inherited
    _calculate_group_cost, so no cluster-level container charge is
    added here (the old flat `get_container_cost()` term was removed
    to avoid double counting).

    :param cluster: accessor exposing the cluster's node groups.
    :return: combined cost of the master and worker groups.
    """
    master_cost = self._calculate_group_cost(cluster, SparkNodeType.MASTER)
    workers_cost = self._calculate_group_cost(cluster, SparkNodeType.WORKER)
    return master_cost + workers_cost
parthosa marked this conversation as resolved.
Show resolved Hide resolved
6 changes: 3 additions & 3 deletions user_tools/src/spark_rapids_pytools/cloud_api/onprem.py
Original file line number Diff line number Diff line change
Expand Up @@ -300,18 +300,18 @@ def __calculate_dataproc_group_cost(self, cluster_inst: ClusterGetAccessor, node

cores_cost = self.price_provider.get_cpu_price(node_mc_type) * int(cores_count)
memory_cost = self.price_provider.get_ram_price(node_mc_type) * mem_gb
dataproc_cost = self.price_provider.get_container_cost() * int(cores_count)
# calculate the GPU cost
gpu_per_machine, gpu_type = cluster_inst.get_gpu_per_node(node_type)
gpu_cost = 0.0
if gpu_per_machine > 0:
gpu_unit_price = self.price_provider.get_gpu_price(gpu_type)
gpu_cost = gpu_unit_price * gpu_per_machine
return nodes_cnt * (cores_cost + memory_cost + gpu_cost)
return nodes_cnt * (cores_cost + memory_cost + dataproc_cost + gpu_cost)
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Need to remove dataproc_cost reference here


def _get_cost_per_cluster(self, cluster: ClusterGetAccessor):
    """
    Estimate the total cluster cost when the configured price provider
    is Dataproc.

    The Dataproc container fee is folded into
    __calculate_dataproc_group_cost on a per-core basis, so it is not
    added again at the cluster level.

    NOTE(review): `total_cost` is only assigned inside the 'dataproc'
    branch; for any other price provider this would raise
    UnboundLocalError at the `return`. The page this was taken from is
    truncated right after this method — confirm against the full file
    whether a non-dataproc fallback exists below.

    :param cluster: accessor exposing the cluster's node groups.
    :return: combined cost of the master and worker groups.
    """
    if self.price_provider.name.casefold() == 'dataproc':
        master_cost = self.__calculate_dataproc_group_cost(cluster, SparkNodeType.MASTER)
        workers_cost = self.__calculate_dataproc_group_cost(cluster, SparkNodeType.WORKER)
        total_cost = master_cost + workers_cost
    return total_cost
Loading