
Commit 7011278

leo-naeka authored and Adrian Turjak committed
Add incoming/indexer/storage information to V1 API's status endpoint
1 parent 18c135a commit 7011278

18 files changed: +232 -34 lines changed

gnocchi/incoming/__init__.py (+4)

@@ -257,6 +257,10 @@ def finish_sack_processing(sack):
         """Mark sack processing has finished."""
         pass
 
+    @staticmethod
+    def get_health_status():
+        raise exceptions.NotImplementedError
+
 
 @utils.retry_on_exception_and_log("Unable to initialize incoming driver")
 def get_driver(conf):
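
Note: the gnocchi/rest/api.py change further down reads status['is_available'] from whatever a driver's get_health_status() returns, so that key is the only hard requirement of the contract. A minimal sketch of a concrete implementation, using a toy driver name and free-form extra detail that are not part of this commit:

class AlwaysUpIncomingDriver(object):
    """Toy driver used only to illustrate the expected return shape."""

    def get_health_status(self):
        # The REST layer only inspects the 'is_available' key; anything
        # else in the dict (here 'driver') is free-form detail.
        return {'is_available': True, 'driver': self.__class__.__name__}


# What the status endpoint does with it, in spirit:
status = AlwaysUpIncomingDriver().get_health_status()
http_code = 200 if status['is_available'] else 503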

gnocchi/incoming/ceph.py (+4)

@@ -23,6 +23,7 @@
 
 from gnocchi.common import ceph
 from gnocchi import incoming
+from gnocchi.status import get_ceph_health_status
 
 rados = ceph.rados
 
@@ -232,3 +233,6 @@ def process_measures_for_sack(self, sack):
                 self.ioctx.remove_omap_keys(op, tuple(processed_keys))
                 self.ioctx.operate_write_op(op, str(sack),
                                             flags=self.OMAP_WRITE_FLAGS)
+
+    def get_health_status(self):
+        return get_ceph_health_status(self)
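
Note: the imported helper lives in the new gnocchi.status module, which this commit page does not show, so its body is unknown. A plausible sketch, assuming it probes the measure pool through the driver's rados ioctx (used above) via python-rados' Ioctx.get_stats():

# Hypothetical sketch of get_ceph_health_status(); not the actual
# gnocchi/status.py code.
def get_ceph_health_status(incoming_driver):
    try:
        # Cheap round-trip to the measure pool to prove the cluster answers.
        stats = incoming_driver.ioctx.get_stats()
    except Exception as exc:
        return {'is_available': False, 'details': str(exc)}
    return {'is_available': True, 'num_objects': stats.get('num_objects')}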

gnocchi/incoming/file.py (+4)

@@ -25,6 +25,7 @@
 import six
 
 from gnocchi import incoming
+from gnocchi.status import get_file_health_status
 from gnocchi import utils
 
 LOG = daiquiri.getLogger(__name__)
@@ -206,3 +207,6 @@ def process_measures_for_sack(self, sack):
 
         for metric_id, files in six.iteritems(processed_files):
             self._delete_measures_files_for_metric(metric_id, files)
+
+    def get_health_status(self):
+        return get_file_health_status(self)
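
Note: gnocchi/status.py itself is not part of the hunks shown, so get_file_health_status() is a guess here. A sketch under the assumption that the file driver exposes its measure directory as a 'basepath' attribute (not visible in this diff):

import os


# Hypothetical sketch of get_file_health_status(); 'basepath' is an
# assumed attribute name, and the free-space figure is purely informative.
def get_file_health_status(incoming_driver):
    path = getattr(incoming_driver, 'basepath', '')
    if not os.path.isdir(path) or not os.access(path, os.W_OK):
        return {'is_available': False}
    st = os.statvfs(path)
    return {'is_available': True,
            'free_bytes': st.f_bavail * st.f_frsize}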

gnocchi/incoming/redis.py (+4)

@@ -21,6 +21,7 @@
 
 from gnocchi.common import redis
 from gnocchi import incoming
+from gnocchi.status import get_redis_health_status
 
 
 LOG = daiquiri.getLogger(__name__)
@@ -193,3 +194,6 @@ def finish_sack_processing(self, sack):
         # Delete the sack key which handles no data but is used to get a SET
         # notification in iter_on_sacks_to_process
        self._client.delete(str(sack))
+
+    def get_health_status(self):
+        return get_redis_health_status(self)
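
Note: as with the other drivers, get_redis_health_status() is defined in the unshown gnocchi.status module. A plausible sketch built on the driver's _client attribute (used above) and redis-py's PING command:

# Hypothetical sketch of get_redis_health_status(); not the actual
# gnocchi/status.py code, which this commit page does not include.
def get_redis_health_status(incoming_driver):
    try:
        incoming_driver._client.ping()
    except Exception as exc:
        return {'is_available': False, 'details': str(exc)}
    return {'is_available': True}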

gnocchi/incoming/s3.py (+17 -13)

@@ -24,6 +24,7 @@
 
 from gnocchi.common import s3
 from gnocchi import incoming
+from gnocchi.status import get_s3_health_status
 
 boto3 = s3.boto3
 botocore = s3.botocore
@@ -42,21 +43,21 @@ def __init__(self, conf, greedy=True):
             s3.get_connection(conf)
         )
 
-        self._bucket_name_measures = (
+        self._bucket_name = (
             self._bucket_prefix + "-" + self.MEASURE_PREFIX
         )
 
     def __str__(self):
-        return "%s: %s" % (self.__class__.__name__, self._bucket_name_measures)
+        return "%s: %s" % (self.__class__.__name__, self._bucket_name)
 
     def _get_storage_sacks(self):
-        response = self.s3.get_object(Bucket=self._bucket_name_measures,
+        response = self.s3.get_object(Bucket=self._bucket_name,
                                       Key=self.CFG_PREFIX)
         return json.loads(response['Body'].read().decode())[self.CFG_SACKS]
 
     def set_storage_settings(self, num_sacks):
         data = {self.CFG_SACKS: num_sacks}
-        self.s3.put_object(Bucket=self._bucket_name_measures,
+        self.s3.put_object(Bucket=self._bucket_name,
                            Key=self.CFG_PREFIX,
                            Body=json.dumps(data).encode())
 
@@ -67,7 +68,7 @@ def remove_sacks(num_sacks):
 
     def upgrade(self, num_sacks):
         try:
-            s3.create_bucket(self.s3, self._bucket_name_measures,
+            s3.create_bucket(self.s3, self._bucket_name,
                              self._region_name)
         except botocore.exceptions.ClientError as e:
             if e.response['Error'].get('Code') not in (
@@ -80,7 +81,7 @@ def upgrade(self, num_sacks):
     def _store_new_measures(self, metric_id, data):
         now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S")
         self.s3.put_object(
-            Bucket=self._bucket_name_measures,
+            Bucket=self._bucket_name,
             Key="/".join((str(self.sack_for_metric(metric_id)),
                           str(metric_id),
                           str(uuid.uuid4()) + now)),
@@ -97,7 +98,7 @@ def _build_report(self, details):
             else:
                 kwargs = {}
             response = self.s3.list_objects_v2(
-                Bucket=self._bucket_name_measures,
+                Bucket=self._bucket_name,
                 **kwargs)
             for c in response.get('Contents', ()):
                 if c['Key'] != self.CFG_PREFIX:
@@ -118,7 +119,7 @@ def _list_files(self, path_items, **kwargs):
                 except KeyError:
                     pass
             response = self.s3.list_objects_v2(
-                Bucket=self._bucket_name_measures,
+                Bucket=self._bucket_name,
                 Prefix="/".join(path_items) + "/",
                 **kwargs)
             yield response
@@ -136,7 +137,7 @@ def _list_measure_files_for_metric(self, sack, metric_id):
     def delete_unprocessed_measures_for_metric(self, metric_id):
         sack = self.sack_for_metric(metric_id)
         files = self._list_measure_files_for_metric(sack, metric_id)
-        s3.bulk_delete(self.s3, self._bucket_name_measures, files)
+        s3.bulk_delete(self.s3, self._bucket_name, files)
 
     def has_unprocessed(self, metric_id):
         sack = self.sack_for_metric(metric_id)
@@ -152,7 +153,7 @@ def process_measure_for_metrics(self, metric_ids):
            all_files.extend(files)
            for f in files:
                 response = self.s3.get_object(
-                    Bucket=self._bucket_name_measures,
+                    Bucket=self._bucket_name,
                     Key=f)
                 measures[metric_id] = numpy.concatenate((
                     measures[metric_id],
@@ -162,7 +163,7 @@ def process_measure_for_metrics(self, metric_ids):
         yield measures
 
         # Now clean objects
-        s3.bulk_delete(self.s3, self._bucket_name_measures, all_files)
+        s3.bulk_delete(self.s3, self._bucket_name, all_files)
 
     @contextlib.contextmanager
     def process_measures_for_sack(self, sack):
@@ -177,7 +178,7 @@ def process_measures_for_sack(self, sack):
                     continue
 
                 response = self.s3.get_object(
-                    Bucket=self._bucket_name_measures,
+                    Bucket=self._bucket_name,
                     Key=f)
                 measures[metric_id] = numpy.concatenate((
                     measures[metric_id],
@@ -187,4 +188,7 @@ def process_measures_for_sack(self, sack):
         yield measures
 
         # Now clean objects
-        s3.bulk_delete(self.s3, self._bucket_name_measures, files)
+        s3.bulk_delete(self.s3, self._bucket_name, files)
+
+    def get_health_status(self):
+        return get_s3_health_status(self)
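
Note: alongside the health hook, this diff renames the driver's _bucket_name_measures attribute to _bucket_name throughout. The helper itself comes from the unshown gnocchi.status module; a plausible sketch using boto3's head_bucket against the measure bucket referenced above:

# Hypothetical sketch of get_s3_health_status(); it reuses the driver's
# boto3 client ('s3') and renamed bucket attribute ('_bucket_name').
def get_s3_health_status(incoming_driver):
    try:
        # HEAD succeeds only if the bucket exists and is reachable.
        incoming_driver.s3.head_bucket(Bucket=incoming_driver._bucket_name)
    except Exception as exc:
        return {'is_available': False, 'details': str(exc)}
    return {'is_available': True}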

gnocchi/incoming/swift.py (+4)

@@ -22,6 +22,7 @@
 
 from gnocchi.common import swift
 from gnocchi import incoming
+from gnocchi.status import get_swift_health_status
 from gnocchi import utils
 
 swclient = swift.swclient
@@ -144,3 +145,6 @@ def process_measures_for_sack(self, sack):
         yield measures
 
         swift.bulk_delete(self.swift, sack_name, files)
+
+    def get_health_status(self):
+        return get_swift_health_status(self)
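
Note: get_swift_health_status() is defined in the same unshown gnocchi.status module. A plausible sketch using the driver's swiftclient connection (the 'swift' attribute used above) and its head_account() call, a lightweight authenticated round-trip:

# Hypothetical sketch of get_swift_health_status(); not the actual
# gnocchi/status.py implementation.
def get_swift_health_status(incoming_driver):
    try:
        incoming_driver.swift.head_account()
    except Exception as exc:
        return {'is_available': False, 'details': str(exc)}
    return {'is_available': True}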

gnocchi/indexer/__init__.py (+4)

@@ -477,3 +477,7 @@ def get_resource_attributes_schemas():
     @staticmethod
     def get_resource_type_schema():
         raise exceptions.NotImplementedError
+
+    @staticmethod
+    def get_health_status():
+        raise exceptions.NotImplementedError

gnocchi/indexer/sqlalchemy.py (+4)

@@ -50,6 +50,7 @@
 from gnocchi.indexer import sqlalchemy_base as base
 from gnocchi.indexer import sqlalchemy_types as types
 from gnocchi import resource_type
+from gnocchi.status import get_sqlalchemy_health_status
 from gnocchi import utils
 
 Base = base.Base
@@ -1176,6 +1177,9 @@ def _build_sort_keys(sorts, unique_keys):
 
         return sort_keys, sort_dirs
 
+    def get_health_status(self):
+        return get_sqlalchemy_health_status(self)
+
 
 def _operator_in(field_name, value):
     # Do not generate empty IN comparison
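
Note: get_sqlalchemy_health_status() is likewise defined outside these hunks, and how it obtains a connection from the indexer is unknown. The sketch below sidesteps that by taking a plain SQLAlchemy engine and issuing the classic liveness query:

import sqlalchemy
from sqlalchemy import exc


# Hypothetical sketch; the real helper receives the indexer instance, so
# deriving an engine from it is an assumption made here.
def get_sqlalchemy_health_status(engine):
    try:
        with engine.connect() as conn:
            conn.execute(sqlalchemy.text("SELECT 1"))
    except exc.SQLAlchemyError as error:
        return {'is_available': False, 'details': str(error)}
    return {'is_available': True}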

gnocchi/rest/api.py (+45 -19)

@@ -2092,42 +2092,68 @@ class StatusController(rest.RestController):
     @pecan.expose('json')
     def get(details=True):
         enforce("get status", {})
-        try:
-            members_req = pecan.request.coordinator.get_members(
-                metricd.MetricProcessor.GROUP_ID)
-        except tooz.NotImplemented:
-            members_req = None
+
+        # Add status for incoming/indexer/storage
+        response_data = {
+            'incoming': {
+                'status': pecan.request.incoming.get_health_status(),
+            },
+            'indexer': {
+                'status': pecan.request.indexer.get_health_status(),
+            },
+            'storage': {
+                'status': pecan.request.storage.get_health_status(),
+            }
+        }
+
+        # Always return the detail, but set status code to 503
+        # if a component is not available
+        pecan.response.status = 200 if all([
+            component['status']['is_available']
+            for component in response_data.values()]) else 503
+
+        # Add storage measures to process
         try:
             report = pecan.request.incoming.measures_report(
-                strtobool("details", details))
+                strtobool('details', details))
         except incoming.ReportGenerationError:
-            abort(503, 'Unable to generate status. Please retry.')
-        report_dict = {"storage": {"summary": report['summary']}}
+            abort(503, "Unable to generate status. Please retry.")
+        response_data['storage']['summary'] = report['summary']
         if 'details' in report:
-            report_dict["storage"]["measures_to_process"] = report['details']
-        report_dict['metricd'] = {}
-        if members_req:
+            response_data['storage']['measures_to_process'] = report['details']
+
+        # Add metricd status
+        try:
+            members_req = pecan.request.coordinator.get_members(
+                metricd.MetricProcessor.GROUP_ID)
+        except tooz.NotImplemented:
+            response_data['metricd'] = {
+                'processors': None,
+                'statistics': {}
+            }
+        else:
             members = members_req.get()
             caps = [
                 pecan.request.coordinator.get_member_capabilities(
                     metricd.MetricProcessor.GROUP_ID, member)
                 for member in members
             ]
-            report_dict['metricd']['processors'] = [
-                member.decode() for member in members
-            ]
             members_data = {}
             for member, cap in six.moves.zip(members, caps):
                 caps_data = {
                     six.ensure_str(k): v
                     for k, v in six.iteritems(cap.get())
                 }
                 members_data[member.decode()] = caps_data
-            report_dict['metricd']['statistics'] = members_data
-        else:
-            report_dict['metricd']['processors'] = None
-            report_dict['metricd']['statistics'] = {}
-        return report_dict
+
+            response_data['metricd'] = {
+                'processors': [
+                    member.decode() for member in members
+                ],
+                'statistics': members_data
+            }
+
+        return response_data
 
 
 class MetricsBatchController(object):
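
Note: the endpoint now always builds the per-component detail and only flips the HTTP code to 503 when some component reports itself unavailable (or when the measures report cannot be generated). A quick way to exercise it; the URL and port are deployment assumptions (8041 is merely the conventional gnocchi-api port), and authentication headers are omitted:

import requests

resp = requests.get("http://localhost:8041/v1/status")
print(resp.status_code)  # 200, or 503 if any component is unavailable

body = resp.json()
for name in ("incoming", "indexer", "storage"):
    print(name, body[name]["status"]["is_available"])
print("storage summary:", body["storage"]["summary"])
print("metricd processors:", body["metricd"]["processors"])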
