Rename 'Test Cluster' to 'Unit Testing' to unstutter class and item naming #23495

Merged: 36 commits merged into master from unstutter_test_cluster on Nov 15, 2022.

Changes from 1 commit (the full PR contains 36 commits).

Commits
fe30505
Renamed Test Cluster to just test, to unstutter names.
andy31415 Nov 4, 2022
4630dce
zap regen
andy31415 Nov 4, 2022
d6c6a64
make all clusters compile
andy31415 Nov 4, 2022
9719cb4
Make tests compile
andy31415 Nov 4, 2022
e91e7e2
Restyle
andy31415 Nov 4, 2022
7ebcbae
Fix expected JNI filename codegen
andy31415 Nov 4, 2022
bd3c10b
Fix darwin code renaming
andy31415 Nov 4, 2022
9e83800
Update cirque test for naming (no cluster suffix)
andy31415 Nov 4, 2022
c385d17
More updates for python: TestCluster to Clusters.Test
andy31415 Nov 4, 2022
59668dd
Correct cirque test to expect cluster name to be Test
andy31415 Nov 4, 2022
56bde96
Merge branch 'master' into unstutter_test_cluster
andy31415 Nov 7, 2022
ad57c09
Start renaming to Unit Testing for a more complete cluster name
andy31415 Nov 7, 2022
a84236a
more updates to naming for the test cluster to unit testing
andy31415 Nov 7, 2022
5754c13
make define consistent
andy31415 Nov 7, 2022
9810cb6
Restyled by clang-format
restyled-commits Nov 7, 2022
bc9e16b
Restyled by autopep8
restyled-commits Nov 7, 2022
58a2312
Fix up naming in the test cluster implementation
andy31415 Nov 7, 2022
e65a86a
Restyled by clang-format
restyled-commits Nov 7, 2022
e71af57
Fix darwin build
andy31415 Nov 7, 2022
62fb41e
Fix cirque
andy31415 Nov 7, 2022
6d0710f
Fix cirque
andy31415 Nov 7, 2022
c4f5f78
Fix cirque
andy31415 Nov 7, 2022
c0b7e97
Fix repl examples
andy31415 Nov 7, 2022
e1cd94f
Restyled by autopep8
restyled-commits Nov 7, 2022
afe4d6a
Fix android output files
andy31415 Nov 8, 2022
2178401
Fix cirque
andy31415 Nov 8, 2022
4050449
Merge branch 'master' into unstutter_test_cluster
andy31415 Nov 8, 2022
747d9e9
Another cirque fix
andy31415 Nov 8, 2022
a25a145
update ipynb code for Objects.UnitTesting
andy31415 Nov 8, 2022
1216076
Replace more unusual auto-sed for cluster_objects
andy31415 Nov 8, 2022
47a5ac7
Replace comment in ipynb
andy31415 Nov 8, 2022
2d17792
Restyled by autopep8
restyled-commits Nov 8, 2022
fee5c0b
Fix expected unit test cluster name. hoping this is the last cirque fix
andy31415 Nov 8, 2022
8fc48ab
Merge branch 'master' into unstutter_test_cluster
andy31415 Nov 10, 2022
9c20b0b
Merge branch 'master' into unstutter_test_cluster
andy31415 Nov 10, 2022
6967b1f
Merge branch 'master' into unstutter_test_cluster
andy31415 Nov 15, 2022
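Taken together, these commits rename the "Test Cluster" cluster to "Unit Testing", so generated class and item names no longer stutter. For the Python bindings the net effect on test scripts is a one-line change; a minimal before/after sketch (illustrative only, assuming the regenerated chip.clusters bindings):

import chip.clusters as Clusters

# Before this PR: the generated class stuttered the word "cluster"
# attr = Clusters.TestCluster.Attributes.ListFabricScoped

# After this PR: the cluster is named "Unit Testing", hence UnitTesting
attr = Clusters.UnitTesting.Attributes.ListFabricScoped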
Commit e1cd94f7701734f0f0703c378b41de450125db67: Restyled by autopep8
restyled-commits authored and andy31415 committed Nov 7, 2022
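This restyle is purely mechanical: autopep8 inserts the PEP 8 whitespace around assignment operators (pycodestyle E225). A sketch of the same transformation via autopep8's Python API, assuming autopep8 is installed:

import autopep8

# fix_code() applies PEP 8 fixes to a source string; E225 is
# "missing whitespace around operator", the fix seen in this diff.
print(autopep8.fix_code("data= ctrl.read()\n"))  # -> "data = ctrl.read()\n"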
src/controller/python/test/test_scripts/base.py: 130 changes (65 additions, 65 deletions)
@@ -782,24 +782,24 @@ async def TestFabricSensitive(self, nodeid: int):
 # Update the expected data's fabric index to that we just read back
 # before we attempt to compare the data
 #
-expectedDataFabric1[0].fabricIndex= self.currentFabric1
-expectedDataFabric1[1].fabricIndex= self.currentFabric1
+expectedDataFabric1[0].fabricIndex = self.currentFabric1
+expectedDataFabric1[1].fabricIndex = self.currentFabric1

 self.logger.info("Comparing data on fabric1...")
 if (expectedDataFabric1 != readListDataFabric1):
 raise AssertionError("Got back mismatched data")

 self.logger.info("Reading back data from fabric2...")

-data= await self.devCtrl2.ReadAttribute(nodeid, [(1, Clusters.UnitTesting.Attributes.ListFabricScoped)])
-readListDataFabric2= data[1][Clusters.UnitTesting][Clusters.UnitTesting.Attributes.ListFabricScoped]
+data = await self.devCtrl2.ReadAttribute(nodeid, [(1, Clusters.UnitTesting.Attributes.ListFabricScoped)])
+readListDataFabric2 = data[1][Clusters.UnitTesting][Clusters.UnitTesting.Attributes.ListFabricScoped]

 #
 # Update the expected data's fabric index to that we just read back
 # before we attempt to compare the data
 #
-expectedDataFabric2[0].fabricIndex= self.currentFabric2
-expectedDataFabric2[1].fabricIndex= self.currentFabric2
+expectedDataFabric2[0].fabricIndex = self.currentFabric2
+expectedDataFabric2[1].fabricIndex = self.currentFabric2

 self.logger.info("Comparing data on fabric2...")
 if (expectedDataFabric2 != readListDataFabric2):
@@ -809,7 +809,7 @@ async def TestFabricSensitive(self, nodeid: int):
 "Reading back unfiltered data across all fabrics from fabric1...")

 def CompareUnfilteredData(accessingFabric, otherFabric, expectedData):
-index= 0
+index = 0

 self.logger.info(
 f"Comparing data from accessing fabric {accessingFabric}...")
@@ -823,7 +823,7 @@ def CompareUnfilteredData(accessingFabric, otherFabric, expectedData):
 if (item != expectedData[index]):
 raise AssertionError("Got back mismatched data")

-index= index + 1
+index = index + 1
 else:
 #
 # We should not be able to see any fabric sensitive data from the non accessing fabric.
@@ -832,32 +832,32 @@ def CompareUnfilteredData(accessingFabric, otherFabric, expectedData):
 # which should automatically be initialized with defaults and compare that
 # against what we got back.
 #
-expectedDefaultData= Clusters.UnitTesting.Structs.TestFabricScoped()
-expectedDefaultData.fabricIndex= otherFabric
+expectedDefaultData = Clusters.UnitTesting.Structs.TestFabricScoped()
+expectedDefaultData.fabricIndex = otherFabric

 if (item != expectedDefaultData):
 raise AssertionError("Got back mismatched data")

-data= await self.devCtrl.ReadAttribute(nodeid, [(1, Clusters.UnitTesting.Attributes.ListFabricScoped)], fabricFiltered=False)
-readListDataFabric= data[1][Clusters.UnitTesting][Clusters.UnitTesting.Attributes.ListFabricScoped]
+data = await self.devCtrl.ReadAttribute(nodeid, [(1, Clusters.UnitTesting.Attributes.ListFabricScoped)], fabricFiltered=False)
+readListDataFabric = data[1][Clusters.UnitTesting][Clusters.UnitTesting.Attributes.ListFabricScoped]
 CompareUnfilteredData(self.currentFabric1,
 self.currentFabric2, expectedDataFabric1)

-data= await self.devCtrl2.ReadAttribute(nodeid, [(1, Clusters.UnitTesting.Attributes.ListFabricScoped)], fabricFiltered=False)
-readListDataFabric= data[1][Clusters.UnitTesting][Clusters.UnitTesting.Attributes.ListFabricScoped]
+data = await self.devCtrl2.ReadAttribute(nodeid, [(1, Clusters.UnitTesting.Attributes.ListFabricScoped)], fabricFiltered=False)
+readListDataFabric = data[1][Clusters.UnitTesting][Clusters.UnitTesting.Attributes.ListFabricScoped]
 CompareUnfilteredData(self.currentFabric2,
 self.currentFabric1, expectedDataFabric2)

 self.logger.info("Writing smaller list from alpha (again)")

-expectedDataFabric1[0].fabricIndex= 100
-expectedDataFabric1[0].fabricSensitiveInt8u= 53
-expectedDataFabric1[0].optionalFabricSensitiveInt8u= 54
-expectedDataFabric1[0].nullableFabricSensitiveInt8u= 55
-expectedDataFabric1[0].nullableOptionalFabricSensitiveInt8u= Clusters.Types.NullValue
-expectedDataFabric1[0].fabricSensitiveCharString= "alpha3"
-expectedDataFabric1[0].fabricSensitiveStruct.a= 56
-expectedDataFabric1[0].fabricSensitiveInt8uList= [51, 52, 53, 54]
+expectedDataFabric1[0].fabricIndex = 100
+expectedDataFabric1[0].fabricSensitiveInt8u = 53
+expectedDataFabric1[0].optionalFabricSensitiveInt8u = 54
+expectedDataFabric1[0].nullableFabricSensitiveInt8u = 55
+expectedDataFabric1[0].nullableOptionalFabricSensitiveInt8u = Clusters.Types.NullValue
+expectedDataFabric1[0].fabricSensitiveCharString = "alpha3"
+expectedDataFabric1[0].fabricSensitiveStruct.a = 56
+expectedDataFabric1[0].fabricSensitiveInt8uList = [51, 52, 53, 54]

 expectedDataFabric1.pop(1)

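The pair of unfiltered reads above is the heart of the fabric-scoping check. A condensed sketch of the two read modes, using the same ReadAttribute API as the test (devCtrl and nodeid assumed already set up):

path = [(1, Clusters.UnitTesting.Attributes.ListFabricScoped)]

# Default read: fabric-filtered, only the accessing fabric's entries return.
own = await devCtrl.ReadAttribute(nodeid, path)

# Unfiltered read: entries from all fabrics return, but fabric-sensitive
# fields in other fabrics' entries are masked to default values.
everything = await devCtrl.ReadAttribute(nodeid, path, fabricFiltered=False)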
@@ -866,19 +866,19 @@ def CompareUnfilteredData(accessingFabric, otherFabric, expectedData):
 self.logger.info(
 "Reading back data (again) from fabric2 to ensure it hasn't changed")

-data= await self.devCtrl2.ReadAttribute(nodeid, [(1, Clusters.UnitTesting.Attributes.ListFabricScoped)])
-readListDataFabric2= data[1][Clusters.UnitTesting][Clusters.UnitTesting.Attributes.ListFabricScoped]
+data = await self.devCtrl2.ReadAttribute(nodeid, [(1, Clusters.UnitTesting.Attributes.ListFabricScoped)])
+readListDataFabric2 = data[1][Clusters.UnitTesting][Clusters.UnitTesting.Attributes.ListFabricScoped]
 if (expectedDataFabric2 != readListDataFabric2):
 raise AssertionError("Got back mismatched data")

 self.logger.info(
 "Reading back data (again) from fabric1 to ensure it hasn't changed")

-data= await self.devCtrl.ReadAttribute(nodeid, [(1, Clusters.UnitTesting.Attributes.ListFabricScoped)])
-readListDataFabric1= data[1][Clusters.UnitTesting][Clusters.UnitTesting.Attributes.ListFabricScoped]
+data = await self.devCtrl.ReadAttribute(nodeid, [(1, Clusters.UnitTesting.Attributes.ListFabricScoped)])
+readListDataFabric1 = data[1][Clusters.UnitTesting][Clusters.UnitTesting.Attributes.ListFabricScoped]

 self.logger.info("Comparing data on fabric1...")
-expectedDataFabric1[0].fabricIndex= self.currentFabric1
+expectedDataFabric1[0].fabricIndex = self.currentFabric1
 if (expectedDataFabric1 != readListDataFabric1):
 raise AssertionError("Got back mismatched data")

@@ -888,22 +888,22 @@ async def TestResubscription(self, nodeid: int):
 trigger CASE session establishment and subscription restablishment. Both the attempt and successful
 restablishment of the subscription are validated.
 '''
-cv= asyncio.Condition()
-resubAttempted= False
-resubSucceeded= True
+cv = asyncio.Condition()
+resubAttempted = False
+resubSucceeded = True

 async def OnResubscriptionAttempted(transaction, errorEncountered: int, nextResubscribeIntervalMsec: int):
 self.logger.info("Re-subscription Attempted")
 nonlocal resubAttempted
-resubAttempted= True
+resubAttempted = True

 async def OnResubscriptionSucceeded(transaction):
 self.logger.info("Re-subscription Succeeded")
 nonlocal cv
 async with cv:
 cv.notify()

-subscription= await self.devCtrl.ReadAttribute(nodeid, [(Clusters.Basic.Attributes.ClusterRevision)], reportInterval=(0, 5))
+subscription = await self.devCtrl.ReadAttribute(nodeid, [(Clusters.Basic.Attributes.ClusterRevision)], reportInterval=(0, 5))

 #
 # Register async callbacks that will fire when a re-sub is attempted or succeeds.
@@ -920,7 +920,7 @@ async def OnResubscriptionSucceeded(transaction):

 async with cv:
 if (not(resubAttempted) or not(resubSucceeded)):
-res= await asyncio.wait_for(cv.wait(), 3)
+res = await asyncio.wait_for(cv.wait(), 3)
 if not res:
 self.logger.error("Timed out waiting for resubscription to succeed")
 return False
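The wait above is a standard asyncio.Condition handshake: the success callback notifies, and the test awaits that notification with a timeout. A standalone sketch of the pattern (plain asyncio, independent of the controller code):

import asyncio

async def main():
    cv = asyncio.Condition()

    async def on_success():              # stand-in for OnResubscriptionSucceeded
        async with cv:
            cv.notify()

    async def resubscribe():
        await asyncio.sleep(0.1)         # simulate the re-subscription completing
        await on_success()

    asyncio.create_task(resubscribe())
    async with cv:
        try:
            await asyncio.wait_for(cv.wait(), 3)   # give up after 3 seconds
        except asyncio.TimeoutError:
            print("timed out waiting for re-subscription")

asyncio.run(main())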
@@ -946,13 +946,13 @@ def SetNetworkCommissioningParameters(self, dataset: str):
 def TestOnOffCluster(self, nodeid: int, endpoint: int, group: int):
 self.logger.info(
 "Sending On/Off commands to device {} endpoint {}".format(nodeid, endpoint))
-err, resp= self.devCtrl.ZCLSend("OnOff", "On", nodeid,
+err, resp = self.devCtrl.ZCLSend("OnOff", "On", nodeid,
 endpoint, group, {}, blocking=True)
 if err != 0:
 self.logger.error(
 "failed to send OnOff.On: error is {} with im response{}".format(err, resp))
 return False
-err, resp= self.devCtrl.ZCLSend("OnOff", "Off", nodeid,
+err, resp = self.devCtrl.ZCLSend("OnOff", "Off", nodeid,
 endpoint, group, {}, blocking=True)
 if err != 0:
 self.logger.error(
@@ -964,12 +964,12 @@ def TestLevelControlCluster(self, nodeid: int, endpoint: int, group: int):
 self.logger.info(
 f"Sending MoveToLevel command to device {nodeid} endpoint {endpoint}")
 try:
-commonArgs= dict(transitionTime=0, optionsMask=1, optionsOverride=1)
+commonArgs = dict(transitionTime=0, optionsMask=1, optionsOverride=1)

 # Move to 1
 self.devCtrl.ZCLSend("LevelControl", "MoveToLevel", nodeid,
 endpoint, group, dict(**commonArgs, level=1), blocking=True)
-res= self.devCtrl.ZCLReadAttribute(cluster="LevelControl",
+res = self.devCtrl.ZCLReadAttribute(cluster="LevelControl",
 attribute="CurrentLevel",
 nodeid=nodeid,
 endpoint=endpoint,
@@ -980,7 +980,7 @@ def TestLevelControlCluster(self, nodeid: int, endpoint: int, group: int):
 # Move to 254
 self.devCtrl.ZCLSend("LevelControl", "MoveToLevel", nodeid,
 endpoint, group, dict(**commonArgs, level=254), blocking=True)
-res= self.devCtrl.ZCLReadAttribute(cluster="LevelControl",
+res = self.devCtrl.ZCLReadAttribute(cluster="LevelControl",
 attribute="CurrentLevel",
 nodeid=nodeid,
 endpoint=endpoint,
@@ -998,11 +998,11 @@ def TestResolve(self, nodeid):
 "Resolve: node id = {:08x}".format(nodeid))
 try:
 self.devCtrl.ResolveNode(nodeid=nodeid)
-addr= None
+addr = None

-start= time.time()
+start = time.time()
 while not addr:
-addr= self.devCtrl.GetAddressAndPort(nodeid)
+addr = self.devCtrl.GetAddressAndPort(nodeid)
 if time.time() - start > 10:
 self.logger.exception(f"Timeout waiting for address...")
 break
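The resolve test polls with a wall-clock deadline. A condensed, self-contained sketch of the same pattern (lookup is a hypothetical stand-in for GetAddressAndPort):

import time

def poll_with_deadline(lookup, timeout_s=10.0, interval_s=0.2):
    """Poll lookup() until it returns a truthy value or the deadline passes."""
    deadline = time.time() + timeout_s
    result = None
    while not result and time.time() < deadline:
        result = lookup()          # e.g. a resolver query; may return None
        if not result:
            time.sleep(interval_s)
    return result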
@@ -1020,7 +1020,7 @@ def TestResolve(self, nodeid):
 return False

 def TestReadBasicAttributes(self, nodeid: int, endpoint: int, group: int):
-basic_cluster_attrs= {
+basic_cluster_attrs = {
 "VendorName": "TEST_VENDOR",
 "VendorID": 0xFFF1,
 "ProductName": "TEST_PRODUCT",
@@ -1032,18 +1032,18 @@ def TestReadBasicAttributes(self, nodeid: int, endpoint: int, group: int):
 "SoftwareVersion": 1,
 "SoftwareVersionString": "1.0",
 }
-failed_zcl= {}
+failed_zcl = {}
 for basic_attr, expected_value in basic_cluster_attrs.items():
 try:
-res= self.devCtrl.ZCLReadAttribute(cluster="Basic",
+res = self.devCtrl.ZCLReadAttribute(cluster="Basic",
 attribute=basic_attr,
 nodeid=nodeid,
 endpoint=endpoint,
 groupid=group)
 TestResult(f"Read attribute {basic_attr}", res).assertValueEqual(
 expected_value)
 except Exception as ex:
-failed_zcl[basic_attr]= str(ex)
+failed_zcl[basic_attr] = str(ex)
 if failed_zcl:
 self.logger.exception(f"Following attributes failed: {failed_zcl}")
 return False
@@ -1055,14 +1055,14 @@ class AttributeWriteRequest:
 cluster: str
 attribute: str
 value: Any
-expected_status: IM.Status= IM.Status.Success
+expected_status: IM.Status = IM.Status.Success

-requests= [
+requests = [
 AttributeWriteRequest("Basic", "NodeLabel", "Test"),
 AttributeWriteRequest("Basic", "Location",
 "a pretty loooooooooooooog string", IM.Status.ConstraintError),
 ]
-failed_zcl= []
+failed_zcl = []
 for req in requests:
 try:
 try:
@@ -1080,7 +1080,7 @@ class AttributeWriteRequest:
 continue
 else:
 raise ex
-res= self.devCtrl.ZCLReadAttribute(
+res = self.devCtrl.ZCLReadAttribute(
 cluster=req.cluster, attribute=req.attribute, nodeid=nodeid, endpoint=endpoint, groupid=group)
 TestResult(f"Read attribute {req.cluster}.{req.attribute}", res).assertValueEqual(
 req.value)
@@ -1092,17 +1092,17 @@ class AttributeWriteRequest:
 return True

 def TestSubscription(self, nodeid: int, endpoint: int):
-desiredPath= None
-receivedUpdate= 0
-updateLock= threading.Lock()
-updateCv= threading.Condition(updateLock)
+desiredPath = None
+receivedUpdate = 0
+updateLock = threading.Lock()
+updateCv = threading.Condition(updateLock)

 def OnValueChange(path: Attribute.TypedAttributePath, transaction: Attribute.SubscriptionTransaction) -> None:
 nonlocal desiredPath, updateCv, updateLock, receivedUpdate
 if path.Path != desiredPath:
 return

-data= transaction.GetAttribute(path)
+data = transaction.GetAttribute(path)
 logger.info(
 f"Received report from server: path: {path.Path}, value: {data}")
 with updateLock:

 class _conductAttributeChange(threading.Thread):
 def __init__(self, devCtrl: ChipDeviceCtrl.ChipDeviceController, nodeid: int, endpoint: int):
 super(_conductAttributeChange, self).__init__()
-self.nodeid= nodeid
-self.endpoint= endpoint
-self.devCtrl= devCtrl
+self.nodeid = nodeid
+self.endpoint = endpoint
+self.devCtrl = devCtrl

 def run(self):
 for i in range(5):
@@ -1123,13 +1123,13 @@ def run(self):
 "OnOff", "Toggle", self.nodeid, self.endpoint, 0, {})

 try:
-desiredPath= Clusters.Attribute.AttributePath(
+desiredPath = Clusters.Attribute.AttributePath(
 EndpointId=1, ClusterId=6, AttributeId=0)
 # OnOff Cluster, OnOff Attribute
-subscription= self.devCtrl.ZCLSubscribeAttribute(
+subscription = self.devCtrl.ZCLSubscribeAttribute(
 "OnOff", "OnOff", nodeid, endpoint, 1, 10)
 subscription.SetAttributeUpdateCallback(OnValueChange)
-changeThread= _conductAttributeChange(
+changeThread = _conductAttributeChange(
 self.devCtrl, nodeid, endpoint)
 # Reset the number of subscriptions received as subscribing causes a callback.
 changeThread.start()
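The subscription test above couples a threading.Condition to the report callback: the callback counts updates and notifies, while the test thread waits for the expected count. A standalone sketch of that wait pattern (plain threading, no controller involved):

import threading

updateLock = threading.Lock()
updateCv = threading.Condition(updateLock)
receivedUpdate = 0

def on_report():                          # stand-in for OnValueChange
    global receivedUpdate
    with updateLock:
        receivedUpdate += 1
        updateCv.notify_all()

threading.Timer(0.1, on_report).start()   # simulate one report arriving
with updateCv:
    # wait_for re-checks the predicate after every notify, up to the timeout
    ok = updateCv.wait_for(lambda: receivedUpdate >= 1, timeout=5)
print("got report" if ok else "timed out")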
@@ -1171,8 +1171,8 @@ def TestNonControllerAPIs(self):
 TODO: Add more tests for APIs
 '''
 try:
-cluster= self.devCtrl.GetClusterHandler()
-clusterInfo= cluster.GetClusterInfoById(0xFFF1FC05) # TestCluster
+cluster = self.devCtrl.GetClusterHandler()
+clusterInfo = cluster.GetClusterInfoById(0xFFF1FC05) # TestCluster
 if clusterInfo["clusterName"] != "Test":
 raise Exception(
 f"Wrong cluster info clusterName: {clusterInfo['clusterName']} expected 'Test'")
@@ -1186,11 +1186,11 @@ def TestFabricScopedCommandDuringPase(self, nodeid: int):

 The nodeid is the PASE pseudo-node-ID used during PASE establishment
 '''
-status= None
+status = None
 try:
-response= asyncio.run(self.devCtrl.SendCommand(
+response = asyncio.run(self.devCtrl.SendCommand(
 nodeid, 0, Clusters.OperationalCredentials.Commands.UpdateFabricLabel("roboto")))
 except IM.InteractionModelError as ex:
-status= ex.status
+status = ex.status

 return status == IM.Status.UnsupportedAccess
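A note on the expected status: fabric-scoped commands such as UpdateFabricLabel act on the accessing fabric, and a PASE session has no accessing fabric, so the server is expected to reject the command with UnsupportedAccess; that is exactly what the final comparison asserts.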