Commit d7a7642

feat: Read and append table data (#36)

1 parent: 810174a
File tree: 8 files changed, +268 −8 lines

nisystemlink/clients/dataframe/_data_frame_client.py (+51 −2)
@@ -31,14 +31,14 @@ def api_info(self) -> models.ApiInfo:

     @get(
         "tables",
-        args=(
+        args=[
             Query("take"),
             Query("id"),
             Query("orderBy"),
             Query("orderByDescending"),
             Query("continuationToken"),
             Query("workspace"),
-        ),
+        ],
     )
     def list_tables(
         self,
@@ -148,3 +148,52 @@ def modify_tables(
             tables were modified successfully.
         """
         ...
+
+    @get(
+        "tables/{id}/data",
+        args=[
+            Path("id"),
+            Query("columns"),
+            Query("orderBy"),
+            Query("orderByDescending"),
+            Query("take"),
+            Query("continuationToken"),
+        ],
+    )
+    def get_table_data(
+        self,
+        id: str,
+        columns: Optional[List[str]] = None,
+        order_by: Optional[List[str]] = None,
+        order_by_descending: Optional[bool] = None,
+        take: Optional[int] = None,
+        continuation_token: Optional[str] = None,
+    ) -> models.PagedTableRows:
+        """Reads raw data from the table identified by its ID.
+
+        Args:
+            id: Unique ID of a DataFrame table.
+            columns: Columns to include in the response. Data will be returned in the same order as
+                the columns. If not specified, all columns are returned.
+            order_by: List of columns to sort by. Multiple columns may be specified to order rows
+                that have the same value for prior columns. The columns used for ordering do not
+                need to be included in the columns list, in which case they are not returned. If
+                not specified, then the order in which results are returned is undefined.
+            order_by_descending: Whether to sort descending instead of ascending. Defaults to false.
+            take: Limits the returned list to the specified number of results. Defaults to 500.
+            continuation_token: The token used to paginate results.
+
+        Returns:
+            models.PagedTableRows: The table data and total number of rows with a continuation token.
+        """
+        ...
+
+    @post("tables/{id}/data", args=[Path, Body])
+    def append_table_data(self, id: str, data: models.AppendTableDataRequest) -> None:
+        """Appends one or more rows of data to the table identified by its ID.
+
+        Args:
+            id: Unique ID of a DataFrame table.
+            data: The rows of data to append and any additional options.
+        """
+        ...
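
Taken together, the two new methods cover the write and read paths of the DataFrame service. A minimal usage sketch, not part of the commit, assuming a configured DataFrameClient and an existing table; the table ID and cell values below are illustrative only:

from nisystemlink.clients.dataframe import DataFrameClient, models

client = DataFrameClient()  # assumes a SystemLink HTTP configuration is available
table_id = "<existing table ID>"  # placeholder

# Append two rows; all cell values are encoded as strings.
frame = models.DataFrame(columns=["index", "value"], data=[["1", "2.5"], ["2", "7.1"]])
client.append_table_data(table_id, models.AppendTableDataRequest(frame=frame))

# Read the rows back, sorted ascending by the "value" column.
result = client.get_table_data(table_id, columns=["index", "value"], order_by=["value"])
print(result.total_row_count, result.frame.data)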

nisystemlink/clients/dataframe/models/__init__.py (+3 −0)
@@ -1,14 +1,17 @@
+from ._append_table_data_request import AppendTableDataRequest
 from ._api_info import ApiInfo, Operation, OperationsV1
 from ._create_table_request import CreateTableRequest
 from ._column import Column
 from ._column_type import ColumnType
+from ._data_frame import DataFrame
 from ._data_type import DataType
 from ._delete_tables_partial_success import DeleteTablesPartialSuccess
 from ._modify_tables_partial_success import ModifyTablesPartialSuccess
 from ._modify_table_request import ColumnMetadataPatch, ModifyTableRequest
 from ._modify_tables_request import ModifyTablesRequest, TableMetdataModification
 from ._order_by import OrderBy
 from ._paged_tables import PagedTables
+from ._paged_table_rows import PagedTableRows
 from ._query_tables_request import QueryTablesRequest
 from ._table_metadata import TableMetadata

nisystemlink/clients/dataframe/models/_append_table_data_request.py (new file, +17)
@@ -0,0 +1,17 @@
+from typing import Optional
+
+from nisystemlink.clients.core._uplink._json_model import JsonModel
+
+from ._data_frame import DataFrame
+
+
+class AppendTableDataRequest(JsonModel):
+    """Contains the rows to append and optional flags. The ``frame`` field is
+    required unless ``endOfData`` is true.
+    """
+
+    frame: Optional[DataFrame] = None
+    """The data frame containing the rows to append."""
+
+    end_of_data: Optional[bool] = None
+    """Whether the table should expect any additional rows to be appended in future requests."""
nisystemlink/clients/dataframe/models/_data_frame.py (new file, +57)
@@ -0,0 +1,57 @@
+from typing import List, Optional
+
+from nisystemlink.clients.core._uplink._json_model import JsonModel
+
+
+class DataFrame(JsonModel):
+    """Data read from or to be written to a table.
+
+    Values may be ``None`` (if the column is of type ``NULLABLE``) or encoded as
+    a string in a format according to each column's datatype:
+
+    * BOOL: One of ``"true"`` or ``"false"``, case-insensitive.
+    * INT32: Any integer number in the range [-2147483648, 2147483647],
+      surrounded by quotes.
+    * INT64: Any integer number in the range [-9223372036854775808,
+      9223372036854775807], surrounded by quotes.
+    * FLOAT32: A decimal number using a period for the decimal point, optionally
+      in scientific notation, in the range [-3.40282347E+38, 3.40282347E+38],
+      surrounded by quotes. Not all values within the range can be represented
+      with 32 bits. To preserve the exact binary encoding of the value when
+      converting to a string, clients should serialize 9 digits after the
+      decimal. Instead of a number, the value may be ``"NaN"`` (not a number),
+      ``"Infinity"`` (positive infinity), or ``"-Infinity"`` (negative
+      infinity), case-sensitive.
+    * FLOAT64: A decimal number using a period for the decimal point, optionally
+      in scientific notation, in the range [-1.7976931348623157E+308,
+      1.7976931348623157E+308], surrounded by quotes. Not all values within the
+      range can be represented with 64 bits. To preserve the exact binary
+      encoding of the value when converting to a string, clients should
+      serialize 17 digits after the decimal. Instead of a number, the value may
+      be ``"NaN"`` (not a number), ``"Infinity"`` (positive infinity), or
+      ``"-Infinity"`` (negative infinity), case-sensitive.
+    * STRING: Any quoted string.
+    * TIMESTAMP: A date and time with millisecond precision in ISO-8601 format
+      and time zone. For example: ``"2022-08-19T16:17:30.123Z"``. If a time zone
+      is not provided, UTC is assumed. If a time zone other than UTC is
+      provided, the value will be converted to UTC. If more than three digits of
+      fractional seconds are provided, the time will be truncated to three
+      digits (i.e. milliseconds).
+
+    The format is the same as a serialized Pandas DataFrame with orient="split"
+    and index=False. See
+    https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html.
+
+    When providing a DataFrame for appending rows, any of the table's columns
+    not specified will receive a value of ``None``. If any such columns aren't
+    nullable, an error will be returned. If the entire columns property is left
+    out, each row is assumed to contain all columns in the order specified when
+    the table was created.
+    """
+
+    columns: Optional[List[str]] = None
+    """The names and order of the columns included in the data frame."""
+
+    data: List[List[Optional[str]]]
+    """The data for each row with the order specified in the columns property.
+    Must contain a value for each column in the columns property."""
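
Because the layout mirrors a pandas DataFrame serialized with orient="split" and index=False, rows can be prepared from pandas by hand. A rough sketch, not part of the commit; the column names and values are illustrative, and str() does not guarantee the 9/17-digit float serialization recommended above:

import pandas as pd

from nisystemlink.clients.dataframe import models

table = pd.DataFrame({"index": [1, 2, 3], "value": [3.3, None, 1.1]})

frame = models.DataFrame(
    columns=list(table.columns),
    data=[
        # Encode every cell as a string; missing values become None.
        [None if pd.isna(cell) else str(cell) for cell in row]
        for row in table.itertuples(index=False)
    ],
)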
nisystemlink/clients/dataframe/models/_paged_table_rows.py (new file, +13)
@@ -0,0 +1,13 @@
+from nisystemlink.clients.core._uplink._with_paging import WithPaging
+
+from ._data_frame import DataFrame
+
+
+class PagedTableRows(WithPaging):
+    """Contains the result of a query for rows of data."""
+
+    frame: DataFrame
+    """The data frame containing the rows of data."""
+
+    total_row_count: int
+    """The total number of rows matched by the query across all pages of results."""

poetry.lock (+46 −1)

(Generated lock file; changes not rendered.)

pyproject.toml (+10 −5)
@@ -3,7 +3,11 @@ name = "nisystemlink-clients"
 version = "0.7.0"
 description = "NI-SystemLink Python API"
 authors = ["National Instruments"]
-maintainers = ["Carson Moore <carson.moore@ni.com>", "Paul Spangler <paul.spangler@ni.com>", "Cameron Waterman <cameron.waterman@ni.com>"]
+maintainers = [
+    "Carson Moore <carson.moore@ni.com>",
+    "Paul Spangler <paul.spangler@ni.com>",
+    "Cameron Waterman <cameron.waterman@ni.com>",
+]
 keywords = ["nisystemlink", "systemlink"]
 license = "MIT"
 readme = "README.rst"
@@ -42,6 +46,7 @@ mypy = "^0.982"
 flake8-docstrings = "^1.6.0"
 poethepoet = "^0.16.4"
 types-requests = "^2.28.11.4"
+responses = "^0.22.0"

 [tool.poe.tasks]
 test = "pytest tests -m \"(not slow) and (not cloud) and (not enterprise)\""
@@ -67,10 +72,10 @@ markers = [
 exclude = ".*\\.pyi"

 [tool.semantic_release]
-branch = "master"
-version_toml = ["pyproject.toml:tool.poetry.version"]
-build_command = "poetry build"
-major_on_zero = false
+branch = "master"
+version_toml = ["pyproject.toml:tool.poetry.version"]
+build_command = "poetry build"
+major_on_zero = false

 [build-system]
 requires = ["poetry-core"]

tests/integration/dataframe/test_dataframe.py

+71
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
 from typing import List, Optional

 import pytest  # type: ignore
+import responses
 from nisystemlink.clients.core import ApiException
 from nisystemlink.clients.dataframe import DataFrameClient
 from nisystemlink.clients.dataframe import models
@@ -276,3 +277,73 @@ def test__modify_tables__returns_partial_success(self, client: DataFrameClient):
         assert response.modified_table_ids == [id]
         assert response.failed_modifications == [updates[1]]
         assert len(response.error.inner_errors) == 1
+
+    def test__read_and_write_data__works(self, client: DataFrameClient, create_table):
+        id = create_table(
+            models.CreateTableRequest(
+                columns=[
+                    models.Column(
+                        name="index",
+                        data_type=models.DataType.Int32,
+                        column_type=models.ColumnType.Index,
+                    ),
+                    models.Column(
+                        name="value",
+                        data_type=models.DataType.Float64,
+                        column_type=models.ColumnType.Nullable,
+                    ),
+                    models.Column(
+                        name="ignore_me",
+                        data_type=models.DataType.Bool,
+                        column_type=models.ColumnType.Nullable,
+                    ),
+                ]
+            )
+        )
+
+        frame = models.DataFrame(
+            columns=["index", "value", "ignore_me"],
+            data=[["1", "3.3", "True"], ["2", None, "False"], ["3", "1.1", "True"]],
+        )
+
+        client.append_table_data(
+            id, models.AppendTableDataRequest(frame=frame, end_of_data=True)
+        )
+
+        # TODO: Remove mock when service supports flushing
+        with responses.RequestsMock() as rsps:
+            rsps.add(
+                responses.GET,
+                f"{client.session.base_url}tables/{id}/data",
+                json={
+                    "frame": {
+                        "columns": ["index", "value"],
+                        "data": [["3", "1.1"], ["1", "3.3"], ["2", None]],
+                    },
+                    "totalRowCount": 3,
+                    "continuationToken": None,
+                },
+            )
+
+            response = client.get_table_data(
+                id, columns=["index", "value"], order_by=["value"]
+            )
+
+            assert response.total_row_count == 3
+            assert response.frame == models.DataFrame(
+                columns=["index", "value"],
+                data=[["3", "1.1"], ["1", "3.3"], ["2", None]],
+            )
+
+    def test__write_invalid_data__raises(
+        self, client: DataFrameClient, test_tables: List[str]
+    ):
+        id = test_tables[0]
+
+        frame = models.DataFrame(
+            columns=["index", "non_existent_column"],
+            data=[["1", "2"], ["2", "2"], ["3", "3"]],
+        )
+
+        with pytest.raises(ApiException, match="400 Bad Request"):
+            client.append_table_data(id, models.AppendTableDataRequest(frame=frame))
