mdio/dataset.h (8 changes: 4 additions & 4 deletions)
@@ -53,7 +53,7 @@ namespace internal {
* @return An `mdio::Result` containing the .zarray JSON metadata on success, or
* an error on failure.
*/
-Result<nlohmann::json> get_zarray(const ::nlohmann::json metadata) {
+inline Result<nlohmann::json> get_zarray(const ::nlohmann::json metadata) {
// derive .zarray json metadata (without reading it).
auto json =
metadata; // Why am I doing this? It's an extra copy that does nothing!
@@ -125,7 +125,7 @@ Result<nlohmann::json> get_zarray(const ::nlohmann::json metadata) {
* @param json_variables The JSON variables.
* @return An `mdio::Future<void>` representing the asynchronous write.
*/
-Future<void> write_zmetadata(
+inline Future<void> write_zmetadata(
const ::nlohmann::json& dataset_metadata,
const std::vector<::nlohmann::json>& json_variables) {
// header material at the root of the dataset ...
@@ -252,7 +252,7 @@ Future<void> write_zmetadata(
* It will default to the "file" driver if no prefix is found.
* @param dataset_path The path to the dataset.
*/
-Future<tensorstore::KvStore> dataset_kvs_store(
+inline Future<tensorstore::KvStore> dataset_kvs_store(
const std::string& dataset_path) {
// the tensorstore driver needs a bucket field
::nlohmann::json kvstore;
@@ -298,7 +298,7 @@ Future<tensorstore::KvStore> dataset_kvs_store(
* @return An `mdio::Future` containing the .zmetadata JSON on success, or an
* error on failure.
*/
-Future<std::tuple<::nlohmann::json, std::vector<::nlohmann::json>>>
+inline Future<std::tuple<::nlohmann::json, std::vector<::nlohmann::json>>>
from_zmetadata(const std::string& dataset_path) {
// e.g. dataset_path = "zarrs/acceptance/";
// FIXME - enable async
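The pattern repeated across this PR is the same in every header: free functions defined at namespace scope gain the `inline` specifier. Without it, each translation unit that includes the header emits its own external definition of the function, and linking any two of them together fails with multiple-definition errors; `inline` lets identical definitions across translation units be treated as one entity. A minimal sketch of the failure mode and the fix, with hypothetical file and function names:

```cpp
// util.h (hypothetical): a free function defined in a header.
// Without `inline`, a.cc and b.cc below would each emit an external
// definition of greet(), and linking them together fails with a
// "multiple definition of `greet`" error.
#pragma once
#include <string>

inline std::string greet(const std::string& name) {  // `inline` is the fix
  return "hello " + name;
}

// a.cc:  #include "util.h"   // calls greet()
// b.cc:  #include "util.h"   // calls greet()
```

Function templates (such as `from_variable` in mdio/variable.h) and functions defined inside a class body may already be defined in multiple translation units, which is presumably why only the plain free functions needed the specifier.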
mdio/dataset_factory.h (27 changes: 14 additions & 13 deletions)
@@ -36,7 +36,7 @@
* @param raw A string to be encoded
* @return A string encoded in base64
*/
-std::string encode_base64(const std::string raw) {
+inline std::string encode_base64(const std::string raw) {
std::string encoded = absl::Base64Escape(raw);
return encoded;
}
@@ -50,7 +50,7 @@ std::string encode_base64(const std::string raw) {
* @return A string representing the dtype in numpy format limited to the dtypes
* supported by MDIO Dataset
*/
-tensorstore::Result<std::string> to_zarr_dtype(const std::string dtype) {
+inline tensorstore::Result<std::string> to_zarr_dtype(const std::string dtype) {
// Convert the input dtype to Zarr dtype
if (dtype == "int8") {
return "<i1";
@@ -93,8 +93,8 @@ tensorstore::Result<std::string> to_zarr_dtype(const std::string dtype) {
* @return OkStatus if successful, InvalidArgumentError if dtype is not
* supported
*/
-absl::Status transform_dtype(nlohmann::json& input /*NOLINT*/,
-                             nlohmann::json& variable /*NOLINT*/) {
+inline absl::Status transform_dtype(nlohmann::json& input /*NOLINT*/,
+                                    nlohmann::json& variable /*NOLINT*/) {
if (input["dataType"].contains("fields")) {
nlohmann::json dtypeFields = nlohmann::json::array();
for (const auto& field : input["dataType"]["fields"]) {
@@ -124,8 +124,8 @@ absl::Status transform_dtype(nlohmann::json& input /*NOLINT*/,
* @return OkStatus if successful, InvalidArgumentError if compressor is invalid
* for MDIO
*/
-absl::Status transform_compressor(nlohmann::json& input /*NOLINT*/,
-                                  nlohmann::json& variable /*NOLINT*/) {
+inline absl::Status transform_compressor(nlohmann::json& input /*NOLINT*/,
+                                         nlohmann::json& variable /*NOLINT*/) {
if (input.contains("compressor")) {
if (input["compressor"].contains("name")) {
if (input["compressor"]["name"] != "blosc") {
@@ -182,7 +182,7 @@ absl::Status transform_compressor(nlohmann::json& input /*NOLINT*/,
* before this step This presumes that the user does not attempt to use these
* functions directly
*/
-void transform_shape(
+inline void transform_shape(
nlohmann::json& input /*NOLINT*/, nlohmann::json& variable /*NOLINT*/,
std::unordered_map<std::string, uint64_t>& dimensionMap /*NOLINT*/) {
if (input["dimensions"][0].is_object()) {
@@ -208,8 +208,8 @@ void transform_shape(
* @param variable A Variable stub (Will be modified)
* @return OkStatus if successful, InvalidArgumentError if the path is invalid
*/
-absl::Status transform_metadata(const std::string& path,
-                                nlohmann::json& variable /*NOLINT*/) {
+inline absl::Status transform_metadata(const std::string& path,
+                                       nlohmann::json& variable /*NOLINT*/) {
std::string bucket =
"NULL"; // Default value, if is NULL don't add a bucket field
std::string driver = "file";
@@ -261,7 +261,7 @@ absl::Status transform_metadata(const std::string& path,
* @param dimensionMap A map of dimension names to sizes
* @return A Variable spec or an error if the Variable spec is invalid
*/
-tensorstore::Result<nlohmann::json> from_json_to_spec(
+inline tensorstore::Result<nlohmann::json> from_json_to_spec(
nlohmann::json& json /*NOLINT*/,
std::unordered_map<std::string, uint64_t>& dimensionMap /*NOLINT*/,
const std::string& path) {
@@ -404,8 +404,8 @@ tensorstore::Result<nlohmann::json> from_json_to_spec(
* @return A map of dimension names to sizes or error if the dimensions are not
* consistently sized
*/
-tensorstore::Result<std::unordered_map<std::string, uint64_t>> get_dimensions(
-    nlohmann::json& spec /*NOLINT*/) {
+inline tensorstore::Result<std::unordered_map<std::string, uint64_t>>
+get_dimensions(nlohmann::json& spec /*NOLINT*/) {
std::unordered_map<std::string, uint64_t> dimensions;
for (auto& variable : spec["variables"]) {
if (variable["dimensions"][0].is_object()) {
@@ -438,7 +438,8 @@ tensorstore::Result<std::unordered_map<std::string, uint64_t>> get_dimensions(
* @param spec A Dataset spec
* @return A vector of Variable specs or an error if the Dataset spec is invalid
*/
-tensorstore::Result<std::tuple<nlohmann::json, std::vector<nlohmann::json>>>
+inline tensorstore::Result<
+    std::tuple<nlohmann::json, std::vector<nlohmann::json>>>
Construct(nlohmann::json& spec /*NOLINT*/, const std::string& path) {
// Validation should only return status codes. If it returns data then it
// should be a "constructor"
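For context on `to_zarr_dtype` above: Zarr v2 metadata encodes dtypes as numpy typestrings, a byte-order prefix ("<" for little-endian) followed by a kind character and a byte count, so "int8" maps to "<i1", "float32" to "<f4", and so on. A reduced, illustrative sketch of that convention (the function name and the trimmed dtype set are mine, not the library's):

```cpp
#include <optional>
#include <string>

// Illustrative only: numpy/zarr typestrings are "<" (little-endian) + kind
// ('i' signed, 'u' unsigned, 'f' float) + size in bytes. The real
// to_zarr_dtype returns a tensorstore::Result and covers every dtype MDIO
// supports, erroring out on anything else.
std::optional<std::string> to_numpy_typestr(const std::string& dtype) {
  if (dtype == "int8") return "<i1";
  if (dtype == "int32") return "<i4";
  if (dtype == "uint64") return "<u8";
  if (dtype == "float32") return "<f4";
  if (dtype == "float64") return "<f8";
  return std::nullopt;  // unsupported dtype
}
```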
mdio/dataset_validator.h (10 changes: 5 additions & 5 deletions)
@@ -30,8 +30,8 @@
* @brief Checks if a key exists in a map
* Specific for our case of {coordinate: index} mapping
*/
-bool contains(const std::unordered_set<std::string>& set,
-              const std::string key) {
+inline bool contains(const std::unordered_set<std::string>& set,
+                     const std::string key) {
return set.count(key);
}

@@ -42,7 +42,7 @@ bool contains(const std::unordered_set<std::string>& set,
* @return OkStatus if valid, NotFoundError if schema file load fails,
* InvalidArgumentError if validation fails for any reason
*/
-absl::Status validate_schema(nlohmann::json& spec /*NOLINT*/) {
+inline absl::Status validate_schema(nlohmann::json& spec /*NOLINT*/) {
nlohmann::json targetSchema =
nlohmann::json::parse(kDatasetSchema, nullptr, false);
if (targetSchema.is_discarded()) {
@@ -72,7 +72,7 @@ absl::Status validate_schema(nlohmann::json& spec /*NOLINT*/) {
* @return OkStatus if valid, InvalidArgumentError if a coordinate does not have
* a matching Variable.
*/
-absl::Status validate_coordinates_present(const nlohmann::json& spec) {
+inline absl::Status validate_coordinates_present(const nlohmann::json& spec) {
// Build a mapping of all the dimension coordinates
std::unordered_set<std::string>
dimension; // name of all 1-d Variables who's name matches the dimension
@@ -145,7 +145,7 @@ absl::Status validate_coordinates_present(const nlohmann::json& spec) {
* reason

*/
-absl::Status validate_dataset(nlohmann::json& spec /*NOLINT*/) {
+inline absl::Status validate_dataset(nlohmann::json& spec /*NOLINT*/) {
absl::Status schemaStatus = validate_schema(spec);
if (!schemaStatus.ok()) {
return schemaStatus;
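One detail worth keeping in mind when reading `validate_schema` above: the embedded schema is parsed with nlohmann::json's non-throwing overload (callback `nullptr`, `allow_exceptions` false), so a malformed document comes back as a discarded value rather than throwing `json::parse_error`. A minimal sketch of that pattern:

```cpp
#include <string>

#include <nlohmann/json.hpp>

// parse(input, callback, allow_exceptions): with allow_exceptions = false a
// malformed document yields a value for which is_discarded() returns true,
// instead of throwing nlohmann::json::parse_error.
bool is_valid_json(const std::string& text) {
  nlohmann::json doc =
      nlohmann::json::parse(text, nullptr, /*allow_exceptions=*/false);
  return !doc.is_discarded();
}
```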
mdio/utils/delete.h (2 changes: 1 addition & 1 deletion)
@@ -34,7 +34,7 @@ namespace utils {
* @return OK result if the dataset was valid and deleted successfully,
* otherwise an error result
*/
-Result<void> DeleteDataset(const std::string dataset_path) {
+inline Result<void> DeleteDataset(const std::string dataset_path) {
// Open the dataset
// This is to ensure that what is getting deleted by MDIO is a valid MDIO
// dataset itself.
mdio/variable.h (8 changes: 5 additions & 3 deletions)
@@ -214,7 +214,7 @@ namespace internal {
* @return A driver specific message if the status is a missing driver message,
* otherwise the original status.
*/
-absl::Status CheckMissingDriverStatus(const absl::Status& status) {
+inline absl::Status CheckMissingDriverStatus(const absl::Status& status) {
std::string error(status.message());
if (error.find("Error parsing object member \"driver\"") !=
std::string::npos) {
@@ -1837,10 +1837,12 @@ Result<VariableData<T, R, OriginKind>> from_variable(
size_t element_size = variable.dtype().size();

if (variable.dtype() == constants::kFloat32) {
-auto* data = reinterpret_cast<float*>(_array.data());
+auto* data =
+    reinterpret_cast<float*>(_array.byte_strided_origin_pointer().get());
std::fill_n(data, num_elements, std::numeric_limits<float>::quiet_NaN());
} else { // double
-auto* data = reinterpret_cast<double*>(_array.data());
+auto* data =
+    reinterpret_cast<double*>(_array.byte_strided_origin_pointer().get());
std::fill_n(data, num_elements, std::numeric_limits<double>::quiet_NaN());
}
}
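The last hunk also swaps `_array.data()` for `_array.byte_strided_origin_pointer().get()` when filling the freshly allocated buffer with NaN. For a tensorstore array whose index domain starts at a non-zero origin, `data()` is the base pointer corresponding to the all-zero index vector, which need not be the address of the first stored element; the byte-strided origin pointer addresses the element at the array's origin, so a flat `std::fill_n` from it covers exactly the allocated elements. A sketch of the same fill, assuming a contiguous array as produced by allocation (`FillWithNaN` is my name, not part of mdio):

```cpp
#include <algorithm>
#include <limits>

#include "tensorstore/array.h"

// Mirrors the fixed code in from_variable: fill a contiguous float array with
// NaN. byte_strided_origin_pointer() points at the element at the array's
// origin (the first stored element); data() would instead be the base pointer
// for index {0, 0, ...}, which for an offset array can lie outside the
// allocation.
void FillWithNaN(const tensorstore::SharedOffsetArray<float>& arr) {
  float* first = arr.byte_strided_origin_pointer().get();
  std::fill_n(first, arr.num_elements(),
              std::numeric_limits<float>::quiet_NaN());
}
```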