Fix typo in field descriptions #505

Merged: 2 commits, May 17, 2023

src/datasets/datasets.md (14 changes: 7 additions & 7 deletions)
@@ -87,13 +87,13 @@ fields:
type: string
required: true
index: true
-description: "Owner or custodian of the dataset, usually first name + last name. The string may contain a list of persons, which should then be seperated by semicolons."
+description: "Owner or custodian of the dataset, usually first name + last name. The string may contain a list of persons, which should then be separated by semicolons."
file: dataset
schema:
type: string
swagger:
type: String,
-description: "Owner or custodian of the dataset, usually first name + last name. The string may contain a list of persons, which should then be seperated by semicolons."
+description: "Owner or custodian of the dataset, usually first name + last name. The string may contain a list of persons, which should then be separated by semicolons."
database:
type: String
required: true
@@ -102,14 +102,14 @@ fields:
type: string
legacy:
type: string
-description: "Email of the owner or custodian of the dataset. The string may contain a list of emails, which should then be seperated by semicolons."
+description: "Email of the owner or custodian of the dataset. The string may contain a list of emails, which should then be separated by semicolons."
file: dataset
schema:
type: string
swagger:
type: String
required: false
-description: "Email of the owner or custodian of the dataset. The string may contain a list of emails, which should then be seperated by semicolons."
+description: "Email of the owner or custodian of the dataset. The string may contain a list of emails, which should then be separated by semicolons."
database:
type: String
required: false
@@ -134,13 +134,13 @@ fields:
type: string
required: true
index: true
-description: "Email of contact person for this dataset. The string may contain a list of emails, which should then be seperated by semicolons."
+description: "Email of contact person for this dataset. The string may contain a list of emails, which should then be separated by semicolons."
file: dataset
schema:
swagger:
type: String,
required: true,
-description: "Email of contact person for this dataset. The string may contain a list of emails, which should then be seperated by semicolons."
+description: "Email of contact person for this dataset. The string may contain a list of emails, which should then be separated by semicolons."
database:
type: String
required: true
@@ -552,7 +552,7 @@ fields:
swagger:
type: "array",
items_ref_schema: Datablock
-description: "When archiving a dataset, all files contained in the dataset are listed here together with their checksum information. Several datablocks can be created if the file listing is too long for a single datablock. This partitioning decision is done by the archiving system to allow for chunks of datablocks with managable sizes. E.g a datasets consisting of 10 TB of data could be split into 10 datablocks of about 1 TB each. The upper limit set by the data catalog system itself is given by the fact that documents must be smaller than 16 MB, which typically allows for datasets of about 100000 files."
+description: "When archiving a dataset, all files contained in the dataset are listed here together with their checksum information. Several datablocks can be created if the file listing is too long for a single datablock. This partitioning decision is done by the archiving system to allow for chunks of datablocks with manageable sizes. E.g a datasets consisting of 10 TB of data could be split into 10 datablocks of about 1 TB each. The upper limit set by the data catalog system itself is given by the fact that documents must be smaller than 16 MB, which typically allows for datasets of about 100000 files."
database:
type: [DatablockSchema]
- name: scientificMetadata
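The datablock description just above carries a concrete sizing argument: the archiving system partitions a dataset's file listing into several datablocks of manageable size, and the hard ceiling comes from the 16 MB document limit. The quoted figure of about 100000 files follows from that limit by simple arithmetic: at roughly 160 bytes of listing metadata per file, 16 MB holds on the order of 10^5 entries. A minimal TypeScript sketch of such a partitioning rule follows; FileEntry, the byte and file-count targets, and partitionIntoDatablocks are illustrative assumptions, not SciCat's actual archiving code.

// Hypothetical sketch of the partitioning rule described above, not
// SciCat's implementation. Splits a file listing into datablocks,
// capping the data volume per block (the 10 TB -> 10 x 1 TB example)
// and the number of listed files per block (so the serialized
// datablock document stays under the 16 MB limit).
interface FileEntry {
  path: string;
  size: number; // file size in bytes
  chk: string; // checksum recorded for archiving
}

function partitionIntoDatablocks(
  files: FileEntry[],
  maxDataBytes = 1e12, // assumed ~1 TB of payload per datablock
  maxFiles = 100_000, // keeps the listing below the document limit
): FileEntry[][] {
  const blocks: FileEntry[][] = [];
  let current: FileEntry[] = [];
  let bytes = 0;
  for (const f of files) {
    const blockFull =
      current.length >= maxFiles || bytes + f.size > maxDataBytes;
    if (current.length > 0 && blockFull) {
      blocks.push(current);
      current = [];
      bytes = 0;
    }
    current.push(f);
    bytes += f.size;
  }
  if (current.length > 0) blocks.push(current);
  return blocks;
}

With the description's own example, 10 TB of data under a 1 TB cap yields ten datablocks.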
src/datasets/dto/create-dataset.dto.ts (6 changes: 3 additions & 3 deletions)
@@ -28,7 +28,7 @@ export class CreateDatasetDto extends OwnableDto {
type: String,
required: true,
description:
-"Owner or custodian of the dataset, usually first name + last name. The string may contain a list of persons, which should then be seperated by semicolons.",
+"Owner or custodian of the dataset, usually first name + last name. The string may contain a list of persons, which should then be separated by semicolons.",
})
@IsString()
readonly owner: string;
@@ -37,7 +37,7 @@ export class CreateDatasetDto extends OwnableDto {
type: String,
required: false,
description:
-"Email of the owner or custodian of the dataset. The string may contain a list of emails, which should then be seperated by semicolons.",
+"Email of the owner or custodian of the dataset. The string may contain a list of emails, which should then be separated by semicolons.",
})
@IsOptional()
@IsEmail()
@@ -57,7 +57,7 @@ export class CreateDatasetDto extends OwnableDto {
type: String,
required: true,
description:
-"Email of the contact person for this dataset. The string may contain a list of emails, which should then be seperated by semicolons.",
+"Email of the contact person for this dataset. The string may contain a list of emails, which should then be separated by semicolons.",
})
@IsEmail()
readonly contactEmail: string;
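One thing the corrected descriptions make explicit is that a single string field may carry several emails separated by semicolons, while the DTO above validates ownerEmail and contactEmail with class-validator's @IsEmail(), which checks one address. If the list form is meant to pass validation, a custom decorator along the following lines would match the documented convention. This is a sketch under that assumption; IsSemicolonSeparatedEmails is a hypothetical name, not part of this PR or of class-validator.

import { isEmail, registerDecorator, ValidationOptions } from "class-validator";

// Hypothetical decorator: accepts one email, or several emails
// separated by semicolons, as the corrected descriptions specify.
export function IsSemicolonSeparatedEmails(options?: ValidationOptions) {
  return function (object: object, propertyName: string) {
    registerDecorator({
      name: "isSemicolonSeparatedEmails",
      target: object.constructor,
      propertyName,
      options,
      validator: {
        validate(value: unknown): boolean {
          if (typeof value !== "string") return false;
          return value
            .split(";")
            .map((part) => part.trim())
            .every((part) => isEmail(part));
        },
      },
    });
  };
}

A field such as contactEmail could then use @IsSemicolonSeparatedEmails() in place of @IsEmail() without changing its description.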
src/datasets/schemas/dataset.schema.ts (8 changes: 4 additions & 4 deletions)
@@ -72,7 +72,7 @@ export class DatasetClass extends OwnableClass {
type: String,
required: true,
description:
-"Owner or custodian of the dataset, usually first name + last name. The string may contain a list of persons, which should then be seperated by semicolons.",
+"Owner or custodian of the dataset, usually first name + last name. The string may contain a list of persons, which should then be separated by semicolons.",
})
@Prop({
type: String,
@@ -85,7 +85,7 @@ export class DatasetClass extends OwnableClass {
type: String,
required: false,
description:
-"Email of the owner or custodian of the dataset. The string may contain a list of emails, which should then be seperated by semicolons.",
+"Email of the owner or custodian of the dataset. The string may contain a list of emails, which should then be separated by semicolons.",
})
@Prop({
type: String,
@@ -109,7 +109,7 @@ export class DatasetClass extends OwnableClass {
type: String,
required: true,
description:
-"Email of the contact person for this dataset. The string may contain a list of emails, which should then be seperated by semicolons.",
+"Email of the contact person for this dataset. The string may contain a list of emails, which should then be separated by semicolons.",
})
@Prop({
type: String,
@@ -373,7 +373,7 @@ export class DatasetClass extends OwnableClass {
items: { $ref: getSchemaPath(Datablock) },
required: false,
description:
-"When archiving a dataset, all files contained in the dataset are listed here together with their checksum information. Several datablocks can be created if the file listing is too long for a single datablock. This partitioning decision is done by the archiving system to allow for chunks of datablocks with managable sizes. E.g a datasets consisting of 10 TB of data could be split into 10 datablocks of about 1 TB each. The upper limit set by the data catalog system itself is given by the fact that documents must be smaller than 16 MB, which typically allows for datasets of about 100000 files.",
+"When archiving a dataset, all files contained in the dataset are listed here together with their checksum information. Several datablocks can be created if the file listing is too long for a single datablock. This partitioning decision is done by the archiving system to allow for chunks of datablocks with manageable sizes. E.g a datasets consisting of 10 TB of data could be split into 10 datablocks of about 1 TB each. The upper limit set by the data catalog system itself is given by the fact that documents must be smaller than 16 MB, which typically allows for datasets of about 100000 files.",
})
@Prop({ type: [DatablockSchema], default: [] })
datablocks: Datablock[];