[Vertex AI] Use struct instead of enum for HarmCategory
andrewheard committed Sep 26, 2024
1 parent 5ed86cd commit 6049f5c
Showing 3 changed files with 36 additions and 12 deletions.
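The motivation for the change, sketched below as a minimal standalone example (the `Category` type and its members are hypothetical stand-ins, not the FirebaseVertexAI API): a String-backed enum has no way to represent a raw value the SDK has never seen, whereas a struct wrapping the raw string can carry any value the backend sends while still exposing the known values as static members.

struct Category: Equatable, Hashable {
  let raw: String

  static let harassment = Category(raw: "HARM_CATEGORY_HARASSMENT")
  static let hateSpeech = Category(raw: "HARM_CATEGORY_HATE_SPEECH")
}

// An enum's failable `init(rawValue:)` would return nil for a category added
// to the backend after this SDK version shipped; the struct simply wraps it.
let futureValue = Category(raw: "HARM_CATEGORY_ADDED_LATER")
print(futureValue == .harassment) // false, but the value is still representable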
@@ -23,7 +23,8 @@ extension HarmCategory: CustomStringConvertible {
     case .harassment: "Harassment"
     case .hateSpeech: "Hate speech"
     case .sexuallyExplicit: "Sexually explicit"
-    case .unknown: "Unknown"
+    default:
+      "Unknown"
     }
   }
 }
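The `default:` arm above follows from the enum-to-struct change: the compiler can no longer prove that a switch over HarmCategory covers every value. A rough sketch of the pattern with a hypothetical `Category` type (not the SDK's):

struct Category: Equatable {
  let raw: String

  static let harassment = Category(raw: "HARM_CATEGORY_HARASSMENT")
  static let hateSpeech = Category(raw: "HARM_CATEGORY_HATE_SPEECH")
}

// Equatable lets `switch` match against the static members via `~=`, but a
// `default` arm is now required for values the SDK does not recognize.
func label(for category: Category) -> String {
  switch category {
  case .harassment: return "Harassment"
  case .hateSpeech: return "Hate speech"
  default: return "Unknown"
  }
}

print(label(for: Category(raw: "HARM_CATEGORY_SOMETHING_NEW"))) // Unknown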
43 changes: 33 additions & 10 deletions FirebaseVertexAI/Sources/Safety.swift
@@ -97,21 +97,44 @@ public struct SafetySetting {
 }
 
 /// Categories describing the potential harm a piece of content may pose.
-public enum HarmCategory: String, Sendable {
+public struct HarmCategory: Sendable, Equatable, Hashable {
   /// Unknown. A new server value that isn't recognized by the SDK.
-  case unknown = "HARM_CATEGORY_UNKNOWN"
+  public static var unknown: HarmCategory {
+    return self.init(category: "HARM_CATEGORY_UNKNOWN")
+  }
 
   /// Harassment content.
-  case harassment = "HARM_CATEGORY_HARASSMENT"
+  public static var harassment: HarmCategory {
+    return self.init(category: "HARM_CATEGORY_HARASSMENT")
+  }
 
   /// Negative or harmful comments targeting identity and/or protected attributes.
-  case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
+  public static var hateSpeech: HarmCategory {
+    return self.init(category: "HARM_CATEGORY_HATE_SPEECH")
+  }
 
   /// Contains references to sexual acts or other lewd content.
-  case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
+  public static var sexuallyExplicit: HarmCategory {
+    return self.init(category: "HARM_CATEGORY_SEXUALLY_EXPLICIT")
+  }
 
   /// Promotes or enables access to harmful goods, services, or activities.
-  case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
+  public static var dangerousContent: HarmCategory {
+    return self.init(category: "HARM_CATEGORY_DANGEROUS_CONTENT")
+  }
+
+  static let allCategories = [
+    HarmCategory.harassment.category,
+    HarmCategory.hateSpeech.category,
+    HarmCategory.sexuallyExplicit.category,
+    HarmCategory.dangerousContent.category,
+  ]
+
+  let category: String
+
+  init(category: String) {
+    self.category = category
+  }
 }
 
 // MARK: - Codable Conformances
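Because the struct stores only a String, the synthesized `Equatable` and `Hashable` conformances keep it usable anywhere the enum was, for example as a `Set` element or dictionary key. A hedged usage sketch with a stand-in type (not the SDK's `HarmCategory`):

struct Category: Equatable, Hashable {
  let raw: String

  static let harassment = Category(raw: "HARM_CATEGORY_HARASSMENT")
  static let hateSpeech = Category(raw: "HARM_CATEGORY_HATE_SPEECH")
}

// Values built from the same raw string compare equal and hash identically,
// so set membership and dictionary lookups behave as the enum cases did.
var counts: [Category: Int] = [:]
counts[.harassment, default: 0] += 1

let blocked: Set<Category> = [.harassment, .hateSpeech]
print(blocked.contains(Category(raw: "HARM_CATEGORY_HARASSMENT"))) // true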
@@ -139,17 +162,17 @@ extension SafetyRating: Decodable {}
 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
 extension HarmCategory: Codable {
   public init(from decoder: Decoder) throws {
-    let value = try decoder.singleValueContainer().decode(String.self)
-    guard let decodedCategory = HarmCategory(rawValue: value) else {
+    let category = try decoder.singleValueContainer().decode(String.self)
+    guard HarmCategory.allCategories.contains(category) else {
       VertexLog.error(
         code: .generateContentResponseUnrecognizedHarmCategory,
-        "Unrecognized HarmCategory with value \"\(value)\"."
+        "Unrecognized HarmCategory with value \"\(category)\"."
       )
       self = .unknown
       return
     }
 
-    self = decodedCategory
+    self.init(category: category)
   }
 }
 
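A simplified, self-contained re-creation of the decode path above, under stated assumptions (hypothetical `Category` type; `print` stands in for `VertexLog.error`): recognized strings initialize the value directly, while anything else is logged and mapped to `.unknown`.

import Foundation

struct Category: Equatable, Codable {
  let raw: String

  static let unknown = Category(raw: "HARM_CATEGORY_UNKNOWN")
  static let harassment = Category(raw: "HARM_CATEGORY_HARASSMENT")
  static let allKnown = [Category.harassment.raw]

  init(raw: String) { self.raw = raw }

  init(from decoder: Decoder) throws {
    let raw = try decoder.singleValueContainer().decode(String.self)
    guard Category.allKnown.contains(raw) else {
      print("Unrecognized category \"\(raw)\"") // stand-in for VertexLog.error
      self = .unknown
      return
    }
    self.init(raw: raw)
  }

  func encode(to encoder: Encoder) throws {
    var container = encoder.singleValueContainer()
    try container.encode(raw)
  }
}

let json = Data(#"["HARM_CATEGORY_HARASSMENT", "HARM_CATEGORY_ADDED_LATER"]"#.utf8)
let decoded = try! JSONDecoder().decode([Category].self, from: json)
print(decoded.map(\.raw)) // ["HARM_CATEGORY_HARASSMENT", "HARM_CATEGORY_UNKNOWN"]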
2 changes: 1 addition & 1 deletion FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift
@@ -1422,6 +1422,6 @@ struct AppCheckErrorFake: Error {}
 extension SafetyRating: Swift.Comparable {
   public static func < (lhs: FirebaseVertexAI.SafetyRating,
                         rhs: FirebaseVertexAI.SafetyRating) -> Bool {
-    return lhs.category.rawValue < rhs.category.rawValue
+    return lhs.category.category < lhs.category.category
   }
 }
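The test-only `Comparable` conformance above exists so ratings can be sorted into a deterministic order before assertions are made. A rough usage sketch with a stand-in `Rating` type rather than the SDK's `SafetyRating`:

struct Rating: Comparable {
  let category: String

  // Mirrors the intent of the test extension: order by the category's
  // underlying string so collections compare deterministically.
  static func == (lhs: Rating, rhs: Rating) -> Bool { lhs.category == rhs.category }
  static func < (lhs: Rating, rhs: Rating) -> Bool { lhs.category < rhs.category }
}

let ratings = [Rating(category: "HARM_CATEGORY_HATE_SPEECH"),
               Rating(category: "HARM_CATEGORY_HARASSMENT")]
print(ratings.sorted().map(\.category))
// ["HARM_CATEGORY_HARASSMENT", "HARM_CATEGORY_HATE_SPEECH"]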
