path: root/weed/mq/kafka/integration/seaweedmq_handler_topics.go

package integration

import (
	"context"
	"fmt"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/mq/schema"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
	"github.com/seaweedfs/seaweedfs/weed/security"
	"github.com/seaweedfs/seaweedfs/weed/util"
)

// CreateTopic creates a new topic in both Kafka registry and SeaweedMQ
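//
// A minimal usage sketch (the topic name and partition count below are
// illustrative only):
//
//	if err := h.CreateTopic("orders", 3); err != nil {
//		// handle creation failure
//	}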
func (h *SeaweedMQHandler) CreateTopic(name string, partitions int32) error {
	return h.CreateTopicWithSchema(name, partitions, nil)
}

// CreateTopicWithSchema creates a topic with optional value schema
func (h *SeaweedMQHandler) CreateTopicWithSchema(name string, partitions int32, recordType *schema_pb.RecordType) error {
	return h.CreateTopicWithSchemas(name, partitions, nil, recordType)
}

// CreateTopicWithSchemas creates a topic with optional key and value schemas
func (h *SeaweedMQHandler) CreateTopicWithSchemas(name string, partitions int32, keyRecordType *schema_pb.RecordType, valueRecordType *schema_pb.RecordType) error {
	// Check if topic already exists in filer
	if h.checkTopicInFiler(name) {
		return fmt.Errorf("topic %s already exists", name)
	}

	// Create SeaweedMQ topic reference
	seaweedTopic := &schema_pb.Topic{
		Namespace: "kafka",
		Name:      name,
	}

	// Configure topic with SeaweedMQ broker via gRPC
	if len(h.brokerAddresses) > 0 {
		brokerAddress := h.brokerAddresses[0] // Use first available broker
		glog.V(1).Infof("Configuring topic %s with broker %s", name, brokerAddress)

		// Load security configuration for broker connection
		util.LoadSecurityConfiguration()
		grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.mq")

		err := pb.WithBrokerGrpcClient(false, brokerAddress, grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error {
			// Convert dual schemas to flat schema format
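			// CombineFlatSchemaFromKeyValue merges the key and value record types into a
			// single flat record type and returns the names of the fields that came from
			// the key; those names are passed to the broker as KeyColumns below.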
			var flatSchema *schema_pb.RecordType
			var keyColumns []string
			if keyRecordType != nil || valueRecordType != nil {
				flatSchema, keyColumns = schema.CombineFlatSchemaFromKeyValue(keyRecordType, valueRecordType)
			}

			_, err := client.ConfigureTopic(context.Background(), &mq_pb.ConfigureTopicRequest{
				Topic:             seaweedTopic,
				PartitionCount:    partitions,
				MessageRecordType: flatSchema,
				KeyColumns:        keyColumns,
			})
			if err != nil {
				return fmt.Errorf("configure topic with broker: %w", err)
			}
			glog.V(1).Infof("successfully configured topic %s with broker", name)
			return nil
		})
		if err != nil {
			return fmt.Errorf("failed to configure topic %s with broker %s: %w", name, brokerAddress, err)
		}
	} else {
		glog.Warningf("No brokers available - creating topic %s in gateway memory only (testing mode)", name)
	}

	// Topic is now stored in filer only via SeaweedMQ broker
	// No need to create in-memory topic info structure

	// Offset management now handled directly by SMQ broker - no initialization needed

	// Invalidate cache after successful topic creation
	h.InvalidateTopicExistsCache(name)

	glog.V(1).Infof("Topic %s created successfully with %d partitions", name, partitions)
	return nil
}

// CreateTopicWithRecordType creates a topic with flat schema and key columns
func (h *SeaweedMQHandler) CreateTopicWithRecordType(name string, partitions int32, flatSchema *schema_pb.RecordType, keyColumns []string) error {
	// Check if topic already exists in filer
	if h.checkTopicInFiler(name) {
		return fmt.Errorf("topic %s already exists", name)
	}

	// Create SeaweedMQ topic reference
	seaweedTopic := &schema_pb.Topic{
		Namespace: "kafka",
		Name:      name,
	}

	// Configure topic with SeaweedMQ broker via gRPC
	if len(h.brokerAddresses) > 0 {
		brokerAddress := h.brokerAddresses[0] // Use first available broker
		glog.V(1).Infof("Configuring topic %s with broker %s", name, brokerAddress)

		// Load security configuration for broker connection
		util.LoadSecurityConfiguration()
		grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.mq")

		err := pb.WithBrokerGrpcClient(false, brokerAddress, grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error {
			_, err := client.ConfigureTopic(context.Background(), &mq_pb.ConfigureTopicRequest{
				Topic:             seaweedTopic,
				PartitionCount:    partitions,
				MessageRecordType: flatSchema,
				KeyColumns:        keyColumns,
			})
			if err != nil {
				return fmt.Errorf("failed to configure topic: %w", err)
			}

			glog.V(1).Infof("successfully configured topic %s with broker", name)
			return nil
		})

		if err != nil {
			return err
		}
	} else {
		glog.Warningf("No broker addresses configured, topic %s not created in SeaweedMQ", name)
	}

	// Topic is now stored in filer only via SeaweedMQ broker
	// No need to create in-memory topic info structure

	glog.V(1).Infof("Topic %s created successfully with %d partitions using flat schema", name, partitions)
	return nil
}

// DeleteTopic removes a topic from both Kafka registry and SeaweedMQ
func (h *SeaweedMQHandler) DeleteTopic(name string) error {
	// Check if topic exists in filer
	if !h.checkTopicInFiler(name) {
		return fmt.Errorf("topic %s does not exist", name)
	}

	// Get topic info to determine partition count for cleanup
	topicInfo, exists := h.GetTopicInfo(name)
	if !exists {
		return fmt.Errorf("topic %s info not found", name)
	}

	// Close all publisher sessions for this topic
	if h.brokerClient != nil {
		for partitionID := int32(0); partitionID < topicInfo.Partitions; partitionID++ {
			h.brokerClient.ClosePublisher(name, partitionID)
		}
	}

	// Topic removal from filer would be handled by SeaweedMQ broker
	// No in-memory cache to clean up

	// Offset management handled by SMQ broker - no cleanup needed

	// Drop any cached existence result so later lookups re-query the broker
	h.InvalidateTopicExistsCache(name)

	return nil
}

// TopicExists checks if a topic exists in SeaweedMQ broker (includes in-memory topics)
// Results are cached for topicExistsCacheTTL to reduce broker queries
func (h *SeaweedMQHandler) TopicExists(name string) bool {
	// Check cache first
	h.topicExistsCacheMu.RLock()
	if entry, found := h.topicExistsCache[name]; found {
		if time.Now().Before(entry.expiresAt) {
			h.topicExistsCacheMu.RUnlock()
			return entry.exists
		}
	}
	h.topicExistsCacheMu.RUnlock()

	// Cache miss or expired - query broker
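	// (Two concurrent callers that miss the cache may both query the broker; the
	// last writer simply overwrites the cache entry, which is acceptable here.)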

	// Check via SeaweedMQ broker (includes in-memory topics)
	if h.brokerClient == nil {
		// Return false if broker is unavailable
		return false
	}
	exists, err := h.brokerClient.TopicExists(name)
	if err != nil {
		// Don't cache errors
		return false
	}

	// Update cache
	h.topicExistsCacheMu.Lock()
	h.topicExistsCache[name] = &topicExistsCacheEntry{
		exists:    exists,
		expiresAt: time.Now().Add(h.topicExistsCacheTTL),
	}
	h.topicExistsCacheMu.Unlock()

	return exists
}

// InvalidateTopicExistsCache removes a topic from the existence cache
// Should be called after creating or deleting a topic
func (h *SeaweedMQHandler) InvalidateTopicExistsCache(name string) {
	h.topicExistsCacheMu.Lock()
	delete(h.topicExistsCache, name)
	h.topicExistsCacheMu.Unlock()
}

// GetTopicInfo returns information about a topic from broker
func (h *SeaweedMQHandler) GetTopicInfo(name string) (*KafkaTopicInfo, bool) {
	// Get topic configuration from broker
	if h.brokerClient != nil {
		config, err := h.brokerClient.GetTopicConfiguration(name)
		if err == nil && config != nil {
			topicInfo := &KafkaTopicInfo{
				Name:       name,
				Partitions: config.PartitionCount,
				CreatedAt:  config.CreatedAtNs,
			}
			return topicInfo, true
		}
		glog.V(2).Infof("Failed to get topic configuration for %s from broker: %v", name, err)
	}

	// Fallback: check if topic exists in filer (for backward compatibility)
	if !h.checkTopicInFiler(name) {
		return nil, false
	}

	// Return default info if broker query failed but topic exists in filer
	topicInfo := &KafkaTopicInfo{
		Name:       name,
		Partitions: 1, // Default to 1 partition if broker query failed
		CreatedAt:  0,
	}

	return topicInfo, true
}

// ListTopics returns all topic names from SeaweedMQ broker (includes in-memory topics)
func (h *SeaweedMQHandler) ListTopics() []string {
	// Get topics from SeaweedMQ broker (includes in-memory topics)
	if h.brokerClient != nil {
		topics, err := h.brokerClient.ListTopics()
		if err == nil {
			return topics
		}
	}

	// Return empty list if broker is unavailable
	return []string{}
}

// checkTopicInFiler checks if a topic exists in the filer
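// Topics created through the SeaweedMQ broker show up as directory entries under
// /topics/kafka, so a successful LookupDirectoryEntry means the topic exists.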
func (h *SeaweedMQHandler) checkTopicInFiler(topicName string) bool {
	if h.filerClientAccessor == nil {
		return false
	}

	var exists bool
	h.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		request := &filer_pb.LookupDirectoryEntryRequest{
			Directory: "/topics/kafka",
			Name:      topicName,
		}

		_, err := client.LookupDirectoryEntry(context.Background(), request)
		exists = (err == nil)
		return nil // Don't propagate error, just check existence
	})

	return exists
}

// listTopicsFromFiler lists all topics from the filer
func (h *SeaweedMQHandler) listTopicsFromFiler() []string {
	if h.filerClientAccessor == nil {
		return []string{}
	}

	var topics []string

	h.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		request := &filer_pb.ListEntriesRequest{
			Directory: "/topics/kafka",
		}

		stream, err := client.ListEntries(context.Background(), request)
		if err != nil {
			return nil // Don't propagate error, just return empty list
		}

		for {
			resp, err := stream.Recv()
			if err != nil {
				break // End of stream or error
			}

			if resp.Entry != nil && resp.Entry.IsDirectory {
				topics = append(topics, resp.Entry.Name)
			}
		}
		return nil
	})

	return topics
}