# frozen_string_literal: true
module Gitlab
module Elasticsearch
class Logs
# How many log lines to fetch in a query
LOGS_LIMIT = 500
def initialize(client)
@client = client
end
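# Returns up to LOGS_LIMIT log entries for the given pod, ordered oldest
# to newest. container_name, search, start_time and end_time are optional
# filters; when omitted, logs from all containers and all times are returned.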
def pod_logs(namespace, pod_name, container_name = nil, search = nil, start_time = nil, end_time = nil)
query = { bool: { must: [] } }.tap do |q|
filter_pod_name(q, pod_name)
filter_namespace(q, namespace)
filter_container_name(q, container_name)
filter_search(q, search)
filter_times(q, start_time, end_time)
end
body = build_body(query)
response = @client.search body: body
format_response(response)
end
private
def build_body(query)
{
query: query,
# reverse order so we can query N-most recent records
sort: [
{ "@timestamp": { order: :desc } },
{ "offset": { order: :desc } }
],
# only return these fields in the response
_source: ["@timestamp", "message"],
# fixed limit for now, we should support paginated queries
size: ::Gitlab::Elasticsearch::Logs::LOGS_LIMIT
}
end
def filter_pod_name(query, pod_name)
query[:bool][:must] << {
match_phrase: {
"kubernetes.pod.name" => {
query: pod_name
}
}
}
end
def filter_namespace(query, namespace)
query[:bool][:must] << {
match_phrase: {
"kubernetes.namespace" => {
query: namespace
}
}
}
end
def filter_container_name(query, container_name)
# A pod can contain multiple containers.
# By default we return logs from every container
return if container_name.nil?
query[:bool][:must] << {
match_phrase: {
"kubernetes.container.name" => {
query: container_name
}
}
}
end
def filter_search(query, search)
return if search.nil?
query[:bool][:must] << {
simple_query_string: {
query: search,
fields: [:message],
default_operator: :and
}
}
end
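# Builds a half-open time range filter on @timestamp: entries at or
# after start_time (gte) and strictly before end_time (lt).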
def filter_times(query, start_time, end_time)
return unless start_time || end_time
time_range = { range: { :@timestamp => {} } }.tap do |tr|
tr[:range][:@timestamp][:gte] = start_time if start_time
tr[:range][:@timestamp][:lt] = end_time if end_time
end
query[:bool][:filter] = [time_range]
end
def format_response(response)
result = response.fetch("hits", {}).fetch("hits", []).map do |hit|
{
timestamp: hit["_source"]["@timestamp"],
message: hit["_source"]["message"]
}
end
# we queried for the N-most recent records but we want them ordered oldest to newest
result.reverse
end
end
end
end
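# Illustrative usage sketch (an assumption, not part of the change itself):
# it presumes an elasticsearch-ruby client pointed at the cluster's log
# indices; the URL and filter values below are placeholders.
#
#   client = ::Elasticsearch::Client.new(url: 'http://localhost:9200')
#   logs = ::Gitlab::Elasticsearch::Logs.new(client)
#   entries = logs.pod_logs(
#     'autodevops-deploy-9-production', # namespace
#     'production-6866bc8974-m4sk4',    # pod name
#     'auto-deploy-app',                # container name (optional)
#     'error'                           # search term (optional)
#   )
#   entries.each { |entry| puts "#{entry[:timestamp]} #{entry[:message]}" }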
# frozen_string_literal: true
module Gitlab
module UsageCounters
class Common
class << self
def increment(project_id)
Gitlab::Redis::SharedState.with { |redis| redis.hincrby(base_key, project_id, 1) }
end
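# Returns a hash mapping each project ID to its usage count, plus a
# :total key holding the sum across all projects.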
def usage_totals
Gitlab::Redis::SharedState.with do |redis|
total_sum = 0
totals = redis.hgetall(base_key).each_with_object({}) do |(project_id, count), result|
total_sum += result[project_id.to_i] = count.to_i
end
totals[:total] = total_sum
totals
end
end
def base_key
raise NotImplementedError
end
end
end
end
end
# frozen_string_literal: true
module Gitlab
module UsageCounters
class PodLogs < Common
def self.base_key
'POD_LOGS_USAGE_COUNTS'
end
end
end
end
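# Illustrative usage sketch (an assumption, not part of the change itself),
# with Redis shared state configured as in a standard GitLab install:
#
#   ::Gitlab::UsageCounters::PodLogs.increment(project.id)
#   ::Gitlab::UsageCounters::PodLogs.usage_totals
#   # => { 42 => 3, total: 3 } for a project with ID 42 counted three times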
......@@ -6110,6 +6110,9 @@ msgstr ""
msgid "CycleAnalyticsStage|should be under a group"
msgstr ""
 
msgid "CycleAnalytics|%{selectedLabelsCount} selected (%{maxLabels} max)"
msgstr ""
msgid "CycleAnalytics|%{stageCount} stages selected"
msgstr ""
 
......@@ -6131,6 +6134,9 @@ msgstr ""
msgid "CycleAnalytics|Number of tasks"
msgstr ""
 
msgid "CycleAnalytics|Only %{maxLabels} labels can be selected at this time"
msgstr ""
msgid "CycleAnalytics|Project selected"
msgid_plural "CycleAnalytics|%d projects selected"
msgstr[0] ""
......@@ -13170,6 +13176,9 @@ msgstr ""
msgid "No licenses found."
msgstr ""
 
msgid "No matching labels"
msgstr ""
msgid "No matching results"
msgstr ""
 
......@@ -19906,6 +19915,9 @@ msgstr ""
msgid "There was an error fetching the environments information."
msgstr ""
 
msgid "There was an error fetching the top labels for the selected group"
msgstr ""
msgid "There was an error fetching the variables."
msgstr ""
 
......@@ -20864,6 +20876,9 @@ msgstr ""
msgid "Total artifacts size: %{total_size}"
msgstr ""
 
msgid "Total cores (vCPUs)"
msgstr ""
msgid "Total issues"
msgstr ""
 
......
......
......@@ -7,6 +7,8 @@ module QA
attr_accessor :title
attribute :id
def_delegators :key, :private_key, :public_key, :md5_fingerprint
def key
......@@ -21,6 +23,35 @@ module QA
profile_page.add_key(public_key, title)
end
end
def fabricate_via_api!
api_post
end
def api_delete
QA::Runtime::Logger.debug("Deleting SSH key with title '#{title}' and fingerprint '#{md5_fingerprint}'")
super
end
def api_get_path
"/user/keys/#{id}"
end
def api_post_path
'/user/keys'
end
def api_post_body
{
title: title,
key: public_key
}
end
def api_delete_path
"/user/keys/#{id}"
end
end
end
end
......@@ -8,7 +8,7 @@ module QA
it 'user adds and then removes an SSH key', :smoke do
Flow::Login.sign_in
key = Resource::SSHKey.fabricate_via_browser_ui! do |resource|
resource.title = key_title
end
......
......
......@@ -11,29 +11,24 @@ module QA
let(:key_title) { "key for ssh tests #{Time.now.to_f}" }
let(:ssh_key) do
Resource::SSHKey.fabricate_via_api! do |resource|
resource.title = key_title
end
end
around do |example|
# Create an SSH key to be used with Git, then remove it after the test
Flow::Login.sign_in
ssh_key
example.run
ssh_key.remove_via_api!
Page::Main::Menu.perform(&:sign_out_if_signed_in)
end
it 'user pushes to the repository' do
# Create a project to push to
project = Resource::Project.fabricate_via_api! do |project|
project.name = 'git-protocol-project'
end
......@@ -68,11 +63,8 @@ module QA
project.visit!
project.wait_for_push_new_branch
# Check that the push worked
expect(page).to have_content(file_name)
expect(page).to have_content(file_content)
# And check that the correct Git protocol was used
expect(git_protocol_reported).to eq(git_protocol)
end
end
......
......
......@@ -11,7 +11,7 @@ module QA
it 'user adds an ssh key and pushes code to the repository' do
Flow::Login.sign_in
key = Resource::SSHKey.fabricate_via_api! do |resource|
resource.title = key_title
end
......
......
# frozen_string_literal: true
require 'spec_helper'
describe Projects::LogsController do
include KubernetesHelpers
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project) }
let_it_be(:environment) do
create(:environment, name: 'production', project: project)
end
let(:pod_name) { "foo" }
let(:container) { 'container-1' }
before do
project.add_maintainer(user)
sign_in(user)
end
describe 'GET #index' do
let(:empty_project) { create(:project) }
it 'renders empty logs page if no environment exists' do
empty_project.add_maintainer(user)
get :index, params: { namespace_id: empty_project.namespace, project_id: empty_project }
expect(response).to be_ok
expect(response).to render_template 'empty_logs'
end
it 'renders index template' do
get :index, params: environment_params
expect(response).to be_ok
expect(response).to render_template 'index'
end
end
shared_examples 'pod logs service' do |endpoint, service|
let(:service_result) do
{
status: :success,
logs: ['Log 1', 'Log 2', 'Log 3'],
pods: [pod_name],
pod_name: pod_name,
container_name: container
}
end
let(:service_result_json) { JSON.parse(service_result.to_json) }
let_it_be(:cluster) { create(:cluster, :provided_by_gcp, environment_scope: '*', projects: [project]) }
before do
allow_next_instance_of(service) do |instance|
allow(instance).to receive(:execute).and_return(service_result)
end
end
it 'returns the service result' do
get endpoint, params: environment_params(pod_name: pod_name, format: :json)
expect(response).to have_gitlab_http_status(:success)
expect(json_response).to eq(service_result_json)
end
it 'registers a usage of the endpoint' do
expect(::Gitlab::UsageCounters::PodLogs).to receive(:increment).with(project.id)
get endpoint, params: environment_params(pod_name: pod_name, format: :json)
expect(response).to have_gitlab_http_status(:success)
end
it 'sets the polling header' do
get endpoint, params: environment_params(pod_name: pod_name, format: :json)
expect(response).to have_gitlab_http_status(:success)
expect(response.headers['Poll-Interval']).to eq('3000')
end
context 'when service is processing' do
let(:service_result) { nil }
it 'returns a 202' do
get endpoint, params: environment_params(pod_name: pod_name, format: :json)
expect(response).to have_gitlab_http_status(:accepted)
end
end
shared_examples 'unsuccessful execution response' do |message|
let(:service_result) do
{
status: :error,
message: message
}
end
it 'returns the error' do
get endpoint, params: environment_params(pod_name: pod_name, format: :json)
expect(response).to have_gitlab_http_status(:bad_request)
expect(json_response).to eq(service_result_json)
end
end
context 'when service is failing' do
it_behaves_like 'unsuccessful execution response', 'some error'
end
context 'when cluster is nil' do
let!(:cluster) { nil }
it_behaves_like 'unsuccessful execution response', 'Environment does not have deployments'
end
context 'when namespace is empty' do
before do
allow(environment).to receive(:deployment_namespace).and_return('')
end
it_behaves_like 'unsuccessful execution response', 'Environment does not have deployments'
end
end
describe 'GET #k8s' do
it_behaves_like 'pod logs service', :k8s, PodLogs::KubernetesService
end
describe 'GET #elasticsearch' do
it_behaves_like 'pod logs service', :elasticsearch, PodLogs::ElasticsearchService
end
def environment_params(opts = {})
opts.reverse_merge(namespace_id: project.namespace,
project_id: project,
environment_name: environment.name)
end
end
......@@ -70,6 +70,7 @@ describe 'Project navbar' do
_('Environments'),
_('Error Tracking'),
_('Serverless'),
_('Logs'),
_('Kubernetes')
]
},
......
......
......@@ -26,6 +26,9 @@
"stop_path": { "type": "string" },
"cancel_auto_stop_path": { "type": "string" },
"folder_path": { "type": "string" },
"logs_path": { "type": "string" },
"logs_api_path": { "type": "string" },
"enable_advanced_logs_querying": { "type": "boolean" },
"created_at": { "type": "string", "format": "date-time" },
"updated_at": { "type": "string", "format": "date-time" },
"auto_stop_at": { "type": "string", "format": "date-time" },
......
......
{
"took": 7087,
"timed_out": false,
"_shards": {
"total": 151,
"successful": 151,
"skipped": 0,
"failed": 0,
"failures": []
},
"hits": {
"total": 486924,
"max_score": null,
"hits": [
{
"_index": "filebeat-6.7.0-2019.10.25",
"_type": "doc",
"_id": "SkbxAW4BWzhswgK-C5-R",
"_score": null,
"_source": {
"message": "10.8.2.1 - - [25/Oct/2019:08:03:22 UTC] \"GET / HTTP/1.1\" 200 13",
"@timestamp": "2019-12-13T14:35:34.034Z"
},
"sort": [
9999998,
1571990602947
]
},
{
"_index": "filebeat-6.7.0-2019.10.27",
"_type": "doc",
"_id": "wEigD24BWzhswgK-WUU2",
"_score": null,
"_source": {
"message": "10.8.2.1 - - [27/Oct/2019:23:49:54 UTC] \"GET / HTTP/1.1\" 200 13",
"@timestamp": "2019-12-13T14:35:35.034Z"
},
"sort": [
9999949,
1572220194500
]
},
{
"_index": "filebeat-6.7.0-2019.11.04",
"_type": "doc",
"_id": "gE6uOG4BWzhswgK-M0x2",
"_score": null,
"_source": {
"message": "10.8.2.1 - - [04/Nov/2019:23:09:24 UTC] \"GET / HTTP/1.1\" 200 13",
"@timestamp": "2019-12-13T14:35:36.034Z"
},
"sort": [
9999944,
1572908964497
]
},
{
"_index": "filebeat-6.7.0-2019.10.30",
"_type": "doc",
"_id": "0klPHW4BWzhswgK-nfCF",
"_score": null,
"_source": {
"message": "- -\u003e /",
"@timestamp": "2019-12-13T14:35:37.034Z"
},
"sort": [
9999934,
1572449784442
]
}
]
}
}
{
"query": {
"bool": {
"must": [
{
"match_phrase": {
"kubernetes.pod.name": {
"query": "production-6866bc8974-m4sk4"
}
}
},
{
"match_phrase": {
"kubernetes.namespace": {
"query": "autodevops-deploy-9-production"
}
}
}
]
}
},
"sort": [
{
"@timestamp": {
"order": "desc"
}
},
{
"offset": {
"order": "desc"
}
}
],
"_source": [
"@timestamp",
"message"
],
"size": 500
}
{
"query": {
"bool": {
"must": [
{
"match_phrase": {
"kubernetes.pod.name": {
"query": "production-6866bc8974-m4sk4"
}
}
},
{
"match_phrase": {
"kubernetes.namespace": {
"query": "autodevops-deploy-9-production"
}
}
},
{
"match_phrase": {
"kubernetes.container.name": {
"query": "auto-deploy-app"
}
}
}
]
}
},
"sort": [
{
"@timestamp": {
"order": "desc"
}
},
{
"offset": {
"order": "desc"
}
}
],
"_source": [
"@timestamp",
"message"
],
"size": 500
}
{
"query": {
"bool": {
"must": [
{
"match_phrase": {
"kubernetes.pod.name": {
"query": "production-6866bc8974-m4sk4"
}
}
},
{
"match_phrase": {
"kubernetes.namespace": {
"query": "autodevops-deploy-9-production"
}
}
}
],
"filter": [
{
"range": {
"@timestamp": {
"lt": "2019-12-13T14:35:34.034Z"
}
}
}
]
}
},
"sort": [
{
"@timestamp": {
"order": "desc"
}
},
{
"offset": {
"order": "desc"
}
}
],
"_source": [
"@timestamp",
"message"
],
"size": 500
}
{
"query": {
"bool": {
"must": [
{
"match_phrase": {
"kubernetes.pod.name": {
"query": "production-6866bc8974-m4sk4"
}
}
},
{
"match_phrase": {
"kubernetes.namespace": {
"query": "autodevops-deploy-9-production"
}
}
},
{
"simple_query_string": {
"query": "foo +bar ",
"fields": [
"message"
],
"default_operator": "and"
}
}
]
}
},
"sort": [
{
"@timestamp": {
"order": "desc"
}
},
{
"offset": {
"order": "desc"
}
}
],
"_source": [
"@timestamp",
"message"
],
"size": 500
}
{
"query": {
"bool": {
"must": [
{
"match_phrase": {
"kubernetes.pod.name": {
"query": "production-6866bc8974-m4sk4"
}
}
},
{
"match_phrase": {
"kubernetes.namespace": {
"query": "autodevops-deploy-9-production"
}
}
}
],
"filter": [
{
"range": {
"@timestamp": {
"gte": "2019-12-13T14:35:34.034Z"
}
}
}
]
}
},
"sort": [
{
"@timestamp": {
"order": "desc"
}
},
{
"offset": {
"order": "desc"
}
}
],
"_source": [
"@timestamp",
"message"
],
"size": 500
}
{
"query": {
"bool": {
"must": [
{
"match_phrase": {
"kubernetes.pod.name": {
"query": "production-6866bc8974-m4sk4"
}
}
},
{
"match_phrase": {
"kubernetes.namespace": {
"query": "autodevops-deploy-9-production"
}
}
}
],
"filter": [
{
"range": {
"@timestamp": {
"gte": "2019-12-13T14:35:34.034Z",
"lt": "2019-12-13T14:35:34.034Z"
}
}
}
]
}
},
"sort": [
{
"@timestamp": {
"order": "desc"
}
},
{
"offset": {
"order": "desc"
}
}
],
"_source": [
"@timestamp",
"message"
],
"size": 500
}
/* eslint-disable no-new, promise/catch-or-return */
import JSZip from 'jszip';
import SketchLoader from '~/blob/sketch';
jest.mock('jszip');
describe('Sketch viewer', () => {
preloadFixtures('static/sketch_viewer.html');
beforeEach(() => {
loadFixtures('static/sketch_viewer.html');
window.URL = {
createObjectURL: jest.fn(() => 'http://foo/bar'),
};
});
afterEach(() => {
window.URL = {};
});
describe('with error message', () => {
beforeEach(done => {
jest.spyOn(SketchLoader.prototype, 'getZipFile').mockImplementation(
() =>
new Promise((resolve, reject) => {
reject();
setTimeout(() => {
done();
});
}),
);
return new SketchLoader(document.getElementById('js-sketch-viewer'));
});
it('renders error message', () => {
......@@ -43,39 +38,39 @@ describe('Sketch viewer', () => {
);
});
it('removes the loading icon', () => {
expect(document.querySelector('.js-loading-icon')).toBeNull();
});
});
describe('success', () => {
beforeEach(done => {
const loadAsyncMock = {
  files: {
    'previews/preview.png': {
      async: jest.fn(),
    },
  },
};

loadAsyncMock.files['previews/preview.png'].async.mockImplementation(
  () =>
    new Promise(resolve => {
      resolve('foo');
      done();
    }),
);

jest.spyOn(SketchLoader.prototype, 'getZipFile').mockResolvedValue();
jest.spyOn(JSZip, 'loadAsync').mockResolvedValue(loadAsyncMock);
return new SketchLoader(document.getElementById('js-sketch-viewer'));
});
it('does not render error message', () => {
expect(document.querySelector('#js-sketch-viewer p')).toBeNull();
});
it('removes the loading icon', () => {
expect(document.querySelector('.js-loading-icon')).toBeNull();
});
......@@ -94,27 +89,4 @@ describe('Sketch viewer', () => {
expect(link.target).toBe('_blank');
});
});
});